Merge 6.1.40 into android14-6.1-lts

Changes in 6.1.40
	HID: amd_sfh: Rename the float32 variable
	HID: amd_sfh: Fix for shift-out-of-bounds
	net: lan743x: Don't sleep in atomic context
	workqueue: clean up WORK_* constant types, clarify masking
	ksmbd: add missing compound request handing in some commands
	ksmbd: fix out of bounds read in smb2_sess_setup
	drm/panel: simple: Add connector_type for innolux_at043tn24
	drm/bridge: ti-sn65dsi86: Fix auxiliary bus lifetime
	swiotlb: always set the number of areas before allocating the pool
	swiotlb: reduce the swiotlb buffer size on allocation failure
	swiotlb: reduce the number of areas to match actual memory pool size
	drm/panel: simple: Add Powertip PH800480T013 drm_display_mode flags
	ice: Fix max_rate check while configuring TX rate limits
	igc: Remove delay during TX ring configuration
	net/mlx5e: fix double free in mlx5e_destroy_flow_table
	net/mlx5e: fix memory leak in mlx5e_fs_tt_redirect_any_create
	net/mlx5e: fix memory leak in mlx5e_ptp_open
	net/mlx5e: Check for NOT_READY flag state after locking
	igc: set TP bit in 'supported' and 'advertising' fields of ethtool_link_ksettings
	igc: Handle PPS start time programming for past time values
	blk-crypto: use dynamic lock class for blk_crypto_profile::lock
	scsi: qla2xxx: Fix error code in qla2x00_start_sp()
	scsi: ufs: ufs-mediatek: Add dependency for RESET_CONTROLLER
	bpf: Fix max stack depth check for async callbacks
	net: mvneta: fix txq_map in case of txq_number==1
	net/sched: cls_fw: Fix improper refcount update leads to use-after-free
	gve: Set default duplex configuration to full
	octeontx2-af: Promisc enable/disable through mbox
	octeontx2-af: Move validation of ptp pointer before its usage
	ionic: remove WARN_ON to prevent panic_on_warn
	net: bgmac: postpone turning IRQs off to avoid SoC hangs
	net: prevent skb corruption on frag list segmentation
	icmp6: Fix null-ptr-deref of ip6_null_entry->rt6i_idev in icmp6_dev().
	udp6: fix udp6_ehashfn() typo
	ntb: idt: Fix error handling in idt_pci_driver_init()
	NTB: amd: Fix error handling in amd_ntb_pci_driver_init()
	ntb: intel: Fix error handling in intel_ntb_pci_driver_init()
	NTB: ntb_transport: fix possible memory leak while device_register() fails
	NTB: ntb_tool: Add check for devm_kcalloc
	ipv6/addrconf: fix a potential refcount underflow for idev
	net: dsa: qca8k: Add check for skb_copy
	platform/x86: wmi: Break possible infinite loop when parsing GUID
	kernel/trace: Fix cleanup logic of enable_trace_eprobe
	igc: Fix launchtime before start of cycle
	igc: Fix inserting of empty frame for launchtime
	nvme: fix the NVME_ID_NS_NVM_STS_MASK definition
	riscv, bpf: Fix inconsistent JIT image generation
	drm/i915: Don't preserve dpll_hw_state for slave crtc in Bigjoiner
	drm/i915: Fix one wrong caching mode enum usage
	octeontx2-pf: Add additional check for MCAM rules
	erofs: avoid useless loops in z_erofs_pcluster_readmore() when reading beyond EOF
	erofs: avoid infinite loop in z_erofs_do_read_page() when reading beyond EOF
	erofs: fix fsdax unavailability for chunk-based regular files
	wifi: airo: avoid uninitialized warning in airo_get_rate()
	bpf: cpumap: Fix memory leak in cpu_map_update_elem
	net/sched: flower: Ensure both minimum and maximum ports are specified
	riscv: mm: fix truncation warning on RV32
	netdevsim: fix uninitialized data in nsim_dev_trap_fa_cookie_write()
	net/sched: make psched_mtu() RTNL-less safe
	wifi: rtw89: debug: fix error code in rtw89_debug_priv_send_h2c_set()
	net/sched: sch_qfq: refactor parsing of netlink parameters
	net/sched: sch_qfq: account for stab overhead in qfq_enqueue
	nvme-pci: fix DMA direction of unmapping integrity data
	fs/ntfs3: Check fields while reading
	ovl: let helper ovl_i_path_real() return the realinode
	ovl: fix null pointer dereference in ovl_get_acl_rcu()
	cifs: fix session state check in smb2_find_smb_ses
	drm/client: Send hotplug event after registering a client
	drm/amdgpu/sdma4: set align mask to 255
	drm/amd/pm: revise the ASPM settings for thunderbolt attached scenario
	drm/amdgpu: add the fan abnormal detection feature
	drm/amdgpu: Fix minmax warning
	drm/amd/pm: add abnormal fan detection for smu 13.0.0
	f2fs: fix the wrong condition to determine atomic context
	f2fs: fix deadlock in i_xattr_sem and inode page lock
	pinctrl: amd: Add Z-state wake control bits
	pinctrl: amd: Adjust debugfs output
	pinctrl: amd: Add fields for interrupt status and wake status
	pinctrl: amd: Detect internal GPIO0 debounce handling
	pinctrl: amd: Fix mistake in handling clearing pins at startup
	pinctrl: amd: Detect and mask spurious interrupts
	pinctrl: amd: Revert "pinctrl: amd: disable and mask interrupts on probe"
	pinctrl: amd: Only use special debounce behavior for GPIO 0
	pinctrl: amd: Use amd_pinconf_set() for all config options
	pinctrl: amd: Drop pull up select configuration
	pinctrl: amd: Unify debounce handling into amd_pinconf_set()
	tpm: Do not remap from ACPI resources again for Pluton TPM
	tpm: tpm_vtpm_proxy: fix a race condition in /dev/vtpmx creation
	tpm: tis_i2c: Limit read bursts to I2C_SMBUS_BLOCK_MAX (32) bytes
	tpm: tis_i2c: Limit write bursts to I2C_SMBUS_BLOCK_MAX (32) bytes
	tpm: return false from tpm_amd_is_rng_defective on non-x86 platforms
	mtd: rawnand: meson: fix unaligned DMA buffers handling
	net: bcmgenet: Ensure MDIO unregistration has clocks enabled
	net: phy: dp83td510: fix kernel stall during netboot in DP83TD510E PHY driver
	kasan: add kasan_tag_mismatch prototype
	tracing/user_events: Fix incorrect return value for writing operation when events are disabled
	powerpc: Fail build if using recordmcount with binutils v2.37
	misc: fastrpc: Create fastrpc scalar with correct buffer count
	powerpc/security: Fix Speculation_Store_Bypass reporting on Power10
	powerpc/64s: Fix native_hpte_remove() to be irq-safe
	MIPS: Loongson: Fix cpu_probe_loongson() again
	MIPS: KVM: Fix NULL pointer dereference
	ext4: Fix reusing stale buffer heads from last failed mounting
	ext4: fix wrong unit use in ext4_mb_clear_bb
	ext4: get block from bh in ext4_free_blocks for fast commit replay
	ext4: fix wrong unit use in ext4_mb_new_blocks
	ext4: fix to check return value of freeze_bdev() in ext4_shutdown()
	ext4: turn quotas off if mount failed after enabling quotas
	ext4: only update i_reserved_data_blocks on successful block allocation
	fs: dlm: revert check required context while close
	soc: qcom: mdt_loader: Fix unconditional call to scm_pas_mem_setup
	ext2/dax: Fix ext2_setsize when len is page aligned
	jfs: jfs_dmap: Validate db_l2nbperpage while mounting
	hwrng: imx-rngc - fix the timeout for init and self check
	dm integrity: reduce vmalloc space footprint on 32-bit architectures
	scsi: mpi3mr: Propagate sense data for admin queue SCSI I/O
	s390/zcrypt: do not retry administrative requests
	PCI/PM: Avoid putting EloPOS E2/S2/H2 PCIe Ports in D3cold
	PCI: Release resource invalidated by coalescing
	PCI: Add function 1 DMA alias quirk for Marvell 88SE9235
	PCI: qcom: Disable write access to read only registers for IP v2.3.3
	PCI: epf-test: Fix DMA transfer completion initialization
	PCI: epf-test: Fix DMA transfer completion detection
	PCI: rockchip: Assert PCI Configuration Enable bit after probe
	PCI: rockchip: Write PCI Device ID to correct register
	PCI: rockchip: Add poll and timeout to wait for PHY PLLs to be locked
	PCI: rockchip: Fix legacy IRQ generation for RK3399 PCIe endpoint core
	PCI: rockchip: Use u32 variable to access 32-bit registers
	PCI: rockchip: Set address alignment for endpoint mode
	misc: pci_endpoint_test: Free IRQs before removing the device
	misc: pci_endpoint_test: Re-init completion for every test
	mfd: pm8008: Fix module autoloading
	md/raid0: add discard support for the 'original' layout
	dm init: add dm-mod.waitfor to wait for asynchronously probed block devices
	fs: dlm: return positive pid value for F_GETLK
	fs: dlm: fix cleanup pending ops when interrupted
	fs: dlm: interrupt posix locks only when process is killed
	fs: dlm: make F_SETLK use unkillable wait_event
	fs: dlm: fix mismatch of plock results from userspace
	scsi: lpfc: Fix double free in lpfc_cmpl_els_logo_acc() caused by lpfc_nlp_not_used()
	drm/atomic: Allow vblank-enabled + self-refresh "disable"
	drm/rockchip: vop: Leave vblank enabled in self-refresh
	drm/amd/display: fix seamless odm transitions
	drm/amd/display: edp do not add non-edid timings
	drm/amd/display: Remove Phantom Pipe Check When Calculating K1 and K2
	drm/amd/display: disable seamless boot if force_odm_combine is enabled
	drm/amdgpu: fix clearing mappings for BOs that are always valid in VM
	drm/amd: Disable PSR-SU on Parade 0803 TCON
	drm/amd/display: add a NULL pointer check
	drm/amd/display: Correct `DMUB_FW_VERSION` macro
	drm/amd/display: Add monitor specific edid quirk
	drm/amdgpu: avoid restore process run into dead loop.
	drm/ttm: Don't leak a resource on swapout move error
	serial: atmel: don't enable IRQs prematurely
	tty: serial: samsung_tty: Fix a memory leak in s3c24xx_serial_getclk() in case of error
	tty: serial: samsung_tty: Fix a memory leak in s3c24xx_serial_getclk() when iterating clk
	tty: serial: imx: fix rs485 rx after tx
	firmware: stratix10-svc: Fix a potential resource leak in svc_create_memory_pool()
	libceph: harden msgr2.1 frame segment length checks
	ceph: add a dedicated private data for netfs rreq
	ceph: fix blindly expanding the readahead windows
	ceph: don't let check_caps skip sending responses for revoke msgs
	xhci: Fix resume issue of some ZHAOXIN hosts
	xhci: Fix TRB prefetch issue of ZHAOXIN hosts
	xhci: Show ZHAOXIN xHCI root hub speed correctly
	meson saradc: fix clock divider mask length
	opp: Fix use-after-free in lazy_opp_tables after probe deferral
	soundwire: qcom: fix storing port config out-of-bounds
	Revert "8250: add support for ASIX devices with a FIFO bug"
	bus: ixp4xx: fix IXP4XX_EXP_T1_MASK
	s390/decompressor: fix misaligned symbol build error
	dm: verity-loadpin: Add NULL pointer check for 'bdev' parameter
	tracing/histograms: Add histograms to hist_vars if they have referenced variables
	tracing: Fix memory leak of iter->temp when reading trace_pipe
	nvme: don't reject probe due to duplicate IDs for single-ported PCIe devices
	samples: ftrace: Save required argument registers in sample trampolines
	perf: RISC-V: Remove PERF_HES_STOPPED flag checking in riscv_pmu_start()
	regmap-irq: Fix out-of-bounds access when allocating config buffers
	net: ena: fix shift-out-of-bounds in exponential backoff
	ring-buffer: Fix deadloop issue on reading trace_pipe
	ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()
	drm/amd/pm: share the code around SMU13 pcie parameters update
	drm/amd/pm: conditionally disable pcie lane/speed switching for SMU13
	cifs: if deferred close is disabled then close files immediately
	xtensa: ISS: fix call to split_if_spec
	perf/x86: Fix lockdep warning in for_each_sibling_event() on SPR
	PM: QoS: Restore support for default value on frequency QoS
	pwm: meson: modify and simplify calculation in meson_pwm_get_state
	pwm: meson: fix handling of period/duty if greater than UINT_MAX
	fprobe: Release rethook after the ftrace_ops is unregistered
	fprobe: Ensure running fprobe_exit_handler() finished before calling rethook_free()
	tracing: Fix null pointer dereference in tracing_err_log_open()
	selftests: mptcp: connect: fail if nft supposed to work
	selftests: mptcp: sockopt: return error if wrong mark
	selftests: mptcp: userspace_pm: use correct server port
	selftests: mptcp: userspace_pm: report errors with 'remove' tests
	selftests: mptcp: depend on SYN_COOKIES
	selftests: mptcp: pm_nl_ctl: fix 32-bit support
	tracing/probes: Fix not to count error code to total length
	tracing/probes: Fix to update dynamic data counter if fetcharg uses it
	tracing/user_events: Fix struct arg size match check
	scsi: qla2xxx: Multi-que support for TMF
	scsi: qla2xxx: Fix task management cmd failure
	scsi: qla2xxx: Fix task management cmd fail due to unavailable resource
	scsi: qla2xxx: Fix hang in task management
	scsi: qla2xxx: Wait for io return on terminate rport
	scsi: qla2xxx: Fix mem access after free
	scsi: qla2xxx: Array index may go out of bound
	scsi: qla2xxx: Avoid fcport pointer dereference
	scsi: qla2xxx: Fix buffer overrun
	scsi: qla2xxx: Fix potential NULL pointer dereference
	scsi: qla2xxx: Check valid rport returned by fc_bsg_to_rport()
	scsi: qla2xxx: Correct the index of array
	scsi: qla2xxx: Pointer may be dereferenced
	scsi: qla2xxx: Remove unused nvme_ls_waitq wait queue
	scsi: qla2xxx: Fix end of loop test
	MIPS: kvm: Fix build error with KVM_MIPS_DEBUG_COP0_COUNTERS enabled
	Revert "drm/amd: Disable PSR-SU on Parade 0803 TCON"
	swiotlb: mark swiotlb_memblock_alloc() as __init
	net/sched: sch_qfq: reintroduce lmax bound check for MTU
	drm/atomic: Fix potential use-after-free in nonblocking commits
	net/ncsi: make one oem_gma function for all mfr id
	net/ncsi: change from ndo_set_mac_address to dev_set_mac_address
	Linux 6.1.40

Change-Id: I5cc6aab178c66d2a23fe2a8d21e71cc4a8b15acf
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit f1311733c2 by Greg Kroah-Hartman, 2023-09-02 19:51:24 +00:00.
195 changed files with 2117 additions and 1149 deletions.


@ -123,3 +123,11 @@ Other examples (per target):
0 1638400 verity 1 8:1 8:2 4096 4096 204800 1 sha256
fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd
51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584
For setups using device-mapper on top of asynchronously probed block
devices (MMC, USB, ..), it may be necessary to tell dm-init to
explicitly wait for them to become available before setting up the
device-mapper tables. This can be done with the "dm-mod.waitfor="
module parameter, which takes a list of devices to wait for::
dm-mod.waitfor=<device1>[,..,<deviceN>]
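
For example, a setup with a table on an asynchronously probed SD-card
partition might pass (an illustrative device name; the exact device
reference depends on the system)::

dm-mod.waitfor=/dev/mmcblk0p2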


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 39
SUBLEVEL = 40
EXTRAVERSION =
NAME = Curry Ramen


@ -317,7 +317,7 @@ struct kvm_vcpu_arch {
unsigned int aux_inuse;
/* COP0 State */
struct mips_coproc *cop0;
struct mips_coproc cop0;
/* Resume PC after MMIO completion */
unsigned long io_pc;
@ -698,7 +698,7 @@ static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_fpu(vcpu) &&
kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
}
static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
@ -710,7 +710,7 @@ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_msa(vcpu) &&
kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
}
struct kvm_mips_callbacks {


@ -1675,7 +1675,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
{
c->cputype = CPU_LOONGSON64;
/* All Loongson processors covered here define ExcCode 16 as GSExc. */
decode_configs(c);
c->options |= MIPS_CPU_GSEXCEX;
switch (c->processor_id & PRID_IMP_MASK) {
@ -1685,7 +1688,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_REV_LOONGSON2K_R1_1:
case PRID_REV_LOONGSON2K_R1_2:
case PRID_REV_LOONGSON2K_R1_3:
c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "Loongson-2K";
set_elf_platform(cpu, "gs264e");
set_isa(c, MIPS_CPU_ISA_M64R2);
@ -1698,14 +1700,12 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON3A_R2_0:
case PRID_REV_LOONGSON3A_R2_1:
c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
@ -1725,7 +1725,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
break;
case PRID_IMP_LOONGSON_64G:
c->cputype = CPU_LOONGSON64;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
@ -1735,8 +1734,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
panic("Unknown Loongson Processor ID!");
break;
}
decode_configs(c);
}
#else
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }


@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*/
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*/
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t expires, threshold;
u32 count, compare;
int running;
@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
*/
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
/* If count disabled just read static copy of count */
if (kvm_mips_count_disabled(vcpu))
@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
ktime_t now, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 compare;
u64 delta;
ktime_t expire;
@ -603,7 +603,7 @@ int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
*/
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t now;
/* Calculate bias */
@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
*/
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
ktime_t now;
u32 count;
@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
*/
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
s32 delta = compare - old_compare;
@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
*/
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
ktime_t now;
@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
*/
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
*/
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
*/
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;


@ -659,7 +659,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
@ -771,7 +771,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
s64 v;
s64 vs[2];
@ -1111,7 +1111,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return kvm_mips_pending_timer(vcpu) ||
kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@ -1135,7 +1135,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
cop0 = vcpu->arch.cop0;
cop0 = &vcpu->arch.cop0;
kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
kvm_read_c0_guest_status(cop0),
kvm_read_c0_guest_cause(cop0));
@ -1257,7 +1257,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
case EXCCODE_TLBS:
kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
badvaddr);
++vcpu->stat.tlbmiss_st_exits;
@ -1329,7 +1329,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
kvm_read_c0_guest_status(vcpu->arch.cop0));
kvm_read_c0_guest_status(&vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@ -1402,7 +1402,7 @@ int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;
preempt_disable();
@ -1446,7 +1446,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;
preempt_disable();


@ -54,9 +54,9 @@ void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
if (vcpu->arch.cop0->stat[i][j])
if (vcpu->arch.cop0.stat[i][j])
kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
vcpu->arch.cop0->stat[i][j]);
vcpu->arch.cop0.stat[i][j]);
}
}
#endif


@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
),
TP_fast_assign(
__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
__entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
__entry->pc = vcpu->arch.pc;
__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
__entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
__entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
__entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
),
TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",


@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
*/
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 cause, compare;
compare = kvm_read_sw_gc0_compare(cop0);
@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
*/
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 gctl0, compare, cause;
gctl0 = read_c0_guestctl0();
@ -863,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val)
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
val &= MIPS_MAARI_INDEX;
if (val == MIPS_MAARI_INDEX)
@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
@ -1911,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
switch (reg->id) {
@ -2081,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
*v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
@ -2135,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 v)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
int ret = 0;
unsigned int cur, change;
@ -2562,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
bool migrated, all;
/*
@ -2704,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
if (current->flags & PF_VCPU)
kvm_vz_vcpu_save_wired(vcpu);
@ -3076,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
/*


@ -402,3 +402,11 @@ checkbin:
echo -n '*** Please use a different binutils version.' ; \
false ; \
fi
@if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \
"x${CONFIG_LD_IS_BFD}" = "xy" -a \
"${CONFIG_LD_VERSION}" = "23700" ; then \
echo -n '*** binutils 2.37 drops unused section symbols, which recordmcount ' ; \
echo 'is unable to handle.' ; \
echo '*** Please use a different binutils version.' ; \
false ; \
fi


@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
static int ssb_prctl_get(struct task_struct *task)
{
if (stf_enabled_flush_types == STF_BARRIER_NONE)
/*
* We don't have an explicit signal from firmware that we're
* vulnerable or not, we only have certain CPU revisions that
* are known to be vulnerable.
*
* We assume that if we're on another CPU, where the barrier is
* NONE, then we are not vulnerable.
*/
/*
* The STF_BARRIER feature is on by default, so if it's off that means
* firmware has explicitly said the CPU is not vulnerable via either
* the hypercall or device tree.
*/
if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
return PR_SPEC_NOT_AFFECTED;
else
/*
* If we do have a barrier type then we are vulnerable. The
* barrier is not a global or per-process mitigation, so the
* only value we can report here is PR_SPEC_ENABLE, which
* appears as "vulnerable" in /proc.
*/
return PR_SPEC_ENABLE;
return -EINVAL;
/*
* If the system's CPU has no known barrier (see setup_stf_barrier())
* then assume that the CPU is not vulnerable.
*/
if (stf_enabled_flush_types == STF_BARRIER_NONE)
return PR_SPEC_NOT_AFFECTED;
/*
* Otherwise the CPU is vulnerable. The barrier is not a global or
* per-process mitigation, so the only value that can be reported here
* is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
*/
return PR_SPEC_ENABLE;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)


@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
static long native_hpte_remove(unsigned long hpte_group)
{
unsigned long hpte_v, flags;
struct hash_pte *hptep;
int i;
int slot_offset;
unsigned long hpte_v;
local_irq_save(flags);
DBG_LOW(" remove(group=%lx)\n", hpte_group);
@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
slot_offset &= 0x7;
}
if (i == HPTES_PER_GROUP)
return -1;
if (i == HPTES_PER_GROUP) {
i = -1;
goto out;
}
/* Invalidate the hpte. NOTE: this also unlocks it */
release_hpte_lock();
hptep->v = 0;
out:
local_irq_restore(flags);
return i;
}


@ -1187,7 +1187,7 @@ static void __init reserve_crashkernel(void)
*/
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
search_start,
min(search_end, (unsigned long) SZ_4G));
min(search_end, (unsigned long)(SZ_4G - 1)));
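/*
 * Note: SZ_4G (0x100000000) does not fit in a 32-bit unsigned long, so on
 * RV32 the old min(search_end, (unsigned long)SZ_4G) truncated the bound
 * to 0; SZ_4G - 1 (0xFFFFFFFF) expresses the same limit and fits on both
 * RV32 and RV64.
 */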
if (crash_base == 0) {
/* Try again without restricting region to 32bit addressible memory */
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,


@ -69,7 +69,7 @@ struct rv_jit_context {
struct bpf_prog *prog;
u16 *insns; /* RV insns */
int ninsns;
int body_len;
int prologue_len;
int epilogue_offset;
int *offset; /* BPF to RV */
int nexentries;
@ -216,8 +216,8 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
int from, to;
off++; /* BPF branch is from PC+1, RV is from PC */
from = (insn > 0) ? ctx->offset[insn - 1] : 0;
to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
return ninsns_rvoff(to - from);
}


@ -44,7 +44,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
unsigned int prog_size = 0, extable_size = 0;
bool tmp_blinded = false, extra_pass = false;
struct bpf_prog *tmp, *orig_prog = prog;
int pass = 0, prev_ninsns = 0, prologue_len, i;
int pass = 0, prev_ninsns = 0, i;
struct rv_jit_data *jit_data;
struct rv_jit_context *ctx;
@ -83,6 +83,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog = orig_prog;
goto out_offset;
}
if (build_body(ctx, extra_pass, NULL)) {
prog = orig_prog;
goto out_offset;
}
for (i = 0; i < prog->len; i++) {
prev_ninsns += 32;
ctx->offset[i] = prev_ninsns;
@ -91,12 +97,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
for (i = 0; i < NR_JIT_ITERATIONS; i++) {
pass++;
ctx->ninsns = 0;
bpf_jit_build_prologue(ctx);
ctx->prologue_len = ctx->ninsns;
if (build_body(ctx, extra_pass, ctx->offset)) {
prog = orig_prog;
goto out_offset;
}
ctx->body_len = ctx->ninsns;
bpf_jit_build_prologue(ctx);
ctx->epilogue_offset = ctx->ninsns;
bpf_jit_build_epilogue(ctx);
@ -162,10 +171,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(jit_data->header);
prologue_len = ctx->epilogue_offset - ctx->body_len;
for (i = 0; i < prog->len; i++)
ctx->offset[i] = ninsns_rvoff(prologue_len +
ctx->offset[i]);
ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
kfree(ctx->offset);


@ -27,6 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbac
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += -fPIE
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))


@ -3975,6 +3975,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
struct perf_event *leader = event->group_leader;
struct perf_event *sibling = NULL;
/*
* When this memload event is also the first event (no group
* exists yet), then there is no aux event before it.
*/
if (leader == event)
return -ENODATA;
if (!is_mem_loads_aux_event(leader)) {
for_each_sibling_event(sibling, leader) {
if (is_mem_loads_aux_event(sibling))


@ -237,7 +237,7 @@ static int tuntap_probe(struct iss_net_private *lp, int index, char *init)
init += sizeof(TRANSPORT_TUNTAP_NAME) - 1;
if (*init == ',') {
rem = split_if_spec(init + 1, &mac_str, &dev_name);
rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL);
if (rem != NULL) {
pr_err("%s: extra garbage on specification : '%s'\n",
dev->name, rem);


@ -843,7 +843,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (!d->config_buf)
goto err_alloc;
for (i = 0; i < chip->num_config_regs; i++) {
for (i = 0; i < chip->num_config_bases; i++) {
d->config_buf[i] = kcalloc(chip->num_config_regs,
sizeof(**d->config_buf),
GFP_KERNEL);


@ -33,7 +33,7 @@
#define IXP4XX_EXP_TIMING_STRIDE 0x04
#define IXP4XX_EXP_CS_EN BIT(31)
#define IXP456_EXP_PAR_EN BIT(30) /* Only on IXP45x and IXP46x */
#define IXP4XX_EXP_T1_MASK GENMASK(28, 27)
#define IXP4XX_EXP_T1_MASK GENMASK(29, 28)
#define IXP4XX_EXP_T1_SHIFT 28
#define IXP4XX_EXP_T2_MASK GENMASK(27, 26)
#define IXP4XX_EXP_T2_SHIFT 26
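/*
 * Note: the timing fields are 2 bits wide and packed downwards from bit 29
 * (T1 at bits 29:28, T2 at bits 27:26, matching the *_SHIFT values), so the
 * old GENMASK(28, 27) missed T1's top bit and overlapped T2's bit 27;
 * GENMASK(29, 28) is the mask consistent with IXP4XX_EXP_T1_SHIFT = 28.
 */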


@ -110,7 +110,7 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
imx_rngc_irq_mask_clear(rngc);
if (!ret)
return -ETIMEDOUT;
@ -187,9 +187,7 @@ static int imx_rngc_init(struct hwrng *rng)
cmd = readl(rngc->base + RNGC_COMMAND);
writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
ret = wait_for_completion_timeout(&rngc->rng_op_done,
RNGC_TIMEOUT);
ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_TIMEOUT));
if (!ret) {
ret = -ETIMEDOUT;
goto err;
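/*
 * Note: wait_for_completion_timeout() expects a timeout in jiffies, not in
 * milliseconds, so passing the raw RNGC_TIMEOUT constant made the effective
 * timeout HZ-dependent; msecs_to_jiffies() performs the conversion in both
 * call sites above.
 */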


@ -515,6 +515,7 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
* 6.x.y.z series: 6.0.18.6 +
* 3.x.y.z series: 3.57.y.5 +
*/
#ifdef CONFIG_X86
static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
u32 val1, val2;
@ -563,6 +564,12 @@ static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
return true;
}
#else
static inline bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
{
return false;
}
#endif /* CONFIG_X86 */
static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{


@ -563,15 +563,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
u32 rsp_size;
int ret;
INIT_LIST_HEAD(&acpi_resource_list);
ret = acpi_dev_get_resources(device, &acpi_resource_list,
crb_check_resource, iores_array);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);
/* Pluton doesn't appear to define ACPI memory regions */
/*
* Pluton sometimes does not define ACPI memory regions.
* Mapping is then done in crb_map_pluton
*/
if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
INIT_LIST_HEAD(&acpi_resource_list);
ret = acpi_dev_get_resources(device, &acpi_resource_list,
crb_check_resource, iores_array);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);
if (resource_type(iores_array) != IORESOURCE_MEM) {
dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;


@ -189,21 +189,28 @@ static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
int ret;
for (i = 0; i < TPM_RETRY; i++) {
/* write register */
msg.len = sizeof(reg);
msg.buf = &reg;
msg.flags = 0;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
u16 read = 0;
/* read data */
msg.buf = result;
msg.len = len;
msg.flags = I2C_M_RD;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
while (read < len) {
/* write register */
msg.len = sizeof(reg);
msg.buf = &reg;
msg.flags = 0;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
/* read data */
msg.buf = result + read;
msg.len = len - read;
msg.flags = I2C_M_RD;
if (msg.len > I2C_SMBUS_BLOCK_MAX)
msg.len = I2C_SMBUS_BLOCK_MAX;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
read += msg.len;
}
ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
if (ret == 0)
@ -223,19 +230,27 @@ static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
struct i2c_msg msg = { .addr = phy->i2c_client->addr };
u8 reg = tpm_tis_i2c_address_to_register(addr);
int ret;
u16 wrote = 0;
if (len > TPM_BUFSIZE - 1)
return -EIO;
/* write register and data in one go */
phy->io_buf[0] = reg;
memcpy(phy->io_buf + sizeof(reg), value, len);
msg.len = sizeof(reg) + len;
msg.buf = phy->io_buf;
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
while (wrote < len) {
/* write register and data in one go */
msg.len = sizeof(reg) + len - wrote;
if (msg.len > I2C_SMBUS_BLOCK_MAX)
msg.len = I2C_SMBUS_BLOCK_MAX;
memcpy(phy->io_buf + sizeof(reg), value + wrote,
msg.len - sizeof(reg));
ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
if (ret < 0)
return ret;
wrote += msg.len - sizeof(reg);
}
return 0;
}


@ -683,37 +683,21 @@ static struct miscdevice vtpmx_miscdev = {
.fops = &vtpmx_fops,
};
static int vtpmx_init(void)
{
return misc_register(&vtpmx_miscdev);
}
static void vtpmx_cleanup(void)
{
misc_deregister(&vtpmx_miscdev);
}
static int __init vtpm_module_init(void)
{
int rc;
rc = vtpmx_init();
if (rc) {
pr_err("couldn't create vtpmx device\n");
return rc;
}
workqueue = create_workqueue("tpm-vtpm");
if (!workqueue) {
pr_err("couldn't create workqueue\n");
rc = -ENOMEM;
goto err_vtpmx_cleanup;
return -ENOMEM;
}
return 0;
err_vtpmx_cleanup:
vtpmx_cleanup();
rc = misc_register(&vtpmx_miscdev);
if (rc) {
pr_err("couldn't create vtpmx device\n");
destroy_workqueue(workqueue);
}
return rc;
}
@ -721,7 +705,7 @@ static int __init vtpm_module_init(void)
static void __exit vtpm_module_exit(void)
{
destroy_workqueue(workqueue);
vtpmx_cleanup();
misc_deregister(&vtpmx_miscdev);
}
module_init(vtpm_module_init);


@ -755,7 +755,7 @@ svc_create_memory_pool(struct platform_device *pdev,
end = rounddown(sh_memory->addr + sh_memory->size, PAGE_SIZE);
paddr = begin;
size = end - begin;
va = memremap(paddr, size, MEMREMAP_WC);
va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
if (!va) {
dev_err(dev, "fail to remap shared memory\n");
return ERR_PTR(-EINVAL);


@ -2737,6 +2737,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (!attachment->is_mapped)
continue;
if (attachment->bo_va->base.bo->tbo.pin_count)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {


@ -1668,18 +1668,30 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
struct amdgpu_bo *bo = before->bo_va->base.bo;
amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!before->bo_va->base.moved)
amdgpu_vm_bo_moved(&before->bo_va->base);
} else {
kfree(before);
}
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
struct amdgpu_bo *bo = after->bo_va->base.bo;
amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!after->bo_va->base.moved)
amdgpu_vm_bo_moved(&after->bo_va->base);
} else {
kfree(after);
}


@ -346,7 +346,7 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x00000009 // 1=1us, 9=1ms
#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 4ms
#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 400ms
static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
bool enable)
@ -479,9 +479,12 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);


@ -2330,7 +2330,7 @@ const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
@ -2400,7 +2400,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,


@ -6972,7 +6972,13 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 640, 480);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
amdgpu_dm_connector_add_common_modes(encoder, connector);
/* Most eDP panels support only timings from their EDID,
* and usually only detailed timings are available there;
* timings that are not from the EDID may damage the panel.
*/
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);


@ -42,6 +42,30 @@
#include "dm_helpers.h"
#include "ddc_service_types.h"
static u32 edid_extract_panel_id(struct edid *edid)
{
return (u32)edid->mfg_id[0] << 24 |
(u32)edid->mfg_id[1] << 16 |
(u32)EDID_PRODUCT_ID(edid);
}
static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
{
uint32_t panel_id = edid_extract_panel_id(edid);
switch (panel_id) {
/* Workaround for some monitors which do not work well with FAMS */
case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
default:
return;
}
}
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
@ -113,6 +137,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
apply_edid_quirks(edid_buf, edid_caps);
kfree(sads);
kfree(sadb);


@ -1539,6 +1539,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
if (dc->debug.force_odm_combine)
return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;


@ -970,10 +970,12 @@ enum dc_status resource_map_phy_clock_resources(
|| dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
else
pipe_ctx->clock_source = find_matching_pll(
&context->res_ctx, dc->res_pool,
stream);
else {
if (stream && stream->link && stream->link->link_enc)
pipe_ctx->clock_source = find_matching_pll(
&context->res_ctx, dc->res_pool,
stream);
}
if (pipe_ctx->clock_source == NULL)
return DC_NO_CLOCK_SOURCE_RESOURCE;


@ -1678,6 +1678,17 @@ static void dcn20_program_pipe(
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
unsigned int k1_div, k2_div;
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
pipe_ctx->stream_res.tg->inst,
k1_div, k2_div);
}
}
if (pipe_ctx->update_flags.bits.odm)


@ -1165,10 +1165,6 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
unsigned int odm_combine_factor = 0;
bool two_pix_per_container = false;
// For phantom pipes, use the same programming as the main pipes
if (pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
stream = pipe_ctx->stream->mall_stream_config.paired_stream;
}
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);


@ -98,7 +98,7 @@ static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, i
optc1->opp_count = opp_cnt;
}
static void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);


@ -250,5 +250,6 @@
SF(OTG0_OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, mask_sh)
void dcn32_timing_generator_init(struct optc *optc1);
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode);
#endif /* __DC_OPTC_DCN32_H__ */


@ -471,7 +471,7 @@ struct dmub_notification {
* of a firmware to know if feature or functionality is supported or present.
*/
#define DMUB_FW_VERSION(major, minor, revision) \
((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | ((revision) & 0xFFFF))
((((major) & 0xFF) << 24) | (((minor) & 0xFF) << 16) | (((revision) & 0xFF) << 8))
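/*
 * Worked example with illustrative values: DMUB_FW_VERSION(2, 1, 5) packed
 * the revision into bits 15:0 with the old macro, yielding 0x02010005; the
 * corrected macro places it in bits 15:8, yielding 0x02010500, matching the
 * byte-per-field layout used by the major and minor components.
 */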
/**
* dmub_srv_create() - creates the DMUB service.


@ -168,6 +168,7 @@ struct smu_temperature_range {
int mem_crit_max;
int mem_emergency_max;
int software_shutdown_temp;
int software_shutdown_temp_offset;
};
struct smu_state_validation_block {


@ -297,5 +297,9 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
uint32_t *size,
uint32_t pptable_id);
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap);
#endif
#endif


@ -1381,6 +1381,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
*/
uint32_t ctxid = entry->src_data[0];
uint32_t data;
uint32_t high;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
@ -1437,6 +1438,36 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
schedule_work(&smu->throttling_logging_work);
break;
case 0x8:
high = smu->thermal_range.software_shutdown_temp +
smu->thermal_range.software_shutdown_temp_offset;
high = min_t(typeof(high),
SMU_THERMAL_MAXIMUM_ALERT_TEMP,
high);
dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
high,
smu->thermal_range.software_shutdown_temp_offset);
data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
DIG_THERM_INTH,
(high & 0xff));
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
break;
case 0x9:
high = min_t(typeof(high),
SMU_THERMAL_MAXIMUM_ALERT_TEMP,
smu->thermal_range.software_shutdown_temp);
dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
DIG_THERM_INTH,
(high & 0xff));
data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
break;
}
}
}
@ -2458,3 +2489,70 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
return ret;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor == X86_VENDOR_INTEL)
return false;
#endif
return true;
}
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_13_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
int num_of_levels = pcie_table->num_of_link_levels;
uint32_t smu_pcie_arg;
int ret, i;
if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];
/* Force all levels to use the same settings */
for (i = 0; i < num_of_levels; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
}
} else {
for (i = 0; i < num_of_levels; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
}
}
for (i = 0; i < num_of_levels; i++) {
smu_pcie_arg = i << 16;
smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
smu_pcie_arg |= pcie_table->pcie_lane[i];
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
return ret;
}
return 0;
}
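/*
 * Note on the encoding above: smu_pcie_arg packs the link level index into
 * the upper half-word (i << 16), the PCIe generation into bits 15:8 and the
 * lane width into bits 7:0 before sending SMU_MSG_OverridePcieParameters.
 */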


@ -1216,37 +1216,6 @@ static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
return ret;
}
static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_13_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
uint32_t smu_pcie_arg;
int ret, i;
for (i = 0; i < pcie_table->num_of_link_levels; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
smu_pcie_arg = i << 16;
smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
smu_pcie_arg |= pcie_table->pcie_lane[i];
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
return ret;
}
return 0;
}
static const struct smu_temperature_range smu13_thermal_policy[] = {
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
@ -1281,6 +1250,7 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
return 0;
}
@ -2032,7 +2002,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_0_print_clk_levels,
.force_clk_levels = smu_v13_0_0_force_clk_levels,
.update_pcie_parameters = smu_v13_0_0_update_pcie_parameters,
.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,


@ -1225,37 +1225,6 @@ static int smu_v13_0_7_force_clk_levels(struct smu_context *smu,
return ret;
}
static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_13_0_pcie_table *pcie_table =
&dpm_context->dpm_tables.pcie_table;
uint32_t smu_pcie_arg;
int ret, i;
for (i = 0; i < pcie_table->num_of_link_levels; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
smu_pcie_arg = i << 16;
smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
smu_pcie_arg |= pcie_table->pcie_lane[i];
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
return ret;
}
return 0;
}
static const struct smu_temperature_range smu13_thermal_policy[] =
{
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@ -1288,6 +1257,7 @@ static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu,
range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
return 0;
}
@ -1749,7 +1719,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.feature_is_enabled = smu_cmn_feature_is_enabled,
.print_clk_levels = smu_v13_0_7_print_clk_levels,
.force_clk_levels = smu_v13_0_7_force_clk_levels,
.update_pcie_parameters = smu_v13_0_7_update_pcie_parameters,
.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
.register_irq_handler = smu_v13_0_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,


@ -170,10 +170,10 @@
* @pwm_refclk_freq: Cache for the reference clock input to the PWM.
*/
struct ti_sn65dsi86 {
struct auxiliary_device bridge_aux;
struct auxiliary_device gpio_aux;
struct auxiliary_device aux_aux;
struct auxiliary_device pwm_aux;
struct auxiliary_device *bridge_aux;
struct auxiliary_device *gpio_aux;
struct auxiliary_device *aux_aux;
struct auxiliary_device *pwm_aux;
struct device *dev;
struct regmap *regmap;
@ -468,27 +468,34 @@ static void ti_sn65dsi86_delete_aux(void *data)
auxiliary_device_delete(data);
}
/*
* AUX bus docs say that a non-NULL release is mandatory, but it makes no
* sense for the model used here where all of the aux devices are allocated
* in the single shared structure. We'll use this noop as a workaround.
*/
static void ti_sn65dsi86_noop(struct device *dev) {}
static void ti_sn65dsi86_aux_device_release(struct device *dev)
{
struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
kfree(aux);
}
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct auxiliary_device *aux,
struct auxiliary_device **aux_out,
const char *name)
{
struct device *dev = pdata->dev;
struct auxiliary_device *aux;
int ret;
aux = kzalloc(sizeof(*aux), GFP_KERNEL);
if (!aux)
return -ENOMEM;
aux->name = name;
aux->dev.parent = dev;
aux->dev.release = ti_sn65dsi86_noop;
aux->dev.release = ti_sn65dsi86_aux_device_release;
device_set_of_node_from_dev(&aux->dev, dev);
ret = auxiliary_device_init(aux);
if (ret)
if (ret) {
kfree(aux);
return ret;
}
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
if (ret)
return ret;
@ -497,6 +504,8 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
if (!ret)
*aux_out = aux;
return ret;
}


@ -140,6 +140,12 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
if (!state->planes)
goto fail;
/*
* Because drm_atomic_state can be committed asynchronously we need our
* own reference and cannot rely on the one implied by drm_file in the
* ioctl call.
*/
drm_dev_get(dev);
state->dev = dev;
drm_dbg_atomic(dev, "Allocated atomic state %p\n", state);
@@ -299,7 +305,8 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
void __drm_atomic_state_free(struct kref *ref)
{
struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
drm_atomic_state_clear(state);
@@ -311,6 +318,8 @@ void __drm_atomic_state_free(struct kref *ref)
drm_atomic_state_default_release(state);
kfree(state);
}
drm_dev_put(dev);
}
EXPORT_SYMBOL(__drm_atomic_state_free);

@@ -1225,7 +1225,16 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
continue;
ret = drm_crtc_vblank_get(crtc);
WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
/*
* Self-refresh is not a true "disable"; ensure vblank remains
* enabled.
*/
if (new_crtc_state->self_refresh_active)
WARN_ONCE(ret != 0,
"driver disabled vblank in self-refresh\n");
else
WARN_ONCE(ret != -EINVAL,
"driver forgot to call drm_crtc_vblank_off()\n");
if (ret == 0)
drm_crtc_vblank_put(crtc);
}

@@ -122,13 +122,34 @@ EXPORT_SYMBOL(drm_client_init);
* drm_client_register() it is no longer permissible to call drm_client_release()
* directly (outside the unregister callback), instead cleanup will happen
* automatically on driver unload.
*
* Registering a client generates a hotplug event that allows the client
* to set up its display from pre-existing outputs. The client must have
initialized its state to be able to handle the hotplug event successfully.
*/
void drm_client_register(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
mutex_lock(&dev->clientlist_mutex);
list_add(&client->list, &dev->clientlist);
if (client->funcs && client->funcs->hotplug) {
/*
* Perform an initial hotplug event to pick up the
* display configuration for the client. This step
* has to be performed *after* registering the client
* in the list of clients, or a concurrent hotplug
* event might be lost; leaving the display off.
*
* Hold the clientlist_mutex as for a regular hotplug
* event.
*/
ret = client->funcs->hotplug(client);
if (ret)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_register);

@@ -2634,10 +2634,6 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
preferred_bpp = 32;
fb_helper->preferred_bpp = preferred_bpp;
ret = drm_fbdev_client_hotplug(&fb_helper->client);
if (ret)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);

@@ -5133,7 +5133,6 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
saved_state->uapi = slave_crtc_state->uapi;
saved_state->scaler_state = slave_crtc_state->scaler_state;
saved_state->shared_dpll = slave_crtc_state->shared_dpll;
saved_state->dpll_hw_state = slave_crtc_state->dpll_hw_state;
saved_state->crc_enabled = slave_crtc_state->crc_enabled;
intel_crtc_free_hw_state(slave_crtc_state);

@@ -611,7 +611,7 @@ __vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
if (IS_ERR(obj))
return ERR_CAST(obj);
i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {

@@ -2117,6 +2117,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.connector_type = DRM_MODE_CONNECTOR_DPI,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
@@ -3109,6 +3110,7 @@ static const struct drm_display_mode powertip_ph800480t013_idf02_mode = {
.vsync_start = 480 + 49,
.vsync_end = 480 + 49 + 2,
.vtotal = 480 + 49 + 2 + 22,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static const struct panel_desc powertip_ph800480t013_idf02 = {

@@ -717,13 +717,13 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
if (crtc->state->self_refresh_active)
rockchip_drm_set_win_enabled(crtc, false);
if (crtc->state->self_refresh_active)
goto out;
mutex_lock(&vop->vop_lock);
drm_crtc_vblank_off(crtc);
if (crtc->state->self_refresh_active)
goto out;
/*
* Vop standby will take effect at end of current frame,
* if dsp hold valid irq happen, it means standby complete.
@@ -757,9 +757,9 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
out:
mutex_unlock(&vop->vop_lock);
out:
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);

@@ -1165,6 +1165,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
if (unlikely(ret != 0)) {
WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
}

@@ -132,29 +132,45 @@ static void get_common_inputs(struct common_input_property *common, int report_i
common->event_type = HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM;
}
static int float_to_int(u32 float32)
static int float_to_int(u32 flt32_val)
{
int fraction, shift, mantissa, sign, exp, zeropre;
mantissa = float32 & GENMASK(22, 0);
sign = (float32 & BIT(31)) ? -1 : 1;
exp = (float32 & ~BIT(31)) >> 23;
mantissa = flt32_val & GENMASK(22, 0);
sign = (flt32_val & BIT(31)) ? -1 : 1;
exp = (flt32_val & ~BIT(31)) >> 23;
if (!exp && !mantissa)
return 0;
/*
* Calculate the exponent and fraction part of floating
* point representation.
*/
exp -= 127;
if (exp < 0) {
exp = -exp;
if (exp >= BITS_PER_TYPE(u32))
return 0;
zeropre = (((BIT(23) + mantissa) * 100) >> 23) >> exp;
return zeropre >= 50 ? sign : 0;
}
shift = 23 - exp;
float32 = BIT(exp) + (mantissa >> shift);
fraction = mantissa & GENMASK(shift - 1, 0);
if (abs(shift) >= BITS_PER_TYPE(u32))
return 0;
return (((fraction * 100) >> shift) >= 50) ? sign * (float32 + 1) : sign * float32;
if (shift < 0) {
shift = -shift;
flt32_val = BIT(exp) + (mantissa << shift);
shift = 0;
} else {
flt32_val = BIT(exp) + (mantissa >> shift);
}
fraction = (shift == 0) ? 0 : mantissa & GENMASK(shift - 1, 0);
return (((fraction * 100) >> shift) >= 50) ? sign * (flt32_val + 1) : sign * flt32_val;
}
static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
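
To see what the reworked float_to_int() computes on concrete IEEE-754 bit patterns, here is a standalone userspace re-implementation (not the kernel code itself: GENMASK()/BIT() are expanded by hand and the negative-shift branch is reduced to a range check, which these inputs never take):

#include <stdint.h>
#include <stdio.h>

static int flt32_to_int(uint32_t v)
{
        uint32_t mantissa = v & 0x7fffff;               /* GENMASK(22, 0) */
        int sign = (v & 0x80000000u) ? -1 : 1;
        int exp = (int)((v & ~0x80000000u) >> 23) - 127;
        int shift, fraction;

        if (exp == -127 && !mantissa)
                return 0;                               /* +/-0.0f */
        if (exp < 0) {                                  /* |value| < 1.0 */
                exp = -exp;
                if (exp >= 32)
                        return 0;
                return ((((0x800000u + mantissa) * 100u) >> 23) >> exp) >= 50
                        ? sign : 0;
        }
        shift = 23 - exp;
        if (shift < 0 || shift >= 32)
                return 0;                               /* out of scope for this sketch */
        fraction = shift ? (int)(mantissa & ((1u << shift) - 1)) : 0;
        v = (1u << exp) + (mantissa >> shift);
        return ((fraction * 100) >> shift) >= 50 ? sign * (int)(v + 1)
                                                 : sign * (int)v;
}

int main(void)
{
        printf("%d\n", flt32_to_int(0x3fc00000));       /*  1.5f ->  2 */
        printf("%d\n", flt32_to_int(0xc0200000));       /* -2.5f -> -3 */
        printf("%d\n", flt32_to_int(0x3e800000));       /*  0.25f -> 0 */
        return 0;
}
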

@@ -71,7 +71,7 @@
#define MESON_SAR_ADC_REG3_PANEL_DETECT_COUNT_MASK GENMASK(20, 18)
#define MESON_SAR_ADC_REG3_PANEL_DETECT_FILTER_TB_MASK GENMASK(17, 16)
#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_SHIFT 10
#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 5
#define MESON_SAR_ADC_REG3_ADC_CLK_DIV_WIDTH 6
#define MESON_SAR_ADC_REG3_BLOCK_DLY_SEL_MASK GENMASK(9, 8)
#define MESON_SAR_ADC_REG3_BLOCK_DLY_MASK GENMASK(7, 0)

@@ -8,6 +8,7 @@
*/
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/device-mapper.h>
#include <linux/init.h>
@@ -18,12 +19,17 @@
#define DM_MAX_DEVICES 256
#define DM_MAX_TARGETS 256
#define DM_MAX_STR_SIZE 4096
#define DM_MAX_WAITFOR 256
static char *create;
static char *waitfor[DM_MAX_WAITFOR];
/*
* Format: dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
* Table format: <start_sector> <num_sectors> <target_type> <target_args>
* Block devices to wait for to become available before setting up tables:
* dm-mod.waitfor=<device1>[,..,<deviceN>]
*
* See Documentation/admin-guide/device-mapper/dm-init.rst for dm-mod.create="..." format
* details.
@@ -266,7 +272,7 @@ static int __init dm_init_init(void)
struct dm_device *dev;
LIST_HEAD(devices);
char *str;
int r;
int i, r;
if (!create)
return 0;
@@ -286,6 +292,17 @@ static int __init dm_init_init(void)
DMINFO("waiting for all devices to be available before creating mapped devices");
wait_for_device_probe();
for (i = 0; i < ARRAY_SIZE(waitfor); i++) {
if (waitfor[i]) {
DMINFO("waiting for device %s ...", waitfor[i]);
while (!dm_get_dev_t(waitfor[i]))
msleep(5);
}
}
if (waitfor[0])
DMINFO("all devices available");
list_for_each_entry(dev, &devices, list) {
if (dm_early_create(&dev->dmi, dev->table,
dev->target_args_array))
@@ -301,3 +318,6 @@ late_initcall(dm_init_init);
module_param(create, charp, 0);
MODULE_PARM_DESC(create, "Create a mapped device in early boot");
module_param_array(waitfor, charp, NULL, 0);
MODULE_PARM_DESC(waitfor, "Devices to wait for before setting up tables");
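
A hypothetical boot command line using the new parameter (device paths and table contents are illustrative, following the create format documented above); dm-init then polls dm_get_dev_t() for each listed device before building any table:

    dm-mod.waitfor=/dev/sda2,/dev/sdb1 dm-mod.create="lroot,,,rw, 0 4096 linear /dev/sda2 0" root=/dev/dm-0
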

@@ -33,11 +33,11 @@
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
#define DEFAULT_MAX_JOURNAL_SECTORS 131072
#define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 32768
#define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)

@@ -58,6 +58,9 @@ bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
int srcu_idx;
bool trusted = false;
if (bdev == NULL)
return false;
if (list_empty(&dm_verity_loadpin_trusted_root_digests))
return false;

@@ -270,6 +270,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
goto abort;
}
if (conf->layout == RAID0_ORIG_LAYOUT) {
for (i = 1; i < conf->nr_strip_zones; i++) {
sector_t first_sector = conf->strip_zone[i-1].zone_end;
sector_div(first_sector, mddev->chunk_sectors);
zone = conf->strip_zone + i;
/* disk_shift is first disk index used in the zone */
zone->disk_shift = sector_div(first_sector,
zone->nb_dev);
}
}
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -431,6 +443,20 @@ static int raid0_run(struct mddev *mddev)
return ret;
}
/*
* Convert disk_index to the disk order in which it is read/written.
* For example, if we have 4 disks, they are numbered 0,1,2,3. If we
* write the disks starting at disk 3, then the read/write order would
* be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift()
* to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
* to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
* that 'output' space to understand the read/write disk ordering.
*/
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
return ((disk_index + num_disks - disk_shift) % num_disks);
}
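
A standalone check of map_disk_shift() on the 4-disk example from the comment (disk_shift = 3), not part of the patch:

#include <stdio.h>

static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
        return (disk_index + num_disks - disk_shift) % num_disks;
}

int main(void)
{
        int i;

        /* prints 0 -> 1, 1 -> 2, 2 -> 3, 3 -> 0: disk 3 comes first in I/O order */
        for (i = 0; i < 4; i++)
                printf("%d -> %d\n", i, map_disk_shift(i, 4, 3));
        return 0;
}
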
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
struct r0conf *conf = mddev->private;
@@ -444,7 +470,9 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
sector_t end_disk_offset;
unsigned int end_disk_index;
unsigned int disk;
sector_t orig_start, orig_end;
orig_start = start;
zone = find_zone(conf, &start);
if (bio_end_sector(bio) > zone->zone_end) {
@@ -458,6 +486,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
} else
end = bio_end_sector(bio);
orig_end = end;
if (zone != conf->strip_zone)
end = end - zone[-1].zone_end;
@@ -469,13 +498,26 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
last_stripe_index = end;
sector_div(last_stripe_index, stripe_size);
start_disk_index = (int)(start - first_stripe_index * stripe_size) /
mddev->chunk_sectors;
/* In the first zone the original and alternate layouts are the same */
if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
sector_div(orig_start, mddev->chunk_sectors);
start_disk_index = sector_div(orig_start, zone->nb_dev);
start_disk_index = map_disk_shift(start_disk_index,
zone->nb_dev,
zone->disk_shift);
sector_div(orig_end, mddev->chunk_sectors);
end_disk_index = sector_div(orig_end, zone->nb_dev);
end_disk_index = map_disk_shift(end_disk_index,
zone->nb_dev, zone->disk_shift);
} else {
start_disk_index = (int)(start - first_stripe_index * stripe_size) /
mddev->chunk_sectors;
end_disk_index = (int)(end - last_stripe_index * stripe_size) /
mddev->chunk_sectors;
}
start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
mddev->chunk_sectors) +
first_stripe_index * mddev->chunk_sectors;
end_disk_index = (int)(end - last_stripe_index * stripe_size) /
mddev->chunk_sectors;
end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
mddev->chunk_sectors) +
last_stripe_index * mddev->chunk_sectors;
@@ -483,18 +525,22 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
for (disk = 0; disk < zone->nb_dev; disk++) {
sector_t dev_start, dev_end;
struct md_rdev *rdev;
int compare_disk;
if (disk < start_disk_index)
compare_disk = map_disk_shift(disk, zone->nb_dev,
zone->disk_shift);
if (compare_disk < start_disk_index)
dev_start = (first_stripe_index + 1) *
mddev->chunk_sectors;
else if (disk > start_disk_index)
else if (compare_disk > start_disk_index)
dev_start = first_stripe_index * mddev->chunk_sectors;
else
dev_start = start_disk_offset;
if (disk < end_disk_index)
if (compare_disk < end_disk_index)
dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
else if (disk > end_disk_index)
else if (compare_disk > end_disk_index)
dev_end = last_stripe_index * mddev->chunk_sectors;
else
dev_end = end_disk_offset;

@@ -6,6 +6,7 @@ struct strip_zone {
sector_t zone_end; /* Start of the next zone (in sectors) */
sector_t dev_start; /* Zone offset in real dev (in sectors) */
int nb_dev; /* # of devices attached to the zone */
int disk_shift; /* start disk for the original layout */
};
/* Linux 3.14 (20d0189b101) made an unintended change to

@@ -233,6 +233,7 @@ static const struct of_device_id pm8008_match[] = {
{ .compatible = "qcom,pm8008", },
{ },
};
MODULE_DEVICE_TABLE(of, pm8008_match);
static struct i2c_driver pm8008_mfd_driver = {
.driver = {

@@ -1258,7 +1258,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);

@@ -728,6 +728,10 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
reinit_completion(&test->irq_raised);
test->last_irq = -ENODATA;
switch (cmd) {
case PCITEST_BAR:
bar = arg;
@@ -937,6 +941,9 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
if (id < 0)
return;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
misc_deregister(&test->miscdev);
kfree(misc_device->name);
kfree(test->name);
@@ -946,9 +953,6 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
pci_iounmap(pdev, test->bar[bar]);
}
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
pci_release_regions(pdev);
pci_disable_device(pdev);
}

@@ -76,6 +76,7 @@
#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff))
#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N)
#define DMA_ADDR_ALIGN 8
#define ECC_CHECK_RETURN_FF (-1)
@@ -842,6 +843,9 @@ static int meson_nfc_read_oob(struct nand_chip *nand, int page)
static bool meson_nfc_is_buffer_dma_safe(const void *buffer)
{
if ((uintptr_t)buffer % DMA_ADDR_ALIGN)
return false;
if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer)))
return true;
return false;

@@ -469,6 +469,9 @@ qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
bool ack;
int ret;
if (!skb)
return -ENOMEM;
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the copy pkt */

@@ -35,6 +35,8 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
#define ENA_MAX_BACKOFF_DELAY_EXP 16U
#define ENA_MIN_ADMIN_POLL_US 100
#define ENA_MAX_ADMIN_POLL_US 5000
@@ -536,6 +538,7 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
usleep_range(delay_us, 2 * delay_us);
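
The clamps above keep the shift defined and the poll interval bounded. A standalone sketch of the same arithmetic (constants copied from the hunk, everything else illustrative):

#include <stdint.h>
#include <stdio.h>

#define ENA_MAX_BACKOFF_DELAY_EXP 16U
#define ENA_MIN_ADMIN_POLL_US 100
#define ENA_MAX_ADMIN_POLL_US 5000

static uint32_t backoff_us(uint32_t exp, uint32_t delay_us)
{
        if (exp > ENA_MAX_BACKOFF_DELAY_EXP)
                exp = ENA_MAX_BACKOFF_DELAY_EXP;        /* keep 1U << exp defined */
        if (delay_us < ENA_MIN_ADMIN_POLL_US)
                delay_us = ENA_MIN_ADMIN_POLL_US;
        delay_us *= 1U << exp;
        return delay_us > ENA_MAX_ADMIN_POLL_US ? ENA_MAX_ADMIN_POLL_US : delay_us;
}

int main(void)
{
        /* 100 us base: exp 0 -> 100, exp 4 -> 1600, exp 50 saturates at 5000 */
        printf("%u %u %u\n", backoff_us(0, 100), backoff_us(4, 100),
               backoff_us(50, 100));
        return 0;
}
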

@@ -1492,8 +1492,6 @@ int bgmac_enet_probe(struct bgmac *bgmac)
bgmac->in_init = true;
bgmac_chip_intrs_off(bgmac);
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
dev_set_drvdata(bgmac->dev, bgmac);
@@ -1511,6 +1509,8 @@
*/
bgmac_clk_enable(bgmac, 0);
bgmac_chip_intrs_off(bgmac);
/* This seems to be fixing IRQ by assigning OOB #6 to the core */
if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)

@@ -664,5 +664,7 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
clk_disable_unprepare(priv->clk);
}

@@ -541,6 +541,9 @@ static int gve_get_link_ksettings(struct net_device *netdev,
err = gve_adminq_report_link_speed(priv);
cmd->base.speed = priv->link_speed;
cmd->base.duplex = DUPLEX_FULL;
return err;
}

@@ -7852,10 +7852,10 @@ static int
ice_validate_mqprio_qopt(struct ice_vsi *vsi,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 sum_max_rate = 0, sum_min_rate = 0;
int non_power_of_2_qcount = 0;
struct ice_pf *pf = vsi->back;
int max_rss_q_cnt = 0;
u64 sum_min_rate = 0;
struct device *dev;
int i, speed;
u8 num_tc;
@@ -7871,6 +7871,7 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
dev = ice_pf_to_dev(pf);
vsi->ch_rss_size = 0;
num_tc = mqprio_qopt->qopt.num_tc;
speed = ice_get_link_speed_kbps(vsi);
for (i = 0; num_tc; i++) {
int qcount = mqprio_qopt->qopt.count[i];
@@ -7911,7 +7912,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
*/
max_rate = mqprio_qopt->max_rate[i];
max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
sum_max_rate += max_rate;
/* min_rate is minimum guaranteed rate and it can't be zero */
min_rate = mqprio_qopt->min_rate[i];
@@ -7924,6 +7924,12 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
return -EINVAL;
}
if (max_rate && max_rate > speed) {
dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
i, max_rate, speed);
return -EINVAL;
}
iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
if (rem) {
dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
@@ -7961,12 +7967,6 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
(mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
return -EINVAL;
speed = ice_get_link_speed_kbps(vsi);
if (sum_max_rate && sum_max_rate > (u64)speed) {
dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
sum_max_rate, speed);
return -EINVAL;
}
if (sum_min_rate && sum_min_rate > (u64)speed) {
dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
sum_min_rate, speed);

@@ -1707,6 +1707,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
/* twisted pair */
cmd->base.port = PORT_TP;
cmd->base.phy_address = hw->phy.addr;
ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
/* advertising link modes */
if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)

@@ -709,7 +709,6 @@ static void igc_configure_tx_ring(struct igc_adapter *adapter,
/* disable the queue */
wr32(IGC_TXDCTL(reg_idx), 0);
wrfl();
mdelay(10);
wr32(IGC_TDLEN(reg_idx),
ring->count * sizeof(union igc_adv_tx_desc));
@@ -1015,7 +1014,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
ktime_t base_time = adapter->base_time;
ktime_t now = ktime_get_clocktai();
ktime_t baset_est, end_of_cycle;
u32 launchtime;
s32 launchtime;
s64 n;
n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
@@ -1028,7 +1027,7 @@ static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
*first_flag = true;
ring->last_ff_cycle = baset_est;
if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
*insert_empty = true;
}
}

@@ -356,16 +356,35 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
tsim &= ~IGC_TSICR_TT0;
}
if (on) {
struct timespec64 safe_start;
int i = rq->perout.index;
igc_pin_perout(igc, i, pin, use_freq);
igc->perout[i].start.tv_sec = rq->perout.start.sec;
igc_ptp_read(igc, &safe_start);
/* PPS output start time is triggered by Target time(TT)
* register. Programming any past time value into TT
* register will cause PPS to never start. Need to make
* sure we program the TT register a time ahead in
* future. There isn't a stringent need to fire PPS out
* right away. Adding +2 seconds should take care of
* corner cases. Let's say if the SYSTIML is close to
* wrap up and the timer keeps ticking as we program the
* register, adding +2 seconds is a safe bet.
*/
safe_start.tv_sec += 2;
if (rq->perout.start.sec < safe_start.tv_sec)
igc->perout[i].start.tv_sec = safe_start.tv_sec;
else
igc->perout[i].start.tv_sec = rq->perout.start.sec;
igc->perout[i].start.tv_nsec = rq->perout.start.nsec;
igc->perout[i].period.tv_sec = ts.tv_sec;
igc->perout[i].period.tv_nsec = ts.tv_nsec;
wr32(trgttimh, rq->perout.start.sec);
wr32(trgttimh, (u32)igc->perout[i].start.tv_sec);
/* For now, always select timer 0 as source. */
wr32(trgttiml, rq->perout.start.nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
wr32(trgttiml, (u32)(igc->perout[i].start.tv_nsec |
IGC_TT_IO_TIMER_SEL_SYSTIM0));
if (use_freq)
wr32(freqout, ns);
tsauxc |= tsauxc_mask;
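
The clamp the comment describes, as a standalone sketch (not the driver code; plain time_t in place of timespec64):

#include <stdio.h>
#include <time.h>

static time_t pps_start_sec(time_t requested_sec, time_t now_sec)
{
        time_t safe = now_sec + 2;      /* +2 s margin for SYSTIM ticking/wrap */

        return requested_sec < safe ? safe : requested_sec;
}

int main(void)
{
        /* PHC at t=100: a start of 50 (in the past) becomes 102, 200 stays 200 */
        printf("%ld %ld\n", (long)pps_start_sec(50, 100),
               (long)pps_start_sec(200, 100));
        return 0;
}
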

@@ -1505,7 +1505,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
*/
if (txq_number == 1)
txq_map = (cpu == pp->rxq_def) ?
MVNETA_CPU_TXQ_ACCESS(1) : 0;
MVNETA_CPU_TXQ_ACCESS(0) : 0;
} else {
txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
@@ -4294,7 +4294,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
*/
if (txq_number == 1)
txq_map = (cpu == elected_cpu) ?
MVNETA_CPU_TXQ_ACCESS(1) : 0;
MVNETA_CPU_TXQ_ACCESS(0) : 0;
else
txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

@@ -208,7 +208,7 @@ struct ptp *ptp_get(void)
/* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
else
else if (!IS_ERR(ptp))
pci_dev_get(ptp->pdev);
return ptp;
@@ -388,11 +388,10 @@ static int ptp_extts_on(struct ptp *ptp, int on)
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
err = -ENOMEM;
goto error;
@@ -428,20 +427,19 @@ static int ptp_probe(struct pci_dev *pdev,
return 0;
error_free:
devm_kfree(dev, ptp);
kfree(ptp);
error:
/* For `ptp_get()` we need to differentiate between the case
* when the core has not tried to probe this device and the case when
* the probe failed. In the latter case we pretend that the
* initialization was successful and keep the error in
* the probe failed. In the latter case we keep the error in
* `dev->driver_data`.
*/
pci_set_drvdata(pdev, ERR_PTR(err));
if (!first_ptp_block)
first_ptp_block = ERR_PTR(err);
return 0;
return err;
}
static void ptp_remove(struct pci_dev *pdev)
@@ -449,16 +447,17 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
if (IS_ERR_OR_NULL(ptp))
return;
if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
/* Disable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
kfree(ptp);
}
static const struct pci_device_id ptp_id_table[] = {

@@ -3244,7 +3244,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu->ptp = ptp_get();
if (IS_ERR(rvu->ptp)) {
err = PTR_ERR(rvu->ptp);
if (err == -EPROBE_DEFER)
if (err)
goto err_release_regions;
rvu->ptp = NULL;
}

@@ -3804,21 +3804,14 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
}
/* install/uninstall promisc entry */
if (promisc) {
if (promisc)
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
if (rvu_npc_exact_has_match_table(rvu))
rvu_npc_exact_promisc_enable(rvu, pcifunc);
} else {
else
if (!nix_rx_multicast)
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
if (rvu_npc_exact_has_match_table(rvu))
rvu_npc_exact_promisc_disable(rvu, pcifunc);
}
return 0;
}

@@ -1168,8 +1168,10 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
{
struct npc_exact_table *table;
u16 *cnt, old_cnt;
bool promisc;
table = rvu->hw->table;
promisc = table->promisc_mode[drop_mcam_idx];
cnt = &table->cnt_cmd_rules[drop_mcam_idx];
old_cnt = *cnt;
@@ -1181,13 +1183,18 @@ static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_i
*enable_or_disable_cam = false;
/* If all rules are deleted, disable cam */
if (promisc)
goto done;
/* If all rules are deleted and not already in promisc mode;
* disable cam
*/
if (!*cnt && val < 0) {
*enable_or_disable_cam = true;
goto done;
}
/* If rule got added, enable cam */
/* If rule got added and not already in promisc mode; enable cam */
if (!old_cnt && val > 0) {
*enable_or_disable_cam = true;
goto done;
@@ -1466,6 +1473,12 @@ int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
*promisc = false;
mutex_unlock(&table->lock);
/* Enable drop rule */
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
true);
dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d)\n",
__func__, cgx_id, lmac_id);
return 0;
}
@@ -1507,6 +1520,12 @@ int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
*promisc = true;
mutex_unlock(&table->lock);
/* disable drop rule */
rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX,
false);
dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d)\n",
__func__, cgx_id, lmac_id);
return 0;
}

@@ -867,6 +867,14 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return -EINVAL;
vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
/* Drop rule with vlan_etype == 802.1Q
* and vlan_id == 0 is not supported
*/
if (vlan_etype == ETH_P_8021Q && !fsp->m_ext.vlan_tci &&
fsp->ring_cookie == RX_CLS_FLOW_DISC)
return -EINVAL;
/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
if (vlan_etype != ETH_P_8021Q &&
vlan_etype != ETH_P_8021AD)

@@ -579,6 +579,21 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return -EOPNOTSUPP;
}
if (!match.mask->vlan_id) {
struct flow_action_entry *act;
int i;
flow_action_for_each(i, act, &rule->action) {
if (act->id == FLOW_ACTION_DROP) {
netdev_err(nic->netdev,
"vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
ntohs(match.key->vlan_tpid),
match.key->vlan_id);
return -EOPNOTSUPP;
}
}
}
if (match.mask->vlan_id ||
match.mask->vlan_dei ||
match.mask->vlan_priority) {

@@ -594,7 +594,7 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
err = fs_any_create_table(fs);
if (err)
return err;
goto err_free_any;
err = fs_any_enable(fs);
if (err)
@@ -606,8 +606,8 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
err_destroy_table:
fs_any_destroy_table(fs_any);
kfree(fs_any);
err_free_any:
mlx5e_fs_set_any(fs, NULL);
kfree(fs_any);
return err;
}
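
The shape of the fix above is the usual goto unwind: clear the published pointer before freeing, and enter the ladder at the right label so each setup step is undone exactly once. A compilable toy version (illustrative names, failure points stubbed out):

#include <stdlib.h>

struct ctx { void *any; };

static int setup(struct ctx *c)
{
        void *any = malloc(16);

        if (!any)
                return -1;
        c->any = any;                   /* publish */
        if (0 /* fs_any_create_table() fails */)
                goto err_free_any;
        if (0 /* fs_any_enable() fails */)
                goto err_destroy_table;
        return 0;                       /* success: c->any stays owned by c */

err_destroy_table:
        /* undo of fs_any_create_table() would go here */
err_free_any:
        c->any = NULL;                  /* un-publish before freeing */
        free(any);
        return -1;
}

int main(void)
{
        struct ctx c = { 0 };

        if (setup(&c))
                return 1;
        free(c.any);
        return 0;
}
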

@@ -729,8 +729,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
if (!c || !cparams)
return -ENOMEM;
if (!c || !cparams) {
err = -ENOMEM;
goto err_free;
}
c->priv = priv;
c->mdev = priv->mdev;

@@ -190,6 +190,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
ft->g = NULL;
kvfree(in);
return -ENOMEM;
}

@@ -1551,7 +1551,8 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
uplink_priv = &rpriv->uplink_priv;
mutex_lock(&uplink_priv->unready_flows_lock);
unready_flow_del(flow);
if (flow_flag_test(flow, NOT_READY))
unready_flow_del(flow);
mutex_unlock(&uplink_priv->unready_flows_lock);
}
@@ -1896,8 +1897,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
if (flow_flag_test(flow, NOT_READY))
remove_unready_flow(flow);
remove_unready_flow(flow);
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))

@@ -144,6 +144,18 @@ static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
!(data & HW_CFG_LRST_), 100000, 10000000);
}
static int lan743x_csr_wait_for_bit_atomic(struct lan743x_adapter *adapter,
int offset, u32 bit_mask,
int target_value, int udelay_min,
int udelay_max, int count)
{
u32 data;
return readx_poll_timeout_atomic(LAN743X_CSR_READ_OP, offset, data,
target_value == !!(data & bit_mask),
udelay_max, udelay_min * count);
}
static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
int offset, u32 bit_mask,
int target_value, int usleep_min,
@@ -746,8 +758,8 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
u32 dp_sel;
int i;
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
return -EIO;
dp_sel = lan743x_csr_read(adapter, DP_SEL);
dp_sel &= ~DP_SEL_MASK_;
@@ -758,8 +770,9 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
lan743x_csr_write(adapter, DP_ADDR, addr + i);
lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1, 40, 100, 100))
if (lan743x_csr_wait_for_bit_atomic(adapter, DP_SEL,
DP_SEL_DPRDY_,
1, 40, 100, 100))
return -EIO;
}
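
What the new helper changes, shown standalone: in atomic context the wait must busy-spin with udelay()-style delays instead of calling a sleeping primitive such as usleep_range(). Userspace stand-ins below so the sketch compiles; the kernel version is the readx_poll_timeout_atomic() call above:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* stand-in for udelay(): burns time without ever scheduling */
static void udelay(unsigned int us)
{
        volatile unsigned int n = us * 100;

        while (n--)
                ;
}

static u32 fake_csr;
static u32 read_csr(int offset)
{
        (void)offset;
        return ++fake_csr;              /* toy register that counts up */
}

static int poll_bit_atomic(int offset, u32 mask, int target,
                           unsigned int delay_us, int count)
{
        while (count--) {
                u32 data = read_csr(offset);

                if (target == !!(data & mask))
                        return 0;       /* bit reached the wanted state */
                udelay(delay_us);       /* spinning is legal under a spinlock;
                                           sleeping here would not be */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("%d\n", poll_bit_atomic(0, 0x8, 1, 40, 100));    /* prints 0 */
        return 0;
}
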

@@ -462,11 +462,6 @@ static void ionic_qcqs_free(struct ionic_lif *lif)
static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
struct ionic_qcq *n_qcq)
{
if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
n_qcq->flags &= ~IONIC_QCQ_F_INTR;
}
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
n_qcq->napi_qcq = src_qcq->napi_qcq;

@@ -184,13 +184,10 @@ static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
cookie_len = (count - 1) / 2;
if ((count - 1) % 2)
return -EINVAL;
buf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
if (!buf)
return -ENOMEM;
ret = simple_write_to_buffer(buf, count, ppos, data, count);
if (ret < 0)
goto free_buf;
buf = memdup_user(data, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
GFP_KERNEL | __GFP_NOWARN);

@@ -12,6 +12,11 @@
/* MDIO_MMD_VEND2 registers */
#define DP83TD510E_PHY_STS 0x10
/* Bit 7 - mii_interrupt, active high. Clears on read.
* Note: Clearing does not necessarily deactivate IRQ pin if interrupts pending.
* This differs from the DP83TD510E datasheet (2020) which states this bit
* clears on write 0.
*/
#define DP83TD510E_STS_MII_INT BIT(7)
#define DP83TD510E_LINK_STATUS BIT(0)
@@ -53,12 +58,6 @@ static int dp83td510_config_intr(struct phy_device *phydev)
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
/* Clear any pending interrupts */
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
0x0);
if (ret)
return ret;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
DP83TD510E_INTERRUPT_REG_1,
DP83TD510E_INT1_LINK_EN);
@@ -81,10 +80,6 @@
DP83TD510E_GENCFG_INT_EN);
if (ret)
return ret;
/* Clear any pending interrupts */
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS,
0x0);
}
return ret;
@@ -94,14 +89,6 @@ static irqreturn_t dp83td510_handle_interrupt(struct phy_device *phydev)
{
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_PHY_STS);
if (ret < 0) {
phy_error(phydev);
return IRQ_NONE;
} else if (!(ret & DP83TD510E_STS_MII_INT)) {
return IRQ_NONE;
}
/* Read the current enabled interrupts */
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_INTERRUPT_REG_1);
if (ret < 0) {

@@ -6146,8 +6146,11 @@ static int airo_get_rate(struct net_device *dev,
{
struct airo_info *local = dev->ml_priv;
StatusRid status_rid; /* Card status info */
int ret;
readStatusRid(local, &status_rid, 1);
ret = readStatusRid(local, &status_rid, 1);
if (ret)
return -EBUSY;
vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
/* If more than one rate, set auto */

@@ -2130,17 +2130,18 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
u8 *h2c;
int ret;
u16 h2c_len = count / 2;
h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
if (IS_ERR(h2c))
return -EFAULT;
rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
ret = rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
kfree(h2c);
return count;
return ret ? ret : count;
}
static int

@@ -1338,12 +1338,17 @@ static struct pci_driver amd_ntb_pci_driver = {
static int __init amd_ntb_pci_driver_init(void)
{
int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
return pci_register_driver(&amd_ntb_pci_driver);
ret = pci_register_driver(&amd_ntb_pci_driver);
if (ret)
debugfs_remove_recursive(debugfs_dir);
return ret;
}
module_init(amd_ntb_pci_driver_init);

@@ -2891,6 +2891,7 @@ static struct pci_driver idt_pci_driver = {
static int __init idt_pci_driver_init(void)
{
int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
/* Create the top DebugFS directory if the FS is initialized */
@@ -2898,7 +2899,11 @@ static int __init idt_pci_driver_init(void)
dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
/* Register the NTB hardware driver to handle the PCI device */
return pci_register_driver(&idt_pci_driver);
ret = pci_register_driver(&idt_pci_driver);
if (ret)
debugfs_remove_recursive(dbgfs_topdir);
return ret;
}
module_init(idt_pci_driver_init);

@@ -2064,12 +2064,17 @@ static struct pci_driver intel_ntb_pci_driver = {
static int __init intel_ntb_pci_driver_init(void)
{
int ret;
pr_info("%s %s\n", NTB_DESC, NTB_VER);
if (debugfs_initialized())
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
return pci_register_driver(&intel_ntb_pci_driver);
ret = pci_register_driver(&intel_ntb_pci_driver);
if (ret)
debugfs_remove_recursive(debugfs_dir);
return ret;
}
module_init(intel_ntb_pci_driver_init);

@@ -410,7 +410,7 @@ int ntb_transport_register_client_dev(char *device_name)
rc = device_register(dev);
if (rc) {
kfree(client_dev);
put_device(dev);
goto err;
}
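
The rule behind the one-line fix above: once device_register() has been attempted, the object is refcounted, so the error path must drop the reference with put_device() and let the release callback free the memory; a direct kfree() can race with other reference holders. A userspace analogue (illustrative names, not kernel code):

#include <stdlib.h>

struct toy_dev { int refs; };

static void toy_dev_init(struct toy_dev *d)
{
        d->refs = 1;                    /* initialization reference */
}

static void toy_dev_put(struct toy_dev *d)
{
        if (--d->refs == 0)
                free(d);                /* the only place the memory dies */
}

static int toy_dev_register(struct toy_dev *d)
{
        /* may expose d to other reference holders before failing */
        return -1;
}

int main(void)
{
        struct toy_dev *d = calloc(1, sizeof(*d));

        if (!d)
                return 1;
        toy_dev_init(d);
        if (toy_dev_register(d) != 0)
                toy_dev_put(d);         /* correct: free(d) here could double-free */
        return 0;
}
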

@@ -998,6 +998,8 @@ static int tool_init_mws(struct tool_ctx *tc)
tc->peers[pidx].outmws =
devm_kcalloc(&tc->ntb->dev, tc->peers[pidx].outmw_cnt,
sizeof(*tc->peers[pidx].outmws), GFP_KERNEL);
if (tc->peers[pidx].outmws == NULL)
return -ENOMEM;
for (widx = 0; widx < tc->peers[pidx].outmw_cnt; widx++) {
tc->peers[pidx].outmws[widx].pidx = pidx;

@@ -4175,10 +4175,40 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
if (ret) {
dev_err(ctrl->device,
"globally duplicate IDs for nsid %d\n", info->nsid);
/*
* We've found two different namespaces on two different
* subsystems that report the same ID. This is pretty nasty
* for anything that actually requires unique device
* identification. In the kernel we need this for multipathing,
* and in user space the /dev/disk/by-id/ links rely on it.
*
* If the device also claims to be multi-path capable back off
* here now and refuse to probe the second device as this is a
* recipe for data corruption. If not this is probably a
* cheap consumer device if on the PCIe bus, so let the user
* proceed and use the shiny toy, but warn that with changing
* probing order (which due to our async probing could just be
* device taking longer to startup) the other device could show
* up at any time.
*/
nvme_print_device_info(ctrl);
return ret;
if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
info->is_shared)) {
dev_err(ctrl->device,
"ignoring nsid %d because of duplicate IDs\n",
info->nsid);
return ret;
}
dev_err(ctrl->device,
"clearing duplicate IDs for nsid %d\n", info->nsid);
dev_err(ctrl->device,
"use of /dev/disk/by-id/ may cause data corruption\n");
memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
}
mutex_lock(&ctrl->subsys->lock);

@@ -1022,7 +1022,7 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
}
if (blk_rq_nr_phys_segments(req))

@@ -1348,7 +1348,10 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
remove_opp_dev:
_of_clear_opp_table(opp_table);
_remove_opp_dev(opp_dev, opp_table);
mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock);
err:
kfree(opp_table);
return ERR_PTR(ret);

@@ -1176,6 +1176,8 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
PCI_EXP_DEVCTL2);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
