This is the 6.1.66 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmVyywAACgkQONu9yGCS
 aT420Q//RK1ZeDdGWqAEH84PtuOzFA7gl5aXjmt1r9I1sDFr06ktk9rc67BNo87b
 Ugubto1UUpM/ZJfpezH1M4DMQ5f67thkRhCv5qvolh80v21duD7G7i1kX3rJsWk1
 daJ76RcYXH63/Qv59uT+ADjSIIAH7yF/FGnUSShyznDRwDh/TqujEoh0e25X4YlV
 MhcCGBS0NE9Rcuwv2XPp84D4psXhPhmOuUVEPVnPLVnXg09XqOVjMV5uW+X4Sqft
 sc/bzveBmHoPOVtkz71qo1oxsVkKNMcdmD88+Xn9rSBgAkti5MpV/ZCAxRSVZbwF
 wyBh23gzRQzHXTn45Bf/1wS5zzQ+PIkadCo7hlPbQHguOMGXkdqTgNJf9EwB09I2
 DEAWnCNH5orNk0Sltbfo/7Ja2oJtSHkiaUWk4nP1fZN9Vt9yt1xnRkpkaoBh0L7q
 NmXBFuvrylC44cfQNXIZSqAXduwCvMPyQDm1txSxYDZVrOy82/zVRWcOrytb0PnO
 zfqSuQKZPoF29ESq2Ti65Zk5e47EjSjYca91gzOlSVBNXx+xTuSoXCL0RXYclT7H
 umxK5/wmDSQX6wJzd+JNy7H86U753DuSIzA1112IC1GdWNlWWsjca5omEMgt+lqu
 Xc9q13vg3Ox+tv0MRv+P398b7NwzuMVcLbMoHE+1EzMH0JS636E=
 =p/en
 -----END PGP SIGNATURE-----

Merge 6.1.66 into android14-6.1-lts

Changes in 6.1.66
	cifs: Fix FALLOC_FL_ZERO_RANGE by setting i_size if EOF moved
	cifs: Fix FALLOC_FL_INSERT_RANGE by setting i_size after EOF moved
	smb: client: report correct st_size for SMB and NFS symlinks
	pinctrl: avoid reload of p state in list iteration
	firewire: core: fix possible memory leak in create_units()
	mmc: sdhci-pci-gli: Disable LPM during initialization
	mmc: cqhci: Increase recovery halt timeout
	mmc: cqhci: Warn of halt or task clear failure
	mmc: cqhci: Fix task clearing in CQE error recovery
	mmc: block: Retry commands in CQE error recovery
	mmc: block: Do not lose cache flush during CQE error recovery
	mmc: block: Be sure to wait while busy in CQE error recovery
	ALSA: hda: Disable power-save on KONTRON SinglePC
	ALSA: hda/realtek: Headset Mic VREF to 100%
	ALSA: hda/realtek: Add supported ALC257 for ChromeOS
	dm-verity: align struct dm_verity_fec_io properly
	scsi: Change SCSI device boolean fields to single bit flags
	scsi: sd: Fix system start for ATA devices
	drm/amd: Enable PCIe PME from D3
	drm/amdgpu: Force order between a read and write to the same address
	drm/amd/display: Include udelay when waiting for INBOX0 ACK
	drm/amd/display: Remove min_dst_y_next_start check for Z8
	drm/amd/display: Use DRAM speed from validation for dummy p-state
	drm/amd/display: Update min Z8 residency time to 2100 for DCN314
	drm/amd/display: fix ABM disablement
	dm verity: initialize fec io before freeing it
	dm verity: don't perform FEC for failed readahead IO
	nvme: check for valid nvme_identify_ns() before using it
	powercap: DTPM: Fix unneeded conversions to micro-Watts
	cpufreq/amd-pstate: Fix the return value of amd_pstate_fast_switch()
	dma-buf: fix check in dma_resv_add_fence
	bcache: revert replacing IS_ERR_OR_NULL with IS_ERR
	iommu/vt-d: Add MTL to quirk list to skip TE disabling
	KVM: PPC: Book3S HV: Fix KVM_RUN clobbering FP/VEC user registers
	powerpc: Don't clobber f0/vs0 during fp|altivec register save
	parisc: Mark ex_table entries 32-bit aligned in assembly.h
	parisc: Mark ex_table entries 32-bit aligned in uaccess.h
	parisc: Use natural CPU alignment for bug_table
	parisc: Mark lock_aligned variables 16-byte aligned on SMP
	parisc: Drop the HP-UX ENOSYM and EREMOTERELEASE error codes
	parisc: Mark jump_table naturally aligned
	parisc: Ensure 32-bit alignment on parisc unwind section
	parisc: Mark altinstructions read-only and 32-bit aligned
	btrfs: add dmesg output for first mount and last unmount of a filesystem
	btrfs: ref-verify: fix memory leaks in btrfs_ref_tree_mod()
	btrfs: fix off-by-one when checking chunk map includes logical address
	btrfs: send: ensure send_fd is writable
	btrfs: make error messages more clear when getting a chunk map
	btrfs: fix 64bit compat send ioctl arguments not initializing version member
	Input: xpad - add HyperX Clutch Gladiate Support
	auxdisplay: hd44780: move cursor home after clear display command
	serial: sc16is7xx: Put IOControl register into regmap_volatile
	serial: sc16is7xx: add missing support for rs485 devicetree properties
	wifi: cfg80211: fix CQM for non-range use
	USB: xhci-plat: fix legacy PHY double init
	USB: core: Change configuration warnings to notices
	usb: config: fix iteration issue in 'usb_get_bos_descriptor()'
	ipv4: igmp: fix refcnt uaf issue when receiving igmp query packet
	dpaa2-eth: increase the needed headroom to account for alignment
	uapi: propagate __struct_group() attributes to the container union
	selftests/net: ipsec: fix constant out of range
	selftests/net: fix a char signedness issue
	selftests/net: unix: fix unused variable compiler warning
	selftests/net: mptcp: fix uninitialized variable warnings
	octeontx2-af: Fix possible buffer overflow
	net: stmmac: xgmac: Disable FPE MMC interrupts
	octeontx2-pf: Fix adding mbox work queue entry when num_vfs > 64
	octeontx2-af: Install TC filter rules in hardware based on priority
	octeontx2-pf: Restore TC ingress police rules when interface is up
	r8169: prevent potential deadlock in rtl8169_close
	ravb: Fix races between ravb_tx_timeout_work() and net related ops
	net: ravb: Check return value of reset_control_deassert()
	net: ravb: Use pm_runtime_resume_and_get()
	net: ravb: Make write access to CXR35 first before accessing other EMAC registers
	net: ravb: Start TX queues after HW initialization succeeded
	net: ravb: Stop DMA in case of failures on ravb_open()
	net: ravb: Keep reverse order of operations in ravb_remove()
	KVM: x86: Fix lapic timer interrupt lost after loading a snapshot.
	PCI: Lengthen reset delay for VideoPropulsion Torrent QN16e card
	octeontx2-af: Initialize 'cntr_val' to fix uninitialized symbol error
	PCI: qcom-ep: Add dedicated callback for writing to DBI2 registers
	fbdev: stifb: Make the STI next font pointer a 32-bit signed offset
	spi: Fix null dereference on suspend
	drm/amd/display: Restore rptr/wptr for DMCUB as workaround
	drm/amd/display: Guard against invalid RPTR/WPTR being set
	cpufreq: imx6q: don't warn for disabling a non-existing frequency
	cpufreq: imx6q: Don't disable 792 Mhz OPP unnecessarily
	iommu/vt-d: Omit devTLB invalidation requests when TES=0
	iommu/vt-d: Allocate pasid table in device probe path
	iommu/vt-d: Add device_block_translation() helper
	iommu/vt-d: Disable PCI ATS in legacy passthrough mode
	iommu/vt-d: Make context clearing consistent with context mapping
	drm/amd/pm: fix a memleak in aldebaran_tables_init
	mmc: core: add helpers mmc_regulator_enable/disable_vqmmc
	mmc: sdhci-sprd: Fix vqmmc not shutting down after the card was pulled
	drm/amd/display: Expand kernel doc for DC
	drm/amd/display: clean code-style issues in dcn30_set_mpc_shaper_3dlut
	drm/amd/display: Fix the delta clamping for shaper LUT
	drm/amd/display: Fix MPCC 1DLUT programming
	r8169: disable ASPM in case of tx timeout
	r8169: fix deadlock on RTL8125 in jumbo mtu mode
	xen: Allow platform PCI interrupt to be shared
	xen: simplify evtchn_do_upcall() call maze
	x86/xen: fix percpu vcpu_info allocation
	x86/apic/msi: Fix misconfigured non-maskable MSI quirk
	iomap: update ki_pos a little later in iomap_dio_complete
	Linux 6.1.66

Note, this merge point drops the following two scsi changes, as they
need to be reverted because of ABI breakage and their reliance on
previous commits that we have already reverted:
	cebccbe801 ("scsi: sd: Fix system start for ATA devices")
	181fd67dc5 ("scsi: Change SCSI device boolean fields to single bit flags")

Also, the following commit was manually reverted as part of the merge
point because it conflicts with other changes in the tree AND is
automatically reverted in later LTS releases because it is broken:
	307a6525c8 ("wifi: cfg80211: fix CQM for non-range use")

Change-Id: I37b08dcf2259de8b2a29a5afc5cbc4bbd08e739a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-12-14 12:47:21 +00:00
commit 6b1e1d37f1
126 changed files with 1223 additions and 554 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 65
SUBLEVEL = 66
EXTRAVERSION =
NAME = Curry Ramen


@ -207,7 +207,7 @@ static void xen_power_off(void)
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
xen_hvm_evtchn_do_upcall();
xen_evtchn_do_upcall();
return IRQ_HANDLED;
}


@ -34,7 +34,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* Alternative SMP implementation. */
#define ALTERNATIVE(cond, replacement) "!0:" \
".section .altinstructions, \"aw\" !" \
".section .altinstructions, \"a\" !" \
".align 4 !" \
".word (0b-4-.) !" \
".hword 1, " __stringify(cond) " !" \
".word " __stringify(replacement) " !" \
@ -44,7 +45,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace one single instructions by a new instruction */
#define ALTERNATIVE(from, to, cond, replacement)\
.section .altinstructions, "aw" ! \
.section .altinstructions, "a" ! \
.align 4 ! \
.word (from - .) ! \
.hword (to - from)/4, cond ! \
.word replacement ! \
@ -52,7 +54,8 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
/* to replace multiple instructions by new code */
#define ALTERNATIVE_CODE(from, num_instructions, cond, new_instr_ptr)\
.section .altinstructions, "aw" ! \
.section .altinstructions, "a" ! \
.align 4 ! \
.word (from - .) ! \
.hword -num_instructions, cond ! \
.word (new_instr_ptr - .) ! \


@ -574,6 +574,7 @@
*/
#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr) \
.section __ex_table,"aw" ! \
.align 4 ! \
.word (fault_addr - .), (except_addr - .) ! \
.previous


@ -28,13 +28,15 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"aw\"\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"\t.align %4\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.short %1, %2\n" \
"\t.blockz %3-2*%4-2*2\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
"i" (0), "i" (sizeof(struct bug_entry)), \
"i" (sizeof(long)) ); \
unreachable(); \
} while(0)
@ -51,27 +53,31 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"aw\"\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"\t.align %4\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.short %1, %2\n" \
"\t.blockz %3-2*%4-2*2\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING|(flags)), \
"i" (sizeof(struct bug_entry)) ); \
"i" (sizeof(struct bug_entry)), \
"i" (sizeof(long)) ); \
} while(0)
#else
#define __WARN_FLAGS(flags) \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"aw\"\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"\t.align %2\n" \
"2:\t" ASM_WORD_INSN "1b\n" \
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
"\t.short %0\n" \
"\t.blockz %1-%2-2\n" \
"\t.popsection" \
: : "i" (BUGFLAG_WARNING|(flags)), \
"i" (sizeof(struct bug_entry)) ); \
"i" (sizeof(struct bug_entry)), \
"i" (sizeof(long)) ); \
} while(0)
#endif


@ -15,10 +15,12 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
asm_volatile_goto("1:\n\t"
"nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
: : "i" (&((char *)key)[branch]), "i" (sizeof(long))
: : l_yes);
return false;
l_yes:
@ -30,10 +32,12 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
asm_volatile_goto("1:\n\t"
"b,n %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align %1\n\t"
".word 1b - ., %l[l_yes] - .\n\t"
__stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
".popsection\n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
: : "i" (&((char *)key)[branch]), "i" (sizeof(long))
: : l_yes);
return false;
l_yes:


@ -56,7 +56,7 @@
})
#ifdef CONFIG_SMP
# define __lock_aligned __section(".data..lock_aligned")
# define __lock_aligned __section(".data..lock_aligned") __aligned(16)
#endif
#endif /* __PARISC_LDCW_H */


@ -41,6 +41,7 @@ struct exception_table_entry {
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
".section __ex_table,\"aw\"\n" \
".align 4\n" \
".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
".previous\n"


@ -75,7 +75,6 @@
/* We now return you to your regularly scheduled HPUX. */
#define ENOSYM 215 /* symbol does not exist in executable */
#define ENOTSOCK 216 /* Socket operation on non-socket */
#define EDESTADDRREQ 217 /* Destination address required */
#define EMSGSIZE 218 /* Message too long */
@ -101,7 +100,6 @@
#define ETIMEDOUT 238 /* Connection timed out */
#define ECONNREFUSED 239 /* Connection refused */
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
#define EREMOTERELEASE 240 /* Remote peer released connection */
#define EHOSTDOWN 241 /* Host is down */
#define EHOSTUNREACH 242 /* No route to host */


@ -131,6 +131,7 @@ SECTIONS
RO_DATA(8)
/* unwind info */
. = ALIGN(4);
.PARISC.unwind : {
__start___unwind = .;
*(.PARISC.unwind)


@ -23,6 +23,15 @@
#include <asm/feature-fixups.h>
#ifdef CONFIG_VSX
#define __REST_1FPVSR(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
REST_FPR(n,base); \
b 3f; \
2: REST_VSR(n,c,base); \
3:
#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
SAVE_32FPVSRS(0, R4, R3)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
REST_1FPVSR(0, R4, R3)
blr
EXPORT_SYMBOL(store_fp_state)
@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
2: SAVE_32FPVSRS(0, R4, R6)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
REST_1FPVSR(0, R4, R6)
blr


@ -1163,11 +1163,11 @@ void kvmppc_save_user_regs(void)
usermsr = current->thread.regs->msr;
/* Caller has enabled FP/VEC/VSX/TM in MSR */
if (usermsr & MSR_FP)
save_fpu(current);
__giveup_fpu(current);
if (usermsr & MSR_VEC)
save_altivec(current);
__giveup_altivec(current);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (usermsr & MSR_TM) {


@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
mfvscr v0
li r4, VRSTATE_VSCR
stvx v0, r4, r3
lvx v0, 0, r3
blr
EXPORT_SYMBOL(store_vr_state)
@ -108,6 +109,7 @@ _GLOBAL(save_altivec)
mfvscr v0
li r4,VRSTATE_VSCR
stvx v0,r4,r7
lvx v0,0,r7
blr
#ifdef CONFIG_VSX


@ -294,7 +294,7 @@ static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
inc_irq_stat(irq_hv_callback_count);
xen_hvm_evtchn_do_upcall();
xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}


@ -106,6 +106,7 @@ KVM_X86_OP_OPTIONAL(vcpu_blocking)
KVM_X86_OP_OPTIONAL(vcpu_unblocking)
KVM_X86_OP_OPTIONAL(pi_update_irte)
KVM_X86_OP_OPTIONAL(pi_start_assignment)
KVM_X86_OP_OPTIONAL(apicv_pre_state_restore)
KVM_X86_OP_OPTIONAL(apicv_post_state_restore)
KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
KVM_X86_OP_OPTIONAL(set_hv_timer)


@ -1603,6 +1603,7 @@ struct kvm_x86_ops {
int (*pi_update_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*pi_start_assignment)(struct kvm *kvm);
void (*apicv_pre_state_restore)(struct kvm_vcpu *vcpu);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);


@ -55,14 +55,14 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* caused by the non-atomic update of the address/data pair.
*
* Direct update is possible when:
* - The MSI is maskable (remapped MSI does not use this code path)).
* The quirk bit is not set in this case.
* - The MSI is maskable (remapped MSI does not use this code path).
* The reservation mode bit is set in this case.
* - The new vector is the same as the old vector
* - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
* - The interrupt is not yet started up
* - The new destination CPU is the same as the old destination CPU
*/
if (!irqd_msi_nomask_quirk(irqd) ||
if (!irqd_can_reserve(irqd) ||
cfg->vector == old_cfg.vector ||
old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
!irqd_is_started(irqd) ||
@ -202,8 +202,6 @@ struct irq_domain * __init native_create_pci_msi_domain(void)
if (!d) {
irq_domain_free_fwnode(fn);
pr_warn("Failed to initialize PCI-MSI irqdomain.\n");
} else {
d->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
}
return d;
}


@ -2446,6 +2446,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
u64 msr_val;
int i;
static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
if (!init_event) {
msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_reset_bsp(vcpu))
@ -2757,6 +2759,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
struct kvm_lapic *apic = vcpu->arch.apic;
int r;
static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
/* set SPIV separately to get count of SW disabled APICs right */
apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));


@ -6799,7 +6799,7 @@ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}
static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@ -8172,7 +8172,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.apicv_post_state_restore = vmx_apicv_post_state_restore,
.apicv_pre_state_restore = vmx_apicv_pre_state_restore,
.check_apicv_inhibit_reasons = vmx_check_apicv_inhibit_reasons,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,


@ -32,10 +32,13 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
* The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
* Make sure that xen_vcpu_info doesn't cross a page boundary by making it
* cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
* which matches the cache line size of 64-bit x86 processors).
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
int err;
struct vcpu_info *vcpup;
BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*


@ -136,7 +136,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
inc_irq_stat(irq_hv_callback_count);
xen_hvm_evtchn_do_upcall();
xen_evtchn_do_upcall();
set_irq_regs(old_regs);
}


@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);
DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
DECLARE_PER_CPU(unsigned long, xen_current_cr3);


@ -82,7 +82,15 @@ int hd44780_common_clear_display(struct charlcd *lcd)
hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CLEAR);
/* datasheet says to wait 1,64 milliseconds */
long_sleep(2);
return 0;
/*
* The Hitachi HD44780 controller (and compatible ones) reset the DDRAM
* address when executing the DISPLAY_CLEAR command, thus the
* following call is not required. However, other controllers do not
* (e.g. NewHaven NHD-0220DZW-AG5), thus move the cursor to home
* unconditionally to support both.
*/
return hd44780_common_home(lcd);
}
EXPORT_SYMBOL_GPL(hd44780_common_clear_display);


@ -296,7 +296,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
return amd_pstate_update_freq(policy, target_freq, true);
if (!amd_pstate_update_freq(policy, target_freq, true))
return target_freq;
return policy->cur;
}
static void amd_pstate_adjust_perf(unsigned int cpu,


@ -209,6 +209,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
{
int ret = dev_pm_opp_disable(dev, freq);
if (ret < 0 && ret != -ENODEV)
dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
}
#define OCOTP_CFG3 0x440
#define OCOTP_CFG3_SPEED_SHIFT 16
#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
@ -254,17 +262,15 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
val &= 0x3;
if (val < OCOTP_CFG3_SPEED_996MHZ)
if (dev_pm_opp_disable(dev, 996000000))
dev_warn(dev, "failed to disable 996MHz OPP\n");
imx6x_disable_freq_in_opp(dev, 996000000);
if (of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6qp")) {
if (val != OCOTP_CFG3_SPEED_852MHZ)
if (dev_pm_opp_disable(dev, 852000000))
dev_warn(dev, "failed to disable 852MHz OPP\n");
imx6x_disable_freq_in_opp(dev, 852000000);
if (val != OCOTP_CFG3_SPEED_1P2GHZ)
if (dev_pm_opp_disable(dev, 1200000000))
dev_warn(dev, "failed to disable 1.2GHz OPP\n");
imx6x_disable_freq_in_opp(dev, 1200000000);
}
return 0;
@ -316,20 +322,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
val >>= OCOTP_CFG3_SPEED_SHIFT;
val &= 0x3;
if (of_machine_is_compatible("fsl,imx6ul")) {
if (of_machine_is_compatible("fsl,imx6ul"))
if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
if (dev_pm_opp_disable(dev, 696000000))
dev_warn(dev, "failed to disable 696MHz OPP\n");
}
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
if (dev_pm_opp_disable(dev, 792000000))
dev_warn(dev, "failed to disable 792MHz OPP\n");
if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
if (dev_pm_opp_disable(dev, 900000000))
dev_warn(dev, "failed to disable 900MHz OPP\n");
imx6x_disable_freq_in_opp(dev, 900000000);
}
return ret;


@ -296,7 +296,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
dma_fence_is_later(fence, old)) ||
dma_fence_is_later_or_same(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);


@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
fw_unit_attributes,
&unit->attribute_group);
if (device_register(&unit->device) < 0)
goto skip_unit;
fw_device_get(device);
continue;
skip_unit:
kfree(unit);
if (device_register(&unit->device) < 0) {
put_device(&unit->device);
continue;
}
}
}


@ -2200,6 +2200,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
/*
* For runpm implemented via BACO, PMFW will handle the
* timing for BACO in and out:


@ -82,6 +82,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
};
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
@ -274,6 +278,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
soc15_program_register_sequence(adev,
golden_settings_gc_11_0,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,


@ -6149,7 +6149,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
dm_new_state->underscan_enable = val;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
dm_new_state->abm_level = val;
dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
ret = 0;
}
@ -6194,7 +6194,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
*val = dm_state->underscan_enable;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
*val = dm_state->abm_level;
*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
dm_state->abm_level : 0;
ret = 0;
}
@ -6274,7 +6275,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
state->abm_level = amdgpu_dm_abm_level;
state->abm_level = amdgpu_dm_abm_level ?:
ABM_LEVEL_IMMEDIATE_DISABLE;
__drm_atomic_helper_connector_reset(connector, &state->base);
}


@ -499,9 +499,12 @@ enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
DCN_ZSTATE_SUPPORT_DISALLOW,
};
/*
* For any clocks that may differ per pipe
* only the max is stored in this structure
/**
* dc_clocks - DC pipe clocks
*
* For any clocks that may differ per pipe only the max is stored in this
* structure
*/
struct dc_clocks {
int dispclk_khz;
@ -528,6 +531,16 @@ struct dc_clocks {
bool prev_p_state_change_support;
bool fclk_prev_p_state_change_support;
int num_ways;
/**
* @fw_based_mclk_switching
*
* DC has a mechanism that leverage the variable refresh rate to switch
* memory clock in cases that we have a large latency to achieve the
* memory clock change and a short vblank window. DC has some
* requirements to enable this feature, and this field describes if the
* system support or not such a feature.
*/
bool fw_based_mclk_switching;
bool fw_based_mclk_switching_shut_down;
int prev_num_ways;


@ -202,7 +202,18 @@ struct dc_stream_state {
bool use_vsc_sdp_for_colorimetry;
bool ignore_msa_timing_param;
/**
* @allow_freesync:
*
* It say if Freesync is enabled or not.
*/
bool allow_freesync;
/**
* @vrr_active_variable:
*
* It describes if VRR is in use.
*/
bool vrr_active_variable;
bool freesync_on_desktop;


@ -308,7 +308,10 @@ bool cm_helper_convert_to_custom_float(
#define NUMBER_REGIONS 32
#define NUMBER_SW_SEGMENTS 16
bool cm_helper_translate_curve_to_hw_format(
#define DC_LOGGER \
ctx->logger
bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
@ -482,10 +485,18 @@ bool cm_helper_translate_curve_to_hw_format(
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
if (fixpoint == true) {
rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
uint32_t red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
uint32_t green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
uint32_t blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
DC_LOG_WARNING("Losing delta precision while programming shaper LUT.");
rgb->delta_red_reg = red_clamp & 0x3ff;
rgb->delta_green_reg = green_clamp & 0x3ff;
rgb->delta_blue_reg = blue_clamp & 0x3ff;
rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);


@ -106,6 +106,7 @@ bool cm_helper_convert_to_custom_float(
bool fixpoint);
bool cm_helper_translate_curve_to_hw_format(
struct dc_context *ctx,
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint);


@ -1867,7 +1867,7 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
else if (cm_helper_translate_curve_to_hw_format(
else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(


@ -843,7 +843,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
params = &stream->out_transfer_func->pwl;
else if (pipe_ctx->stream->out_transfer_func->type ==
TF_TYPE_DISTRIBUTED_POINTS &&
cm_helper_translate_curve_to_hw_format(
cm_helper_translate_curve_to_hw_format(dc->ctx,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
@ -872,7 +872,7 @@ bool dcn20_set_blend_lut(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
blend_lut = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->blend_tf,
&dpp_base->regamma_params, false);
blend_lut = &dpp_base->regamma_params;
@ -894,7 +894,7 @@ bool dcn20_set_shaper_3dlut(
if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
shaper_lut = &plane_state->in_shaper_func->pwl;
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;


@ -280,7 +280,7 @@ bool dwb3_ogam_set_input_transfer_func(
dwb_ogam_lut = kzalloc(sizeof(*dwb_ogam_lut), GFP_KERNEL);
if (dwb_ogam_lut) {
cm_helper_translate_curve_to_hw_format(
cm_helper_translate_curve_to_hw_format(dwbc->ctx,
in_transfer_func_dwb_ogam,
dwb_ogam_lut, false);


@ -91,8 +91,8 @@ bool dcn30_set_blend_lut(
return result;
}
static bool dcn30_set_mpc_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@ -104,19 +104,18 @@ static bool dcn30_set_mpc_shaper_3dlut(
const struct pwl_params *shaper_lut = NULL;
//get the shaper lut params
if (stream->func_shaper) {
if (stream->func_shaper->type == TF_TYPE_HWPWL)
if (stream->func_shaper->type == TF_TYPE_HWPWL) {
shaper_lut = &stream->func_shaper->pwl;
else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(
stream->func_shaper,
&dpp_base->shaper_params, true);
} else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(stream->ctx, stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
}
if (stream->lut3d_func &&
stream->lut3d_func->state.bits.initialized == 1 &&
stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
stream->lut3d_func->state.bits.initialized == 1 &&
stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
@ -125,20 +124,22 @@ static bool dcn30_set_mpc_shaper_3dlut(
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
if (mpcc_id_projected != mpcc_id)
BREAK_TO_DEBUGGER();
/*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/
/* find the reason why logical layer assigned a different
* mpcc_id into acquire_post_bldn_3dlut
*/
acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
stream->lut3d_func->state.bits.rmu_mux_num);
stream->lut3d_func->state.bits.rmu_mux_num);
if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
BREAK_TO_DEBUGGER();
result = mpc->funcs->program_3dlut(mpc,
&stream->lut3d_func->lut_3d,
stream->lut3d_func->state.bits.rmu_mux_num);
result = mpc->funcs->program_shaper(mpc, shaper_lut,
stream->lut3d_func->state.bits.rmu_mux_num);
} else
/*loop through the available mux and release the requested mpcc_id*/
mpc->funcs->release_rmu(mpc, mpcc_id);
result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
stream->lut3d_func->state.bits.rmu_mux_num);
result = mpc->funcs->program_shaper(mpc, shaper_lut,
stream->lut3d_func->state.bits.rmu_mux_num);
} else {
// loop through the available mux and release the requested mpcc_id
mpc->funcs->release_rmu(mpc, mpcc_id);
}
return result;
}


@ -884,7 +884,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.minimum_z8_residency_time = 2100,
.psr_skip_crtc_disable = true,
.disable_dmcu = true,
.force_abm_enable = false,


@ -530,7 +530,7 @@ static bool dcn32_set_mpc_shaper_3dlut(
if (stream->func_shaper->type == TF_TYPE_HWPWL)
shaper_lut = &stream->func_shaper->pwl;
else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(
cm_helper_translate_curve_to_hw_format(stream->ctx,
stream->func_shaper,
&dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
@ -566,8 +566,7 @@ bool dcn32_set_mcm_luts(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
cm_helper_translate_curve_to_hw_format(
plane_state->blend_tf,
cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
&dpp_base->regamma_params, false);
lut_params = &dpp_base->regamma_params;
}
@ -581,8 +580,7 @@ bool dcn32_set_mcm_luts(
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
// TODO: dpp_base replace
ASSERT(false);
cm_helper_translate_curve_to_hw_format(
plane_state->in_shaper_func,
cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
lut_params = &dpp_base->shaper_params;
}


@ -29,6 +29,13 @@
#define DC__PRESENT 1
#define DC__PRESENT__1 1
#define DC__NUM_DPP 4
/**
* @DC__VOLTAGE_STATES:
*
* Define the maximum amount of states supported by the ASIC. Every ASIC has a
* specific number of states; this macro defines the maximum number of states.
*/
#define DC__VOLTAGE_STATES 20
#define DC__NUM_DPP__4 1
#define DC__NUM_DPP__0_PRESENT 1


@ -948,10 +948,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
unsigned int min_dst_y_next_start_us;
plane_count = 0;
min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@ -973,26 +971,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
bool isFreesyncVideo;
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
break;
}
}
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;


@ -1788,6 +1788,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
@ -1921,7 +1922,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts = dram_speed_from_validation;
min_dram_speed_mts_margin = 160;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =


@ -105,14 +105,39 @@ enum source_macro_tile_size {
enum cursor_bpp {
dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2
};
/**
* @enum clock_change_support - It represents possible reasons to change the DRAM clock.
*
* DC may change the DRAM clock during its execution, and this enum tracks all
* the available methods. Note that every ASIC has their specific way to deal
* with these clock switch.
*/
enum clock_change_support {
/**
* @dm_dram_clock_change_uninitialized: If you see this, we might have
* a code initialization issue
*/
dm_dram_clock_change_uninitialized = 0,
/**
* @dm_dram_clock_change_vactive: Support DRAM switch in VActive
*/
dm_dram_clock_change_vactive,
/**
* @dm_dram_clock_change_vblank: Support DRAM switch in VBlank
*/
dm_dram_clock_change_vblank,
dm_dram_clock_change_vactive_w_mall_full_frame,
dm_dram_clock_change_vactive_w_mall_sub_vp,
dm_dram_clock_change_vblank_w_mall_full_frame,
dm_dram_clock_change_vblank_w_mall_sub_vp,
/**
* @dm_dram_clock_change_unsupported: Do not support DRAM switch
*/
dm_dram_clock_change_unsupported
};


@ -419,6 +419,15 @@ struct vba_vars_st {
double MinPixelChunkSizeBytes;
unsigned int DCCMetaBufferSizeBytes;
// Pipe/Plane Parameters
/** @VoltageLevel:
* Every ASIC has a fixed number of DPM states, and some devices might
* have some particular voltage configuration that does not map
* directly to the DPM states. This field tells how many states the
* target device supports; even though this field combines the DPM and
* special SOC voltages, it mostly matches the total number of DPM
* states.
*/
int VoltageLevel;
double FabricClock;
double DRAMSpeed;


@ -115,6 +115,13 @@ struct resource_funcs {
int vlevel);
void (*update_soc_for_wm_a)(
struct dc *dc, struct dc_state *context);
/**
* @populate_dml_pipes - Populate pipe data struct
*
* Returns:
* Total of pipes available in the specific ASIC.
*/
int (*populate_dml_pipes)(
struct dc *dc,
struct dc_state *context,


@ -35,6 +35,13 @@
******************************************************************************/
#define MAX_AUDIOS 7
/**
* @MAX_PIPES:
*
* Every ASIC support a fixed number of pipes; MAX_PIPES defines a large number
* to be used inside loops and for determining array sizes.
*/
#define MAX_PIPES 6
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1


@ -332,6 +332,8 @@ struct dmub_srv_hw_funcs {
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub);
uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
@ -590,6 +592,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
*/
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
* dmub_srv_sync_inbox1() - sync sw state with hw state
* @dmub: the dmub service
*
* Sync sw state with hw state when resume from S0i3
*
* Return:
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
/**
* dmub_srv_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service


@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_WPTR);
}
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);


@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub);
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);


@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_WPTR);
}
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);


@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub);
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);


@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_WPTR);
}
uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);


@ -206,6 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub);
uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);


@ -167,6 +167,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load = dmub_dcn20_backdoor_load;
funcs->setup_windows = dmub_dcn20_setup_windows;
funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
@ -243,6 +244,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load = dmub_dcn31_backdoor_load;
funcs->setup_windows = dmub_dcn31_setup_windows;
funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
@ -281,6 +283,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
funcs->setup_windows = dmub_dcn32_setup_windows;
funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
@ -666,6 +669,27 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
} else {
dmub->inbox1_rb.rptr = rptr;
dmub->inbox1_rb.wrpt = wptr;
dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
}
}
return DMUB_STATUS_OK;
}
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@ -694,6 +718,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
return DMUB_STATUS_OK;
@ -964,6 +993,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
if (ack)
return DMUB_STATUS_OK;
udelay(1);
}
return DMUB_STATUS_TIMEOUT;
}


@ -6369,6 +6369,8 @@
#define regTCP_INVALIDATE_BASE_IDX 1
#define regTCP_STATUS 0x19a1
#define regTCP_STATUS_BASE_IDX 1
#define regTCP_CNTL 0x19a2
#define regTCP_CNTL_BASE_IDX 1
#define regTCP_CNTL2 0x19a3
#define regTCP_CNTL2_BASE_IDX 1
#define regTCP_DEBUG_INDEX 0x19a5


@ -258,8 +258,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
}
smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
if (!smu_table->ecc_table)
if (!smu_table->ecc_table) {
kfree(smu_table->metrics_table);
kfree(smu_table->gpu_metrics_table);
return -ENOMEM;
}
return 0;
}


@ -136,6 +136,7 @@ static const struct xpad_device {
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
@ -459,6 +460,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One Controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */


@ -1502,6 +1502,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
{
struct qi_desc desc;
/*
* VT-d spec, section 4.3:
*
* Software is recommended to not submit any Device-TLB invalidation
* requests while address remapping hardware is disabled.
*/
if (!(iommu->gcmd & DMA_GCMD_TE))
return;
if (mask) {
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
@ -1567,6 +1576,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
/*
* VT-d spec, section 4.3:
*
* Software is recommended to not submit any Device-TLB invalidation
* requests while address remapping hardware is disabled.
*/
if (!(iommu->gcmd & DMA_GCMD_TE))
return;
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);


@ -277,7 +277,7 @@ static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
static void dmar_remove_one_dev_info(struct device *dev);
static void device_block_translation(struct device *dev);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@ -1418,7 +1418,7 @@ static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
if (!info || !dev_is_pci(info->dev))
if (!dev_is_pci(info->dev))
return;
pdev = to_pci_dev(info->dev);
@ -2064,7 +2064,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
} else {
iommu_flush_write_buffer(iommu);
}
iommu_enable_pci_caps(info);
ret = 0;
@ -2494,13 +2493,6 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
dmar_remove_one_dev_info(dev);
return ret;
}
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
@ -2513,7 +2505,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
dev, PASID_RID2PASID);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
dmar_remove_one_dev_info(dev);
device_block_translation(dev);
return ret;
}
}
@ -2521,10 +2513,13 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
ret = domain_context_mapping(domain, dev);
if (ret) {
dev_err(dev, "Domain context map failed\n");
dmar_remove_one_dev_info(dev);
device_block_translation(dev);
return ret;
}
if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
iommu_enable_pci_caps(info);
return 0;
}
@ -4091,8 +4086,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
*/
static void domain_context_clear(struct device_domain_info *info)
{
if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
return;
if (!dev_is_pci(info->dev))
domain_context_clear_one(info, info->bus, info->devfn);
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
@ -4112,7 +4107,6 @@ static void dmar_remove_one_dev_info(struct device *dev)
iommu_disable_dev_iotlb(info);
domain_context_clear(info);
intel_pasid_free_table(info->dev);
}
spin_lock_irqsave(&domain->lock, flags);
@ -4123,6 +4117,37 @@ static void dmar_remove_one_dev_info(struct device *dev)
info->domain = NULL;
}
/*
* Clear the page table pointer in context or pasid table entries so that
* all DMA requests without PASID from the device are blocked. If the page
* table has been set, clean up the data structures.
*/
static void device_block_translation(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
iommu_disable_dev_iotlb(info);
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
PASID_RID2PASID, false);
else
domain_context_clear(info);
}
if (!info->domain)
return;
spin_lock_irqsave(&info->domain->lock, flags);
list_del(&info->link);
spin_unlock_irqrestore(&info->domain->lock, flags);
domain_detach_iommu(info->domain, iommu);
info->domain = NULL;
}
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
@ -4248,7 +4273,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device_domain_info *info = dev_iommu_priv_get(dev);
if (info->domain)
dmar_remove_one_dev_info(dev);
device_block_translation(dev);
}
ret = prepare_domain_attach_device(domain, dev);
@ -4479,6 +4504,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
struct device_domain_info *info;
struct intel_iommu *iommu;
u8 bus, devfn;
int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu || !iommu->iommu.ops)
@ -4523,6 +4549,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
dev_iommu_priv_set(dev, info);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
dev_iommu_priv_set(dev, NULL);
kfree(info);
return ERR_PTR(ret);
}
}
return &iommu->iommu;
}
@ -4531,6 +4567,7 @@ static void intel_iommu_release_device(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
dev_iommu_priv_set(dev, NULL);
kfree(info);
set_dma_ops(dev, NULL);
@ -4894,7 +4931,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
ver != 0x9a && ver != 0xa7)
ver != 0x9a && ver != 0xa7 && ver != 0x7d)
return;
if (risky_device(dev))


@ -1489,7 +1489,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
if (!IS_ERR(new_nodes[i])) {
if (!IS_ERR_OR_NULL(new_nodes[i])) {
btree_node_free(new_nodes[i]);
rw_unlock(true, new_nodes[i]);
}


@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
*/
static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
{
return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
return (struct dm_verity_fec_io *)
((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
}
/*


@ -631,7 +631,6 @@ static void verity_work(struct work_struct *w)
io->in_tasklet = false;
verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
@ -657,7 +656,9 @@ static void verity_end_io(struct bio *bio)
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status &&
(!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
(!verity_fec_is_enabled(io->v) ||
verity_is_system_shutting_down() ||
(bio->bi_opf & REQ_RAHEAD))) {
verity_finish_io(io, bio->bi_status);
return;
}
@ -779,6 +780,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
verity_fec_init_io(io);
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);


@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
}
static inline u8 *verity_io_digest_end(struct dm_verity *v,
struct dm_verity_io *io)
{
return verity_io_want_digest(v, io) + v->digest_size;
}
extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter,
int (*process)(struct dm_verity *v,


@ -1508,6 +1508,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
} else if (mq->in_recovery) {
blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}


@ -552,7 +552,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
mmc_wait_for_cmd(host, &cmd, 0);
mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
@ -560,10 +562,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
err = mmc_wait_for_cmd(host, &cmd, 0);
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host);
if (err)
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_retune_release(host);
return err;


@ -271,3 +271,44 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
/**
* mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
* @mmc: the host to regulate
*
* Returns 0 or errno. Enables the regulator for vqmmc.
* Keeps track of the enable status for ensuring that calls to
* regulator_enable/disable are balanced.
*/
int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
{
int ret = 0;
if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
ret = regulator_enable(mmc->supply.vqmmc);
if (ret < 0)
dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
else
mmc->vqmmc_enabled = true;
}
return ret;
}
EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
/**
* mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
* @mmc: the host to regulate
*
* Returns 0 or errno. Disables the regulator for vqmmc.
* Keeps track of the enable status for ensuring that calls to
* regulator_enable/disable are balanced.
*/
void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
{
if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
regulator_disable(mmc->supply.vqmmc);
mmc->vqmmc_enabled = false;
}
}
EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);


@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host);
if (!ret)
pr_debug("%s: cqhci: Failed to clear tasks\n",
mmc_hostname(mmc));
pr_warn("%s: cqhci: Failed to clear tasks\n",
mmc_hostname(mmc));
return ret;
}
@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host);
if (!ret)
pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret;
}
@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/*
* After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper
* layers will need to send a STOP command), so we set the timeout based on a
* generous command timeout.
* layers will need to send a STOP command), however failing to halt complicates
* the recovery, so set a timeout that would reasonably allow I/O to complete.
*/
#define CQHCI_START_HALT_TIMEOUT 5
#define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc)
{
@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
/*
* The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway.
*/
if (!ok) {
pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqcfg |= CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
/* Be sure that there are no tasks */
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
WARN_ON(!ok);
}
if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
ok = false;
/* Disable to make sure tasks really are cleared */
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg &= ~CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
cqcfg |= CQHCI_ENABLE;
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
if (!ok)
cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host);


@ -801,6 +801,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
}
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing)
{
@ -909,6 +935,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
if (ret)
goto cleanup;
/* Disable LPM negotiation to avoid entering L1 state. */
gl9763e_set_low_power_negotiation(slot, false);
return 0;
cleanup:
@ -960,31 +989,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
if (enable)
value &= ~GLI_9763E_CFG_LPSN_DIS;
else
value |= GLI_9763E_CFG_LPSN_DIS;
pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
}
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];


@ -405,12 +405,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
mmc_request_done(host->mmc, mrq);
}
static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
switch (mode) {
case MMC_POWER_OFF:
mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
mmc_regulator_disable_vqmmc(mmc);
break;
case MMC_POWER_ON:
mmc_regulator_enable_vqmmc(mmc);
break;
case MMC_POWER_UP:
mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
break;
}
}
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
.set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
@ -676,6 +697,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
ret = mmc_regulator_get_supply(host->mmc);
if (ret)
goto pm_runtime_disable;
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;


@ -1043,14 +1043,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
/* If there's enough room to align the FD address, do it.
* It will help hardware optimize accesses.
*/
aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
return -ENOMEM;
/* Store a backpointer to the skb at the beginning of the buffer
* (in the private data area) such that we can release it
@ -4738,6 +4736,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_dl_port_add;
net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");


@ -702,7 +702,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
unsigned int headroom = DPAA2_ETH_SWA_SIZE;
unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
/* If we don't have an skb (e.g. XDP buffer), we only need space for
* the software annotation area


@ -232,7 +232,7 @@ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
npc_install_flow_req, npc_install_flow_rsp) \
M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
npc_delete_flow_req, msg_rsp) \
npc_delete_flow_req, npc_delete_flow_rsp) \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
@ -1471,6 +1471,8 @@ struct npc_install_flow_req {
u8 vtag0_op;
u16 vtag1_def;
u8 vtag1_op;
/* old counter value */
u16 cntr_val;
};
struct npc_install_flow_rsp {
@ -1486,6 +1488,11 @@ struct npc_delete_flow_req {
u8 all; /* PF + VFs */
};
struct npc_delete_flow_rsp {
struct mbox_msghdr hdr;
u16 cntr_val;
};
struct npc_mcam_read_entry_req {
struct mbox_msghdr hdr;
u16 entry; /* MCAM entry to read */


@ -5236,6 +5236,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer = &nix_hw->ipolicer[layer];
for (idx = 0; idx < req->prof_count[layer]; idx++) {
if (idx == MAX_BANDPROF_PER_PFFUNC)
break;
prof_idx = req->prof_idx[layer][idx];
if (prof_idx >= ipolicer->band_prof.max ||
ipolicer->pfvf_map[prof_idx] != pcifunc)
@ -5249,8 +5251,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer->pfvf_map[prof_idx] = 0x00;
ipolicer->match_id[prof_idx] = 0;
rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
if (idx == MAX_BANDPROF_PER_PFFUNC)
break;
}
}
mutex_unlock(&rvu->rsrc_lock);


@ -1184,7 +1184,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
write_req.enable_entry = (u8)enable;
/* if counter is available then clear and use it */
if (req->set_cntr && rule->has_cntr) {
rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), req->cntr_val);
write_req.set_cntr = 1;
write_req.cntr = rule->cntr;
}
@ -1399,12 +1399,13 @@ static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
struct npc_delete_flow_req *req,
struct msg_rsp *rsp)
struct npc_delete_flow_rsp *rsp)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_npc_mcam_rule *iter, *tmp;
u16 pcifunc = req->hdr.pcifunc;
struct list_head del_list;
int blkaddr;
INIT_LIST_HEAD(&del_list);
@ -1420,6 +1421,10 @@ int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
list_move_tail(&iter->list, &del_list);
/* single rule */
} else if (req->entry == iter->entry) {
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr)
rsp->cntr_val = rvu_read64(rvu, blkaddr,
NPC_AF_MATCH_STATX(iter->cntr));
list_move_tail(&iter->list, &del_list);
break;
}


@ -145,6 +145,7 @@ void rvu_switch_enable(struct rvu *rvu)
struct npc_mcam_alloc_entry_req alloc_req = { 0 };
struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 };
struct npc_delete_flow_req uninstall_req = { 0 };
struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct msg_rsp rsp;
@ -184,7 +185,7 @@ void rvu_switch_enable(struct rvu *rvu)
uninstall_rules:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
kfree(rswitch->entry2pcifunc);
free_entries:
free_req.all = 1;
@ -196,6 +197,7 @@ void rvu_switch_enable(struct rvu *rvu)
void rvu_switch_disable(struct rvu *rvu)
{
struct npc_delete_flow_req uninstall_req = { 0 };
struct npc_delete_flow_rsp uninstall_rsp = { 0 };
struct npc_mcam_free_entry_req free_req = { 0 };
struct rvu_switch *rswitch = &rvu->rswitch;
struct rvu_hwinfo *hw = rvu->hw;
@ -232,7 +234,7 @@ void rvu_switch_disable(struct rvu *rvu)
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp);
rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &uninstall_rsp);
rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp);
rswitch->used_entries = 0;
kfree(rswitch->entry2pcifunc);


@ -448,6 +448,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
aq->prof.hl_en = 0;
aq->prof_mask.hl_en = 1;
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;


@ -339,13 +339,8 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
};
struct otx2_tc_info {
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
unsigned long *tc_entries_bitmap;
struct list_head flow_list_tc;
bool ntuple;
};
struct dev_hw_ops {
@ -465,7 +460,6 @@ struct otx2_nic {
/* NPC MCAM */
struct otx2_flow_config *flow_cfg;
struct otx2_mac_table *mac_table;
struct otx2_tc_info tc_info;
u64 reset_count;
struct work_struct reset_task;
@ -1024,7 +1018,8 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);


@ -41,7 +41,6 @@ static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
return 0;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
otx2_tc_alloc_ent_bitmap(pfvf);
return 0;
}


@ -753,6 +753,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
pfvf->flow_cfg->ntuple = ntuple;
switch (nfc->cmd) {
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);


@ -276,6 +276,7 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
INIT_LIST_HEAD(&flow_cfg->flow_list_tc);
flow_cfg->max_flows = 0;
return 0;
@ -298,6 +299,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return -ENOMEM;
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.


@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
TYPE_PFVF);
vfs -= 64;
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
}
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
return IRQ_HANDLED;
}
@ -1855,6 +1858,8 @@ int otx2_open(struct net_device *netdev)
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
otx2_tc_apply_ingress_police_rules(pf);
err = otx2_rxtx_enable(pf, true);
/* If a mbox communication error happens at this point then interface
* will end up in a state such that it is in down state but hardware


@ -48,9 +48,8 @@ struct otx2_tc_flow_stats {
};
struct otx2_tc_flow {
struct rhash_head node;
struct list_head list;
unsigned long cookie;
unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
@ -58,31 +57,13 @@ struct otx2_tc_flow {
u16 entry;
u16 leaf_profile;
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
u64 rate;
u32 burst;
bool is_pps;
};
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
if (!nic->flow_cfg->max_flows)
return 0;
/* Max flows changed, free the existing bitmap */
kfree(tc->tc_entries_bitmap);
tc->tc_entries_bitmap =
kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
sizeof(long), GFP_KERNEL);
if (!tc->tc_entries_bitmap) {
netdev_err(nic->netdev,
"Unable to alloc TC flow entries bitmap\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
u32 *burst_exp, u32 *burst_mantissa)
{
@ -321,6 +302,41 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
struct otx2_tc_flow *node)
{
int rc;
mutex_lock(&nic->mbox.lock);
rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
if (rc) {
mutex_unlock(&nic->mbox.lock);
return rc;
}
rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
node->burst, node->rate, node->is_pps);
if (rc)
goto free_leaf;
rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
if (rc)
goto free_leaf;
mutex_unlock(&nic->mbox.lock);
return 0;
free_leaf:
if (cn10k_free_leaf_profile(nic, node->leaf_profile))
netdev_err(nic->netdev,
"Unable to free leaf bandwidth profile(%d)\n",
node->leaf_profile);
mutex_unlock(&nic->mbox.lock);
return rc;
}
static int otx2_tc_act_set_police(struct otx2_nic *nic,
struct otx2_tc_flow *node,
struct flow_cls_offload *f,
@ -337,39 +353,20 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return -EINVAL;
}
mutex_lock(&nic->mbox.lock);
rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
if (rc) {
mutex_unlock(&nic->mbox.lock);
return rc;
}
rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
if (rc)
goto free_leaf;
rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
if (rc)
goto free_leaf;
mutex_unlock(&nic->mbox.lock);
req->match_id = mark & 0xFFFFULL;
req->index = rq_idx;
req->op = NIX_RX_ACTIONOP_UCAST;
set_bit(rq_idx, &nic->rq_bmap);
node->is_act_police = true;
node->rq = rq_idx;
node->burst = burst;
node->rate = rate;
node->is_pps = pps;
return 0;
rc = otx2_tc_act_set_hw_police(nic, node);
if (!rc)
set_bit(rq_idx, &nic->rq_bmap);
free_leaf:
if (cn10k_free_leaf_profile(nic, node->leaf_profile))
netdev_err(nic->netdev,
"Unable to free leaf bandwidth profile(%d)\n",
node->leaf_profile);
mutex_unlock(&nic->mbox.lock);
return rc;
}
@ -689,8 +686,117 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_tc_flow *iter, *tmp;
if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
return;
list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
list_del(&iter->list);
kfree(iter);
flow_cfg->nr_flows--;
}
}
static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
unsigned long cookie)
{
struct otx2_tc_flow *tmp;
list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
if (tmp->cookie == cookie)
return tmp;
}
return NULL;
}
static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
int index)
{
struct otx2_tc_flow *tmp;
int i = 0;
list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
if (i == index)
return tmp;
i++;
}
return NULL;
}
static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
struct otx2_tc_flow *node)
{
struct list_head *pos, *n;
struct otx2_tc_flow *tmp;
list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
tmp = list_entry(pos, struct otx2_tc_flow, list);
if (node == tmp) {
list_del(&node->list);
return;
}
}
}
static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
struct otx2_tc_flow *node)
{
struct list_head *pos, *n;
struct otx2_tc_flow *tmp;
int index = 0;
/* If the flow list is empty then add the new node */
if (list_empty(&flow_cfg->flow_list_tc)) {
list_add(&node->list, &flow_cfg->flow_list_tc);
return index;
}
list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
tmp = list_entry(pos, struct otx2_tc_flow, list);
if (node->prio < tmp->prio)
break;
index++;
}
list_add(&node->list, pos->prev);
return index;
}
static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
struct npc_install_flow_req *tmp_req;
int err;
mutex_lock(&nic->mbox.lock);
tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!tmp_req) {
mutex_unlock(&nic->mbox.lock);
return -ENOMEM;
}
memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
/* Send message to AF */
err = otx2_sync_mbox_msg(&nic->mbox);
if (err) {
netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
req->entry);
mutex_unlock(&nic->mbox.lock);
return -EFAULT;
}
mutex_unlock(&nic->mbox.lock);
return 0;
}
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
struct npc_delete_flow_rsp *rsp;
struct npc_delete_flow_req *req;
int err;
@ -711,22 +817,113 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
mutex_unlock(&nic->mbox.lock);
return -EFAULT;
}
if (cntr_val) {
rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
0, &req->hdr);
if (IS_ERR(rsp)) {
netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
entry);
mutex_unlock(&nic->mbox.lock);
return -EFAULT;
}
*cntr_val = rsp->cntr_val;
}
mutex_unlock(&nic->mbox.lock);
return 0;
}
static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
struct otx2_flow_config *flow_cfg,
struct otx2_tc_flow *node)
{
struct list_head *pos, *n;
struct otx2_tc_flow *tmp;
int i = 0, index = 0;
u16 cntr_val = 0;
/* Find and delete the entry from the list and re-install
* all the entries from beginning to the index of the
* deleted entry to higher mcam indexes.
*/
list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
tmp = list_entry(pos, struct otx2_tc_flow, list);
if (node == tmp) {
list_del(&tmp->list);
break;
}
otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
tmp->entry++;
tmp->req.entry = tmp->entry;
tmp->req.cntr_val = cntr_val;
index++;
}
list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
if (i == index)
break;
tmp = list_entry(pos, struct otx2_tc_flow, list);
otx2_add_mcam_flow_entry(nic, &tmp->req);
i++;
}
return 0;
}
static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
struct otx2_flow_config *flow_cfg,
struct otx2_tc_flow *node)
{
int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
struct otx2_tc_flow *tmp;
int list_idx, i;
u16 cntr_val = 0;
/* Find the index of the entry(list_idx) whose priority
* is greater than the new entry and re-install all
* the entries from beginning to list_idx to higher
* mcam indexes.
*/
list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
for (i = 0; i < list_idx; i++) {
tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
if (!tmp)
return -ENOMEM;
otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
tmp->entry = flow_cfg->flow_ent[mcam_idx];
tmp->req.entry = tmp->entry;
tmp->req.cntr_val = cntr_val;
otx2_add_mcam_flow_entry(nic, &tmp->req);
mcam_idx++;
}
return mcam_idx;
}
static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
struct otx2_flow_config *flow_cfg,
struct otx2_tc_flow *node,
bool add_req)
{
if (add_req)
return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}
static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
if (!flow_node) {
netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
tc_flow_cmd->cookie);
@ -734,6 +931,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
}
if (flow_node->is_act_police) {
__clear_bit(flow_node->rq, &nic->rq_bmap);
if (nic->flags & OTX2_FLAG_INTF_DOWN)
goto free_mcam_flow;
mutex_lock(&nic->mbox.lock);
err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
@ -749,21 +951,14 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
"Unable to free leaf bandwidth profile(%d)\n",
flow_node->leaf_profile);
__clear_bit(flow_node->rq, &nic->rq_bmap);
mutex_unlock(&nic->mbox.lock);
}
otx2_del_mcam_flow_entry(nic, flow_node->entry);
WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
&flow_node->node,
nic->tc_info.flow_ht_params));
free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
flow_cfg->nr_flows--;
return 0;
}
@ -772,15 +967,19 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
{
struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
struct npc_install_flow_req *req, dummy;
int rc, err;
int rc, err, mcam_idx;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
if (nic->flags & OTX2_FLAG_INTF_DOWN) {
NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
return -EINVAL;
}
if (flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
return -ENOMEM;
@ -792,6 +991,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
return -ENOMEM;
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
new_node->prio = tc_flow_cmd->common.prio;
memset(&dummy, 0, sizeof(struct npc_install_flow_req));
@ -802,12 +1002,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
}
/* If a flow exists with the same cookie, delete it */
old_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!req) {
@ -818,11 +1017,8 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
flow_cfg->max_flows);
req->channel = nic->hw.rx_chan_base;
req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
req->entry = flow_cfg->flow_ent[mcam_idx];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@ -832,26 +1028,18 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
kfree_rcu(new_node, rcu);
goto free_leaf;
}
mutex_unlock(&nic->mbox.lock);
memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
/* add new flow to flow-table */
rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
nic->tc_info.flow_ht_params);
if (rc) {
otx2_del_mcam_flow_entry(nic, req->entry);
kfree_rcu(new_node, rcu);
goto free_leaf;
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
flow_cfg->nr_flows++;
return 0;
free_leaf:
otx2_tc_del_from_flow_list(flow_cfg, new_node);
kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@ -878,16 +1066,13 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_tc_info *tc_info = &nic->tc_info;
struct npc_mcam_get_stats_req *req;
struct npc_mcam_get_stats_rsp *rsp;
struct otx2_tc_flow_stats *stats;
struct otx2_tc_flow *flow_node;
int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
tc_info->flow_ht_params);
flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
if (!flow_node) {
netdev_info(nic->netdev, "tc flow not found for cookie %lx",
tc_flow_cmd->cookie);
@ -1035,12 +1220,20 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct otx2_nic *nic = cb_priv;
bool ntuple;
if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
return -EOPNOTSUPP;
ntuple = nic->netdev->features & NETIF_F_NTUPLE;
switch (type) {
case TC_SETUP_CLSFLOWER:
if (ntuple) {
netdev_warn(nic->netdev,
"Can't install TC flower offload rule when NTUPLE is active");
return -EOPNOTSUPP;
}
return otx2_setup_tc_cls_flower(nic, type_data);
case TC_SETUP_CLSMATCHALL:
return otx2_setup_tc_ingress_matchall(nic, type_data);
@ -1123,18 +1316,8 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
EXPORT_SYMBOL(otx2_setup_tc);
static const struct rhashtable_params tc_flow_ht_params = {
.head_offset = offsetof(struct otx2_tc_flow, node),
.key_offset = offsetof(struct otx2_tc_flow, cookie),
.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
.automatic_shrinking = true,
};
int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
int err;
/* Exclude receive queue 0 being used for police action */
set_bit(0, &nic->rq_bmap);
@ -1144,25 +1327,54 @@ int otx2_init_tc(struct otx2_nic *nic)
return -EINVAL;
}
err = otx2_tc_alloc_ent_bitmap(nic);
if (err)
return err;
tc->flow_ht_params = tc_flow_ht_params;
err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
if (err) {
kfree(tc->tc_entries_bitmap);
tc->tc_entries_bitmap = NULL;
}
return err;
return 0;
}
EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);
static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
struct otx2_tc_flow *node)
{
struct npc_install_flow_req *req;
if (otx2_tc_act_set_hw_police(nic, node))
return;
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!req)
goto err;
memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
if (otx2_sync_mbox_msg(&nic->mbox))
netdev_err(nic->netdev,
"Failed to install MCAM flow entry for ingress rule");
err:
mutex_unlock(&nic->mbox.lock);
}
void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_flow *node;
/* If any ingress policer rules exist for the interface then
* apply those rules. Ingress policer rules depend on bandwidth
* profiles linked to the receive queues. Since no receive queues
* exist when interface is down, ingress policer rules are stored
* and configured in hardware after all receive queues are allocated
* in otx2_open.
*/
list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
if (node->is_act_police)
otx2_tc_config_ingress_rule(nic, node);
}
}
EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);


@ -576,6 +576,8 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@ -3943,7 +3945,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8169_private *tp = netdev_priv(dev);
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
}
static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
@ -4537,6 +4539,7 @@ static void rtl_task(struct work_struct *work)
{
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
int ret;
rtnl_lock();
@ -4544,9 +4547,21 @@ static void rtl_task(struct work_struct *work)
!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
/* ASPM compatibility issues are a typical reason for tx timeouts */
ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
PCIE_LINK_STATE_L0S);
if (!ret)
netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
goto reset;
}
if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
rtl_reset_work(tp);
}
out_unlock:
rtnl_unlock();
@ -4580,7 +4595,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
} else {
/* In few cases rx is broken after link-down otherwise */
if (rtl_is_8125(tp))
rtl_reset_work(tp);
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
pm_runtime_idle(d);
}
@ -4656,7 +4671,7 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(tp);
rtl8169_rx_clear(tp);
cancel_work_sync(&tp->wk.work);
cancel_work(&tp->wk.work);
free_irq(tp->irq, tp);
@ -4890,6 +4905,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
cancel_work_sync(&tp->wk.work);
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)


@ -517,6 +517,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
} else {
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
CXR31_SEL_LINK0);
}
/* Receive frame limit set register */
ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
@ -539,14 +548,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
} else {
ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
CXR31_SEL_LINK0);
}
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@ -1827,19 +1828,20 @@ static int ravb_open(struct net_device *ndev)
if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
/* PHY control start */
error = ravb_phy_start(ndev);
if (error)
goto out_ptp_stop;
netif_tx_start_all_queues(ndev);
return 0;
out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
out_free_irq_mgmta:
if (!info->multi_irqs)
goto out_free_irq;
@ -1890,6 +1892,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct net_device *ndev = priv->ndev;
int error;
if (!rtnl_trylock()) {
usleep_range(1000, 2000);
schedule_work(&priv->work);
return;
}
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
@ -1923,7 +1931,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
*/
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
return;
goto out_unlock;
}
ravb_emac_init(ndev);
@ -1933,6 +1941,9 @@ static void ravb_tx_timeout_work(struct work_struct *work)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
out_unlock:
rtnl_unlock();
}
/* Packet transmit function for Ethernet AVB */
@ -2661,9 +2672,14 @@ static int ravb_probe(struct platform_device *pdev)
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
reset_control_deassert(rstc);
error = reset_control_deassert(rstc);
if (error)
goto out_free_netdev;
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
error = pm_runtime_resume_and_get(&pdev->dev);
if (error < 0)
goto out_rpm_disable;
if (info->multi_irqs) {
if (info->err_mgmt_irqs)
@ -2888,11 +2904,12 @@ static int ravb_probe(struct platform_device *pdev)
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
out_release:
free_netdev(ndev);
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
return error;
}
@ -2902,22 +2919,26 @@ static int ravb_remove(struct platform_device *pdev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
/* Stop PTP Clock driver */
if (info->ccc_gac)
ravb_ptp_stop(ndev);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
unregister_netdev(ndev);
if (info->nc_queues)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
/* Stop PTP Clock driver */
if (info->ccc_gac)
ravb_ptp_stop(ndev);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);


@ -177,8 +177,10 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
}


@ -2058,6 +2058,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (ret)
return ret;
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
ret = -ENODEV;
goto error;
}
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
@ -2107,6 +2114,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
error:
kfree(id);
return ret;
}


@ -120,6 +120,7 @@
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
/* DBI registers */
#define DBI_CON_STATUS 0x44
@ -252,6 +253,21 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
int ret;
writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
if (ret)
dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
}
static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
int ret;
@ -446,6 +462,7 @@ static const struct dw_pcie_ops pci_ops = {
.link_up = qcom_pcie_dw_link_up,
.start_link = qcom_pcie_dw_start_link,
.stop_link = qcom_pcie_dw_stop_link,
.write_dbi2 = qcom_pcie_dw_write_dbi2,
};
static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,


@ -6058,3 +6058,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
#endif
/*
* Devices known to require a longer delay before first config space access
* after reset recovery or resume from D3cold:
*
* VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
*/
static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
{
pdev->d3cold_delay = 1000;
}
DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);


@ -1239,17 +1239,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
struct pinctrl_state *old_state = p->state;
struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
if (p->state) {
if (old_state) {
/*
* For each pinmux setting in the old state, forget SW's record
* of mux owner for that pingroup. Any pingroups which are
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
list_for_each_entry(setting, &p->state->settings, node) {
list_for_each_entry(setting, &old_state->settings, node) {
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
continue;
pinmux_disable_setting(setting);


@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
struct dtpm_cpu {
struct dtpm dtpm;
@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
return scale_pd_power_uw(pd_mask, pd->table[i].power *
MICROWATT_PER_MILLIWATT);
return scale_pd_power_uw(pd_mask, pd->table[i].power);
}
return 0;
@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
nr_cpus = cpumask_weight(&cpus);
dtpm->power_min = em->table[0].power;
dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_min *= nr_cpus;
dtpm->power_max = em->table[em->nr_perf_states - 1].power;
dtpm->power_max *= MICROWATT_PER_MILLIWATT;
dtpm->power_max *= nr_cpus;
return 0;


@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
struct em_perf_domain *pd = em_pd_get(dev);
dtpm->power_min = pd->table[0].power;
dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
dtpm->power_max *= MICROWATT_PER_MILLIWATT;
return 0;
}
@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
unsigned long freq;
u64 power;
int i;
for (i = 0; i < pd->nr_perf_states; i++) {
power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
if (power > power_limit)
if (pd->table[i].power > power_limit)
break;
}
@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
power_limit = pd->table[i - 1].power;
return power_limit;
}
@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
power = pd->table[i].power;
power *= status.busy_time;
power >>= 10;


@ -3299,33 +3299,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
static inline int __spi_check_suspended(const struct spi_controller *ctlr)
{
return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
}
static inline void __spi_mark_suspended(struct spi_controller *ctlr)
{
mutex_lock(&ctlr->bus_lock_mutex);
ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
mutex_unlock(&ctlr->bus_lock_mutex);
}
static inline void __spi_mark_resumed(struct spi_controller *ctlr)
{
mutex_lock(&ctlr->bus_lock_mutex);
ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
mutex_unlock(&ctlr->bus_lock_mutex);
}
int spi_controller_suspend(struct spi_controller *ctlr)
{
int ret;
int ret = 0;
/* Basically no-ops for non-queued controllers */
if (!ctlr->queued)
return 0;
ret = spi_stop_queue(ctlr);
if (ret)
dev_err(&ctlr->dev, "queue stop failed\n");
if (ctlr->queued) {
ret = spi_stop_queue(ctlr);
if (ret)
dev_err(&ctlr->dev, "queue stop failed\n");
}
__spi_mark_suspended(ctlr);
return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_suspend);
int spi_controller_resume(struct spi_controller *ctlr)
{
int ret;
int ret = 0;
if (!ctlr->queued)
return 0;
ret = spi_start_queue(ctlr);
if (ret)
dev_err(&ctlr->dev, "queue restart failed\n");
__spi_mark_resumed(ctlr);
if (ctlr->queued) {
ret = spi_start_queue(ctlr);
if (ret)
dev_err(&ctlr->dev, "queue restart failed\n");
}
return ret;
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
@ -4050,8 +4069,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
ctlr->cur_msg = msg;
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
if (ret)
goto out;
dev_err(&ctlr->dev, "noqueue transfer failed\n");
ctlr->cur_msg = NULL;
ctlr->fallback = false;
@ -4067,7 +4085,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
spi_idle_runtime_pm(ctlr);
}
out:
mutex_unlock(&ctlr->io_mutex);
}
@ -4090,6 +4107,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
int status;
struct spi_controller *ctlr = spi->controller;
if (__spi_check_suspended(ctlr)) {
dev_warn_once(&spi->dev, "Attempted to sync while suspend\n");
return -ESHUTDOWN;
}
status = __spi_validate(spi, message);
if (status != 0)
return status;


@ -486,6 +486,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
case SC16IS7XX_TXLVL_REG:
case SC16IS7XX_RXLVL_REG:
case SC16IS7XX_IOSTATE_REG:
case SC16IS7XX_IOCONTROL_REG:
return true;
default:
break;
@ -1555,6 +1556,10 @@ static int sc16is7xx_probe(struct device *dev,
goto out_ports;
}
ret = uart_get_rs485_mode(&s->p[i].port);
if (ret)
goto out_ports;
/* Disable all interrupts */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */


@ -61,7 +61,7 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
"for config %d interface %d altsetting %d ep %d.\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
return;
@ -83,7 +83,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
size < USB_DT_SS_EP_COMP_SIZE) {
dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
" interface %d altsetting %d ep %d: "
"using minimum values\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
@ -109,13 +109,13 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
/* Check the various values */
if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
dev_notice(ddev, "Control endpoint with bMaxBurst = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bMaxBurst = 0;
} else if (desc->bMaxBurst > 15) {
dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
dev_notice(ddev, "Endpoint with bMaxBurst = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 15\n", desc->bMaxBurst,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
@ -125,7 +125,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
if ((usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)) &&
desc->bmAttributes != 0) {
dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
dev_notice(ddev, "%s endpoint with bmAttributes = %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to zero\n",
usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
@ -134,7 +134,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep->ss_ep_comp.bmAttributes = 0;
} else if (usb_endpoint_xfer_bulk(&ep->desc) &&
desc->bmAttributes > 16) {
dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
dev_notice(ddev, "Bulk endpoint with more than 65536 streams in "
"config %d interface %d altsetting %d ep %d: "
"setting to max\n",
cfgno, inum, asnum, ep->desc.bEndpointAddress);
@ -142,7 +142,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
!USB_SS_SSP_ISOC_COMP(desc->bmAttributes) &&
USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
dev_notice(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n",
USB_SS_MULT(desc->bmAttributes),
@ -160,7 +160,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
else
max_tx = 999999;
if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to %d\n",
usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
@ -273,7 +273,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
n = USB_DT_ENDPOINT_SIZE;
else {
dev_warn(ddev, "config %d interface %d altsetting %d has an "
dev_notice(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint descriptor of length %d, skipping\n",
cfgno, inum, asnum, d->bLength);
goto skip_to_next_endpoint_or_interface_descriptor;
@ -281,7 +281,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
if (i >= 16 || i == 0) {
dev_warn(ddev, "config %d interface %d altsetting %d has an "
dev_notice(ddev, "config %d interface %d altsetting %d has an "
"invalid endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
@ -293,7 +293,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
/* Check for duplicate endpoint addresses */
if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum, d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
}
@ -301,7 +301,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
/* Ignore some endpoints */
if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) {
if (usb_endpoint_is_ignored(udev, ifp, d)) {
dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
cfgno, inum, asnum,
d->bEndpointAddress);
goto skip_to_next_endpoint_or_interface_descriptor;
@ -378,7 +378,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
}
}
if (d->bInterval < i || d->bInterval > j) {
dev_warn(ddev, "config %d interface %d altsetting %d "
dev_notice(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X has an invalid bInterval %d, "
"changing to %d\n",
cfgno, inum, asnum,
@ -391,7 +391,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* them usable, we will try treating them as Interrupt endpoints.
*/
if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) {
dev_warn(ddev, "config %d interface %d altsetting %d "
dev_notice(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X is Bulk; changing to Interrupt\n",
cfgno, inum, asnum, d->bEndpointAddress);
endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
@ -408,7 +408,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
*/
maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
}
@ -439,7 +439,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
if (maxp > j) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
maxp = j;
endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
@ -452,7 +452,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
*/
if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) {
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
dev_notice(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
cfgno, inum, asnum, d->bEndpointAddress,
maxp);
@ -533,7 +533,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
i < intfc->num_altsetting;
(++i, ++alt)) {
if (alt->desc.bAlternateSetting == asnum) {
dev_warn(ddev, "Duplicate descriptor for config %d "
dev_notice(ddev, "Duplicate descriptor for config %d "
"interface %d altsetting %d, skipping\n",
cfgno, inum, asnum);
goto skip_to_next_interface_descriptor;
@ -559,7 +559,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
num_ep = num_ep_orig = alt->desc.bNumEndpoints;
alt->desc.bNumEndpoints = 0; /* Use as a counter */
if (num_ep > USB_MAXENDPOINTS) {
dev_warn(ddev, "too many endpoints for config %d interface %d "
dev_notice(ddev, "too many endpoints for config %d interface %d "
"altsetting %d: %d, using maximum allowed: %d\n",
cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS);
num_ep = USB_MAXENDPOINTS;
@ -590,7 +590,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
}
if (n != num_ep_orig)
dev_warn(ddev, "config %d interface %d altsetting %d has %d "
dev_notice(ddev, "config %d interface %d altsetting %d has %d "
"endpoint descriptor%s, different from the interface "
"descriptor's value: %d\n",
cfgno, inum, asnum, n, plural(n), num_ep_orig);
@ -625,7 +625,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
config->desc.bLength < USB_DT_CONFIG_SIZE ||
config->desc.bLength > size) {
dev_err(ddev, "invalid descriptor for config index %d: "
dev_notice(ddev, "invalid descriptor for config index %d: "
"type = 0x%X, length = %d\n", cfgidx,
config->desc.bDescriptorType, config->desc.bLength);
return -EINVAL;
@ -636,7 +636,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
size -= config->desc.bLength;
if (nintf > USB_MAXINTERFACES) {
dev_warn(ddev, "config %d has too many interfaces: %d, "
dev_notice(ddev, "config %d has too many interfaces: %d, "
"using maximum allowed: %d\n",
cfgno, nintf, USB_MAXINTERFACES);
nintf = USB_MAXINTERFACES;
@ -650,7 +650,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
(buffer2 += header->bLength, size2 -= header->bLength)) {
if (size2 < sizeof(struct usb_descriptor_header)) {
dev_warn(ddev, "config %d descriptor has %d excess "
dev_notice(ddev, "config %d descriptor has %d excess "
"byte%s, ignoring\n",
cfgno, size2, plural(size2));
break;
@ -658,7 +658,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
header = (struct usb_descriptor_header *) buffer2;
if ((header->bLength > size2) || (header->bLength < 2)) {
dev_warn(ddev, "config %d has an invalid descriptor "
dev_notice(ddev, "config %d has an invalid descriptor "
"of length %d, skipping remainder of the config\n",
cfgno, header->bLength);
break;
@ -670,7 +670,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
d = (struct usb_interface_descriptor *) header;
if (d->bLength < USB_DT_INTERFACE_SIZE) {
dev_warn(ddev, "config %d has an invalid "
dev_notice(ddev, "config %d has an invalid "
"interface descriptor of length %d, "
"skipping\n", cfgno, d->bLength);
continue;
@ -680,7 +680,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
n >= nintf_orig) {
dev_warn(ddev, "config %d has more interface "
dev_notice(ddev, "config %d has more interface "
"descriptors, than it declares in "
"bNumInterfaces, ignoring interface "
"number: %d\n", cfgno, inum);
@ -688,7 +688,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
}
if (inum >= nintf_orig)
dev_warn(ddev, "config %d has an invalid "
dev_notice(ddev, "config %d has an invalid "
"interface number: %d but max is %d\n",
cfgno, inum, nintf_orig - 1);
@ -713,14 +713,14 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
d = (struct usb_interface_assoc_descriptor *)header;
if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
dev_warn(ddev,
dev_notice(ddev,
"config %d has an invalid interface association descriptor of length %d, skipping\n",
cfgno, d->bLength);
continue;
}
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
dev_notice(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
@@ -731,7 +731,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
} else if (header->bDescriptorType == USB_DT_DEVICE ||
header->bDescriptorType == USB_DT_CONFIG)
dev_warn(ddev, "config %d contains an unexpected "
dev_notice(ddev, "config %d contains an unexpected "
"descriptor of type 0x%X, skipping\n",
cfgno, header->bDescriptorType);
@@ -740,11 +740,11 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);
if (n != nintf)
dev_warn(ddev, "config %d has %d interface%s, different from "
dev_notice(ddev, "config %d has %d interface%s, different from "
"the descriptor's value: %d\n",
cfgno, n, plural(n), nintf_orig);
else if (n == 0)
dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
config->desc.bNumInterfaces = nintf = n;
/* Check for missing interface numbers */
@@ -754,7 +754,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
break;
}
if (j >= nintf)
dev_warn(ddev, "config %d has no interface number "
dev_notice(ddev, "config %d has no interface number "
"%d\n", cfgno, i);
}
@@ -762,7 +762,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
for (i = 0; i < nintf; ++i) {
j = nalts[i];
if (j > USB_MAXALTSETTING) {
dev_warn(ddev, "too many alternate settings for "
dev_notice(ddev, "too many alternate settings for "
"config %d interface %d: %d, "
"using maximum allowed: %d\n",
cfgno, inums[i], j, USB_MAXALTSETTING);
@@ -811,7 +811,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
break;
}
if (n >= intfc->num_altsetting)
dev_warn(ddev, "config %d interface %d has no "
dev_notice(ddev, "config %d interface %d has no "
"altsetting %d\n", cfgno, inums[i], j);
}
}
@@ -868,7 +868,7 @@ int usb_get_configuration(struct usb_device *dev)
int result;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
dev_notice(ddev, "too many configurations: %d, "
"using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
}
@@ -902,7 +902,7 @@ int usb_get_configuration(struct usb_device *dev)
"descriptor/%s: %d\n", cfgno, "start", result);
if (result != -EPIPE)
goto err;
dev_err(ddev, "chopping to %d config(s)\n", cfgno);
dev_notice(ddev, "chopping to %d config(s)\n", cfgno);
dev->descriptor.bNumConfigurations = cfgno;
break;
} else if (result < 4) {
@@ -934,7 +934,7 @@ int usb_get_configuration(struct usb_device *dev)
goto err;
}
if (result < length) {
dev_warn(ddev, "config index %d descriptor too short "
dev_notice(ddev, "config index %d descriptor too short "
"(expected %i, got %i)\n", cfgno, length, result);
length = result;
}
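
The hunks above consistently demote messages about malformed or out-of-spec descriptors from dev_warn()/dev_err() to dev_notice(), on the reasoning that these conditions are provoked by the attached device rather than by a kernel fault. A minimal, hedged sketch of the same pattern follows; example_parse_desc() and EXAMPLE_MIN_DESC_LEN are illustrative names only and are not taken from the patch.

#include <linux/device.h>
#include <linux/errno.h>

#define EXAMPLE_MIN_DESC_LEN 7	/* hypothetical minimum descriptor size */

static int example_parse_desc(struct device *ddev, int len)
{
	if (len < EXAMPLE_MIN_DESC_LEN) {
		/* The device supplied the bad data, so log a notice,
		 * not a warning. */
		dev_notice(ddev, "descriptor too short: %d, skipping\n", len);
		return -EINVAL;
	}
	return 0;
}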
@@ -993,7 +993,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
/* Get BOS descriptor */
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) {
dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n");
dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n");
if (ret >= 0)
ret = -ENOMSG;
kfree(bos);
@@ -1021,7 +1021,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
if (ret < total_len) {
dev_err(ddev, "unable to get BOS descriptor set\n");
dev_notice(ddev, "unable to get BOS descriptor set\n");
if (ret >= 0)
ret = -ENOMSG;
goto err;
@@ -1046,8 +1046,8 @@ int usb_get_bos_descriptor(struct usb_device *dev)
}
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_warn(ddev, "descriptor type invalid, skip\n");
continue;
dev_notice(ddev, "descriptor type invalid, skip\n");
goto skip_to_next_descriptor;
}
switch (cap_type) {
@@ -1081,6 +1081,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
break;
}
skip_to_next_descriptor:
total_len -= length;
buffer += length;
}
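
The BOS hunk replaces a bare continue with a jump to skip_to_next_descriptor, so the parser still advances past a capability descriptor it is ignoring. A hedged sketch of that advance-before-skip loop invariant is below; the record layout (a length byte followed by a type byte) and the 0x10 type value are illustrative, not the BOS wire format.

#include <linux/types.h>

static void example_walk_records(const u8 *buf, int total_len)
{
	while (total_len >= 2) {
		u8 len  = buf[0];
		u8 type = buf[1];

		if (len < 2 || len > total_len)
			break;			/* malformed record: stop parsing */

		if (type != 0x10)		/* unrecognized record: skip it */
			goto next_record;

		/* ... handle a recognized record here ... */

next_record:
		/* advance unconditionally so skipping can never loop forever */
		total_len -= len;
		buf += len;
	}
}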


@@ -219,7 +219,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
bool of_match;
if (usb_disabled())
return -ENODEV;
@@ -340,16 +340,23 @@ static int xhci_plat_probe(struct platform_device *pdev)
&xhci->imod_interval);
}
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
goto disable_clk;
hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(hcd->usb_phy);
if (ret)
goto disable_clk;
/*
* Drivers such as dwc3 manage PHYs themselves (and rely on driver name
* matching for the xhci platform device).
*/
of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
if (of_match) {
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
ret = PTR_ERR(hcd->usb_phy);
if (ret == -EPROBE_DEFER)
goto disable_clk;
hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(hcd->usb_phy);
if (ret)
goto disable_clk;
}
}
ret = xhci_vendor_init(xhci);
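
This hunk makes the legacy "usb-phy" lookup conditional on the xhci platform device having been bound through its OF match table, since parents such as dwc3 create the device by name and handle the PHYs themselves. A hedged sketch of the gating pattern follows; example_probe() and the "example-phy" phandle name are hypothetical and not part of the driver.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static int example_probe(struct platform_device *pdev)
{
	struct usb_phy *phy = NULL;

	if (of_match_device(pdev->dev.driver->of_match_table, &pdev->dev)) {
		/* Bound via DT: the node may describe a legacy PHY. */
		phy = devm_usb_get_phy_by_phandle(&pdev->dev, "example-phy", 0);
		if (IS_ERR(phy)) {
			if (PTR_ERR(phy) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			phy = NULL;	/* no PHY described; not an error */
		}
	}
	/* If bound by name (e.g. created by a parent such as dwc3),
	 * PHY handling is left entirely to that parent driver. */
	dev_info(&pdev->dev, "legacy PHY %sfound\n", phy ? "" : "not ");
	return 0;
}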


@@ -231,7 +231,7 @@ struct sti_rom_font {
u8 height;
u8 font_type; /* language type */
u8 bytes_per_char;
u32 next_font;
s32 next_font; /* note: signed int */
u8 underline_height;
u8 underline_pos;
u8 res008[2];
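
The struct change makes next_font a signed 32-bit value. A hedged illustration of why signedness matters when such a field is applied as a byte offset on a 64-bit kernel: an s32 sign-extends, while the same bit pattern read as a u32 becomes a huge positive offset. The base pointer and helper names below are arbitrary and not taken from the STI code.

#include <linux/types.h>

static void *apply_offset_signed(void *base, s32 off)
{
	return (char *)base + off;	/* -16 really moves 16 bytes backwards */
}

static void *apply_offset_unsigned(void *base, u32 off)
{
	return (char *)base + off;	/* 0xfffffff0 moves ~4 GiB forwards */
}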


@@ -1710,9 +1710,10 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
generic_handle_irq(irq);
}
static void __xen_evtchn_do_upcall(void)
int xen_evtchn_do_upcall(void)
{
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
int cpu = smp_processor_id();
struct evtchn_loop_ctrl ctrl = { 0 };
@@ -1744,25 +1745,10 @@ static void __xen_evtchn_do_upcall(void)
* above.
*/
__this_cpu_inc(irq_epoch);
return ret;
}
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
__xen_evtchn_do_upcall();
irq_exit();
set_irq_regs(old_regs);
}
void xen_hvm_evtchn_do_upcall(void)
{
__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)


@@ -64,14 +64,13 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
{
xen_hvm_evtchn_do_upcall();
return IRQ_HANDLED;
return xen_evtchn_do_upcall();
}
static int xen_allocate_irq(struct pci_dev *pdev)
{
return request_irq(pdev->irq, do_hvm_evtchn_intr,
IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
IRQF_NOBALANCING | IRQF_SHARED,
"xen-platform-pci", pdev);
}
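
These two hunks have the Xen upcall report whether an event was actually pending and register the platform-pci interrupt with IRQF_SHARED. On a shared line, each handler must return IRQ_NONE when the interrupt was not its own so the core can detect genuinely spurious interrupts. A hedged sketch of that contract, with a hypothetical example_dev standing in for the Xen vcpu_info bookkeeping:

#include <linux/interrupt.h>
#include <linux/types.h>

struct example_dev {
	bool pending;	/* stands in for "did my device raise this IRQ?" */
};

static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	if (!dev->pending)
		return IRQ_NONE;	/* another device sharing the line */

	dev->pending = false;
	/* ... service the device here ... */
	return IRQ_HANDLED;
}

/* Registered with IRQF_SHARED, mirroring the platform-pci change:
 *   request_irq(irq, example_intr, IRQF_SHARED, "example", dev);
 */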


@@ -3493,6 +3493,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_alloc;
}
btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
/*
* Verify the type first, if that or the checksum value are
* corrupted, we'll find out

Some files were not shown because too many files have changed in this diff.