Merge 6.1.29 into android14-6.1-lts

Changes in 6.1.29
	USB: dwc3: gadget: drop dead hibernation code
	usb: dwc3: gadget: Execute gadget stop after halting the controller
	drm/vmwgfx: Remove explicit and broken vblank handling
	drm/vmwgfx: Fix Legacy Display Unit atomic drm support
	crypto: ccp - Clear PSP interrupt status register before calling handler
	perf/x86/core: Zero @lbr instead of returning -1 in x86_perf_get_lbr() stub
	KVM: x86: Track supported PERF_CAPABILITIES in kvm_caps
	KVM: x86/pmu: Disallow legacy LBRs if architectural LBRs are available
	mtd: spi-nor: spansion: Remove NO_SFDP_FLAGS from s28hs512t info
	mtd: spi-nor: add SFDP fixups for Quad Page Program
	mtd: spi-nor: Add a RWW flag
	mtd: spi-nor: spansion: Enable JFFS2 write buffer for Infineon s28hx SEMPER flash
	qcom: llcc/edac: Support polling mode for ECC handling
	soc: qcom: llcc: Do not create EDAC platform device on SDM845
	mailbox: zynq: Switch to flexible array to simplify code
	mailbox: zynqmp: Fix counts of child nodes
	mtd: spi-nor: spansion: Enable JFFS2 write buffer for Infineon s25hx SEMPER flash
	fs/ntfs3: Fix null-ptr-deref on inode->i_op in ntfs_lookup()
	drm/amd/display: Ext displays with dock can't recognized after resume
	KVM: x86/mmu: Avoid indirect call for get_cr3
	KVM: x86: Do not unload MMU roots when only toggling CR0.WP with TDP enabled
	KVM: x86: Make use of kvm_read_cr*_bits() when testing bits
	KVM: VMX: Make CR0.WP a guest owned bit
	KVM: x86/mmu: Refresh CR0.WP prior to checking for emulated permission faults
	ASoC: Intel: soc-acpi-byt: Fix "WM510205" match no longer working
	scsi: qedi: Fix use after free bug in qedi_remove()
	drm/amd/display: Remove FPU guards from the DML folder
	drm/amd/display: Add missing WA and MCLK validation
	drm/amd/display: Return error code on DSC atomic check failure
	drm/amd/display: Fixes for dcn32_clk_mgr implementation
	drm/amd/display: Reset OUTBOX0 r/w pointer on DMUB reset
	drm/amd/display: Do not clear GPINT register when releasing DMUB from reset
	drm/amd/display: Update bounding box values for DCN321
	ixgbe: Fix panic during XDP_TX with > 64 CPUs
	octeonxt2-af: mcs: Fix per port bypass config
	octeontx2-af: mcs: Write TCAM_DATA and TCAM_MASK registers at once
	octeontx2-af: mcs: Config parser to skip 8B header
	octeontx2-af: mcs: Fix MCS block interrupt
	octeontx2-pf: mcs: Fix NULL pointer dereferences
	octeontx2-pf: mcs: Match macsec ethertype along with DMAC
	octeontx2-pf: mcs: Clear stats before freeing resource
	octeontx2-pf: mcs: Fix shared counters logic
	octeontx2-pf: mcs: Do not reset PN while updating secy
	net/ncsi: clear Tx enable mode when handling a Config required AEN
	tcp: fix skb_copy_ubufs() vs BIG TCP
	net/sched: cls_api: remove block_cb from driver_list before freeing
	sit: update dev->needed_headroom in ipip6_tunnel_bind_dev()
	selftests: srv6: make srv6_end_dt46_l3vpn_test more robust
	net: ipv6: fix skb hash for some RST packets
	net: dsa: mv88e6xxx: add mv88e6321 rsvd2cpu
	writeback: fix call of incorrect macro
	block: Skip destroyed blkg when restart in blkg_destroy_all()
	watchdog: dw_wdt: Fix the error handling path of dw_wdt_drv_probe()
	RISC-V: mm: Enable huge page support to kernel_page_present() function
	i2c: tegra: Fix PEC support for SMBUS block read
	net/sched: act_mirred: Add carrier check
	r8152: fix flow control issue of RTL8156A
	r8152: fix the poor throughput for 2.5G devices
	r8152: move setting r8153b_rx_agg_chg_indicate()
	sfc: Fix module EEPROM reporting for QSFP modules
	rxrpc: Fix hard call timeout units
	riscv: compat_syscall_table: Fixup compile warning
	drm/i915/mtl: Add the missing CPU transcoder mask in intel_device_info
	selftests: netfilter: fix libmnl pkg-config usage
	octeontx2-af: Secure APR table update with the lock
	octeontx2-af: Fix start and end bit for scan config
	octeontx2-af: Fix depth of cam and mem table.
	octeontx2-pf: Increase the size of dmac filter flows
	octeontx2-af: Allow mkex profile without DMAC and add L2M/L2B header extraction support
	octeontx2-pf: Add additional checks while configuring ucast/bcast/mcast rules
	octeontx2-af: Update/Fix NPC field hash extract feature
	octeontx2-af: Fix issues with NPC field hash extract
	octeontx2-af: Skip PFs if not enabled
	octeontx2-pf: Disable packet I/O for graceful exit
	octeontx2-vf: Detach LF resources on probe cleanup
	ionic: remove noise from ethtool rxnfc error msg
	ethtool: Fix uninitialized number of lanes
	ionic: catch failure from devlink_alloc
	af_packet: Don't send zero-byte data in packet_sendmsg_spkt().
	drm/amdgpu: add a missing lock for AMDGPU_SCHED
	ALSA: caiaq: input: Add error handling for unsupported input methods in `snd_usb_caiaq_input_init`
	KVM: s390: fix race in gmap_make_secure()
	net: dsa: mt7530: fix corrupt frames using trgmii on 40 MHz XTAL MT7621
	net: dsa: mt7530: split-off common parts from mt7531_setup
	net: dsa: mt7530: fix network connectivity with multiple CPU ports
	ice: block LAN in case of VF to VF offload
	virtio_net: suppress cpu stall when free_unused_bufs
	net: enetc: check the index of the SFI rather than the handle
	perf record: Fix "read LOST count failed" msg with sample read
	perf scripts intel-pt-events.py: Fix IPC output for Python 2
	perf vendor events s390: Remove UTF-8 characters from JSON file
	perf tests record_offcpu.sh: Fix redirection of stderr to stdin
	perf ftrace: Make system wide the default target for latency subcommand
	perf vendor events power9: Remove UTF-8 characters from JSON files
	perf pmu: zfree() expects a pointer to a pointer to zero it after freeing its contents
	perf map: Delete two variable initialisations before null pointer checks in sort__sym_from_cmp()
	perf cs-etm: Fix timeless decode mode detection
	crypto: sun8i-ss - Fix a test in sun8i_ss_setup_ivs()
	crypto: api - Add scaffolding to change completion function signature
	crypto: engine - Use crypto_request_complete
	crypto: engine - fix crypto_queue backlog handling
	perf symbols: Fix return incorrect build_id size in elf_read_build_id()
	perf tracepoint: Fix memory leak in is_valid_tracepoint()
	perf stat: Separate bperf from bpf_profiler
	RISC-V: take text_mutex during alternative patching
	RISC-V: fix taking the text_mutex twice during sifive errata patching
	x86/retbleed: Fix return thunk alignment
	btrfs: fix btrfs_prev_leaf() to not return the same key twice
	btrfs: zoned: fix wrong use of bitops API in btrfs_ensure_empty_zones
	btrfs: properly reject clear_cache and v1 cache for block-group-tree
	btrfs: fix assertion of exclop condition when starting balance
	btrfs: fix encoded write i_size corruption with no-holes
	btrfs: don't free qgroup space unless specified
	btrfs: zero the buffer before marking it dirty in btrfs_redirty_list_add
	btrfs: make clear_cache mount option to rebuild FST without disabling it
	btrfs: print-tree: parent bytenr must be aligned to sector size
	btrfs: fix space cache inconsistency after error loading it from disk
	btrfs: zoned: zone finish data relocation BG with last IO
	btrfs: zoned: fix full zone super block reading on ZNS
	cifs: fix pcchunk length type in smb2_copychunk_range
	cifs: release leases for deferred close handles when freezing
	platform/x86/intel-uncore-freq: Return error on write frequency
	platform/x86: touchscreen_dmi: Add upside-down quirk for GDIX1002 ts on the Juno Tablet
	platform/x86: thinkpad_acpi: Fix platform profiles on T490
	platform/x86: touchscreen_dmi: Add info for the Dexp Ursus KX210i
	platform/x86: thinkpad_acpi: Add profile force ability
	inotify: Avoid reporting event with invalid wd
	smb3: fix problem remounting a share after shutdown
	SMB3: force unmount was failing to close deferred close files
	sh: math-emu: fix macro redefined warning
	sh: mcount.S: fix build error when PRINTK is not enabled
	sh: init: use OF_EARLY_FLATTREE for early init
	sh: nmi_debug: fix return value of __setup handler
	proc_sysctl: update docs for __register_sysctl_table()
	proc_sysctl: enhance documentation
	remoteproc: stm32: Call of_node_put() on iteration error
	remoteproc: st: Call of_node_put() on iteration error
	remoteproc: imx_dsp_rproc: Call of_node_put() on iteration error
	remoteproc: imx_rproc: Call of_node_put() on iteration error
	remoteproc: rcar_rproc: Call of_node_put() on iteration error
	sysctl: clarify register_sysctl_init() base directory order
	ARM: dts: aspeed: asrock: Correct firmware flash SPI clocks
	ARM: dts: exynos: fix WM8960 clock name in Itop Elite
	ARM: dts: s5pv210: correct MIPI CSIS clock name
	ARM: dts: aspeed: romed8hm3: Fix GPIO polarity of system-fault LED
	drm/msm/adreno: fix runtime PM imbalance at gpu load
	drm/bridge: lt8912b: Fix DSI Video Mode
	drm/i915/color: Fix typo for Plane CSC indexes
	drm/msm: fix NULL-deref on snapshot tear down
	drm/msm: fix NULL-deref on irq uninstall
	drm/msm: fix drm device leak on bind errors
	drm/msm: fix vram leak on bind errors
	drm/msm: fix workqueue leak on bind errors
	drm/i915/dsi: Use unconditional msleep() instead of intel_dsi_msleep()
	f2fs: fix null pointer panic in tracepoint in __replace_atomic_write_block
	f2fs: fix potential corruption when moving a directory
	irqchip/loongson-pch-pic: Fix pch_pic_acpi_init calling
	irqchip/loongson-eiointc: Fix returned value on parsing MADT
	drm/panel: otm8009a: Set backlight parent to panel device
	drm/amd/display: Add NULL plane_state check for cursor disable logic
	drm/amd/display: Fix 4to1 MPC black screen with DPP RCO
	drm/amd/display: filter out invalid bits in pipe_fuses
	drm/amd/display: fix flickering caused by S/G mode
	drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v10_0_hw_fini
	drm/amdgpu: fix an amdgpu_irq_put() issue in gmc_v9_0_hw_fini()
	drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v11_0_hw_fini
	drm/amdgpu/gfx: disable gfx9 cp_ecc_error_irq only when enabling legacy gfx ras
	drm/amdgpu/jpeg: Remove harvest checking for JPEG3
	drm/amdgpu: change gfx 11.0.4 external_id range
	drm/amdgpu: Fix vram recover doesn't work after whole GPU reset (v2)
	drm/amd/display: Enforce 60us prefetch for 200Mhz DCFCLK modes
	drm/amd/pm: parse pp_handle under appropriate conditions
	drm/amdgpu: disable sdma ecc irq only when sdma RAS is enabled in suspend
	drm/amd/pm: avoid potential UBSAN issue on legacy asics
	drm/amdgpu: remove deprecated MES version vars
	drm/amd: Load MES microcode during early_init
	drm/amd: Add a new helper for loading/validating microcode
	drm/amd: Use `amdgpu_ucode_*` helpers for MES
	HID: wacom: Set a default resolution for older tablets
	HID: wacom: insert timestamp to packed Bluetooth (BT) events
	fs/ntfs3: Refactoring of various minor issues
	drm/msm/adreno: adreno_gpu: Use suspend() instead of idle() on load error
	f2fs: specify extent cache for read explicitly
	f2fs: move internal functions into extent_cache.c
	f2fs: remove unnecessary __init_extent_tree
	f2fs: refactor extent_cache to support for read and more
	f2fs: allocate the extent_cache by default
	f2fs: factor out victim_entry usage from general rb_tree use
	drm/msm/adreno: Simplify read64/write64 helpers
	drm/msm: Hangcheck progress detection
	drm/msm: fix missing wq allocation error handling
	irqchip/loongarch: Adjust acpi_cascade_irqdomain_init() and sub-routines
	irqchip/loongson-eiointc: Fix incorrect use of acpi_get_vec_parent
	irqchip/loongson-eiointc: Fix registration of syscore_ops
	wifi: rtw88: rtw8821c: Fix rfe_option field width
	drm/i915/mtl: update scaler source and destination limits for MTL
	drm/i915: Check pipe source size when using skl+ scalers
	drm/amd/display: Refactor eDP PSR codes
	drm/amd/display: Add Z8 allow states to z-state support list
	drm/amd/display: Add debug option to skip PSR CRTC disable
	drm/amd/display: Fix Z8 support configurations
	drm/amd/display: Add minimum Z8 residency debug option
	drm/amd/display: Update minimum stutter residency for DCN314 Z8
	drm/amd/display: Lowering min Z8 residency time
	ASoC: rt1318: Add RT1318 SDCA vendor-specific driver
	ASoC: codecs: constify static sdw_slave_ops struct
	ASoC: codecs: wcd938x: fix accessing regmap on unattached devices
	drm/amd/display: Update Z8 watermarks for DCN314
	drm/amd/display: Update Z8 SR exit/enter latencies
	drm/amd/display: Change default Z8 watermark values
	ksmbd: Implements sess->ksmbd_chann_list as xarray
	ksmbd: fix racy issue from session setup and logoff
	ksmbd: destroy expired sessions
	ksmbd: block asynchronous requests when making a delay on session setup
	ksmbd: fix racy issue from smb2 close and logoff with multichannel
	drm: Add missing DP DSC extended capability definitions.
	drm/dsc: fix drm_edp_dsc_sink_output_bpp() DPCD high byte usage
	locking/rwsem: Add __always_inline annotation to __down_read_common() and inlined callers
	ext4: fix WARNING in mb_find_extent
	ext4: avoid a potential slab-out-of-bounds in ext4_group_desc_csum
	ext4: fix data races when using cached status extents
	ext4: check iomap type only if ext4_iomap_begin() does not fail
	ext4: improve error recovery code paths in __ext4_remount()
	ext4: improve error handling from ext4_dirhash()
	ext4: fix deadlock when converting an inline directory in nojournal mode
	ext4: add bounds checking in get_max_inline_xattr_value_size()
	ext4: bail out of ext4_xattr_ibody_get() fails for any reason
	ext4: fix lockdep warning when enabling MMP
	ext4: remove a BUG_ON in ext4_mb_release_group_pa()
	ext4: fix invalid free tracking in ext4_xattr_move_to_block()
	drm/dsc: fix DP_DSC_MAX_BPP_DELTA_* macro values
	f2fs: fix to do sanity check on extent cache correctly
	f2fs: inode: fix to do sanity check on extent cache correctly
	x86/amd_nb: Add PCI ID for family 19h model 78h
	x86: fix clear_user_rep_good() exception handling annotation
	spi: fsl-spi: Re-organise transfer bits_per_word adaptation
	spi: fsl-cpm: Use 16 bit mode for large transfers with even size
	drm/amd/display: Fix hang when skipping modeset
	Linux 6.1.29

Change-Id: I576de3e4ff6a12decefda8ca0014ca600da837dd
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 51b8218413
Greg Kroah-Hartman, 2023-06-09 12:15:07 +00:00
262 changed files with 4741 additions and 2455 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 28
SUBLEVEL = 29
EXTRAVERSION =
NAME = Curry Ramen


@ -63,7 +63,7 @@ flash@0 {
status = "okay";
m25p,fast-read;
label = "bmc";
spi-max-frequency = <100000000>; /* 100 MHz */
spi-max-frequency = <50000000>; /* 50 MHz */
#include "openbmc-flash-layout.dtsi"
};
};


@ -31,7 +31,7 @@ heartbeat {
};
system-fault {
gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_HIGH>;
panic-indicator;
};
};
@ -51,7 +51,7 @@ flash@0 {
status = "okay";
m25p,fast-read;
label = "bmc";
spi-max-frequency = <100000000>; /* 100 MHz */
spi-max-frequency = <50000000>; /* 50 MHz */
#include "openbmc-flash-layout-64.dtsi"
};
};


@ -182,7 +182,7 @@ codec: audio-codec@1a {
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&pmu_system_controller 0>;
clock-names = "MCLK1";
clock-names = "mclk";
wlf,shared-lrclk;
#sound-dai-cells = <0>;
};


@ -566,7 +566,7 @@ csis0: csis@fa600000 {
interrupts = <29>;
clocks = <&clocks CLK_CSIS>,
<&clocks SCLK_CSIS>;
clock-names = "clk_csis",
clock-names = "csis",
"sclk_csis";
bus-width = <4>;
status = "disabled";


@ -4,6 +4,7 @@
*/
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bug.h>
@ -107,7 +108,9 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
tmp = (1U << alt->errata_id);
if (cpu_req_errata & tmp) {
mutex_lock(&text_mutex);
patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
mutex_unlock(&text_mutex);
cpu_apply_errata |= tmp;
}
}


@ -5,6 +5,7 @@
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>
@ -78,11 +79,14 @@ void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct al
tmp = (1U << alt->errata_id);
if (cpu_req_errata & tmp) {
/* On vm-alternatives, the mmu isn't running yet */
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
memcpy((void *)__pa_symbol(alt->old_ptr),
(void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
else
} else {
mutex_lock(&text_mutex);
patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
mutex_unlock(&text_mutex);
}
}
}


@ -9,6 +9,7 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_KEXEC
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)


@ -9,6 +9,7 @@
#include <linux/bitmap.h>
#include <linux/ctype.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/alternative.h>
@ -316,8 +317,11 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
}
tmp = (1U << alt->errata_id);
if (cpu_req_feature & tmp)
if (cpu_req_feature & tmp) {
mutex_lock(&text_mutex);
patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
mutex_unlock(&text_mutex);
}
}
}
#endif


@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page)
pgd = pgd_offset_k(addr);
if (!pgd_present(*pgd))
return false;
if (pgd_leaf(*pgd))
return true;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return false;
if (p4d_leaf(*p4d))
return true;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
return false;
if (pud_leaf(*pud))
return true;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
return false;
if (pmd_leaf(*pmd))
return true;
pte = pte_offset_kernel(pmd, addr);
return pte_present(*pte);


@ -192,21 +192,10 @@ static int expected_page_refs(struct page *page)
return res;
}
static int make_secure_pte(pte_t *ptep, unsigned long addr,
struct page *exp_page, struct uv_cb_header *uvcb)
static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
pte_t entry = READ_ONCE(*ptep);
struct page *page;
int expected, cc = 0;
if (!pte_present(entry))
return -ENXIO;
if (pte_val(entry) & _PAGE_INVALID)
return -ENXIO;
page = pte_page(entry);
if (page != exp_page)
return -ENXIO;
if (PageWriteback(page))
return -EAGAIN;
expected = expected_page_refs(page);
@ -297,17 +286,18 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
goto out;
rc = -ENXIO;
page = follow_page(vma, uaddr, FOLL_WRITE);
if (IS_ERR_OR_NULL(page))
goto out;
lock_page(page);
ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
if (should_export_before_import(uvcb, gmap->mm))
uv_convert_from_secure(page_to_phys(page));
rc = make_secure_pte(ptep, uaddr, page, uvcb);
if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
page = pte_page(*ptep);
rc = -EAGAIN;
if (trylock_page(page)) {
if (should_export_before_import(uvcb, gmap->mm))
uv_convert_from_secure(page_to_phys(page));
rc = make_page_secure(page, uvcb);
unlock_page(page);
}
}
pte_unmap_unlock(ptep, ptelock);
unlock_page(page);
out:
mmap_read_unlock(gmap->mm);


@ -15,7 +15,7 @@ config SH_STANDARD_BIOS
config STACK_DEBUG
bool "Check for stack overflows"
depends on DEBUG_KERNEL
depends on DEBUG_KERNEL && PRINTK
help
This option will cause messages to be printed if free stack space
drops below a certain limit. Saying Y here will add overhead to


@ -64,7 +64,7 @@ ENTRY(_stext)
ldc r0, r6_bank
#endif
#ifdef CONFIG_OF_FLATTREE
#ifdef CONFIG_OF_EARLY_FLATTREE
mov r4, r12 ! Store device tree blob pointer in r12
#endif
@ -315,7 +315,7 @@ ENTRY(_stext)
10:
#endif
#ifdef CONFIG_OF_FLATTREE
#ifdef CONFIG_OF_EARLY_FLATTREE
mov.l 8f, r0 ! Make flat device tree available early.
jsr @r0
mov r12, r4
@ -346,7 +346,7 @@ ENTRY(stack_start)
5: .long start_kernel
6: .long cpu_init
7: .long init_thread_union
#if defined(CONFIG_OF_FLATTREE)
#if defined(CONFIG_OF_EARLY_FLATTREE)
8: .long sh_fdt_init
#endif


@ -49,7 +49,7 @@ static int __init nmi_debug_setup(char *str)
register_die_notifier(&nmi_debug_nb);
if (*str != '=')
return 0;
return 1;
for (p = str + 1; *p; p = sep + 1) {
sep = strchr(p, ',');
@ -70,6 +70,6 @@ static int __init nmi_debug_setup(char *str)
break;
}
return 0;
return 1;
}
__setup("nmi_debug", nmi_debug_setup);
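For context, a minimal sketch of the __setup() return convention this hunk restores (the handler and option names below are hypothetical, not from the patch): returning 1 tells the early-param parser the option was consumed, while returning 0 lets an unrecognized "nmi_debug=..." string fall through into init's argument or environment list.

/* Hypothetical handler illustrating the convention; not driver code. */
static int __init example_opt_setup(char *str)
{
	if (*str != '=')
		return 1;	/* consumed: option given without a value */
	/* parse str + 1 here ... */
	return 1;	/* consumed: don't pass "example_opt" on to init */
}
__setup("example_opt", example_opt_setup);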


@ -244,7 +244,7 @@ void __init __weak plat_early_device_setup(void)
{
}
#ifdef CONFIG_OF_FLATTREE
#ifdef CONFIG_OF_EARLY_FLATTREE
void __ref sh_fdt_init(phys_addr_t dt_phys)
{
static int done = 0;
@ -326,7 +326,7 @@ void __init setup_arch(char **cmdline_p)
/* Let earlyprintk output early console messages */
sh_early_platform_driver_probe("earlyprintk", 1, 1);
#ifdef CONFIG_OF_FLATTREE
#ifdef CONFIG_OF_EARLY_FLATTREE
#ifdef CONFIG_USE_BUILTIN_DTB
unflatten_and_copy_device_tree();
#else


@ -67,7 +67,3 @@
} while (0)
#define abort() return 0
#define __BYTE_ORDER __LITTLE_ENDIAN


@ -1603,10 +1603,8 @@ void __init intel_pmu_arch_lbr_init(void)
* x86_perf_get_lbr - get the LBR records information
*
* @lbr: the caller's memory to store the LBR records information
*
* Returns: 0 indicates the LBR info has been successfully obtained
*/
int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
int lbr_fmt = x86_pmu.intel_cap.lbr_format;
@ -1614,8 +1612,6 @@ int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
lbr->from = x86_pmu.lbr_from;
lbr->to = x86_pmu.lbr_to;
lbr->info = (lbr_fmt == LBR_FORMAT_INFO) ? x86_pmu.lbr_info : 0;
return 0;
}
EXPORT_SYMBOL_GPL(x86_perf_get_lbr);


@ -543,12 +543,12 @@ static inline void perf_check_microcode(void) { }
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
return -1;
memset(lbr, 0, sizeof(*lbr));
}
#endif


@ -36,6 +36,7 @@
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{}
};


@ -4,7 +4,7 @@
#include <linux/kvm_host.h>
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)


@ -113,6 +113,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
@ -153,6 +155,24 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_role.level);
}
static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu)
{
/*
* When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
* @mmu's snapshot of CR0.WP and thus all related paging metadata may
* be stale. Refresh CR0.WP and the metadata on-demand when checking
* for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing
* nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does
* need to refresh nested_mmu, a.k.a. the walker used to translate L2
* GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
*/
if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
return;
__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}
/*
* Check if a given access (described through the I/D, W/R and U/S bits of a
* page fault error code pfec) causes a permission fault with the given PTE
@ -184,8 +204,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
bool fault = (mmu->permissions[index] >> pte_access) & 1;
u32 errcode = PFERR_PRESENT_MASK;
bool fault;
kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
fault = (mmu->permissions[index] >> pte_access) & 1;
WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
if (unlikely(mmu->pkru_mask)) {


@ -232,6 +232,20 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
return regs;
}
static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
{
return kvm_read_cr3(vcpu);
}
static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu)
{
if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
return kvm_read_cr3(vcpu);
return mmu->get_guest_pgd(vcpu);
}
static inline bool kvm_available_flush_tlb_with_range(void)
{
return kvm_x86_ops.tlb_remote_flush_with_range;
@ -3661,7 +3675,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
int quadrant, i, r;
hpa_t root;
root_pgd = mmu->get_guest_pgd(vcpu);
root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
root_gfn = root_pgd >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
@ -4112,7 +4126,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
arch.token = alloc_apf_token(vcpu);
arch.gfn = gfn;
arch.direct_map = vcpu->arch.mmu->root_role.direct;
arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
return kvm_setup_async_pf(vcpu, cr2_or_gpa,
kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@ -4131,7 +4145,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
return;
if (!vcpu->arch.mmu->root_role.direct &&
work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
return;
kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
@ -4488,11 +4502,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
return kvm_read_cr3(vcpu);
}
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned int access)
{
@ -4996,6 +5005,21 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
return role;
}
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu)
{
const bool cr0_wp = !!kvm_read_cr0_bits(vcpu, X86_CR0_WP);
BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
if (is_cr0_wp(mmu) == cr0_wp)
return;
mmu->cpu_role.base.cr0_wp = cr0_wp;
reset_guest_paging_metadata(vcpu, mmu);
}
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
/* tdp_root_level is architecture forced level, use it if nonzero */
@ -5043,7 +5067,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
context->page_fault = kvm_tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
context->get_guest_pgd = get_cr3;
context->get_guest_pgd = get_guest_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
@ -5193,7 +5217,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
kvm_init_shadow_mmu(vcpu, cpu_role);
context->get_guest_pgd = get_cr3;
context->get_guest_pgd = get_guest_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
@ -5207,7 +5231,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
return;
g_context->cpu_role.as_u64 = new_mode.as_u64;
g_context->get_guest_pgd = get_cr3;
g_context->get_guest_pgd = get_guest_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;


@ -324,7 +324,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
walker->level = mmu->cpu_role.base.level;
pte = mmu->get_guest_pgd(vcpu);
pte = kvm_mmu_get_guest_pgd(vcpu, mmu);
have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
#if PTTYPE == 64


@ -418,9 +418,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
if (!pmc)
return 1;
if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) &&
(static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
(kvm_read_cr0(vcpu) & X86_CR0_PE))
(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
return 1;
*data = pmc_read_counter(pmc) & mask;


@ -2709,6 +2709,7 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
case MSR_IA32_PERF_CAPABILITIES:
msr->data = kvm_caps.supported_perf_cap;
return 0;
default:
return KVM_MSR_RET_INVALID;
@ -4888,6 +4889,7 @@ static __init void svm_set_cpu_caps(void)
{
kvm_set_cpu_caps();
kvm_caps.supported_perf_cap = 0;
kvm_caps.supported_xss = 0;
/* CPUID 0x80000001 and 0x8000000A (SVM features) */


@ -395,30 +395,6 @@ static inline bool vmx_pebs_supported(void)
return boot_cpu_has(X86_FEATURE_PEBS) && kvm_pmu_cap.pebs_ept;
}
static inline u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = PMU_CAP_FW_WRITES;
struct x86_pmu_lbr lbr;
u64 host_perf_cap = 0;
if (!enable_pmu)
return 0;
if (boot_cpu_has(X86_FEATURE_PDCM))
rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
if (x86_perf_get_lbr(&lbr) >= 0 && lbr.nr)
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}
return perf_cap;
}
static inline bool cpu_has_notify_vmexit(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &


@ -4460,7 +4460,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* CR0_GUEST_HOST_MASK is already set in the original vmcs01
* (KVM doesn't change it);
*/
vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmx_set_cr0(vcpu, vmcs12->host_cr0);
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
@ -4611,7 +4611,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
*/
vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);


@ -631,7 +631,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
pmu->fixed_counters[i].current_config = 0;
}
vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
lbr_desc->records.nr = 0;
lbr_desc->event = NULL;
lbr_desc->msr_passthrough = false;


@ -1879,7 +1879,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
return 1;
return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
case MSR_IA32_PERF_CAPABILITIES:
msr->data = vmx_get_perf_capabilities();
msr->data = kvm_caps.supported_perf_cap;
return 0;
default:
return KVM_MSR_RET_INVALID;
@ -2058,7 +2058,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
(host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
if ((vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT) &&
if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
(host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
@ -2371,14 +2371,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (data & PMU_CAP_LBR_FMT) {
if ((data & PMU_CAP_LBR_FMT) !=
(vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT))
(kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
return 1;
if (!cpuid_model_is_consistent(vcpu))
return 1;
}
if (data & PERF_CAP_PEBS_FORMAT) {
if ((data & PERF_CAP_PEBS_MASK) !=
(vmx_get_perf_capabilities() & PERF_CAP_PEBS_MASK))
(kvm_caps.supported_perf_cap & PERF_CAP_PEBS_MASK))
return 1;
if (!guest_cpuid_has(vcpu, X86_FEATURE_DS))
return 1;
@ -4695,7 +4695,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
/* 22.2.1, 20.8.1 */
vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
set_cr4_guest_host_mask(vmx);
@ -5417,7 +5417,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
break;
case 3: /* lmsw */
val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
kvm_lmsw(vcpu, val);
return kvm_skip_emulated_instruction(vcpu);
@ -7496,7 +7496,7 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cache = MTRR_TYPE_WRBACK;
else
@ -7702,6 +7702,33 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx_update_exception_bitmap(vcpu);
}
static u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = PMU_CAP_FW_WRITES;
struct x86_pmu_lbr lbr;
u64 host_perf_cap = 0;
if (!enable_pmu)
return 0;
if (boot_cpu_has(X86_FEATURE_PDCM))
rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
x86_perf_get_lbr(&lbr);
if (lbr.nr)
perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
}
if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}
return perf_cap;
}
static __init void vmx_set_cpu_caps(void)
{
kvm_set_cpu_caps();
@ -7724,6 +7751,7 @@ static __init void vmx_set_cpu_caps(void)
if (!enable_pmu)
kvm_cpu_cap_clear(X86_FEATURE_PDCM);
kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();
if (!enable_sgx) {
kvm_cpu_cap_clear(X86_FEATURE_SGX);


@ -640,6 +640,24 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
(1 << VCPU_EXREG_EXIT_INFO_1) | \
(1 << VCPU_EXREG_EXIT_INFO_2))
static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
{
unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
/*
* CR0.WP needs to be intercepted when KVM is shadowing legacy paging
* in order to construct shadow PTEs with the correct protections.
* Note! CR0.WP technically can be passed through to the guest if
* paging is disabled, but checking CR0.PG would generate a cyclical
* dependency of sorts due to forcing the caller to ensure CR0 holds
* the correct value prior to determining which CR0 bits can be owned
* by L1. Keep it simple and limit the optimization to EPT.
*/
if (!enable_ept)
bits &= ~X86_CR0_WP;
return bits;
}
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
return container_of(kvm, struct kvm_vmx, kvm);


@ -910,6 +910,18 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
/*
* CR0.WP is incorporated into the MMU role, but only for non-nested,
* indirect shadow MMUs. If TDP is enabled, the MMU's metadata needs
* to be updated, e.g. so that emulating guest translations does the
* right thing, but there's no need to unload the root as CR0.WP
* doesn't affect SPTEs.
*/
if (tdp_enabled && (cr0 ^ old_cr0) == X86_CR0_WP) {
kvm_init_mmu(vcpu);
return;
}
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);


@ -27,6 +27,7 @@ struct kvm_caps {
u64 supported_mce_cap;
u64 supported_xcr0;
u64 supported_xss;
u64 supported_perf_cap;
};
void kvm_spurious_fault(void);


@ -142,8 +142,8 @@ SYM_FUNC_START(clear_user_rep_good)
and $7, %edx
jz .Lrep_good_exit
.Lrep_good_bytes:
mov %edx, %ecx
.Lrep_good_bytes:
rep stosb
.Lrep_good_exit:


@ -87,8 +87,8 @@ SYM_CODE_END(__x86_indirect_thunk_array)
*/
.align 64
.skip 63, 0xcc
SYM_FUNC_START_NOALIGN(zen_untrain_ret);
SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
ANNOTATE_NOENDBR
/*
* As executed from zen_untrain_ret, this is:
*


@ -468,6 +468,9 @@ static void blkg_destroy_all(struct gendisk *disk)
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
if (hlist_unhashed(&blkg->blkcg_node))
continue;
spin_lock(&blkcg->lock);
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);


@ -963,6 +963,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
struct crypto_async_request *request)
{
if (unlikely(queue->qlen >= queue->max_qlen))
queue->backlog = queue->backlog->prev;
queue->qlen++;
list_add(&request->list, &queue->list);
}
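A minimal user-space model of the invariant this one-line fix maintains (simplified types; it assumes, as in the kernel's struct crypto_queue, that backlog points at the first backlogged entry, or at the list sentinel when nothing is backlogged): when a request is re-queued at the front of an already-full queue, it becomes backlogged itself, so the backlog pointer must step back one node to keep covering it.

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

struct queue {
	struct list_head list;     /* sentinel */
	struct list_head *backlog; /* == &list when nothing is backlogged */
	unsigned int qlen, max_qlen;
};

static void enqueue_head(struct queue *q, struct list_head *req)
{
	/* The re-queued request jumps the line; if the queue is already
	 * at capacity it is itself backlogged, so widen the backlog
	 * window by one node (this mirrors the added check above). */
	if (q->qlen >= q->max_qlen)
		q->backlog = q->backlog->prev;
	q->qlen++;
	/* plain doubly-linked insertion at the head */
	req->next = q->list.next;
	req->prev = &q->list;
	q->list.next->prev = req;
	q->list.next = req;
}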


@ -54,7 +54,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
}
}
lockdep_assert_in_softirq();
req->complete(req, err);
crypto_request_complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
}
@ -129,9 +129,6 @@ static void crypto_pump_requests(struct crypto_engine *engine,
if (!engine->retry_support)
engine->cur_req = async_req;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
if (engine->busy)
was_busy = true;
else
@ -214,9 +211,12 @@ static void crypto_pump_requests(struct crypto_engine *engine,
}
req_err_2:
async_req->complete(async_req, ret);
crypto_request_complete(async_req, ret);
retry:
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
/* If retry mechanism is supported, send new requests to engine */
if (engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);


@ -151,7 +151,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
}
rctx->p_iv[i] = a;
/* we need to setup all others IVs only in the decrypt way */
if (rctx->op_dir & SS_ENCRYPTION)
if (rctx->op_dir == SS_ENCRYPTION)
return 0;
todo = min(len, sg_dma_len(sg));
len -= todo;


@ -42,6 +42,9 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
/* invoke subdevice interrupt handlers */
if (status) {
if (psp->sev_irq_handler)
@ -51,9 +54,6 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
/* Clear the interrupt status by writing the same value we read. */
iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
return IRQ_HANDLED;
}
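The reordering above follows a common pattern for write-1-to-clear status registers; a hedged sketch with made-up device, register, and dispatch names (not the ccp driver's actual API):

static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_dev *edev = data;	/* hypothetical device struct */
	u32 status = ioread32(edev->regs + EXAMPLE_INTSTS);	/* snapshot */

	/* Ack before dispatching: an event that fires while the
	 * sub-handlers run re-latches the (already cleared) bit and
	 * re-raises the interrupt, instead of being wiped afterwards. */
	iowrite32(status, edev->regs + EXAMPLE_INTSTS);

	if (status)
		example_dispatch(edev, status);	/* act on the snapshot */

	return IRQ_HANDLED;
}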


@ -76,6 +76,8 @@
#define DRP0_INTERRUPT_ENABLE BIT(6)
#define SB_DB_DRP_INTERRUPT_ENABLE 0x3
#define ECC_POLL_MSEC 5000
enum {
LLCC_DRAM_CE = 0,
LLCC_DRAM_UE,
@ -285,8 +287,7 @@ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
return ret;
}
static irqreturn_t
llcc_ecc_irq_handler(int irq, void *edev_ctl)
static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
{
struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
@ -332,6 +333,11 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
return irq_rc;
}
static void llcc_ecc_check(struct edac_device_ctl_info *edev_ctl)
{
llcc_ecc_irq_handler(0, edev_ctl);
}
static int qcom_llcc_edac_probe(struct platform_device *pdev)
{
struct llcc_drv_data *llcc_driv_data = pdev->dev.platform_data;
@ -359,30 +365,32 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
edev_ctl->ctl_name = "llcc";
edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
/* Check if LLCC driver has passed ECC IRQ */
ecc_irq = llcc_driv_data->ecc_irq;
if (ecc_irq > 0) {
/* Use interrupt mode if IRQ is available */
rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
if (!rc) {
edac_op_state = EDAC_OPSTATE_INT;
goto irq_done;
}
}
/* Fall back to polling mode otherwise */
edev_ctl->poll_msec = ECC_POLL_MSEC;
edev_ctl->edac_check = llcc_ecc_check;
edac_op_state = EDAC_OPSTATE_POLL;
irq_done:
rc = edac_device_add_device(edev_ctl);
if (rc)
goto out_mem;
if (rc) {
edac_device_free_ctl_info(edev_ctl);
return rc;
}
platform_set_drvdata(pdev, edev_ctl);
/* Request for ecc irq */
ecc_irq = llcc_driv_data->ecc_irq;
if (ecc_irq < 0) {
rc = -ENODEV;
goto out_dev;
}
rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
if (rc)
goto out_dev;
return rc;
out_dev:
edac_device_del_device(edev_ctl->dev);
out_mem:
edac_device_free_ctl_info(edev_ctl);
return rc;
}


@ -4483,7 +4483,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
dev_info(adev->dev, "recover vram bo from shadow start\n");
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
shadow = &vmbo->bo;
/* If vm is compute context or adev is APU, shadow will be NULL */
if (!vmbo->shadow)
continue;
shadow = vmbo->shadow;
/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||


@ -21,6 +21,8 @@
*
*/
#include <linux/firmware.h>
#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
@ -1423,3 +1425,60 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
kfree(vm);
return 0;
}
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
const struct mes_firmware_header_v1_0 *mes_hdr;
struct amdgpu_firmware_info *info;
char ucode_prefix[30];
char fw_name[40];
int r;
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
ucode_prefix,
pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
if (r)
goto out;
mes_hdr = (const struct mes_firmware_header_v1_0 *)
adev->mes.fw[pipe]->data;
adev->mes.uc_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
adev->mes.data_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
int ucode, ucode_data;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
ucode = AMDGPU_UCODE_ID_CP_MES;
ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
} else {
ucode = AMDGPU_UCODE_ID_CP_MES1;
ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
}
info = &adev->firmware.ucode[ucode];
info->ucode_id = ucode;
info->fw = adev->mes.fw[pipe];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
PAGE_SIZE);
info = &adev->firmware.ucode[ucode_data];
info->ucode_id = ucode_data;
info->fw = adev->mes.fw[pipe];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
PAGE_SIZE);
}
return 0;
out:
amdgpu_ucode_release(&adev->mes.fw[pipe]);
return r;
}


@ -91,14 +91,12 @@ struct amdgpu_mes {
struct amdgpu_bo *ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
uint64_t ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
uint32_t *ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
uint32_t ucode_fw_version[AMDGPU_MAX_MES_PIPES];
uint64_t uc_start_addr[AMDGPU_MAX_MES_PIPES];
/* mes ucode data */
struct amdgpu_bo *data_fw_obj[AMDGPU_MAX_MES_PIPES];
uint64_t data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
uint32_t *data_fw_ptr[AMDGPU_MAX_MES_PIPES];
uint32_t data_fw_version[AMDGPU_MAX_MES_PIPES];
uint64_t data_start_addr[AMDGPU_MAX_MES_PIPES];
/* eop gpu obj */
@ -308,6 +306,7 @@ struct amdgpu_mes_funcs {
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);


@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx_mgr *mgr;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return r;
}
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
idr_for_each_entry(&mgr->ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
mutex_unlock(&mgr->lock);
fdput(f);
return 0;


@ -1091,3 +1091,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
}
/*
* amdgpu_ucode_request - Fetch and validate amdgpu microcode
*
* @adev: amdgpu device
* @fw: pointer to load firmware to
* @fw_name: firmware to load
*
* This is a helper that will use request_firmware and amdgpu_ucode_validate
* to load and run basic validation on firmware. If the load fails, remap
* the error code to -ENODEV, so that early_init functions will fail to load.
*/
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
const char *fw_name)
{
int err = request_firmware(fw, fw_name, adev->dev);
if (err)
return -ENODEV;
err = amdgpu_ucode_validate(*fw);
if (err)
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
return err;
}
/*
* amdgpu_ucode_release - Release firmware microcode
*
* @fw: pointer to firmware to release
*/
void amdgpu_ucode_release(const struct firmware **fw)
{
release_firmware(*fw);
*fw = NULL;
}
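A hedged usage sketch for the pair of helpers above (the IP block, firmware path, and field names in this snippet are illustrative, not taken from the patch):

static int example_ip_early_init(struct amdgpu_device *adev)
{
	int r;

	/* Fetch and validate in one call; a load failure comes back as
	 * -ENODEV so early_init aborts cleanly. */
	r = amdgpu_ucode_request(adev, &adev->example.fw, "amdgpu/example.bin");
	if (r)
		return r;

	/* ... parse headers, fill adev->firmware.ucode[] entries ... */
	return 0;
}

static void example_ip_sw_fini(struct amdgpu_device *adev)
{
	/* Release and NULL the pointer so a later double release is safe. */
	amdgpu_ucode_release(&adev->example.fw);
}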


@ -543,6 +543,9 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
int amdgpu_ucode_validate(const struct firmware *fw);
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
const char *fw_name);
void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);


@ -3797,7 +3797,8 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);


@ -1142,7 +1142,6 @@ static int gmc_v10_0_hw_fini(void *handle)
return 0;
}
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;


@ -931,7 +931,6 @@ static int gmc_v11_0_hw_fini(void *handle)
return 0;
}
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v11_0_gart_disable(adev);


@ -1898,7 +1898,6 @@ static int gmc_v9_0_hw_fini(void *handle)
if (adev->mmhub.funcs->update_power_gating)
adev->mmhub.funcs->update_power_gating(adev, false);
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
return 0;


@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
break;
default:
harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);


@ -375,93 +375,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
.resume_gang = mes_v10_1_resume_gang,
};
static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
const char *chip_name;
char fw_name[30];
int err;
const struct mes_firmware_header_v1_0 *mes_hdr;
struct amdgpu_firmware_info *info;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 1, 10):
chip_name = "navi10";
break;
case IP_VERSION(10, 3, 0):
chip_name = "sienna_cichlid";
break;
default:
BUG();
}
if (pipe == AMDGPU_MES_SCHED_PIPE)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
chip_name);
err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
if (err)
return err;
err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
if (err) {
release_firmware(adev->mes.fw[pipe]);
adev->mes.fw[pipe] = NULL;
return err;
}
mes_hdr = (const struct mes_firmware_header_v1_0 *)
adev->mes.fw[pipe]->data;
adev->mes.ucode_fw_version[pipe] =
le32_to_cpu(mes_hdr->mes_ucode_version);
adev->mes.ucode_fw_version[pipe] =
le32_to_cpu(mes_hdr->mes_ucode_data_version);
adev->mes.uc_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
adev->mes.data_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
int ucode, ucode_data;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
ucode = AMDGPU_UCODE_ID_CP_MES;
ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
} else {
ucode = AMDGPU_UCODE_ID_CP_MES1;
ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
}
info = &adev->firmware.ucode[ucode];
info->ucode_id = ucode;
info->fw = adev->mes.fw[pipe];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
PAGE_SIZE);
info = &adev->firmware.ucode[ucode_data];
info->ucode_id = ucode_data;
info->fw = adev->mes.fw[pipe];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
PAGE_SIZE);
}
return 0;
}
static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
release_firmware(adev->mes.fw[pipe]);
adev->mes.fw[pipe] = NULL;
}
static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@ -1019,10 +932,6 @@ static int mes_v10_1_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = mes_v10_1_init_microcode(adev, pipe);
if (r)
return r;
r = mes_v10_1_allocate_eop_buf(adev, pipe);
if (r)
return r;
@ -1059,8 +968,7 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
mes_v10_1_free_microcode(adev, pipe);
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@ -1229,6 +1137,22 @@ static int mes_v10_1_resume(void *handle)
return amdgpu_mes_resume(adev);
}
static int mes_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
return r;
}
return 0;
}
static int mes_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -1241,6 +1165,7 @@ static int mes_v10_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
.name = "mes_v10_1",
.early_init = mes_v10_0_early_init,
.late_init = mes_v10_0_late_init,
.sw_init = mes_v10_1_sw_init,
.sw_fini = mes_v10_1_sw_fini,


@ -453,84 +453,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.misc_op = mes_v11_0_misc_op,
};
static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
char fw_name[30];
char ucode_prefix[30];
int err;
const struct mes_firmware_header_v1_0 *mes_hdr;
struct amdgpu_firmware_info *info;
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
if (pipe == AMDGPU_MES_SCHED_PIPE)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
ucode_prefix);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
ucode_prefix);
err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
if (err)
return err;
err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
if (err) {
release_firmware(adev->mes.fw[pipe]);
adev->mes.fw[pipe] = NULL;
return err;
}
mes_hdr = (const struct mes_firmware_header_v1_0 *)
adev->mes.fw[pipe]->data;
adev->mes.ucode_fw_version[pipe] =
le32_to_cpu(mes_hdr->mes_ucode_version);
adev->mes.ucode_fw_version[pipe] =
le32_to_cpu(mes_hdr->mes_ucode_data_version);
adev->mes.uc_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
adev->mes.data_start_addr[pipe] =
le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
int ucode, ucode_data;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
ucode = AMDGPU_UCODE_ID_CP_MES;
ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
} else {
ucode = AMDGPU_UCODE_ID_CP_MES1;
ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
}
info = &adev->firmware.ucode[ucode];
info->ucode_id = ucode;
info->fw = adev->mes.fw[pipe];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
PAGE_SIZE);
info = &adev->firmware.ucode[ucode_data];
info->ucode_id = ucode_data;
info->fw = adev->mes.fw[pipe];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
PAGE_SIZE);
}
return 0;
}
static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
release_firmware(adev->mes.fw[pipe]);
adev->mes.fw[pipe] = NULL;
}
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@ -1098,10 +1020,6 @@ static int mes_v11_0_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = mes_v11_0_init_microcode(adev, pipe);
if (r)
return r;
r = mes_v11_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@ -1138,8 +1056,7 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
mes_v11_0_free_microcode(adev, pipe);
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@ -1334,6 +1251,22 @@ static int mes_v11_0_resume(void *handle)
return amdgpu_mes_resume(adev);
}
static int mes_v11_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
return r;
}
return 0;
}
static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -1348,6 +1281,7 @@ static int mes_v11_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
.late_init = mes_v11_0_late_init,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,


@ -1941,9 +1941,11 @@ static int sdma_v4_0_hw_fini(void *handle)
return 0;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
}
}
sdma_v4_0_ctx_switch_enable(adev, false);


@ -715,7 +715,7 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
adev->external_rev_id = adev->rev_id + 0x80;
break;
default:


@ -39,6 +39,7 @@
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dc/inc/dc_link_ddc.h"
#include "vid.h"
#include "amdgpu.h"
@ -2254,6 +2255,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
/* if extended timeout is supported in hardware,
* default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
* CTS 4.2.1.1 regression introduced by CTS specs requirement update.
*/
dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
if (!dp_is_lttpr_present(aconnector->dc_link))
dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -7584,6 +7593,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
handle_cursor_update(plane, old_plane_state);
}
static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct drm_device *dev,
@@ -7657,6 +7673,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
continue;
dc_plane = dm_new_plane_state->dc_state;
if (!dc_plane)
continue;
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
@@ -7701,11 +7719,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/*
* Only allow immediate flips for fast updates that don't
* change FB pitch, DCC state, rotation or mirroing.
* change memory domain, FB pitch, DCC state, rotation or
* mirroring.
*/
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST;
acrtc_state->update_type == UPDATE_TYPE_FAST &&
get_mem_type(old_plane_state->fb) == get_mem_type(fb);
timestamp_ns = ktime_get_ns();
bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -9199,8 +9219,9 @@ static int dm_update_plane_state(struct dc *dc,
return -EINVAL;
}
if (dm_old_plane_state->dc_state)
dc_plane_state_release(dm_old_plane_state->dc_state);
dc_plane_state_release(dm_old_plane_state->dc_state);
dm_new_plane_state->dc_state = NULL;
*lock_and_validation_needed = true;
@@ -9737,6 +9758,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto fail;
}
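
The amdgpu_dm hunks above gate immediate (async) flips on the framebuffer staying in the same memory domain: get_mem_type() reads the TTM resource type of the GEM object, and flip_immediate is granted only for fast updates whose old and new framebuffers match. A compact sketch of that predicate, using an invented mem_type enum instead of the real TTM placement constants:

#include <stdbool.h>
#include <stdio.h>

enum mem_type { MEM_NONE, MEM_VRAM, MEM_GTT };

struct fb { enum mem_type mem; };

static enum mem_type get_mem_type(const struct fb *fb)
{
	return fb ? fb->mem : MEM_NONE;
}

/* immediate flip only for fast updates that stay in one domain */
static bool allow_immediate_flip(bool async_flip, bool fast_update,
				 const struct fb *old_fb,
				 const struct fb *new_fb)
{
	return async_flip && fast_update &&
	       get_mem_type(old_fb) == get_mem_type(new_fb);
}

int main(void)
{
	struct fb vram = { MEM_VRAM }, gtt = { MEM_GTT };

	printf("same domain:    %d\n",
	       allow_immediate_flip(true, true, &vram, &vram));
	printf("domain changed: %d\n",
	       allow_immediate_flip(true, true, &vram, &gtt));
	return 0;
}
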


@@ -1368,6 +1368,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto clean_exit;
}


@@ -333,8 +333,8 @@ void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
(support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
support = DCN_ZSTATE_SUPPORT_DISALLOW;
if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY)
if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY ||
support == DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY)
param = 1;
else
param = 0;


@@ -349,8 +349,6 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
if (!clk_mgr->smu_present)
return;
// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
// Arg[16] = Disallow Z9 -> new bit
switch (support) {
case DCN_ZSTATE_SUPPORT_ALLOW:
@@ -369,6 +367,16 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
param = (1 << 10);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;


@@ -756,6 +756,8 @@ void dcn32_clk_mgr_construct(
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct clk_log_info log_info = {0};
clk_mgr->base.ctx = ctx;
clk_mgr->base.funcs = &dcn32_funcs;
if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
@@ -789,6 +791,7 @@ void dcn32_clk_mgr_construct(
clk_mgr->base.clks.ref_dtbclk_khz = 268750;
}
/* integer part is now VCO frequency in kHz */
clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
@@ -796,6 +799,8 @@ void dcn32_clk_mgr_construct(
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */
dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;


@@ -3381,7 +3381,7 @@ bool dc_link_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
if (dc->debug.disable_z10)
if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
default:


@@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context(
struct dc_stream_status *stream_status = NULL;
struct resource_pool *pool = dc->res_pool;
if (!plane_state)
return true;
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
stream_status = &context->stream_status[i];


@@ -491,6 +491,8 @@ enum dcn_pwr_state {
enum dcn_zstate_support_state {
DCN_ZSTATE_SUPPORT_UNKNOWN,
DCN_ZSTATE_SUPPORT_ALLOW,
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY,
DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY,
DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
DCN_ZSTATE_SUPPORT_DISALLOW,
};
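
The two new enum values map onto the SMU message encoding shown in the dcn314_smu hunk earlier: bit 8 of the AllowZstatesEntry argument gates Z8 and bit 10 gates Z10. A hedged sketch of just that encoding (the full-allow and unknown cases are simplified to 0 here, matching only what the hunk shows):

#include <stdio.h>

enum zstate {
	ZSTATE_UNKNOWN,
	ZSTATE_ALLOW,
	ZSTATE_ALLOW_Z8_ONLY,
	ZSTATE_ALLOW_Z8_Z10_ONLY,
	ZSTATE_ALLOW_Z10_ONLY,
	ZSTATE_DISALLOW,
};

/* bit 8 = allow Z8, bit 10 = allow Z10 */
static unsigned int zstate_param(enum zstate s)
{
	switch (s) {
	case ZSTATE_ALLOW_Z10_ONLY:
		return 1u << 10;
	case ZSTATE_ALLOW_Z8_Z10_ONLY:
		return (1u << 10) | (1u << 8);
	case ZSTATE_ALLOW_Z8_ONLY:
		return 1u << 8;
	default:
		return 0;
	}
}

int main(void)
{
	printf("Z8 only  -> 0x%03x\n", zstate_param(ZSTATE_ALLOW_Z8_ONLY));
	printf("Z8 + Z10 -> 0x%03x\n", zstate_param(ZSTATE_ALLOW_Z8_Z10_ONLY));
	printf("Z10 only -> 0x%03x\n", zstate_param(ZSTATE_ALLOW_Z10_ONLY));
	return 0;
}
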
@@ -764,7 +766,6 @@ struct dc_debug_options {
bool disable_mem_low_power;
bool pstate_enabled;
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
bool disable_stereo_support;
bool vsr_support;
@@ -780,6 +781,7 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
int minimum_z8_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@@ -828,6 +830,7 @@ struct dc_debug_options {
int crb_alloc_policy_min_disp_count;
bool disable_z10;
bool enable_z9_disable_interface;
bool psr_skip_crtc_disable;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;


@@ -117,7 +117,7 @@ struct psr_settings {
* Add a struct dc_panel_config under dc_link
*/
struct dc_panel_config {
// extra panel power sequence parameters
/* extra panel power sequence parameters */
struct pps {
unsigned int extra_t3_ms;
unsigned int extra_t7_ms;
@@ -127,13 +127,21 @@ struct dc_panel_config {
unsigned int extra_t12_ms;
unsigned int extra_post_OUI_ms;
} pps;
// ABM
/* PSR */
struct psr {
bool disable_psr;
bool disallow_psrsu;
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
} psr;
/* ABM */
struct varib {
unsigned int varibright_feature_enable;
unsigned int def_varibright_level;
unsigned int abm_config_setting;
} varib;
// edp DSC
/* edp DSC */
struct dsc {
bool disable_dsc_edp;
unsigned int force_dsc_edp_policy;


@@ -726,11 +726,15 @@ void dcn10_hubp_pg_control(
}
}
static void power_on_plane(
static void power_on_plane_resources(
struct dce_hwseq *hws,
int plane_id)
{
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, plane_id, true);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -1237,11 +1241,15 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
hws->funcs.hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
"Power gated front end %d\n", hubp->inst);
}
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
/* disable HW used by plane.
@@ -2450,7 +2458,7 @@ static void dcn10_enable_plane(
undo_DEGVIDCN10_253_wa(dc);
power_on_plane(dc->hwseq,
power_on_plane_resources(dc->hwseq,
pipe_ctx->plane_res.hubp->inst);
/* enable DCFCLK current DCHUB */
@@ -3369,7 +3377,9 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
// Skip invisible layer and pipe-split plane on same layer
if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
if (!test_pipe->plane_state ||
!test_pipe->plane_state->visible ||
test_pipe->plane_state->layer_index == cur_layer)
continue;
r2 = test_pipe->plane_res.scl_data.recout;


@@ -1087,11 +1087,15 @@ void dcn20_blank_pixel_data(
}
static void dcn20_power_on_plane(
static void dcn20_power_on_plane_resources(
struct dce_hwseq *hws,
struct pipe_ctx *pipe_ctx)
{
DC_LOGGER_INIT(hws->ctx->logger);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
@@ -1115,7 +1119,7 @@ static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
//if (dc->debug.sanity_checks) {
// dcn10_verify_allow_pstate_change_high(dc);
//}
dcn20_power_on_plane(dc->hwseq, pipe_ctx);
dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);


@@ -671,12 +671,15 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_pplib_wm_range = true,
.disable_stutter = true,
.disable_48mhz_pwrdwn = true,
.disable_psr = true,
.enable_tri_buf = true,
.use_max_lb = true
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},


@@ -723,7 +723,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.disable_psr = false,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true
};
@@ -742,11 +741,17 @@ static const struct dc_debug_options debug_defaults_diags = {
.scl_reset_length10 = true,
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.disable_psr = true,
.enable_tri_buf = true,
.use_max_lb = true
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
};
static void dcn30_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -2214,6 +2219,11 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
}
static void dcn30_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
static const struct resource_funcs dcn30_res_pool_funcs = {
.destroy = dcn30_destroy_resource_pool,
.link_enc_create = dcn30_link_encoder_create,
@@ -2233,6 +2243,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn30_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn30_get_panel_config_defaults,
};
#define CTX ctx


@@ -112,10 +112,16 @@ static const struct dc_debug_options debug_defaults_diags = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
.disable_psr = true,
.use_max_lb = true
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
};
enum dcn302_clk_src_array_id {
DCN302_CLK_SRC_PLL0,
DCN302_CLK_SRC_PLL1,
@@ -1132,6 +1138,11 @@ void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
DC_FP_END();
}
static void dcn302_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
static struct resource_funcs dcn302_res_pool_funcs = {
.destroy = dcn302_destroy_resource_pool,
.link_enc_create = dcn302_link_encoder_create,
@@ -1151,6 +1162,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn302_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn302_get_panel_config_defaults,
};
static struct dc_cap_funcs cap_funcs = {


@@ -96,7 +96,13 @@ static const struct dc_debug_options debug_defaults_diags = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.enable_tri_buf = true,
.disable_psr = true,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
};
enum dcn303_clk_src_array_id {
@@ -1055,6 +1061,10 @@ static void dcn303_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
static void dcn303_get_panel_config_defaults(struct dc_panel_config *panel_config)
{
*panel_config = panel_config_defaults;
}
void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
@@ -1082,6 +1092,7 @@ static struct resource_funcs dcn303_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn303_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn303_get_panel_config_defaults,
};
static struct dc_cap_funcs cap_funcs = {


@@ -66,17 +66,8 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
//DTO must be enabled to generate a 0Hz clock output
if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
} else {
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}


@@ -911,6 +911,10 @@ static const struct dc_debug_options debug_defaults_diags = {
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},


@@ -289,8 +289,31 @@ static void dccg314_set_valid_pixel_rate(
dccg314_set_dtbclk_dto(dccg, &dto_params);
}
static void dccg314_dpp_root_clock_control(
struct dccg *dccg,
unsigned int dpp_inst,
bool clock_on)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (clock_on) {
/* turn off the DTO and leave phase/modulo at max */
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0xFF,
DPPCLK0_DTO_MODULO, 0xFF);
} else {
/* turn on the DTO to generate a 0hz clock */
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1);
REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
}
}
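
dccg314_dpp_root_clock_control gates the DPP root clock with the clock DTO itself: enabling the DTO with phase 0 and modulo 1 produces a 0 Hz output, while disabling it (phase/modulo parked at max) lets DPPCLK pass through. A toy model of that register dance, with plain globals standing in for DPPCLK_DTO_CTRL and DPPCLK_DTO_PARAM:

#include <stdbool.h>
#include <stdio.h>

static unsigned int dto_enable, dto_phase, dto_modulo;

static void dpp_root_clock_control(bool clock_on)
{
	if (clock_on) {
		/* DTO off, phase/modulo at max: clock runs normally */
		dto_enable = 0;
		dto_phase = 0xFF;
		dto_modulo = 0xFF;
	} else {
		/* DTO on with phase 0 / modulo 1: output is 0 Hz */
		dto_enable = 1;
		dto_phase = 0;
		dto_modulo = 1;
	}
	printf("enable=%u phase=0x%02X modulo=0x%02X\n",
	       dto_enable, dto_phase, dto_modulo);
}

int main(void)
{
	dpp_root_clock_control(false); /* gate the root clock */
	dpp_root_clock_control(true);  /* ungate it again */
	return 0;
}
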
static const struct dccg_funcs dccg314_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.dpp_root_clock_control = dccg314_dpp_root_clock_control,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg31_init,
.set_dpstreamclk = dccg314_set_dpstreamclk,


@@ -392,6 +392,16 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
pix_per_cycle);
}
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
return;
if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control)
hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
}
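
The hwseq wrapper above is the usual optional-hook pattern: bail out unless the root_clock_optimization debug bit is set, then dispatch through the dccg function pointer only if the ASIC provides one. A self-contained sketch of that double gate, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct dccg;
struct dccg_funcs {
	void (*dpp_root_clock_control)(struct dccg *dccg,
				       unsigned int dpp_inst, bool clock_on);
};
struct dccg { const struct dccg_funcs *funcs; };

static bool root_clock_opt = true; /* debug.root_clock_optimization.bits.dpp */

static void hw_hook(struct dccg *dccg, unsigned int inst, bool on)
{
	(void)dccg;
	printf("dpp%u root clock %s\n", inst, on ? "on" : "off");
}

static void dpp_root_clock_control(struct dccg *dccg,
				   unsigned int inst, bool on)
{
	if (!root_clock_opt)	/* policy gate first */
		return;
	if (dccg->funcs->dpp_root_clock_control)	/* hw hook is optional */
		dccg->funcs->dpp_root_clock_control(dccg, inst, on);
}

int main(void)
{
	static const struct dccg_funcs funcs = {
		.dpp_root_clock_control = hw_hook,
	};
	struct dccg dccg = { .funcs = &funcs };

	dpp_root_clock_control(&dccg, 0, false);
	return 0;
}
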
void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
{
struct dc_context *ctx = hws->ctx;


@@ -43,4 +43,6 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
#endif /* __DC_HWSS_DCN314_H__ */


@@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn314_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,


@@ -884,6 +884,8 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
.minimum_z8_residency_time = 2000,
.psr_skip_crtc_disable = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -940,6 +942,10 @@ static const struct dc_debug_options debug_defaults_diags = {
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},


@@ -907,6 +907,10 @@ static const struct dc_debug_options debug_defaults_diags = {
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},


@@ -906,6 +906,10 @@ static const struct dc_debug_options debug_defaults_diags = {
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
},
.ilr = {
.optimize_edp_link_rate = true,
},


@@ -984,6 +984,7 @@ void dcn32_init_hw(struct dc *dc)
if (dc->ctx->dmub_srv) {
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
/* Enable support for ODM and windowed MPO if policy flag is set */


@@ -1984,7 +1984,7 @@ int dcn32_populate_dml_pipes_from_context(
// In general cases we want to keep the dram clock change requirement
// (prefer configs that support MCLK switch). Only override to false
// for SubVP
if (subvp_in_use)
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
else
context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
@@ -2037,6 +2037,14 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = REG_READ(CC_DC_PIPE_DIS);
/* DCN32 support max 4 pipes */
value = value & 0xf;
return value;
}
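
read_pipe_fuses() exists because CC_DC_PIPE_DIS can carry junk above the bits that correspond to real pipes, so the raw register is masked to the four pipes DCN32 actually has before the harvesting loop tests each bit. A short sketch, assuming a set bit means the pipe is fused off:

#include <stdio.h>

/* stand-in for REG_READ(CC_DC_PIPE_DIS): garbage in the high bits */
static unsigned int read_fuse_reg(void) { return 0x32; }

static unsigned int read_pipe_fuses(void)
{
	return read_fuse_reg() & 0xf; /* only 4 pipes are real */
}

int main(void)
{
	unsigned int fuses = read_pipe_fuses();
	int i, usable = 0;

	for (i = 0; i < 4; i++)
		if (!(fuses & (1u << i)))
			usable++;
	printf("fuses=0x%x -> %d usable pipes\n", fuses, usable);
	return 0;
}
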
static bool dcn32_resource_construct(
uint8_t num_virtual_links,
@@ -2079,7 +2087,7 @@ static bool dcn32_resource_construct(
pool->base.res_cap = &res_cap_dcn32;
/* max number of pipes for ASIC before checking for pipe fuses */
num_pipes = pool->base.res_cap->num_timing_generator;
pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
pipe_fuses = read_pipe_fuses(ctx);
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
if (pipe_fuses & 1 << i)


@@ -1621,6 +1621,14 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.remove_phantom_pipes = dcn32_remove_phantom_pipes,
};
static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
uint32_t value = REG_READ(CC_DC_PIPE_DIS);
/* DCN321 support max 4 pipes */
value = value & 0xf;
return value;
}
static bool dcn321_resource_construct(
uint8_t num_virtual_links,
@@ -1663,7 +1671,7 @@ static bool dcn321_resource_construct(
pool->base.res_cap = &res_cap_dcn321;
/* max number of pipes for ASIC before checking for pipe fuses */
num_pipes = pool->base.res_cap->num_timing_generator;
pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
pipe_fuses = read_pipe_fuses(ctx);
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
if (pipe_fuses & 1 << i)


@@ -963,6 +963,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
* 2. single eDP, on link 0, 1 plane and stutter period > 5ms
* Z10 only cases:
* 1. single eDP, on link 0, 1 plane and stutter period >= 5ms
* Z8 cases:
* 1. stutter period sufficient
* Zstate not allowed cases:
* 1. Everything else
*/
@@ -971,6 +973,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
if (dc_extended_blank_supported(dc)) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -983,18 +988,20 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
}
}
}
/* zstate only supported on PWRSEQ0 and when there's <2 planes*/
if (link->link_index != 0 || stream_status->plane_count > 1)
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else
return DCN_ZSTATE_SUPPORT_DISALLOW;
} else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
} else {
return DCN_ZSTATE_SUPPORT_DISALLOW;
}
}
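
The reworked decide_zstate_support() keys most of the decision on whether the panel sits on power sequencer 0 and whether the stutter period clears the (tunable) Z8 residency floor. A condensed sketch of the decision tree; it folds the optimized_min_dst_y_next_start_us term into the plain stutter check and drops the extended-blank scan, so treat it as an approximation of the hunk, not a copy:

#include <stdbool.h>
#include <stdio.h>

enum z { DISALLOW, ALLOW, Z8_ONLY, Z8_Z10_ONLY, Z10_ONLY };

static enum z decide(double stutter_us, int plane_count, int link_index,
		     bool psr1_enabled, int min_z8_us)
{
	bool allow_z8 = stutter_us > (min_z8_us > 0 ? min_z8_us : 1000);
	bool is_pwrseq0 = link_index == 0;

	if (plane_count > 1)	/* multi-plane never qualifies */
		return DISALLOW;
	if (is_pwrseq0 && stutter_us > 5000.0)
		return ALLOW;
	if (is_pwrseq0 && psr1_enabled)
		return allow_z8 ? Z8_Z10_ONLY : Z10_ONLY;
	return allow_z8 ? Z8_ONLY : DISALLOW;
}

int main(void)
{
	printf("%d\n", decide(6000.0, 1, 0, false, 2000)); /* ALLOW */
	printf("%d\n", decide(3000.0, 1, 0, true, 2000));  /* Z8_Z10_ONLY */
	printf("%d\n", decide(800.0, 1, 1, true, 2000));   /* DISALLOW */
	return 0;
}
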
void dcn20_calculate_dlg_params(


@@ -368,7 +368,9 @@ void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
dc_assert_fp_enabled();
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
}
@@ -520,9 +522,21 @@ void dcn30_fpu_calculate_wm_and_dlg(
pipe_idx++;
}
DC_FP_START();
// WA: restrict FPO to use first non-strobe mode (NV24 BW issue)
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching &&
dc->dml.soc.num_chans <= 4 &&
context->bw_ctx.dml.vba.DRAMSpeed <= 1700 &&
context->bw_ctx.dml.vba.DRAMSpeed >= 1500) {
for (i = 0; i < dc->dml.soc.num_states; i++) {
if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) {
context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts;
break;
}
}
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
DC_FP_END();
if (!pstate_en)
/* Restore full p-state latency */
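
The strobe workaround above bumps the effective DRAM speed to the first non-strobe state whenever fw-assisted MCLK switching is active on a narrow (four channels or fewer) memory system running at a strobe-mode speed. A hedged sketch of that selection:

#include <stdio.h>

struct soc_state { double dram_speed_mts; };

/* skip 1500..1700 MT/s strobe speeds; jump to the first faster state */
static double pick_dram_speed(double cur, int num_chans, int fpo_active,
			      const struct soc_state *s, int n)
{
	int i;

	if (!fpo_active || num_chans > 4 || cur < 1500.0 || cur > 1700.0)
		return cur;

	for (i = 0; i < n; i++)
		if (s[i].dram_speed_mts > 1700.0)
			return s[i].dram_speed_mts;
	return cur;
}

int main(void)
{
	const struct soc_state states[] = {
		{ 1200.0 }, { 1600.0 }, { 2400.0 }, { 3200.0 },
	};

	/* 1600 MT/s is strobe-mode, so the WA picks 2400 */
	printf("%.0f\n", pick_dram_speed(1600.0, 4, 1, states, 4));
	return 0;
}
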


@@ -148,8 +148,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
.num_states = 5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
.sr_exit_z8_time_us = 442.0,
.sr_enter_plus_exit_z8_time_us = 560.0,
.sr_exit_z8_time_us = 268.0,
.sr_enter_plus_exit_z8_time_us = 393.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,


@@ -1200,9 +1200,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
} else {
// Most populate phantom DLG params before programming hardware / timing for phantom pipe
DC_FP_START();
dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
DC_FP_END();
/* Call validate_apply_pipe_split flags after calling DML getters for
* phantom dlg params, or some of the VBA params indicating pipe split
@@ -1503,11 +1501,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
if (!fast_validate) {
DC_FP_START();
if (!fast_validate)
dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
DC_FP_END();
}
if (fast_validate ||
(dc->debug.dml_disallow_alternate_prefetch_modes &&
@@ -2172,9 +2167,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
DC_FP_START();
insert_entry_into_table_sorted(table, num_entries, &entry);
DC_FP_END();
}
// Insert the max DCFCLK
@@ -2182,9 +2175,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = 0;
DC_FP_START();
insert_entry_into_table_sorted(table, num_entries, &entry);
DC_FP_END();
// Insert the UCLK DPMS
for (i = 0; i < num_uclk_dpms; i++) {
@@ -2192,9 +2183,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = 0;
entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
DC_FP_START();
insert_entry_into_table_sorted(table, num_entries, &entry);
DC_FP_END();
}
// If FCLK is coarse grained, insert individual DPMs.
@@ -2204,9 +2193,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entry.dram_speed_mts = 0;
DC_FP_START();
insert_entry_into_table_sorted(table, num_entries, &entry);
DC_FP_END();
}
}
// If FCLK fine grained, only insert max
@@ -2215,9 +2202,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
entry.fabricclk_mhz = max_fclk_mhz;
entry.dram_speed_mts = 0;
DC_FP_START();
insert_entry_into_table_sorted(table, num_entries, &entry);
DC_FP_END();
}
// At this point, the table contains all "points of interest" based on


@@ -807,7 +807,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */
&v->DSTXAfterScaler[k],
@@ -3288,7 +3289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->swath_width_chroma_ub_this_state[k],
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k], v->TWait,
v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
/* Output */


@@ -52,6 +52,7 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define MEM_STROBE_FREQ_MHZ 1600
#define MIN_DCFCLK_FREQ_MHZ 200
#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
struct display_mode_lib;


@@ -106,16 +106,16 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 1564.0,
.fabricclk_mhz = 400.0,
.dispclk_mhz = 2150.0,
.dppclk_mhz = 2150.0,
.dcfclk_mhz = 1434.0,
.fabricclk_mhz = 2250.0,
.dispclk_mhz = 1720.0,
.dppclk_mhz = 1720.0,
.phyclk_mhz = 810.0,
.phyclk_d18_mhz = 667.0,
.phyclk_d32_mhz = 625.0,
.phyclk_d32_mhz = 313.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 716.667,
.dram_speed_mts = 1600.0,
.dscclk_mhz = 573.333,
.dram_speed_mts = 16000.0,
.dtbclk_mhz = 1564.0,
},
},
@@ -125,14 +125,14 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
.round_trip_ping_latency_dcfclk_cycles = 263,
.round_trip_ping_latency_dcfclk_cycles = 207,
.urgent_latency_pixel_data_only_us = 4,
.urgent_latency_pixel_mixed_with_vm_data_us = 4,
.urgent_latency_vm_data_only_us = 4,
.fclk_change_latency_us = 20,
.usr_retraining_latency_us = 2,
.smn_latency_us = 2,
.mall_allocated_for_dcn_mbytes = 64,
.fclk_change_latency_us = 7,
.usr_retraining_latency_us = 0,
.smn_latency_us = 0,
.mall_allocated_for_dcn_mbytes = 32,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,


@@ -148,18 +148,21 @@ struct dccg_funcs {
struct dccg *dccg,
int inst);
void (*set_pixel_rate_div)(
struct dccg *dccg,
uint32_t otg_inst,
enum pixel_rate_div k1,
enum pixel_rate_div k2);
void (*set_pixel_rate_div)(struct dccg *dccg,
uint32_t otg_inst,
enum pixel_rate_div k1,
enum pixel_rate_div k2);
void (*set_valid_pixel_rate)(
struct dccg *dccg,
int ref_dtbclk_khz,
int otg_inst,
int pixclk_khz);
void (*set_valid_pixel_rate)(
struct dccg *dccg,
int ref_dtbclk_khz,
int otg_inst,
int pixclk_khz);
void (*dpp_root_clock_control)(
struct dccg *dccg,
unsigned int dpp_inst,
bool clock_on);
};
#endif //__DAL_DCCG_H__


@@ -115,6 +115,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
void (*dpp_root_clock_control)(
struct dce_hwseq *hws,
unsigned int dpp_inst,
bool clock_on);
void (*dpp_pg_control)(struct dce_hwseq *hws,
unsigned int dpp_inst,
bool power_on);


@@ -130,12 +130,13 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
}
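
Zeroing OUTBOX0's read and write pointers matters because a mailbox ring is considered empty exactly when the two pointers agree; leaving a stale write pointer across a DMUB reset would make old messages look pending. A toy model of that invariant (not the DMUB API, just the pointer arithmetic):

#include <stdio.h>

struct ring { unsigned int rptr, wptr; };

static int ring_empty(const struct ring *r) { return r->rptr == r->wptr; }

static void reset_ring(struct ring *r)
{
	/* zero both sides so no stale message survives the reset */
	r->rptr = 0;
	r->wptr = 0;
}

int main(void)
{
	struct ring outbox0 = { .rptr = 12, .wptr = 48 };

	printf("before reset: empty=%d\n", ring_empty(&outbox0));
	reset_ring(&outbox0);
	printf("after reset:  empty=%d\n", ring_empty(&outbox0));
	return 0;
}
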
void dmub_dcn32_reset_release(struct dmub_srv *dmub)
{
REG_WRITE(DMCUB_GPINT_DATAIN1, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);


@@ -36,6 +36,8 @@
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1414,15 +1416,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
struct smu_context *smu = adev->powerplay.pp_handle;
if (is_support_sw_smu(adev)) {
struct smu_context *smu = adev->powerplay.pp_handle;
if ((is_support_sw_smu(adev) && smu->od_enabled) ||
(is_support_sw_smu(adev) && smu->is_apu) ||
(!is_support_sw_smu(adev) && hwmgr->od_enabled))
return true;
return (smu->od_enabled || smu->is_apu);
} else {
struct pp_hwmgr *hwmgr;
return false;
/*
* dpm on some legacy asics don't carry od_enabled member
* as its pp_handle is casted directly from adev.
*/
if (amdgpu_dpm_is_legacy_dpm(adev))
return false;
hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
return hwmgr->od_enabled;
}
}
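
The amdgpu_dpm_is_legacy_dpm() test works by pointer identity: legacy powerplay stores the adev pointer itself in pp_handle, so casting such a handle to a pp_hwmgr and reading od_enabled would read garbage. A sketch of just the non-SMU branch of the hunk (the SMU branch simply returns smu->od_enabled || smu->is_apu); the fake_* types are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct fake_adev { void *pp_handle; };
struct fake_hwmgr { bool od_enabled; };

/* legacy dpm: pp_handle is the device itself, not a hwmgr */
static bool is_legacy_dpm(struct fake_adev *adev)
{
	return adev->pp_handle == (void *)adev;
}

static bool overdrive_supported(struct fake_adev *adev)
{
	struct fake_hwmgr *hwmgr;

	if (is_legacy_dpm(adev))
		return false;	/* no od_enabled member to look at */
	hwmgr = adev->pp_handle;
	return hwmgr->od_enabled;
}

int main(void)
{
	struct fake_hwmgr hw = { .od_enabled = true };
	struct fake_adev modern = { .pp_handle = &hw };
	struct fake_adev legacy;

	legacy.pp_handle = &legacy;
	printf("legacy: %d\n", overdrive_supported(&legacy));
	printf("modern: %d\n", overdrive_supported(&modern));
	return 0;
}
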
int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,


@@ -504,7 +504,6 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM |
MIPI_DSI_MODE_NO_EOT_PACKET;


@@ -1210,7 +1210,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
/* panel power on related mipi dsi vbt sequences */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);


@@ -762,17 +762,6 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
}
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
{
struct intel_connector *connector = intel_dsi->attached_connector;
/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
return;
msleep(msec);
}
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);


@@ -16,7 +16,6 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
#endif /* __INTEL_DSI_VBT_H__ */


@@ -85,6 +85,10 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define MTL_MAX_SRC_W 4096
#define MTL_MAX_SRC_H 8192
#define MTL_MAX_DST_W 8192
#define MTL_MAX_DST_H 8192
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
@@ -101,6 +105,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
int min_src_w, min_src_h, min_dst_w, min_dst_h;
int max_src_w, max_src_h, max_dst_w, max_dst_h;
/*
* Src coordinates are already rotated by 270 degrees for
@@ -155,15 +163,33 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return -EINVAL;
}
min_src_w = SKL_MIN_SRC_W;
min_src_h = SKL_MIN_SRC_H;
min_dst_w = SKL_MIN_DST_W;
min_dst_h = SKL_MIN_DST_H;
if (DISPLAY_VER(dev_priv) < 11) {
max_src_w = SKL_MAX_SRC_W;
max_src_h = SKL_MAX_SRC_H;
max_dst_w = SKL_MAX_DST_W;
max_dst_h = SKL_MAX_DST_H;
} else if (DISPLAY_VER(dev_priv) < 14) {
max_src_w = ICL_MAX_SRC_W;
max_src_h = ICL_MAX_SRC_H;
max_dst_w = ICL_MAX_DST_W;
max_dst_h = ICL_MAX_DST_H;
} else {
max_src_w = MTL_MAX_SRC_W;
max_src_h = MTL_MAX_SRC_H;
max_dst_w = MTL_MAX_DST_W;
max_dst_h = MTL_MAX_DST_H;
}
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
(DISPLAY_VER(dev_priv) >= 11 &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
(DISPLAY_VER(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
if (src_w < min_src_w || src_h < min_src_h ||
dst_w < min_dst_w || dst_h < min_dst_h ||
src_w > max_src_w || src_h > max_src_h ||
dst_w > max_dst_w || dst_h > max_dst_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
@ -172,6 +198,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return -EINVAL;
}
/*
* The pipe scaler does not use all the bits of PIPESRC, at least
* on the earlier platforms. So even when we're scaling a plane
* the *pipe* source size must not be too large. For simplicity
* we assume the limits match the scaler source size limits. Might
* not be 100% accurate on all platforms, but good enough for now.
*/
if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: pipe src size %ux%u "
"is out of scaler range\n",
crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
return -EINVAL;
}
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
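
The skl_update_scaler rework replaces the nested version checks with one set of min/max limits selected by display generation, then a single range comparison, and additionally bounds the pipe source size by the same limits. A sketch of the max-side selection; the ICL and MTL numbers come from the hunk, while the SKL values are recalled from the driver and should be treated as an assumption:

#include <stdio.h>

struct limits { int src_w, src_h, dst_w, dst_h; };

/* pick scaler max limits by display generation */
static struct limits scaler_max(int display_ver)
{
	if (display_ver < 11)	/* SKL-class (assumed values) */
		return (struct limits){ 4096, 4096, 4096, 4096 };
	if (display_ver < 14)	/* ICL-class */
		return (struct limits){ 5120, 4096, 5120, 4096 };
	return (struct limits){ 4096, 8192, 8192, 8192 }; /* MTL */
}

static int in_range(int ver, int sw, int sh, int dw, int dh)
{
	struct limits m = scaler_max(ver);

	return sw <= m.src_w && sh <= m.src_h &&
	       dw <= m.dst_w && dh <= m.dst_h;
}

int main(void)
{
	/* a 5120-wide source passes on ICL but not on SKL */
	printf("ICL: %d\n", in_range(12, 5120, 2880, 5120, 2880));
	printf("SKL: %d\n", in_range(9, 5120, 2880, 5120, 2880));
	return 0;
}
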


@@ -782,7 +782,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum port port;
@@ -830,21 +829,10 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (!IS_GEMINILAKE(dev_priv))
intel_dsi_prepare(encoder, pipe_config);
/* Give the panel time to power-on and then deassert its reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
/*
* Give the panel time to power-on and then deassert its reset.
* Depending on the VBT MIPI sequences version the deassert-seq
* may contain the necessary delay, intel_dsi_msleep() will skip
* the delay in that case. If there is no deassert-seq, then an
* unconditional msleep is used to give the panel time to power-on.
*/
if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
} else {
msleep(intel_dsi->panel_on_delay);
}
msleep(intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);
@@ -878,7 +866,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
msleep(20); /* XXX */
for_each_dsi_port(port, intel_dsi->ports)
dpi_send_cmd(intel_dsi, TURN_ON, false, port);
intel_dsi_msleep(intel_dsi, 100);
msleep(100);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
@@ -1006,7 +994,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
/* Assert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_off_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
intel_dsi->panel_power_off_time = ktime_get_boottime();


@@ -1133,6 +1133,8 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
static const struct intel_device_info mtl_info = {
XE_HP_FEATURES,
XE_LPDP_FEATURES,
.__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
/*
* Real graphics IP version will be obtained from hardware GMD_ID
* register. Value provided here is just for sanity checking.

Some files were not shown because too many files have changed in this diff.