Merge 6.1.43 into android14-6.1-lts

Changes in 6.1.43
	netfilter: nf_tables: fix underflow in object reference counter
	netfilter: nf_tables: fix underflow in chain reference counter
	platform/x86/amd/pmf: Notify OS power slider update
	platform/x86/amd/pmf: reduce verbosity of apmf_get_system_params
	drm/amd/display: Keep PHY active for dp config
	ovl: fix null pointer dereference in ovl_permission()
	drm/amd: Move helper for dynamic speed switch check out of smu13
	drm/amd: Align SMU11 SMU_MSG_OverridePcieParameters implementation with SMU13
	jbd2: Fix wrongly judgement for buffer head removing while doing checkpoint
	blk-mq: Fix stall due to recursive flush plug
	powerpc/pseries/vas: Hold mmap_mutex after mmap lock during window close
	KVM: s390: pv: fix index value of replaced ASCE
	io_uring: don't audit the capability check in io_uring_create()
	gpio: tps68470: Make tps68470_gpio_output() always set the initial value
	pwm: Add a stub for devm_pwmchip_add()
	gpio: mvebu: Make use of devm_pwmchip_add
	gpio: mvebu: fix irq domain leak
	btrfs: fix race between quota disable and relocation
	i2c: Delete error messages for failed memory allocations
	i2c: Improve size determinations
	i2c: nomadik: Remove unnecessary goto label
	i2c: nomadik: Use devm_clk_get_enabled()
	i2c: nomadik: Remove a useless call in the remove function
	MIPS: Loongson: Move arch cflags to MIPS top level Makefile
	MIPS: Loongson: Fix build error when make modules_install
	PCI/ASPM: Return 0 or -ETIMEDOUT from pcie_retrain_link()
	PCI/ASPM: Factor out pcie_wait_for_retrain()
	PCI/ASPM: Avoid link retraining race
	PCI: rockchip: Remove writes to unused registers
	PCI: rockchip: Fix window mapping and address translation for endpoint
	PCI: rockchip: Don't advertise MSI-X in PCIe capabilities
	drm/amd/display: add FB_DAMAGE_CLIPS support
	drm/amd/display: Check if link state is valid
	drm/amd/display: Rework context change check
	drm/amd/display: Enable new commit sequence only for DCN32x
	drm/amd/display: Copy DC context in the commit streams
	drm/amd/display: Include surface of unaffected streams
	drm/amd/display: Use min transition for all SubVP plane add/remove
	drm/amd/display: add ODM case when looking for first split pipe
	drm/amd/display: use low clocks for no plane configs
	drm/amd/display: fix unbounded requesting for high pixel rate modes on dcn315
	drm/amd/display: add pixel rate based CRB allocation support
	drm/amd/display: fix dcn315 single stream crb allocation
	drm/amd/display: Update correct DCN314 register header
	drm/amd/display: Set minimum requirement for using PSR-SU on Rembrandt
	drm/amd/display: Set minimum requirement for using PSR-SU on Phoenix
	drm/ttm: Don't print error message if eviction was interrupted
	drm/ttm: Don't leak a resource on eviction error
	n_tty: Rename tail to old_tail in n_tty_read()
	tty: fix hang on tty device with no_room set
	drm/ttm: never consider pinned BOs for eviction&swap
	KVM: arm64: Condition HW AF updates on config option
	arm64: errata: Mitigate Ampere1 erratum AC03_CPU_38 at stage-2
	mptcp: introduce 'sk' to replace 'sock->sk' in mptcp_listen()
	mptcp: do not rely on implicit state check in mptcp_listen()
	tracing/probes: Add symstr type for dynamic events
	tracing/probes: Fix to avoid double count of the string length on the array
	tracing: Allow synthetic events to pass around stacktraces
	Revert "tracing: Add "(fault)" name injection to kernel probes"
	tracing/probes: Fix to record 0-length data_loc in fetch_store_string*() if fails
	test_maple_tree: test modifications while iterating
	maple_tree: add __init and __exit to test module
	maple_tree: fix 32 bit mas_next testing
	drm/amd/display: Rework comments on dc file
	drm/amd/display: fix dc/core/dc.c kernel-doc
	drm/amd/display: Add FAMS validation before trying to use it
	drm/amd/display: update extended blank for dcn314 onwards
	drm/amd/display: Fix possible underflow for displays with large vblank
	drm/amd/display: Prevent vtotal from being set to 0
	phy: phy-mtk-dp: Fix an error code in probe()
	phy: qcom-snps: correct struct qcom_snps_hsphy kerneldoc
	phy: qcom-snps-femto-v2: keep cfg_ahb_clk enabled during runtime suspend
	phy: qcom-snps-femto-v2: properly enable ref clock
	soundwire: qcom: update status correctly with mask
	media: staging: atomisp: select V4L2_FWNODE
	media: amphion: Fix firmware path to match linux-firmware
	i40e: Fix an NULL vs IS_ERR() bug for debugfs_create_dir()
	iavf: fix potential deadlock on allocation failure
	iavf: check for removal state before IAVF_FLAG_PF_COMMS_FAILED
	net: phy: marvell10g: fix 88x3310 power up
	net: hns3: fix the imp capability bit cannot exceed 32 bits issue
	net: hns3: fix wrong tc bandwidth weight data issue
	net: hns3: fix wrong bw weight of disabled tc issue
	vxlan: calculate correct header length for GPE
	vxlan: generalize vxlan_parse_gpe_hdr and remove unused args
	vxlan: fix GRO with VXLAN-GPE
	phy: hisilicon: Fix an out of bounds check in hisi_inno_phy_probe()
	atheros: fix return value check in atl1_tso()
	ethernet: atheros: fix return value check in atl1e_tso_csum()
	ipv6 addrconf: fix bug where deleting a mngtmpaddr can create a new temporary address
	tcp: Reduce chance of collisions in inet6_hashfn().
	ice: Fix memory management in ice_ethtool_fdir.c
	bonding: reset bond's flags when down link is P2P device
	team: reset team's flags when down link is P2P device
	octeontx2-af: Removed unnecessary debug messages.
	octeontx2-af: Fix hash extraction enable configuration
	net: stmmac: Apply redundant write work around on 4.xx too
	platform/x86: msi-laptop: Fix rfkill out-of-sync on MSI Wind U100
	x86/traps: Fix load_unaligned_zeropad() handling for shared TDX memory
	igc: Fix Kernel Panic during ndo_tx_timeout callback
	netfilter: nft_set_rbtree: fix overlap expiration walk
	netfilter: nf_tables: skip immediate deactivate in _PREPARE_ERROR
	netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
	mm: suppress mm fault logging if fatal signal already pending
	net/sched: mqprio: refactor nlattr parsing to a separate function
	net/sched: mqprio: add extack to mqprio_parse_nlattr()
	net/sched: mqprio: Add length check for TCA_MQPRIO_{MAX/MIN}_RATE64
	benet: fix return value check in be_lancer_xmit_workarounds()
	tipc: check return value of pskb_trim()
	tipc: stop tipc crypto on failure in tipc_node_create
	RDMA/mlx4: Make check for invalid flags stricter
	drm/msm/dpu: drop enum dpu_core_perf_data_bus_id
	drm/msm/adreno: Fix snapshot BINDLESS_DATA size
	RDMA/irdma: Add missing read barriers
	RDMA/irdma: Fix data race on CQP completion stats
	RDMA/irdma: Fix data race on CQP request done
	RDMA/mthca: Fix crash when polling CQ for shared QPs
	RDMA/bnxt_re: Prevent handling any completions after qp destroy
	drm/msm: Fix IS_ERR_OR_NULL() vs NULL check in a5xx_submit_in_rb()
	cxl/acpi: Fix a use-after-free in cxl_parse_cfmws()
	cxl/acpi: Return 'rc' instead of '0' in cxl_parse_cfmws()
	ASoC: fsl_spdif: Silence output on stop
	block: Fix a source code comment in include/uapi/linux/blkzoned.h
	smb3: do not set NTLMSSP_VERSION flag for negotiate not auth request
	drm/i915: Fix an error handling path in igt_write_huge()
	xenbus: check xen_domain in xenbus_probe_initcall
	dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
	dm raid: clean up four equivalent goto tags in raid_ctr()
	dm raid: protect md_stop() with 'reconfig_mutex'
	drm/amd: Fix an error handling mistake in psp_sw_init()
	drm/amd/display: Unlock on error path in dm_handle_mst_sideband_msg_ready_event()
	RDMA/irdma: Fix op_type reporting in CQEs
	RDMA/irdma: Report correct WC error
	drm/msm: Switch idr_lock to spinlock
	drm/msm: Disallow submit with fence id 0
	ublk_drv: move ublk_get_device_from_id into ublk_ctrl_uring_cmd
	ublk: fail to start device if queue setup is interrupted
	ublk: fail to recover device if queue setup is interrupted
	ata: pata_ns87415: mark ns87560_tf_read static
	ring-buffer: Fix wrong stat of cpu_buffer->read
	tracing: Fix warning in trace_buffered_event_disable()
	Revert "usb: gadget: tegra-xudc: Fix error check in tegra_xudc_powerdomain_init()"
	usb: gadget: call usb_gadget_check_config() to verify UDC capability
	USB: gadget: Fix the memory leak in raw_gadget driver
	usb: gadget: core: remove unbalanced mutex_unlock in usb_gadget_activate
	KVM: Grab a reference to KVM for VM and vCPU stats file descriptors
	KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest
	KVM: x86: Disallow KVM_SET_SREGS{2} if incoming CR0 is invalid
	serial: qcom-geni: drop bogus runtime pm state update
	serial: 8250_dw: Preserve original value of DLF register
	serial: sifive: Fix sifive_serial_console_setup() section
	USB: serial: option: support Quectel EM060K_128
	USB: serial: option: add Quectel EC200A module support
	USB: serial: simple: add Kaufmann RKS+CAN VCP
	USB: serial: simple: sort driver entries
	can: gs_usb: gs_can_close(): add missing set of CAN state to CAN_STATE_STOPPED
	usb: typec: Set port->pd before adding device for typec_port
	usb: typec: Iterate pds array when showing the pd list
	usb: typec: Use sysfs_emit_at when concatenating the string
	Revert "usb: dwc3: core: Enable AutoRetry feature in the controller"
	usb: dwc3: pci: skip BYT GPIO lookup table for hardwired phy
	usb: dwc3: don't reset device side if dwc3 was configured as host-only
	usb: misc: ehset: fix wrong if condition
	usb: ohci-at91: Fix the unhandle interrupt when resume
	USB: quirks: add quirk for Focusrite Scarlett
	usb: cdns3: fix incorrect calculation of ep_buf_size when more than one config
	usb: xhci-mtk: set the dma max_seg_size
	Revert "usb: xhci: tegra: Fix error check"
	Documentation: security-bugs.rst: update preferences when dealing with the linux-distros group
	Documentation: security-bugs.rst: clarify CVE handling
	staging: r8712: Fix memory leak in _r8712_init_xmit_priv()
	staging: ks7010: potential buffer overflow in ks_wlan_set_encode_ext()
	tty: n_gsm: fix UAF in gsm_cleanup_mux
	Revert "xhci: add quirk for host controllers that don't update endpoint DCS"
	ALSA: hda/realtek: Support ASUS G713PV laptop
	ALSA: hda/relatek: Enable Mute LED on HP 250 G8
	hwmon: (k10temp) Enable AMD3255 Proc to show negative temperature
	hwmon: (nct7802) Fix for temp6 (PECI1) processed even if PECI1 disabled
	btrfs: account block group tree when calculating global reserve size
	btrfs: check if the transaction was aborted at btrfs_wait_for_commit()
	btrfs: check for commit error at btrfs_attach_transaction_barrier()
	x86/MCE/AMD: Decrement threshold_bank refcount when removing threshold blocks
	file: always lock position for FMODE_ATOMIC_POS
	nfsd: Remove incorrect check in nfsd4_validate_stateid
	ACPI/IORT: Remove erroneous id_count check in iort_node_get_rmr_info()
	tpm_tis: Explicitly check for error code
	irq-bcm6345-l1: Do not assume a fixed block to cpu mapping
	irqchip/gic-v4.1: Properly lock VPEs when doing a directLPI invalidation
	locking/rtmutex: Fix task->pi_waiters integrity
	proc/vmcore: fix signedness bug in read_from_oldmem()
	xen: speed up grant-table reclaim
	virtio-net: fix race between set queues and probe
	net: dsa: qca8k: fix search_and_insert wrong handling of new rule
	net: dsa: qca8k: fix broken search_and_del
	net: dsa: qca8k: fix mdb add/del case with 0 VID
	selftests: mptcp: join: only check for ip6tables if needed
	soundwire: fix enumeration completion
	Revert "um: Use swap() to make code cleaner"
	LoongArch: BPF: Fix check condition to call lu32id in move_imm()
	LoongArch: BPF: Enable bpf_probe_read{, str}() on LoongArch
	s390/dasd: fix hanging device after quiesce/resume
	s390/dasd: print copy pair message only for the correct error
	ASoC: wm8904: Fill the cache for WM8904_ADC_TEST_0 register
	arm64/sme: Set new vector length before reallocating
	PM: sleep: wakeirq: fix wake irq arming
	ceph: never send metrics if disable_send_metrics is set
	drm/i915/dpt: Use shmem for dpt objects
	dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
	rbd: make get_lock_owner_info() return a single locker or NULL
	rbd: harden get_lock_owner_info() a bit
	rbd: retrieve and check lock owner twice before blocklisting
	drm/amd/display: set per pipe dppclk to 0 when dpp is off
	tracing: Fix trace_event_raw_event_synth() if else statement
	drm/amd/display: perform a bounds check before filling dirty rectangles
	drm/amd/display: Write to correct dirty_rect
	ACPI: processor: perflib: Use the "no limit" frequency QoS
	ACPI: processor: perflib: Avoid updating frequency QoS unnecessarily
	cpufreq: intel_pstate: Drop ACPI _PSS states table patching
	mptcp: ensure subflow is unhashed before cleaning the backlog
	selftests: mptcp: sockopt: use 'iptables-legacy' if available
	test_firmware: return ENOMEM instead of ENOSPC on failed memory allocation
	dma-buf: keep the signaling time of merged fences v3
	dma-buf: fix an error pointer vs NULL bug
	Linux 6.1.43

Change-Id: Id1d61f2351c51edad33ab654f1f3d911b9a75830
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit 7f81705800 by Greg Kroah-Hartman, 2023-09-02 19:59:15 +00:00; 228 changed files with 3137 additions and 1760 deletions.

@@ -60,3 +60,14 @@ Description: Module taint flags:
C staging driver module
E unsigned module
== =====================
What: /sys/module/grant_table/parameters/free_per_iteration
Date: July 2023
KernelVersion: 6.5 but backported to all supported stable branches
Contact: Xen developer discussion <xen-devel@lists.xenproject.org>
Description: Read and write number of grant entries to attempt to free per iteration.
Note: Future versions of Xen and Linux may provide a better
interface for controlling the rate of deferred grant reclaim
or may not need it at all.
Users: Qubes OS (https://www.qubes-os.org)
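As an illustration, a minimal userspace sketch (a hypothetical tool, plain C; it assumes the parameter file exists and that the write is done as root) that reads the current batch size and doubles it:

	/* Read and retune grant_table.free_per_iteration via sysfs. */
	#include <stdio.h>

	#define PARAM "/sys/module/grant_table/parameters/free_per_iteration"

	int main(void)
	{
		unsigned long n;
		FILE *f = fopen(PARAM, "r");

		if (!f || fscanf(f, "%lu", &n) != 1) {
			perror(PARAM);
			return 1;
		}
		fclose(f);
		printf("freeing %lu grant entries per iteration\n", n);

		/* A larger batch reclaims grants faster at the cost of more
		 * time spent per workqueue iteration; root is required. */
		f = fopen(PARAM, "w");
		if (!f) {
			perror(PARAM);
			return 1;
		}
		fprintf(f, "%lu\n", n * 2);
		fclose(f);
		return 0;
	}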

@@ -63,31 +63,28 @@ information submitted to the security list and any followup discussions
of the report are treated confidentially even after the embargo has been
lifted, in perpetuity.
Coordination
------------
Coordination with other groups
------------------------------
Fixes for sensitive bugs, such as those that might lead to privilege
escalations, may need to be coordinated with the private
<linux-distros@vs.openwall.org> mailing list so that distribution vendors
are well prepared to issue a fixed kernel upon public disclosure of the
upstream fix. Distros will need some time to test the proposed patch and
will generally request at least a few days of embargo, and vendor update
publication prefers to happen Tuesday through Thursday. When appropriate,
the security team can assist with this coordination, or the reporter can
include linux-distros from the start. In this case, remember to prefix
the email Subject line with "[vs]" as described in the linux-distros wiki:
<http://oss-security.openwall.org/wiki/mailing-lists/distros#how-to-use-the-lists>
The kernel security team strongly recommends that reporters of potential
security issues NEVER contact the "linux-distros" mailing list until
AFTER discussing it with the kernel security team. Do not Cc: both
lists at once. You may contact the linux-distros mailing list after a
fix has been agreed on and you fully understand the requirements that
doing so will impose on you and the kernel community.
The different lists have different goals and the linux-distros rules do
not contribute to actually fixing any potential security problems.
CVE assignment
--------------
The security team does not normally assign CVEs, nor do we require them
for reports or fixes, as this can needlessly complicate the process and
may delay the bug handling. If a reporter wishes to have a CVE identifier
assigned ahead of public disclosure, they will need to contact the private
linux-distros list, described above. When such a CVE identifier is known
before a patch is provided, it is desirable to mention it in the commit
message if the reporter agrees.
The security team does not assign CVEs, nor do we require them for
reports or fixes, as this can needlessly complicate the process and may
delay the bug handling. If a reporter wishes to have a CVE identifier
assigned, they should find one by themselves, for example by contacting
MITRE directly. However under no circumstances will a patch inclusion
be delayed to wait for a CVE identifier to arrive.
Non-disclosure agreements
-------------------------

@@ -52,6 +52,9 @@ stable kernels.
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Ampere | AmpereOne | AC03_CPU_38 | AMPERE_ERRATUM_AC03_CPU_38 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2457168 | ARM64_ERRATUM_2457168 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 |

@@ -58,8 +58,8 @@ Synopsis of kprobe_events
NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
(u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
(x8/x16/x32/x64), "string", "ustring" and bitfield
are supported.
(x8/x16/x32/x64), "string", "ustring", "symbol", "symstr"
and bitfield are supported.
(\*1) only for the probe on function entry (offs == 0).
(\*2) only for return probe.
@@ -96,6 +96,10 @@ offset, and container-size (usually 32). The syntax is::
Symbol type('symbol') is an alias of u32 or u64 type (depends on BITS_PER_LONG)
which shows given pointer in "symbol+offset" style.
On the other hand, symbol-string type ('symstr') converts the given address to
"symbol+offset/symbolsize" style and stores it as a null-terminated string.
With 'symstr' type, you can filter the event with wildcard pattern of the
symbols, and you don't need to solve symbol name by yourself.
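For instance (a sketch, not part of this patch; it assumes tracefs is mounted at /sys/kernel/tracing and that $argN fetching is supported on the architecture), the callback pointer passed to call_rcu() can be recorded as a symbol string and then filtered by wildcard:

	echo 'p:rcucb call_rcu func=$arg2:symstr' >> /sys/kernel/tracing/kprobe_events
	echo 'func ~ "*ext4*"' > /sys/kernel/tracing/events/kprobes/rcucb/filter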
For $comm, the default type is "string"; any other type is invalid.
.. _user_mem_access:

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 42
SUBLEVEL = 43
EXTRAVERSION =
NAME = Curry Ramen

@@ -384,6 +384,25 @@ menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
config AMPERE_ERRATUM_AC03_CPU_38
bool "AmpereOne: AC03_CPU_38: Certain bits in the Virtualization Translation Control Register and Translation Control Registers do not follow RES0 semantics"
default y
help
This option adds an alternative code sequence to work around Ampere
erratum AC03_CPU_38 on AmpereOne.
The affected design reports FEAT_HAFDBS as not implemented in
ID_AA64MMFR1_EL1.HAFDBS, but (V)TCR_ELx.{HA,HD} are not RES0
as required by the architecture. The unadvertised HAFDBS
implementation suffers from an additional erratum where hardware
A/D updates can occur after a PTE has been marked invalid.
The workaround forces KVM to explicitly set VTCR_EL2.HA to 0,
which avoids enabling unadvertised hardware Access Flag management
at stage-2.
If unsure, say Y.
config ARM64_WORKAROUND_CLEAN_CACHE
bool

@@ -722,6 +722,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
.cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
{
.desc = "AmpereOne erratum AC03_CPU_38",
.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
},
#endif
{
}

@@ -871,6 +871,8 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
if (task == current)
put_cpu_fpsimd_context();
task_set_vl(task, type, vl);
/*
* Free the changed states if they are not in use, SME will be
* reallocated to the correct size on next use and we just
@@ -885,8 +887,6 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
if (free_sme)
sme_free(task);
task_set_vl(task, type, vl);
out:
update_tsk_thread_flag(task, vec_vl_inherit_flag(type),
flags & PR_SVE_VL_INHERIT);

@@ -540,12 +540,22 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
lvls = 2;
vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Enable the Hardware Access Flag management, unconditionally
* on all CPUs. The features is RES0 on CPUs without the support
* and must be ignored by the CPUs.
* on all CPUs. In systems that have asymmetric support for the feature
* this allows KVM to leverage hardware support on the subset of cores
* that implement the feature.
*
* The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
* hardware) on implementations that do not advertise support for the
* feature. As such, setting HA unconditionally is safe, unless you
* happen to be running on a design that has unadvertised support for
* HAFDBS. Here be dragons.
*/
vtcr |= VTCR_EL2_HA;
if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
vtcr |= VTCR_EL2_HA;
#endif /* CONFIG_ARM64_HW_AFDBM */
/* Set the vmid bits */
vtcr |= (get_vmid_bits(mmfr1) == 16) ?

@@ -71,6 +71,7 @@ WORKAROUND_2064142
WORKAROUND_2077057
WORKAROUND_2457168
WORKAROUND_2658417
WORKAROUND_AMPERE_AC03_CPU_38
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE

@@ -10,6 +10,7 @@ config LOONGARCH
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION

@@ -148,7 +148,7 @@ static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm
* no need to call lu32id to do a new filled operation.
*/
imm_51_31 = (imm >> 31) & 0x1fffff;
if (imm_51_31 != 0 || imm_51_31 != 0x1fffff) {
if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) {
/* lu32id rd, imm_51_32 */
imm_51_32 = (imm >> 32) & 0xfffff;
emit_insn(ctx, lu32id, rd, imm_51_32);
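The operator change is the entire fix: imm_51_31 != 0 || imm_51_31 != 0x1fffff holds for every possible value, so the lu32id step could never be skipped and the one-instruction saving described by the comment above never happened. A standalone sketch (plain C, not part of the patch) makes the truth table visible:

	#include <stdio.h>

	/* Old check: a tautology, so lu32id was always emitted.
	 * New check: skips lu32id when bits 51..31 are all zeros or all
	 * ones, i.e. when the sign extension of the lower 32-bit load
	 * already produces the correct upper bits. */
	static int old_check(long b) { return b != 0 || b != 0x1fffff; }
	static int new_check(long b) { return b != 0 && b != 0x1fffff; }

	int main(void)
	{
		const long cases[] = { 0x0, 0x1fffff, 0x12345 };

		for (int i = 0; i < 3; i++)
			printf("imm_51_31=0x%06lx old=%d new=%d\n",
			       (unsigned long)cases[i],
			       old_check(cases[i]), new_check(cases[i]));
		return 0; /* old is 1 for every input; new is 0 for the first two */
	}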

@@ -190,9 +190,43 @@ endif
cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2E) += $(call cc-option,-march=loongson2e) -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2F) += $(call cc-option,-march=loongson2f) -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap
# Some -march= flags enable MMI instructions, and GCC complains about that
# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi)
cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi)
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,)
ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa,-mfix-loongson2f-nop
cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa,-mfix-loongson2f-jump
endif
#
# Some versions of binutils, not currently mainline as of 2019/02/04, support
# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
# description).
#
# We disable this in order to prevent the assembler meddling with the
# instruction that labels refer to, ie. if we label an ll instruction:
#
# 1: ll v0, 0(a0)
#
# ...then with the assembler fix applied the label may actually point at a sync
# instruction inserted by the assembler, and if we were using the label in an
# exception table the table would no longer contain the address of the ll
# instruction.
#
# Avoid this by explicitly disabling that assembler behaviour. If upstream
# binutils does not merge support for the flag then we can revisit & remove
# this later - for now it ensures vendor toolchains don't cause problems.
#
cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
# For smartmips configurations, there are hundreds of warnings due to ISA overrides
# in assembly and header files. smartmips is only supported for MIPS32r1 onwards

@@ -2,41 +2,6 @@
# Loongson Processors' Support
#
cflags-$(CONFIG_CPU_LOONGSON2EF) += -Wa,--trap
cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e
cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f
#
# Some versions of binutils, not currently mainline as of 2019/02/04, support
# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
# description).
#
# We disable this in order to prevent the assembler meddling with the
# instruction that labels refer to, ie. if we label an ll instruction:
#
# 1: ll v0, 0(a0)
#
# ...then with the assembler fix applied the label may actually point at a sync
# instruction inserted by the assembler, and if we were using the label in an
# exception table the table would no longer contain the address of the ll
# instruction.
#
# Avoid this by explicitly disabling that assembler behaviour. If upstream
# binutils does not merge support for the flag then we can revisit & remove
# this later - for now it ensures vendor toolchains don't cause problems.
#
cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
# Enable the workarounds for Loongson2f
ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa,-mfix-loongson2f-nop
cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa,-mfix-loongson2f-jump
endif
# Some -march= flags enable MMI instructions, and GCC complains about that
# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
cflags-y += $(call cc-option,-mno-loongson-mmi)
#
# Loongson Machines' Support
#

@@ -1,19 +1,3 @@
#
# Loongson Processors' Support
#
cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
ifdef CONFIG_CPU_LOONGSON64
cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a
cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2
endif
# Some -march= flags enable MMI instructions, and GCC complains about that
# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
cflags-y += $(call cc-option,-mno-loongson-mmi)
#
# Loongson Machines' Support
#

@@ -744,6 +744,12 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
}
task_ref = &win->vas_win.task_ref;
/*
* VAS mmap (coproc_mmap()) and its fault handler
* (vas_mmap_fault()) are called after holding mmap lock.
* So hold mmap mutex after mmap_lock to avoid deadlock.
*/
mmap_write_lock(task_ref->mm);
mutex_lock(&task_ref->mmap_mutex);
vma = task_ref->vma;
/*
@@ -752,7 +758,6 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
*/
win->vas_win.status |= flag;
mmap_write_lock(task_ref->mm);
/*
* vma is set in the original mapping. But this mapping
* is done with mmap() after the window is opened with ioctl.
@@ -763,8 +768,8 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
zap_page_range(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
mmap_write_unlock(task_ref->mm);
mutex_unlock(&task_ref->mmap_mutex);
mmap_write_unlock(task_ref->mm);
/*
* Close VAS window in the hypervisor, but do not
* free vas_window struct since it may be reused

@@ -2851,6 +2851,7 @@ int s390_replace_asce(struct gmap *gmap)
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = 0;
table = page_to_virt(page);
memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));

@@ -3,7 +3,6 @@
* Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/minmax.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
@@ -51,7 +50,7 @@ static struct pollfds all_sigio_fds;
static int write_sigio_thread(void *unused)
{
struct pollfds *fds;
struct pollfds *fds, tmp;
struct pollfd *p;
int i, n, respond_fd;
char c;
@@ -78,7 +77,9 @@ static int write_sigio_thread(void *unused)
"write_sigio_thread : "
"read on socket failed, "
"err = %d\n", errno);
swap(current_poll, next_poll);
tmp = current_poll;
current_poll = next_poll;
next_poll = tmp;
respond_fd = sigio_private[1];
}
else {

@@ -36,6 +36,7 @@ KVM_X86_OP(get_segment)
KVM_X86_OP(get_cpl)
KVM_X86_OP(set_segment)
KVM_X86_OP(get_cs_db_l_bits)
KVM_X86_OP(is_valid_cr0)
KVM_X86_OP(set_cr0)
KVM_X86_OP_OPTIONAL(post_set_cr3)
KVM_X86_OP(is_valid_cr4)

@@ -1488,9 +1488,10 @@ struct kvm_x86_ops {
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);

@@ -1265,10 +1265,10 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
struct threshold_block *pos = NULL;
struct threshold_block *tmp = NULL;
kobject_del(b->kobj);
kobject_put(b->kobj);
list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
kobject_del(&pos->kobj);
kobject_put(b->kobj);
}
static void threshold_remove_bank(struct threshold_bank *bank)

@@ -697,9 +697,10 @@ static bool try_fixup_enqcmd_gp(void)
}
static bool gp_try_fixup_and_notify(struct pt_regs *regs, int trapnr,
unsigned long error_code, const char *str)
unsigned long error_code, const char *str,
unsigned long address)
{
if (fixup_exception(regs, trapnr, error_code, 0))
if (fixup_exception(regs, trapnr, error_code, address))
return true;
current->thread.error_code = error_code;
@@ -759,7 +760,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
goto exit;
}
if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc))
if (gp_try_fixup_and_notify(regs, X86_TRAP_GP, error_code, desc, 0))
goto exit;
if (error_code)
@@ -1357,17 +1358,20 @@ DEFINE_IDTENTRY(exc_device_not_available)
#define VE_FAULT_STR "VE fault"
static void ve_raise_fault(struct pt_regs *regs, long error_code)
static void ve_raise_fault(struct pt_regs *regs, long error_code,
unsigned long address)
{
if (user_mode(regs)) {
gp_user_force_sig_segv(regs, X86_TRAP_VE, error_code, VE_FAULT_STR);
return;
}
if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code, VE_FAULT_STR))
if (gp_try_fixup_and_notify(regs, X86_TRAP_VE, error_code,
VE_FAULT_STR, address)) {
return;
}
die_addr(VE_FAULT_STR, regs, error_code, 0);
die_addr(VE_FAULT_STR, regs, error_code, address);
}
/*
@@ -1431,7 +1435,7 @@ DEFINE_IDTENTRY(exc_virtualization_exception)
* it successfully, treat it as #GP(0) and handle it.
*/
if (!tdx_handle_virt_exception(regs, &ve))
ve_raise_fault(regs, 0);
ve_raise_fault(regs, 0, ve.gla);
cond_local_irq_disable(regs);
}

@@ -1763,6 +1763,11 @@ static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}
}
static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
return true;
}
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4749,6 +4754,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_segment = svm_set_segment,
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = svm_get_cs_db_l_bits,
.is_valid_cr0 = svm_is_valid_cr0,
.set_cr0 = svm_set_cr0,
.post_set_cr3 = sev_post_set_cr3,
.is_valid_cr4 = svm_is_valid_cr4,

@@ -1461,6 +1461,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long old_rflags;
/*
* Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
* is an unrestricted guest in order to mark L2 as needing emulation
* if L1 runs L2 as a restricted guest.
*/
if (is_unrestricted_guest(vcpu)) {
kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS);
vmx->rflags = rflags;
@@ -2970,6 +2975,15 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
/*
* KVM should never use VM86 to virtualize Real Mode when L2 is active,
* as using VM86 is unnecessary if unrestricted guest is enabled, and
* if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
* should VM-Fail and KVM should reject userspace attempts to stuff
* CR0.PG=0 when L2 is active.
*/
WARN_ON_ONCE(is_guest_mode(vcpu));
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
@@ -3160,6 +3174,17 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
CPU_BASED_CR3_STORE_EXITING)
static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
if (is_guest_mode(vcpu))
return nested_guest_cr0_valid(vcpu, cr0);
if (to_vmx(vcpu)->nested.vmxon)
return nested_host_cr0_valid(vcpu, cr0);
return true;
}
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3169,7 +3194,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG);
hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
if (is_unrestricted_guest(vcpu))
if (enable_unrestricted_guest)
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
else {
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
@@ -3197,7 +3222,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
#endif
if (enable_ept && !is_unrestricted_guest(vcpu)) {
if (enable_ept && !enable_unrestricted_guest) {
/*
* Ensure KVM has an up-to-date snapshot of the guest's CR3. If
* the below code _enables_ CR3 exiting, vmx_cache_reg() will
@@ -3328,7 +3353,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long hw_cr4;
hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
if (is_unrestricted_guest(vcpu))
if (enable_unrestricted_guest)
hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
else if (vmx->rmode.vm86_active)
hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
@@ -3348,7 +3373,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu->arch.cr4 = cr4;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
if (!is_unrestricted_guest(vcpu)) {
if (!enable_unrestricted_guest) {
if (enable_ept) {
if (!is_paging(vcpu)) {
hw_cr4 &= ~X86_CR4_PAE;
@@ -5311,18 +5336,11 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
if (!nested_guest_cr0_valid(vcpu, val))
return 1;
if (kvm_set_cr0(vcpu, val))
return 1;
vmcs_writel(CR0_READ_SHADOW, orig_val);
return 0;
} else {
if (to_vmx(vcpu)->nested.vmxon &&
!nested_host_cr0_valid(vcpu, val))
return 1;
return kvm_set_cr0(vcpu, val);
}
}
@@ -8112,6 +8130,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_segment = vmx_set_segment,
.get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
.is_valid_cr0 = vmx_is_valid_cr0,
.set_cr0 = vmx_set_cr0,
.is_valid_cr4 = vmx_is_valid_cr4,
.set_cr4 = vmx_set_cr4,

@@ -908,6 +908,22 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
}
EXPORT_SYMBOL_GPL(load_pdptrs);
static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL)
return false;
#endif
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
return false;
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
return false;
return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
}
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
/*
@@ -948,21 +964,14 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
if (!kvm_is_valid_cr0(vcpu, cr0))
return 1;
cr0 |= X86_CR0_ET;
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL)
return 1;
#endif
/* Write to CR0 reserved bits are ignored, even on Intel. */
cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
return 1;
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
return 1;
#ifdef CONFIG_X86_64
if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
(cr0 & X86_CR0_PG)) {
@@ -11532,7 +11541,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return false;
}
return kvm_is_valid_cr4(vcpu, sregs->cr4);
return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
kvm_is_valid_cr0(vcpu, sregs->cr0);
}
static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,

@@ -1146,8 +1146,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
if (!list_empty(&plug->cb_list))
flush_plug_callbacks(plug, from_schedule);
if (!rq_list_empty(plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
blk_mq_flush_plug_list(plug, from_schedule);
/*
* Unconditionally flush out cached requests, even if the unplug
* event came from schedule. Since we know hold references to the

@@ -2747,7 +2747,14 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request *rq;
if (rq_list_empty(plug->mq_list))
/*
* We may have been called recursively midway through handling
* plug->mq_list via a schedule() in the driver's queue_rq() callback.
* To avoid mq_list changing under our feet, clear rq_count early and
* bail out specifically if rq_count is 0 rather than checking
* whether the mq_list is empty.
*/
if (plug->rq_count == 0)
return;
plug->rq_count = 0;

@@ -998,9 +998,6 @@ static void iort_node_get_rmr_info(struct acpi_iort_node *node,
for (i = 0; i < node->mapping_count; i++, map++) {
struct acpi_iort_node *parent;
if (!map->id_count)
continue;
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
if (parent != iommu)

@@ -53,6 +53,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
s32 qos_value;
int index;
int ret;
if (!pr)
@@ -72,17 +74,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
}
}
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
(int)ppc, ppc ? "" : "not");
index = ppc;
pr->performance_platform_limit = (int)ppc;
if (ppc >= pr->performance->state_count ||
unlikely(!freq_qos_request_active(&pr->perflib_req)))
if (pr->performance_platform_limit == index ||
ppc >= pr->performance->state_count)
return 0;
ret = freq_qos_update_request(&pr->perflib_req,
pr->performance->states[ppc].core_frequency * 1000);
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
index, index ? "is" : "is not");
pr->performance_platform_limit = index;
if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
/*
* If _PPC returns 0, it means that all of the available states can be
* used ("no limit").
*/
if (index == 0)
qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
else
qos_value = pr->performance->states[index].core_frequency * 1000;
ret = freq_qos_update_request(&pr->perflib_req, qos_value);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
pr->id, ret);
@@ -165,9 +180,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (!pr)
continue;
/*
* Reset performance_platform_limit in case there is a stale
* value in it, so as to make it match the "no limit" QoS value
* below.
*/
pr->performance_platform_limit = 0;
ret = freq_qos_add_request(&policy->constraints,
&pr->perflib_req,
FREQ_QOS_MAX, INT_MAX);
&pr->perflib_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);

@@ -260,7 +260,7 @@ static u8 ns87560_check_status(struct ata_port *ap)
* LOCKING:
* Inherited from caller.
*/
void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
struct ata_ioports *ioaddr = &ap->ioaddr;

@@ -29,6 +29,7 @@ extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED | \
WAKE_IRQ_DEDICATED_REVERSE)
#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
struct wake_irq {
struct device *dev;

@@ -314,8 +314,10 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
enable_irq(wirq->irq);
wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
}
}
/**
@@ -336,8 +338,10 @@ void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
disable_irq_nosync(wirq->irq);
}
}
/**
@@ -376,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!pm_runtime_status_suspended(wirq->dev))
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@@ -399,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
disable_irq_wake(wirq->irq);
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
!pm_runtime_status_suspended(wirq->dev))
!(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
disable_irq_nosync(wirq->irq);
}
}

@@ -3850,51 +3850,82 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static int get_lock_owner_info(struct rbd_device *rbd_dev,
struct ceph_locker **lockers, u32 *num_lockers)
static bool locker_equal(const struct ceph_locker *lhs,
const struct ceph_locker *rhs)
{
return lhs->id.name.type == rhs->id.name.type &&
lhs->id.name.num == rhs->id.name.num &&
!strcmp(lhs->id.cookie, rhs->id.cookie) &&
ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
}
static void free_locker(struct ceph_locker *locker)
{
if (locker)
ceph_free_lockers(locker, 1);
}
static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_locker *lockers;
u32 num_lockers;
u8 lock_type;
char *lock_tag;
u64 handle;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
&lock_type, &lock_tag, lockers, num_lockers);
if (ret)
return ret;
&lock_type, &lock_tag, &lockers, &num_lockers);
if (ret) {
rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
return ERR_PTR(ret);
}
if (*num_lockers == 0) {
if (num_lockers == 0) {
dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
lockers = NULL;
goto out;
}
if (strcmp(lock_tag, RBD_LOCK_TAG)) {
rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
lock_tag);
ret = -EBUSY;
goto out;
goto err_busy;
}
if (lock_type == CEPH_CLS_LOCK_SHARED) {
rbd_warn(rbd_dev, "shared lock type detected");
ret = -EBUSY;
goto out;
if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
rbd_warn(rbd_dev, "incompatible lock type detected");
goto err_busy;
}
if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
strlen(RBD_LOCK_COOKIE_PREFIX))) {
WARN_ON(num_lockers != 1);
ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
&handle);
if (ret != 1) {
rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
(*lockers)[0].id.cookie);
ret = -EBUSY;
goto out;
lockers[0].id.cookie);
goto err_busy;
}
if (ceph_addr_is_blank(&lockers[0].info.addr)) {
rbd_warn(rbd_dev, "locker has a blank address");
goto err_busy;
}
dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
__func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
&lockers[0].info.addr.in_addr,
le32_to_cpu(lockers[0].info.addr.nonce), handle);
out:
kfree(lock_tag);
return ret;
return lockers;
err_busy:
kfree(lock_tag);
ceph_free_lockers(lockers, num_lockers);
return ERR_PTR(-EBUSY);
}
static int find_watcher(struct rbd_device *rbd_dev,
@@ -3948,51 +3979,68 @@ static int find_watcher(struct rbd_device *rbd_dev,
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
struct ceph_client *client = rbd_dev->rbd_client->client;
struct ceph_locker *lockers;
u32 num_lockers;
struct ceph_locker *locker, *refreshed_locker;
int ret;
for (;;) {
locker = refreshed_locker = NULL;
ret = rbd_lock(rbd_dev);
if (ret != -EBUSY)
return ret;
goto out;
/* determine if the current lock holder is still alive */
ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
if (ret)
return ret;
if (num_lockers == 0)
locker = get_lock_owner_info(rbd_dev);
if (IS_ERR(locker)) {
ret = PTR_ERR(locker);
locker = NULL;
goto out;
}
if (!locker)
goto again;
ret = find_watcher(rbd_dev, lockers);
ret = find_watcher(rbd_dev, locker);
if (ret)
goto out; /* request lock or error */
refreshed_locker = get_lock_owner_info(rbd_dev);
if (IS_ERR(refreshed_locker)) {
ret = PTR_ERR(refreshed_locker);
refreshed_locker = NULL;
goto out;
}
if (!refreshed_locker ||
!locker_equal(locker, refreshed_locker))
goto again;
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
ENTITY_NAME(lockers[0].id.name));
ENTITY_NAME(locker->id.name));
ret = ceph_monc_blocklist_add(&client->monc,
&lockers[0].info.addr);
&locker->info.addr);
if (ret) {
rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
ENTITY_NAME(lockers[0].id.name), ret);
rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
ENTITY_NAME(locker->id.name), ret);
goto out;
}
ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, RBD_LOCK_NAME,
lockers[0].id.cookie,
&lockers[0].id.name);
if (ret && ret != -ENOENT)
locker->id.cookie, &locker->id.name);
if (ret && ret != -ENOENT) {
rbd_warn(rbd_dev, "failed to break header lock: %d",
ret);
goto out;
}
again:
ceph_free_lockers(lockers, num_lockers);
free_locker(refreshed_locker);
free_locker(locker);
}
out:
ceph_free_lockers(lockers, num_lockers);
free_locker(refreshed_locker);
free_locker(locker);
return ret;
}

@@ -1529,22 +1529,18 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ublksrv_pid = (int)header->data[0];
struct ublk_device *ub;
struct gendisk *disk;
int ret = -EINVAL;
if (ublksrv_pid <= 0)
return -EINVAL;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return -EINVAL;
wait_for_completion_interruptible(&ub->completion);
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
@@ -1593,21 +1589,20 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
put_disk(disk);
out_unlock:
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
return ret;
}
static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_device *ub;
cpumask_var_t cpumask;
unsigned long queue;
unsigned int retlen;
unsigned int i;
int ret = -EINVAL;
int ret;
if (header->len * BITS_PER_BYTE < nr_cpu_ids)
return -EINVAL;
if (header->len & (sizeof(unsigned long)-1))
@@ -1615,17 +1610,12 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
if (!header->addr)
return -EINVAL;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return -EINVAL;
queue = header->data[0];
if (queue >= ub->dev_info.nr_hw_queues)
goto out_put_device;
return -EINVAL;
ret = -ENOMEM;
if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
goto out_put_device;
return -ENOMEM;
for_each_possible_cpu(i) {
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
@@ -1643,8 +1633,6 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
ret = 0;
out_free_cpumask:
free_cpumask_var(cpumask);
out_put_device:
ublk_put_device(ub);
return ret;
}
@@ -1765,30 +1753,27 @@ static inline bool ublk_idr_freed(int id)
return ptr == NULL;
}
static int ublk_ctrl_del_dev(int idx)
static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
{
struct ublk_device *ub;
struct ublk_device *ub = *p_ub;
int idx = ub->ub_number;
int ret;
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
ub = ublk_get_device_from_id(idx);
if (ub) {
ublk_remove(ub);
ublk_put_device(ub);
ret = 0;
} else {
ret = -ENODEV;
}
ublk_remove(ub);
/* Mark the reference as consumed */
*p_ub = NULL;
ublk_put_device(ub);
/*
* Wait until the idr is removed, then it can be reused after
* DEL_DEV command is returned.
*/
if (!ret)
wait_event(ublk_idr_wq, ublk_idr_freed(idx));
wait_event(ublk_idr_wq, ublk_idr_freed(idx));
mutex_unlock(&ublk_ctl_mutex);
return ret;
@@ -1803,50 +1788,36 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
header->data[0], header->addr, header->len);
}
static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
struct ublk_device *ub;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return -EINVAL;
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
ublk_put_device(ub);
return 0;
}
static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_device *ub;
int ret = 0;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
return -EINVAL;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return -EINVAL;
if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
ret = -EFAULT;
ublk_put_device(ub);
return -EFAULT;
return ret;
return 0;
}
static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
static int ublk_ctrl_get_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
struct ublk_device *ub;
int ret;
if (header->len <= sizeof(ph) || !header->addr)
@@ -1861,10 +1832,6 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return -EINVAL;
mutex_lock(&ub->mutex);
if (copy_to_user(argp, &ub->params, ph.len))
ret = -EFAULT;
@@ -1872,16 +1839,15 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
ret = 0;
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
return ret;
}
static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
static int ublk_ctrl_set_params(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
struct ublk_device *ub;
int ret = -EFAULT;
if (header->len <= sizeof(ph) || !header->addr)
@@ -1896,10 +1862,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return -EINVAL;
/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
@@ -1914,7 +1876,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
ub->params.types = 0;
}
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
return ret;
}
@@ -1941,17 +1902,13 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
static int ublk_ctrl_start_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
struct ublk_device *ub;
int ret = -EINVAL;
int i;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return ret;
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
@@ -1984,25 +1941,22 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
return ret;
}
static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
static int ublk_ctrl_end_recovery(struct ublk_device *ub,
struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ublksrv_pid = (int)header->data[0];
struct ublk_device *ub;
int ret = -EINVAL;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
return ret;
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until new ubq_daemon sending all FETCH_REQ */
wait_for_completion_interruptible(&ub->completion);
if (wait_for_completion_interruptible(&ub->completion))
return -EINTR;
pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
@@ -2026,7 +1980,6 @@ static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
ublk_put_device(ub);
return ret;
}
@@ -2034,6 +1987,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
struct ublk_device *ub = NULL;
int ret = -EINVAL;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -2048,41 +2002,50 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (!capable(CAP_SYS_ADMIN))
goto out;
ret = -ENODEV;
if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
ret = -ENODEV;
ub = ublk_get_device_from_id(header->dev_id);
if (!ub)
goto out;
}
switch (cmd->cmd_op) {
case UBLK_CMD_START_DEV:
ret = ublk_ctrl_start_dev(cmd);
ret = ublk_ctrl_start_dev(ub, cmd);
break;
case UBLK_CMD_STOP_DEV:
ret = ublk_ctrl_stop_dev(cmd);
ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
ret = ublk_ctrl_get_dev_info(cmd);
ret = ublk_ctrl_get_dev_info(ub, cmd);
break;
case UBLK_CMD_ADD_DEV:
ret = ublk_ctrl_add_dev(cmd);
break;
case UBLK_CMD_DEL_DEV:
ret = ublk_ctrl_del_dev(header->dev_id);
ret = ublk_ctrl_del_dev(&ub);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
ret = ublk_ctrl_get_queue_affinity(cmd);
ret = ublk_ctrl_get_queue_affinity(ub, cmd);
break;
case UBLK_CMD_GET_PARAMS:
ret = ublk_ctrl_get_params(cmd);
ret = ublk_ctrl_get_params(ub, cmd);
break;
case UBLK_CMD_SET_PARAMS:
ret = ublk_ctrl_set_params(cmd);
ret = ublk_ctrl_set_params(ub, cmd);
break;
case UBLK_CMD_START_USER_RECOVERY:
ret = ublk_ctrl_start_recovery(cmd);
ret = ublk_ctrl_start_recovery(ub, cmd);
break;
case UBLK_CMD_END_USER_RECOVERY:
ret = ublk_ctrl_end_recovery(cmd);
ret = ublk_ctrl_end_recovery(ub, cmd);
break;
default:
ret = -ENOTSUPP;
break;
}
if (ub)
ublk_put_device(ub);
out:
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",

@@ -356,8 +356,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
rc = recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (rc < 0) {
size = rc;
goto out;
}
size += rc;
if (size < expected) {
dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;

@@ -451,20 +451,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
(u32) cpu->acpi_perf_data.states[i].control);
}
/*
* The _PSS table doesn't contain whole turbo frequency range.
* This just contains +1 MHZ above the max non turbo frequency,
* with control value corresponding to max turbo ratio. But
* when cpufreq set policy is called, it will call with this
* max frequency, which will cause a reduced performance as
* this driver uses real max turbo frequency as the max
* frequency. So correct this frequency in _PSS table to
* correct max turbo frequency based on the turbo state.
* Also need to convert to MHz as _PSS freq is in MHz.
*/
if (!global.turbo_disabled)
cpu->acpi_perf_data.states[0].core_frequency =
policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");


@ -154,9 +154,8 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
else
rc = cxl_decoder_autoremove(dev, cxld);
if (rc) {
dev_err(dev, "Failed to add decode range [%#llx - %#llx]\n",
cxld->hpa_range.start, cxld->hpa_range.end);
return 0;
dev_err(dev, "Failed to add decode range: %pr", res);
return rc;
}
dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
dev_name(&cxld->dev),


@ -874,7 +874,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
spin_lock_init(&mvpwm->lock);
return pwmchip_add(&mvpwm->chip);
return devm_pwmchip_add(dev, &mvpwm->chip);
}
#ifdef CONFIG_DEBUG_FS
@ -1112,6 +1112,13 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev,
return 0;
}
static void mvebu_gpio_remove_irq_domain(void *data)
{
struct irq_domain *domain = data;
irq_domain_remove(domain);
}
static int mvebu_gpio_probe(struct platform_device *pdev)
{
struct mvebu_gpio_chip *mvchip;
@ -1243,17 +1250,21 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
err = -ENODEV;
goto err_pwm;
return -ENODEV;
}
err = devm_add_action_or_reset(&pdev->dev, mvebu_gpio_remove_irq_domain,
mvchip->domain);
if (err)
return err;
err = irq_alloc_domain_generic_chips(
mvchip->domain, ngpios, 2, np->name, handle_level_irq,
IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0);
if (err) {
dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n",
mvchip->chip.label);
goto err_domain;
return err;
}
/*
@ -1293,13 +1304,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
}
return 0;
err_domain:
irq_domain_remove(mvchip->domain);
err_pwm:
pwmchip_remove(&mvchip->mvpwm->chip);
return err;
}
static struct platform_driver mvebu_gpio_driver = {
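Both mvebu changes are one transformation: hand teardown to devres so the error paths need no goto labels. devm_pwmchip_add() removes the PWM chip automatically, and devm_add_action_or_reset() provides the same lifetime management for resources without a devm variant, such as the IRQ domain. A generic sketch of the second pattern (hypothetical probe, assumed names):

static void remove_domain_action(void *data)
{
	irq_domain_remove(data);	/* runs on probe failure or unbind */
}

static int example_probe(struct platform_device *pdev)
{
	struct irq_domain *domain;
	int err;

	domain = irq_domain_add_linear(pdev->dev.of_node, 32,
				       &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENODEV;

	/* From here on, devres owns the domain's lifetime. */
	err = devm_add_action_or_reset(&pdev->dev, remove_domain_action,
				       domain);
	if (err)
		return err;

	return 0;
}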


@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
/* Set the initial value */
tps68470_gpio_set(gc, offset, value);
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
return 0;
/* Set the initial value */
tps68470_gpio_set(gc, offset, value);
return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
TPS68470_GPIO_MODE_MASK,
TPS68470_GPIO_MODE_OUT_CMOS);


@ -1262,6 +1262,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);


@ -1333,6 +1333,25 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor == X86_VENDOR_INTEL)
return false;
#endif
return true;
}
/**
* amdgpu_device_should_use_aspm - check if the device should program ASPM
*
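Callers are expected to gate PCIe DPM setup on this helper so the link stays pinned to one speed on the affected Intel hosts. A hypothetical call site; the variable names are illustrative, not the real SMU interface:

if (!amdgpu_device_pcie_dynamic_switching_supported()) {
	/* Assumed variables: advertise a single fixed gen and width. */
	pcie_gen_cap = pcie_gen_max;
	pcie_width_cap = pcie_width_max;
}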


@ -472,11 +472,11 @@ static int psp_sw_init(void *handle)
return 0;
failed2:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
return ret;
}


@ -4948,6 +4948,30 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
return 0;
}
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
struct rect *dirty_rect, int32_t x,
int32_t y, int32_t width, int32_t height,
int *i, bool ffu)
{
WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
dirty_rect->x = x;
dirty_rect->y = y;
dirty_rect->width = width;
dirty_rect->height = height;
if (ffu)
drm_dbg(plane->dev,
"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
plane->base.id, width, height);
else
drm_dbg(plane->dev,
"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
plane->base.id, x, y, width, height);
(*i)++;
}
/**
* fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
*
@ -4968,10 +4992,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
* addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
* implicitly provide damage clips without any client support via the plane
* bounds.
*
* Today, amdgpu_dm only supports the MPO and cursor usecase.
*
* TODO: Also enable for FB_DAMAGE_CLIPS
*/
static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *old_plane_state,
@ -4982,12 +5002,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
struct rect *dirty_rects = flip_addrs->dirty_rects;
uint32_t num_clips;
struct drm_mode_rect *clips;
bool bb_changed;
bool fb_changed;
u32 i = 0;
flip_addrs->dirty_rect_count = 0;
/*
* Cursor plane has it's own dirty rect update interface. See
* dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
@ -4995,20 +5014,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return;
/*
* Today, we only consider MPO use-case for PSR SU. If MPO not
* requested, and there is a plane update, do FFU.
*/
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
if (!dm_crtc_state->mpo_requested) {
dirty_rects[0].x = 0;
dirty_rects[0].y = 0;
dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
flip_addrs->dirty_rect_count = 1;
DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
new_plane_state->plane->base.id,
dm_crtc_state->base.mode.crtc_hdisplay,
dm_crtc_state->base.mode.crtc_vdisplay);
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
for (; flip_addrs->dirty_rect_count < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
&dirty_rects[flip_addrs->dirty_rect_count],
clips->x1, clips->y1,
clips->x2 - clips->x1, clips->y2 - clips->y1,
&flip_addrs->dirty_rect_count,
false);
return;
}
@ -5019,7 +5038,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
* If plane is moved or resized, also add old bounding box to dirty
* rects.
*/
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
fb_changed = old_plane_state->fb->base.id !=
new_plane_state->fb->base.id;
bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
@ -5027,36 +5045,51 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
old_plane_state->crtc_w != new_plane_state->crtc_w ||
old_plane_state->crtc_h != new_plane_state->crtc_h);
DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
new_plane_state->plane->base.id,
bb_changed, fb_changed, num_clips);
drm_dbg(plane->dev,
"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
new_plane_state->plane->base.id,
bb_changed, fb_changed, num_clips);
if (num_clips || fb_changed || bb_changed) {
dirty_rects[i].x = new_plane_state->crtc_x;
dirty_rects[i].y = new_plane_state->crtc_y;
dirty_rects[i].width = new_plane_state->crtc_w;
dirty_rects[i].height = new_plane_state->crtc_h;
DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
new_plane_state->plane->base.id,
dirty_rects[i].x, dirty_rects[i].y,
dirty_rects[i].width, dirty_rects[i].height);
i += 1;
if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
goto ffu;
if (bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
new_plane_state->crtc_y,
new_plane_state->crtc_w,
new_plane_state->crtc_h, &i, false);
/* Add old plane bounding-box if plane is moved or resized */
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
old_plane_state->crtc_x,
old_plane_state->crtc_y,
old_plane_state->crtc_w,
old_plane_state->crtc_h, &i, false);
}
/* Add old plane bounding-box if plane is moved or resized */
if (bb_changed) {
dirty_rects[i].x = old_plane_state->crtc_x;
dirty_rects[i].y = old_plane_state->crtc_y;
dirty_rects[i].width = old_plane_state->crtc_w;
dirty_rects[i].height = old_plane_state->crtc_h;
DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
old_plane_state->plane->base.id,
dirty_rects[i].x, dirty_rects[i].y,
dirty_rects[i].width, dirty_rects[i].height);
i += 1;
if (num_clips) {
for (; i < num_clips; clips++)
fill_dc_dirty_rect(new_plane_state->plane,
&dirty_rects[i], clips->x1,
clips->y1, clips->x2 - clips->x1,
clips->y2 - clips->y1, &i, false);
} else if (fb_changed && !bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
new_plane_state->crtc_y,
new_plane_state->crtc_w,
new_plane_state->crtc_h, &i, false);
}
flip_addrs->dirty_rect_count = i;
return;
ffu:
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
dm_crtc_state->base.mode.crtc_hdisplay,
dm_crtc_state->base.mode.crtc_vdisplay,
&flip_addrs->dirty_rect_count, true);
}
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
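With FB_DAMAGE_CLIPS now honored by amdgpu_dm, a compositor can tell the driver which region actually changed so PSR-SU refreshes only that rectangle. A hypothetical libdrm sketch, with error handling omitted and the atomic request, plane id and property id assumed to be looked up elsewhere:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void mark_damage(int fd, drmModeAtomicReq *req, uint32_t plane_id,
			uint32_t fb_damage_clips_prop_id)
{
	/* One damaged 128x128 region at (64, 64), framebuffer coordinates. */
	struct drm_mode_rect clip = { .x1 = 64, .y1 = 64, .x2 = 192, .y2 = 192 };
	uint32_t blob_id;

	drmModeCreatePropertyBlob(fd, &clip, sizeof(clip), &blob_id);
	drmModeAtomicAddProperty(req, plane_id, fb_damage_clips_prop_id, blob_id);
	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeDestroyPropertyBlob(fd, blob_id);
}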


@ -677,7 +677,7 @@ void dm_handle_mst_sideband_msg_ready_event(
if (retry == 3) {
DRM_ERROR("Failed to ack MST event.\n");
return;
break;
}
drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);


@ -1600,6 +1600,10 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
supported_rotations);
if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) &&
plane->type != DRM_PLANE_TYPE_CURSOR)
drm_plane_enable_fb_damage_clips(plane);
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
#ifdef CONFIG_DRM_AMD_DC_HDR


@ -24,6 +24,7 @@
*/
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
@ -50,7 +51,7 @@ static bool link_supports_psrsu(struct dc_link *link)
!link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)
return false;
return true;
return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
}
/*


@ -108,6 +108,11 @@ static int dcn314_get_active_display_cnt_wa(
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
/* Checking stream / link detection ensuring that PHY is active*/
if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
display_count++;
}
for (i = 0; i < dc->link_count; i++) {


@ -135,9 +135,7 @@ static const char DC_BUILD_ID[] = "production-build";
* one or two (in the pipe-split case).
*/
/*******************************************************************************
* Private functions
******************************************************************************/
/* Private functions */
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
@ -384,16 +382,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
* dc_stream_adjust_vmin_vmax:
* dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
* @dc: dc reference
* @stream: Initial dc stream state
* @adjust: Updated parameters for vertical_total_min and vertical_total_max
*
* Looks up the pipe context of dc_stream_state and updates the
* vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
* Rate, which is a power-saving feature that targets reducing panel
* refresh rate while the screen is static
*
* @dc: dc reference
* @stream: Initial dc stream state
* @adjust: Updated parameters for vertical_total_min and vertical_total_max
* Return: %true if the pipe context is found and adjusted;
* %false if the pipe context is not found.
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@ -429,18 +429,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_stream_get_last_vrr_vtotal
* dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
* dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
*
* @brief
* Looks up the pipe context of dc_stream_state and gets the
* last VTOTAL used by DRR (Dynamic Refresh Rate)
* @dc: [in] dc reference
* @stream: [in] Initial dc stream state
* @refresh_rate: [in] new refresh_rate
*
* @param [in] dc: dc reference
* @param [in] stream: Initial dc stream state
* @param [in] adjust: Updated parameters for vertical_total_min and
* vertical_total_max
*****************************************************************************
* Return: %true if the pipe context is found and there is an associated
* timing_generator for the DC;
* %false if the pipe context is not found or there is no
* timing_generator for the DC.
*/
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
@ -587,7 +586,10 @@ bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *s
* once.
*
* By default, only CRC0 is configured, and the entire frame is used to
* calculate the crc.
* calculate the CRC.
*
* Return: %false if the stream is not found or CRC capture is not supported;
* %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
@ -656,7 +658,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
*
* Return:
* false if stream is not found, or if CRCs are not enabled.
* %false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
@ -1236,9 +1238,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
PERF_TRACE();
}
/*******************************************************************************
* Public functions
******************************************************************************/
/* Public functions */
struct dc *dc_create(const struct dc_init_data *init_params)
{
@ -1505,17 +1505,19 @@ static void program_timing_sync(
}
}
static bool context_changed(
struct dc *dc,
struct dc_state *context)
static bool streams_changed(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count)
{
uint8_t i;
if (context->stream_count != dc->current_state->stream_count)
if (stream_count != dc->current_state->stream_count)
return true;
for (i = 0; i < dc->current_state->stream_count; i++) {
if (dc->current_state->streams[i] != context->streams[i])
if (dc->current_state->streams[i] != streams[i])
return true;
if (!streams[i]->link->link_state_valid)
return true;
}
@ -1745,6 +1747,8 @@ void dc_z10_save_init(struct dc *dc)
/*
* Applies given context to HW and copy it into current context.
* It's up to the user to release the src context afterwards.
*
* Return: an enum dc_status result code for the operation
*/
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
@ -1911,12 +1915,114 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
return result;
}
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context);
/**
* dc_commit_streams - Commit current stream state
*
* @dc: DC object with the commit state to be configured in the hardware
* @streams: Array with a list of stream state
* @stream_count: Total of streams
*
* Function responsible for committing stream changes to the hardware.
*
* Return:
* Return DC_OK if everything works as expected; otherwise, return a dc_status
* code.
*/
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count)
{
int i, j;
struct dc_state *context;
enum dc_status res = DC_OK;
struct dc_validation_set set[MAX_STREAMS] = {0};
struct pipe_ctx *pipe;
bool handle_exit_odm2to1 = false;
if (!streams_changed(dc, streams, stream_count))
return res;
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
struct dc_stream_status *status = dc_stream_get_status(stream);
dc_stream_log(dc, stream);
set[i].stream = stream;
if (status) {
set[i].plane_count = status->plane_count;
for (j = 0; j < status->plane_count; j++)
set[i].plane_states[j] = status->plane_states[j];
}
}
/* Check for case where we are going from odm 2:1 to max
* pipe scenario. For these cases, we will call
* commit_minimal_transition_state() to exit out of odm 2:1
* first before processing new streams
*/
if (stream_count == dc->res_pool->pipe_count) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
handle_exit_odm2to1 = true;
}
}
if (handle_exit_odm2to1)
res = commit_minimal_transition_state(dc, dc->current_state);
context = dc_create_state(dc);
if (!context)
goto context_alloc_fail;
dc_resource_state_copy_construct_current(dc, context);
/*
* Previous validation was performed with fast_validation = true and
* the full DML state required for hardware programming was skipped.
*
* Re-validate here to calculate these parameters / watermarks.
*/
res = dc_validate_global_state(dc, context, false);
if (res != DC_OK) {
DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
dc_status_to_str(res), res);
return res;
}
res = dc_commit_state_no_check(dc, context);
context_alloc_fail:
DC_LOG_DC("%s Finished.\n", __func__);
return (res == DC_OK);
}
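A display manager hands the new entry point its full set of active streams and checks the status code. A hypothetical call site:

/* streams/stream_count would come from the DM's atomic state. */
enum dc_status status = dc_commit_streams(dc, streams, stream_count);

if (status != DC_OK)
	DRM_ERROR("dc_commit_streams failed: %d\n", status);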
/* TODO: When the transition to the new commit sequence is done, remove this
* function in favor of dc_commit_streams. */
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i;
if (!context_changed(dc, context))
/* TODO: Since changing the commit sequence can have a huge impact,
* we decided to only enable it for DCN3x. However, as soon as
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs. */
if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
result = dc_commit_streams(dc, context->streams, context->stream_count);
return result == DC_OK;
}
if (!streams_changed(dc, context->streams, context->stream_count))
return DC_OK;
DC_LOG_DC("%s: %d streams\n",
@ -2482,8 +2588,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
su_flags->bits.crtc_timing_adjust = 1;
if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
(stream_update->vrr_infopacket || stream_update->allow_freesync ||
stream_update->vrr_active_variable))
su_flags->bits.fams_changed = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@ -3648,17 +3757,17 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
}
}
/* For SubVP when adding MPO video we need to add a minimal transition.
/* For SubVP when adding or removing planes we need to add a minimal transition
* (even when disabling all planes). Whenever disabling a phantom pipe, we
* must use the minimal transition path to disable the pipe correctly.
*/
if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
/* determine if minimal transition is required due to SubVP*/
if (surface_count > 0) {
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
*is_plane_addition = true;
}
if (cur_stream_status->plane_count > surface_count) {
force_minimal_pipe_splitting = true;
} else if (cur_stream_status->plane_count < surface_count) {
force_minimal_pipe_splitting = true;
*is_plane_addition = true;
}
}
@ -3675,6 +3784,8 @@ static bool commit_minimal_transition_state(struct dc *dc,
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
bool odm_in_use = false;
if (!transition_context)
return false;
@ -3687,6 +3798,30 @@ static bool commit_minimal_transition_state(struct dc *dc,
pipe_in_use++;
}
/* If SubVP is enabled and we are adding or removing planes from any main subvp
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
}
/* If ODM is enabled and we are adding or removing planes from any ODM
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->next_odm_pipe) {
odm_in_use = true;
break;
}
}
/* When the OS adds a new surface while all pipes are already in use with the odm combine
* and mpc split features, it needs to use commit_minimal_transition_state to transition safely.
* After the OS exits MPO, it will go back to using odm and mpc split with all of the pipes; we need
@ -3695,7 +3830,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
* enter/exit MPO when DCN still have enough resources.
*/
if (pipe_in_use != dc->res_pool->pipe_count) {
if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
dc_release_state(transition_context);
return true;
}
@ -4430,21 +4565,17 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}
/*
*****************************************************************************
* Function: dc_is_dmub_outbox_supported -
/**
* dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
*
* @brief
* Checks whether DMUB FW supports outbox notifications, if supported
* DM should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
* @dc: [in] dc structure
*
* @param
* [in] dc: dc structure
* Checks whether DMUB FW supports outbox notifications; if supported, DM
* should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
*
* @return
* True if DMUB FW supports outbox notifications, False otherwise
*****************************************************************************
* Return:
* True if DMUB FW supports outbox notifications, False otherwise
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
@ -4462,21 +4593,17 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
/*
*****************************************************************************
* Function: dc_enable_dmub_notifications
/**
* dc_enable_dmub_notifications - Check if dmub fw supports outbox
*
* @brief
* Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
* notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
* This API shall be removed after switching.
* @dc: [in] dc structure
*
* @param
* [in] dc: dc structure
* Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
* notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
* API shall be removed after switching.
*
* @return
* True if DMUB FW supports outbox notifications, False otherwise
*****************************************************************************
* Return:
* True if DMUB FW supports outbox notifications, False otherwise
*/
bool dc_enable_dmub_notifications(struct dc *dc)
{
@ -4484,18 +4611,11 @@ bool dc_enable_dmub_notifications(struct dc *dc)
}
/**
*****************************************************************************
* Function: dc_enable_dmub_outbox
* dc_enable_dmub_outbox - Enables DMUB unsolicited notification
*
* @brief
* Enables DMUB unsolicited notifications to x86 via outbox
* @dc: [in] dc structure
*
* @param
* [in] dc: dc structure
*
* @return
* None
*****************************************************************************
* Enables DMUB unsolicited notifications to x86 via outbox.
*/
void dc_enable_dmub_outbox(struct dc *dc)
{
@ -4596,21 +4716,17 @@ uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_process_dmub_set_config_async
* dc_process_dmub_set_config_async - Submits set_config command
*
* @brief
* Submits set_config command to dmub via inbox message
* @dc: [in] dc structure
* @link_index: [in] link_index: link index
* @payload: [in] aux payload
* @notify: [out] set_config immediate reply
*
* @param
* [in] dc: dc structure
* [in] link_index: link index
* [in] payload: aux payload
* [out] notify: set_config immediate reply
* Submits set_config command to dmub via inbox message.
*
* @return
* True if successful, False if failure
*****************************************************************************
* Return:
* True if successful, False if failure
*/
bool dc_process_dmub_set_config_async(struct dc *dc,
uint32_t link_index,
@ -4646,21 +4762,17 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_process_dmub_set_mst_slots
* dc_process_dmub_set_mst_slots - Submits MST slot allocation
*
* @brief
* Submits mst slot allocation command to dmub via inbox message
* @dc: [in] dc structure
* @link_index: [in] link index
* @mst_alloc_slots: [in] mst slots to be allotted
* @mst_slots_in_use: [out] mst slots in use returned in failure case
*
* @param
* [in] dc: dc structure
* [in] link_index: link index
* [in] mst_alloc_slots: mst slots to be allotted
* [out] mst_slots_in_use: mst slots in use returned in failure case
* Submits mst slot allocation command to dmub via inbox message
*
* @return
* DC_OK if successful, DC_ERROR if failure
*****************************************************************************
* Return:
* DC_OK if successful, DC_ERROR if failure
*/
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint32_t link_index,
@ -4700,19 +4812,12 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
*****************************************************************************
* Function: dc_process_dmub_dpia_hpd_int_enable
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
*
* @brief
* Submits dpia hpd int enable command to dmub via inbox message
* @dc: [in] dc structure
* @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
*
* @param
* [in] dc: dc structure
* [in] hpd_int_enable: 1 for hpd int enable, 0 to disable
*
* @return
* None
*****************************************************************************
* Submits dpia hpd int enable command to dmub via inbox message
*/
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable)
@ -4741,16 +4846,13 @@ void dc_disable_accelerated_mode(struct dc *dc)
/**
*****************************************************************************
* dc_notify_vsync_int_state() - notifies vsync enable/disable state
* dc_notify_vsync_int_state - notifies vsync enable/disable state
* @dc: dc structure
* @stream: stream where vsync int state changed
* @enable: whether vsync is enabled or disabled
* @stream: stream where vsync int state changed
* @enable: whether vsync is enabled or disabled
*
* Called when vsync is enabled/disabled
* Will notify DMUB to start/stop ABM interrupts after steady state is reached
*
*****************************************************************************
* Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
* interrupts after steady state is reached.
*/
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
@ -4792,17 +4894,3 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
/*
* dc_extended_blank_supported: Decide whether extended blank is supported
*
* Extended blank is a freesync optimization feature to be enabled in the future.
* During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
*
* @param [in] dc: Current DC state
* @return: Indicate whether extended blank is supported (true or false)
*/
bool dc_extended_blank_supported(struct dc *dc)
{
return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
&& dc->caps.zstate_support && dc->caps.is_apu;
}


@ -1444,6 +1444,26 @@ static int acquire_first_split_pipe(
split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
split_pipe->pipe_idx = i;
split_pipe->stream = stream;
return i;
} else if (split_pipe->prev_odm_pipe &&
split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
if (split_pipe->next_odm_pipe)
split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
if (split_pipe->prev_odm_pipe->plane_state)
resource_build_scaling_params(split_pipe->prev_odm_pipe);
memset(split_pipe, 0, sizeof(*split_pipe));
split_pipe->stream_res.tg = pool->timing_generators[i];
split_pipe->plane_res.hubp = pool->hubps[i];
split_pipe->plane_res.ipp = pool->ipps[i];
split_pipe->plane_res.dpp = pool->dpps[i];
split_pipe->stream_res.opp = pool->opps[i];
split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
split_pipe->pipe_idx = i;
split_pipe->stream = stream;
return i;
}


@ -56,9 +56,7 @@ struct dmub_notification;
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
/* Display Core Interfaces */
struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
@ -993,9 +991,7 @@ void dc_init_callbacks(struct dc *dc,
void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
* Surface Interfaces
******************************************************************************/
/* Surface Interfaces */
enum {
TRANSFER_FUNC_POINTS = 1025
@ -1274,12 +1270,23 @@ void dc_post_update_surfaces_to_stream(
#include "dc_stream.h"
/*
* Structure to store surface/stream associations for validation
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
struct dc_validation_set {
/**
* @stream: Stream state properties
*/
struct dc_stream_state *stream;
/**
* @plane_state: Surface state
*/
struct dc_plane_state *plane_states[MAX_SURFACES];
/**
* @plane_count: Total of active planes
*/
uint8_t plane_count;
};
@ -1326,15 +1333,12 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
/*
* TODO update to make it about validation sets
* Set up streams and links associated to drive sinks
* The streams parameter is an absolute set of all active streams.
*
* After this call:
* Phy, Encoder, Timing Generator are programmed and enabled.
* New streams are enabled with blank stream; no memory read.
*/
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
/* TODO: When the transition to the new commit sequence is done, remove this
* function in favor of dc_commit_streams. */
bool dc_commit_state(struct dc *dc, struct dc_state *context);
struct dc_state *dc_create_state(struct dc *dc);
@ -1342,9 +1346,7 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
/*******************************************************************************
* Link Interfaces
******************************************************************************/
/* Link Interfaces */
struct dpcd_caps {
union dpcd_rev dpcd_rev;
@ -1446,9 +1448,7 @@ struct hdcp_caps {
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
/*******************************************************************************
* Sink Interfaces - A sink corresponds to a display output device
******************************************************************************/
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
// 128bit GUID in binary form
@ -1520,8 +1520,6 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
bool dc_extended_blank_supported(struct dc *dc);
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
@ -1531,9 +1529,7 @@ struct dc_cursor {
};
/*******************************************************************************
* Interrupt interfaces
******************************************************************************/
/* Interrupt interfaces */
enum dc_irq_source dc_interrupt_to_irq_source(
struct dc *dc,
uint32_t src_id,
@ -1545,9 +1541,7 @@ enum dc_irq_source dc_get_hpd_irq_source_at_index(
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
/*******************************************************************************
* Power Interfaces
******************************************************************************/
/* Power Interfaces */
void dc_set_power_state(
struct dc *dc,
@ -1620,14 +1614,10 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable);
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
/* DSC Interfaces */
#include "dc_dsc.h"
/*******************************************************************************
* Disable acc mode Interfaces
******************************************************************************/
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
#endif /* DC_INTERFACE_H_ */


@ -1026,3 +1026,10 @@ void dc_send_update_cursor_info_to_dmu(
dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
}
}
bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
if (!srv->hw_funcs.is_psrsu_supported)
return true;
return srv->hw_funcs.is_psrsu_supported(srv);
}
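This helper follows the usual optional-hook convention: an absent per-ASIC callback means there is no minimum firmware version to enforce, so the feature stays allowed. A generic sketch of the idiom with hypothetical types:

struct hw_ops {
	bool (*is_feature_supported)(void);	/* optional per-hardware hook */
};

static bool feature_allowed(const struct hw_ops *ops)
{
	if (!ops->is_feature_supported)
		return true;	/* no check implemented: assume supported */
	return ops->is_feature_supported();
}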


@ -89,4 +89,5 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, b
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
bool dc_dmub_check_min_version(struct dmub_srv *srv);
#endif /* _DMUB_DC_SRV_H_ */


@ -41,6 +41,10 @@ struct timing_sync_info {
struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;
/**
* @plane_count: Total of planes attached to a single stream
*/
int plane_count;
int audio_inst;
struct timing_sync_info timing_sync_info;
@ -127,6 +131,7 @@ union stream_update_flags {
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;
uint32_t raw;


@ -2036,7 +2036,7 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@ -2044,7 +2044,7 @@ void dcn20_optimize_bandwidth(
&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
pipe_ctx->dlg_regs.min_dst_y_next_start);
}
}
}


@ -292,7 +292,12 @@ void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
struct dc *dc = optc->ctx->dc;
if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
else
optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
}
void optc3_tg_init(struct timing_generator *optc)


@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
/* Should never be hit, if it is we have an erroneous hw config*/
ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);


@ -136,6 +136,9 @@
#define DCN3_15_MAX_DET_SIZE 384
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
/* Minimum 2 extra segments need to be in compbuf and claimable to guarantee seamless mpo transitions */
#define MIN_RESERVED_DET_SEGS 2
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
@ -1636,21 +1639,61 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
{
if (SourcePixelFormat == dm_444_64)
return 8;
else if (SourcePixelFormat == dm_444_16)
return 2;
else if (SourcePixelFormat == dm_444_8)
return 1;
else if (SourcePixelFormat == dm_rgbe_alpha)
return 5;
else if (SourcePixelFormat == dm_420_8)
return 3;
else if (SourcePixelFormat == dm_420_12)
return 6;
else
return 4;
}
static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
{
int i;
struct resource_context *res_ctx = &context->res_ctx;
/*Don't apply for single stream*/
if (context->stream_count < 2)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
continue;
/*Don't apply if MPO to avoid transition issues*/
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
return false;
}
return true;
}
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
int i, pipe_cnt;
int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
if (!res_ctx->pipe_ctx[i].stream)
@ -1672,6 +1715,23 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
/* Ceil to crb segment size */
int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
&context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
if (split_required)
approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
remaining_det_segs -= approx_det_segs_required_for_pstate;
} else
remaining_det_segs = -1;
crb_pipes++;
}
DC_FP_END();
if (pipes[pipe_cnt].dout.dsc_enable) {
@ -1690,16 +1750,54 @@ static int dcn315_populate_dml_pipes_from_context(
break;
}
}
pipe_cnt++;
}
/* Spread remaining unreserved crb evenly among all pipes*/
if (pixel_rate_crb) {
for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &res_ctx->pipe_ctx[i];
if (!pipe->stream)
continue;
/* Do not use asymmetric crb if not enough for pstate support */
if (remaining_det_segs < 0) {
pipes[pipe_cnt].pipe.src.det_size_override = 0;
continue;
}
if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
|| (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
(crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
/* Clamp to 2 pipe split max det segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
}
if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
/* If we are splitting we must have an even number of segments */
remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
}
/* Convert segments into size for DML use */
pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
crb_idx++;
}
pipe_cnt++;
}
}
if (pipe_cnt)
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE);
dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
@ -1707,7 +1805,9 @@ static int dcn315_populate_dml_pipes_from_context(
dc->config.enable_4to1MPC = true;
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB;
} else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
} else if (!is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 5120
&& pipe->stream->timing.pix_clk_100hz < dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)) {
/* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
pipes[0].pipe.src.unbounded_req_mode = true;


@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
unsigned int optimized_min_dst_y_next_start_us;
unsigned int min_dst_y_next_start_us;
plane_count = 0;
optimized_min_dst_y_next_start_us = 0;
min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
bool isFreesyncVideo;
if (dc_extended_blank_supported(dc)) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
&& context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
optimized_min_dst_y_next_start_us =
context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
break;
}
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
break;
}
}
@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;


@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
int i, pipe_idx, active_dpp_count = 0;
int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@ -529,7 +529,7 @@ void dcn31_calculate_wm_and_dlg_fp(
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_dpp_count++;
active_hubp_count++;
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@ -547,9 +547,34 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
/* For 31x apu pstate change is only supported if possible in vactive or if there are no active dpps */
/* For 31x apu pstate change is only supported if possible in vactive*/
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive;
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
}
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
pipe_idx++;
}
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@ -797,3 +822,19 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
else
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
}
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
/ 10240000 + seg_size_kb - 1) / seg_size_kb;
}
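A worked example with assumed numbers makes the units clear: for an 11 us DRAM clock-change latency, a 594 MHz pixel clock (5,940,000 in 100 Hz units) and 4 bytes per pixel, the data produced during the latency window is 11 * 5,940,000 * 4 / 10,240,000, or about 25.5 KB; the constant 10,240,000 folds the microsecond and 100 Hz scale factors together with the 1024 bytes-per-KB conversion, and the add-then-divide rounds the result up to one 64 KB segment.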


@ -46,5 +46,9 @@ void dcn31_calculate_wm_and_dlg_fp(
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb);
#endif /* __DCN31_FPU_H__*/


@ -533,7 +533,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
bool DETSharedByAllDPP,
unsigned int DETBufferSizeInKByte[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@ -3116,7 +3117,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
v->DETBufferSizeInKByte[0] * 1024,
v->DETBufferSizeInKByte[k] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
@ -3311,7 +3312,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
dummy1,
dummy2,
v->SourceScan,
@ -3777,14 +3779,16 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
{
int i, total_pipes = 0;
for (i = 0; i < NumberOfActivePlanes; i++)
total_pipes += NoOfDPPThisState[i];
*DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
*DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
for (i = 1; i < NumberOfActivePlanes; i++)
DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
}
@ -4024,7 +4028,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@ -4164,6 +4169,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
v->MPCCombine[i][j][k] = true;
v->NoOfDPP[i][j][k] = 2;
}
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
@ -4640,12 +4649,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
v->DETBufferSizeInKByte[0],
mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@ -6557,7 +6567,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
unsigned int DETBufferSizeInKByte,
bool DETSharedByAllDPP,
unsigned int DETBufferSizeInKByteA[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@ -6641,6 +6652,10 @@ static void CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
if (DETSharedByAllDPP && DPPPerPlane[k])
DETBufferSizeInKByte /= DPPPerPlane[k];
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear


@ -988,8 +988,7 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
disp_dlg_regs->min_dst_y_next_start_us = 0;
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);


@ -32,7 +32,7 @@
#include "dml/display_mode_vba.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 668,
.VBlankNomDefaultUS = 800,
.gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1,
@ -288,6 +288,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
dc_assert_fp_enabled();
@ -301,9 +302,15 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
&& pipe->stream->adjust.v_total_min > timing->v_total)
if (pipe->stream->adjust.v_total_min != 0)
pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, dcn3_14_ip.VBlankNomDefaultUS);
pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
@@ -327,8 +334,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.dest.vblank_nom =
dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
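
The replacement vblank_nom computation is a three-step clamp: derive the nominal vblank from the timing, cap it at VBlankNomDefaultUS (raised to 800 above), keep it at least v_sync_width, then cap it at 1023, which the max_allowed_vblank_nom constant suggests is the largest value the downstream field can hold. A standalone sketch with invented timing numbers:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int v_total = 2250, vactive = 2160, v_sync_width = 8;
	unsigned int vblank_nom_default = 800;  /* dcn3_14_ip.VBlankNomDefaultUS */
	unsigned int max_allowed = 1023;        /* max_allowed_vblank_nom */

	unsigned int vblank_nom = v_total - vactive;       /* 90 */
	vblank_nom = MIN(vblank_nom, vblank_nom_default);  /* cap at default */
	vblank_nom = MAX(vblank_nom, v_sync_width);        /* floor at sync width */
	vblank_nom = MIN(vblank_nom, max_allowed);         /* cap at field limit */
	printf("vblank_nom = %u\n", vblank_nom);
	return 0;
}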

@@ -1053,7 +1053,6 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
int blank_lines = 0;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1077,17 +1076,10 @@ static void dml_rq_dlg_get_dlg_params(
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
if (blank_lines < 0)
blank_lines = 0;
if (blank_lines != 0) {
disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start;
disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start;
}
disp_dlg_regs->min_dst_y_next_start_us =
(vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);

@@ -1237,7 +1237,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
{
int i, pipe_idx;
int i, pipe_idx, active_hubp_count = 0;
bool usr_retraining_support = false;
bool unbounded_req_enabled = false;
@@ -1282,6 +1282,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
if (context->res_ctx.pipe_ctx[i].plane_state)
active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
@@ -1303,10 +1305,23 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
if (context->res_ctx.pipe_ctx[i].plane_state)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
else
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
if (!active_hubp_count) {
context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
}
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;

@@ -618,8 +618,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
unsigned int optimized_min_dst_y_next_start;
unsigned int optimized_min_dst_y_next_start_us;
unsigned int min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;

@@ -569,6 +569,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
if (src->det_size_override)
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
else
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
//TODO: Need to assign correct values to dp_multistream vars
mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
@@ -783,6 +787,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[k] =
mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
if (src_k->det_size_override)
mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=

@@ -350,6 +350,8 @@ struct dmub_srv_hw_funcs {
bool (*is_supported)(struct dmub_srv *dmub);
bool (*is_psrsu_supported)(struct dmub_srv *dmub);
bool (*is_hw_init)(struct dmub_srv *dmub);
bool (*is_phy_init)(struct dmub_srv *dmub);

@@ -345,7 +345,7 @@ union dmub_fw_boot_status {
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
uint32_t restore_required : 1; /**< 1 if driver should call restore */
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
uint32_t reserved : 1;
uint32_t fams_enabled : 1; /**< 1 if VBIOS data is deferred programmed */
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
} bits; /**< status bits */

@@ -22,7 +22,7 @@
DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
DMUB += dmub_dcn30.o dmub_dcn301.o dmub_dcn302.o dmub_dcn303.o
DMUB += dmub_dcn31.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn31.o dmub_dcn314.o dmub_dcn315.o dmub_dcn316.o
DMUB += dmub_dcn32.o
AMD_DAL_DMUB = $(addprefix $(AMDDALPATH)/dmub/src/,$(DMUB))

@@ -297,6 +297,11 @@ bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
return supported;
}
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
{
return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
}
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg)
{
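
The new hook gates PSR-SU on a minimum DMUB firmware version via a packed integer compare. The packing below is an assumption for illustration (one byte per component, major most significant, so an ordinary >= orders versions correctly); the authoritative macro lives in dmub_srv.h:

#include <stdint.h>
#include <stdio.h>

/* assumed layout, not copied from dmub_srv.h */
#define DMUB_FW_VERSION(major, minor, rev) \
	((uint32_t)((((major) & 0xff) << 16) | (((minor) & 0xff) << 8) | ((rev) & 0xff)))

int main(void)
{
	uint32_t fw_version = DMUB_FW_VERSION(4, 0, 60);

	/* mirrors the check in dmub_dcn31_is_psrsu_supported() above */
	printf("PSR-SU supported: %d\n", fw_version >= DMUB_FW_VERSION(4, 0, 59));
	return 0;
}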

@@ -219,6 +219,8 @@ bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn31_is_supported(struct dmub_srv *dmub);
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub);
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg);

@@ -0,0 +1,67 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn314.h"
#include "dcn/dcn_3_1_4_offset.h"
#include "dcn/dcn_3_1_4_sh_mask.h"
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs_dcn31
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
/* Registers. */
const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs = {
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
{
DMUB_DCN31_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
};
bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub)
{
return dmub->fw_version >= DMUB_FW_VERSION(8, 0, 16);
}
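
The new file resolves register offsets by token pasting: BASE_INNER(seg) glues the segment number onto DCN_BASE__INST0_SEG to pick one of the base constants, and a BASE() wrapper (assumed here; it is defined in the shared DMUB sources rather than in this file) adds the extra expansion level the idiom normally needs. A self-contained demonstration of the pattern using two of the constants above:

#include <stdio.h>

#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG2 0x000034C0

#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define BASE(seg)       BASE_INNER(seg)  /* lets a macro argument expand before pasting */

int main(void)
{
	printf("seg0 base = 0x%08X\n", BASE(0));
	printf("seg2 base = 0x%08X\n", BASE(2));
	return 0;
}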

@@ -0,0 +1,35 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DMUB_DCN314_H_
#define _DMUB_DCN314_H_
#include "dmub_dcn31.h"
extern const struct dmub_srv_dcn31_regs dmub_srv_dcn314_regs;
bool dmub_dcn314_is_psrsu_supported(struct dmub_srv *dmub);
#endif /* _DMUB_DCN314_H_ */

@@ -32,6 +32,7 @@
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
#include "dmub_dcn31.h"
#include "dmub_dcn314.h"
#include "dmub_dcn315.h"
#include "dmub_dcn316.h"
#include "dmub_dcn32.h"
@@ -226,12 +227,17 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
case DMUB_ASIC_DCN314:
case DMUB_ASIC_DCN315:
case DMUB_ASIC_DCN316:
if (asic == DMUB_ASIC_DCN315)
if (asic == DMUB_ASIC_DCN314) {
dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
} else if (asic == DMUB_ASIC_DCN315) {
dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
else if (asic == DMUB_ASIC_DCN316)
} else if (asic == DMUB_ASIC_DCN316) {
dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
else
} else {
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
}
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
funcs->backdoor_load = dmub_dcn31_backdoor_load;

@@ -2081,89 +2081,36 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
return ret;
}
static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
uint32_t *gen_speed_override,
uint32_t *lane_width_override)
{
struct amdgpu_device *adev = smu->adev;
*gen_speed_override = 0xff;
*lane_width_override = 0xff;
switch (adev->pdev->device) {
case 0x73A0:
case 0x73A1:
case 0x73A2:
case 0x73A3:
case 0x73AB:
case 0x73AE:
/* Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 */
*lane_width_override = 6;
break;
case 0x73E0:
case 0x73E1:
case 0x73E3:
*lane_width_override = 4;
break;
case 0x7420:
case 0x7421:
case 0x7422:
case 0x7423:
case 0x7424:
*lane_width_override = 3;
break;
default:
break;
}
}
#define MAX(a, b) ((a) > (b) ? (a) : (b))
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
{
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
uint32_t gen_speed_override, lane_width_override;
uint8_t *table_member1, *table_member2;
uint32_t min_gen_speed, max_gen_speed;
uint32_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg;
u32 smu_pcie_arg;
int ret, i;
GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
/* PCIE gen speed and lane width override */
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
sienna_cichlid_get_override_pcie_settings(smu,
&gen_speed_override,
&lane_width_override);
if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
/* PCIE gen speed override */
if (gen_speed_override != 0xff) {
min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
/* Force all levels to use the same settings */
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_table->pcie_gen[i] = pcie_gen_cap;
pcie_table->pcie_lane[i] = pcie_width_cap;
}
} else {
min_gen_speed = MAX(0, table_member1[0]);
max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
min_gen_speed = min_gen_speed > max_gen_speed ?
max_gen_speed : min_gen_speed;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pcie_table->pcie_gen[i] > pcie_gen_cap)
pcie_table->pcie_gen[i] = pcie_gen_cap;
if (pcie_table->pcie_lane[i] > pcie_width_cap)
pcie_table->pcie_lane[i] = pcie_width_cap;
}
}
pcie_table->pcie_gen[0] = min_gen_speed;
pcie_table->pcie_gen[1] = max_gen_speed;
/* PCIE lane width override */
if (lane_width_override != 0xff) {
min_lane_width = MIN(pcie_width_cap, lane_width_override);
max_lane_width = MIN(pcie_width_cap, lane_width_override);
} else {
min_lane_width = MAX(1, table_member2[0]);
max_lane_width = MIN(pcie_width_cap, table_member2[1]);
min_lane_width = min_lane_width > max_lane_width ?
max_lane_width : min_lane_width;
}
pcie_table->pcie_lane[0] = min_lane_width;
pcie_table->pcie_lane[1] = max_lane_width;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16 |
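
The hunk is cut off mid-expression; in the SMU11/SMU13 sources this loop packs the link level, gen speed, and lane count into a single message argument, presumably completing as level << 16 | gen << 8 | width. A sketch of that assumed layout (the width codes follow the removed comment above: 1..7 map to x1..x32, so 6 means x16):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pcie_arg(uint32_t level, uint32_t gen, uint32_t width)
{
	/* assumed SMU_MSG_OverridePcieParameters argument layout */
	return (level << 16) | (gen << 8) | width;
}

int main(void)
{
	/* link level 1, gen code 3, lane code 6 (x16) */
	printf("smu_pcie_arg = 0x%06X\n", pack_pcie_arg(1, 3, 6));
	return 0;
}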

@@ -2490,25 +2490,6 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
return ret;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host
* supports it, it's safer that we keep it disabled for all.
*
* https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
* https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
static bool smu_v13_0_is_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
if (c->x86_vendor == X86_VENDOR_INTEL)
return false;
#endif
return true;
}
int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap,
uint32_t pcie_width_cap)
@@ -2520,7 +2501,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t smu_pcie_arg;
int ret, i;
if (!smu_v13_0_is_pcie_dynamic_switching_supported()) {
if (!amdgpu_device_pcie_dynamic_switching_supported()) {
if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
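
Both SMU generations now defer to one device-level helper. Its body is presumably the logic deleted above, along the lines of this kernel-style sketch (see amdgpu_device.c for the real implementation):

#include <linux/types.h>
#if IS_ENABLED(CONFIG_X86)
#include <asm/processor.h>
#endif

bool amdgpu_device_pcie_dynamic_switching_supported(void)
{
#if IS_ENABLED(CONFIG_X86)
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* Intel hosts have not confirmed dynamic speed switching works */
	if (c->x86_vendor == X86_VENDOR_INTEL)
		return false;
#endif
	return true;
}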

@@ -163,6 +163,8 @@ struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
i915_vma_get(vma);
}
dpt->obj->mm.dirty = true;
atomic_dec(&i915->gpu_error.pending_fb_pin);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -258,7 +260,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
dpt_obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
dpt_obj = i915_gem_object_create_internal(i915, size);
dpt_obj = i915_gem_object_create_shmem(i915, size);
}
if (IS_ERR(dpt_obj))
return ERR_CAST(dpt_obj);

@@ -1185,8 +1185,10 @@ static int igt_write_huge(struct drm_i915_private *i915,
* times in succession a possibility by enlarging the permutation array.
*/
order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
if (!order) {
err = -ENOMEM;
goto out;
}
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
max = div_u64(max - size, max_page_size);

@@ -89,7 +89,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
* since we've already mapped it once in
* submit_reloc()
*/
if (WARN_ON(!ptr))
if (WARN_ON(IS_ERR_OR_NULL(ptr)))
return;
for (i = 0; i < dwords; i++) {

@@ -200,7 +200,7 @@ static const struct a6xx_shader_block {
SHADER(A6XX_SP_LB_3_DATA, 0x800),
SHADER(A6XX_SP_LB_4_DATA, 0x800),
SHADER(A6XX_SP_LB_5_DATA, 0x200),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800),
SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
SHADER(A6XX_SP_UAV_DATA, 0x80),
SHADER(A6XX_SP_INST_TAG, 0x80),

@@ -14,19 +14,6 @@
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
* enum dpu_core_perf_data_bus_id - data bus identifier
* @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
* @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
* @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
*/
enum dpu_core_perf_data_bus_id {
DPU_CORE_PERF_DATA_BUS_ID_MNOC,
DPU_CORE_PERF_DATA_BUS_ID_LLCC,
DPU_CORE_PERF_DATA_BUS_ID_EBI,
DPU_CORE_PERF_DATA_BUS_ID_MAX,
};
/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request

@@ -932,13 +932,11 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
* retired, so if the fence is not found it means there is nothing
* to wait for
*/
ret = mutex_lock_interruptible(&queue->idr_lock);
if (ret)
return ret;
spin_lock(&queue->idr_lock);
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->idr_lock);
spin_unlock(&queue->idr_lock);
if (!fence)
return 0;

@@ -72,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
unsigned i;
if (submit->fence_id) {
mutex_lock(&submit->queue->idr_lock);
spin_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
mutex_unlock(&submit->queue->idr_lock);
spin_unlock(&submit->queue->idr_lock);
}
dma_fence_put(submit->user_fence);
@@ -866,7 +866,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
mutex_lock(&queue->idr_lock);
spin_lock(&queue->idr_lock);
/*
* If using userspace provided seqno fence, validate that the id
@@ -875,8 +875,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* after the job is armed
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
idr_find(&queue->fence_idr, args->fence)) {
mutex_unlock(&queue->idr_lock);
(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
ret = -EINVAL;
goto out;
}
@@ -910,7 +910,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
INT_MAX, GFP_KERNEL);
}
mutex_unlock(&queue->idr_lock);
spin_unlock(&queue->idr_lock);
if (submit->fence_id < 0) {
ret = submit->fence_id;

@@ -500,7 +500,7 @@ struct msm_gpu_submitqueue {
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
struct mutex idr_lock;
struct spinlock idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;

@@ -200,7 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
*id = queue->id;
idr_init(&queue->fence_idr);
mutex_init(&queue->idr_lock);
spin_lock_init(&queue->idr_lock);
mutex_init(&queue->lock);
list_add_tail(&queue->node, &ctx->submitqueues);

@@ -499,17 +499,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
goto out;
}
bounce:
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (ret == -EMULTIHOP) {
do {
ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
if (ret != -EMULTIHOP)
break;
ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
if (ret) {
} while (!ret);
if (ret) {
ttm_resource_free(bo, &evict_mem);
if (ret != -ERESTARTSYS && ret != -EINTR)
pr_err("Buffer eviction failed\n");
ttm_resource_free(bo, &evict_mem);
goto out;
}
/* try and move to final place now. */
goto bounce;
}
out:
return ret;
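
The rewritten eviction path keeps retrying through temporary placements until the move either succeeds or fails for a reason other than -EMULTIHOP, and frees the resource on any real error. A standalone model of that control flow, with stub callbacks standing in for ttm_bo_handle_move_mem() and ttm_bo_bounce_temp_buffer():

#include <stdio.h>

#define EMULTIHOP 72  /* illustrative errno value */

static int attempts;

static int try_move(void)        /* ttm_bo_handle_move_mem() stand-in */
{
	return ++attempts < 3 ? -EMULTIHOP : 0;
}

static int bounce_to_temp(void)  /* ttm_bo_bounce_temp_buffer() stand-in */
{
	return 0;
}

int main(void)
{
	int ret;

	do {
		ret = try_move();
		if (ret != -EMULTIHOP)
			break;           /* success or hard failure */
		ret = bounce_to_temp();  /* hop via a temporary placement */
	} while (!ret);

	printf("done after %d attempts, ret = %d\n", attempts, ret);
	return 0;
}
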
@@ -549,6 +550,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
if (bo->pin_count) {
*locked = false;
*busy = false;
return false;
}
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
if (ctx->allow_res_evict)

@@ -77,6 +77,13 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19)
#define ZEN_CUR_TEMP_TJ_SEL_MASK GENMASK(17, 16)
/*
* AMD's Industrial processor 3255 supports temperature from -40 deg to 105 deg Celsius.
* Use the model name to identify 3255 CPUs and set a flag to display negative temperature.
* Do not round off to zero for negative Tctl or Tdie values if the flag is set
*/
#define AMD_I3255_STR "3255"
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -86,6 +93,7 @@ struct k10temp_data {
u32 show_temp;
bool is_zen;
u32 ccd_offset;
bool disp_negative;
};
#define TCTL_BIT 0
@@ -204,12 +212,12 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
switch (channel) {
case 0: /* Tctl */
*val = get_raw_temp(data);
if (*val < 0)
if (*val < 0 && !data->disp_negative)
*val = 0;
break;
case 1: /* Tdie */
*val = get_raw_temp(data) - data->temp_offset;
if (*val < 0)
if (*val < 0 && !data->disp_negative)
*val = 0;
break;
case 2 ... 13: /* Tccd{1-12} */
@@ -405,6 +413,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
data->pdev = pdev;
data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */
if (boot_cpu_data.x86 == 0x17 &&
strstr(boot_cpu_data.x86_model_id, AMD_I3255_STR)) {
data->disp_negative = true;
}
if (boot_cpu_data.x86 == 0x15 &&
((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
(boot_cpu_data.x86_model & 0xf0) == 0x70)) {

@@ -725,7 +725,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */
return 0;
if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */
if (index >= 46 && !(reg & 0x02)) /* PECI 1 */
return 0;
return attr->mode;
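
The one-character fix above deserves a note: the old bound was written in hex, and 0x46 is decimal 70, so PECI-1 attributes with indices 46 through 69 were never hidden even with the PECI-1 enable bit clear. A two-line demonstration:

#include <assert.h>

int main(void)
{
	assert(0x46 == 70);  /* hex 0x46 is decimal 70, not 46 */
	assert(46 < 0x46);   /* indices 46..69 slipped past the old check */
	return 0;
}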

@@ -694,10 +694,8 @@ static int iic_probe(struct platform_device *ofdev)
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_err(&ofdev->dev, "failed to allocate device data\n");
if (!dev)
return -ENOMEM;
}
platform_set_drvdata(ofdev, dev);

@@ -970,12 +970,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL);
if (!dev) {
dev_err(&adev->dev, "cannot allocate memory\n");
ret = -ENOMEM;
goto err_no_mem;
}
dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->vendor = vendor;
dev->adev = adev;
nmk_i2c_of_probe(np, dev);
@@ -996,30 +994,21 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
resource_size(&adev->res));
if (!dev->virtbase) {
ret = -ENOMEM;
goto err_no_mem;
}
if (!dev->virtbase)
return -ENOMEM;
dev->irq = adev->irq[0];
ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
DRIVER_NAME, dev);
if (ret) {
dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq);
goto err_no_mem;
return ret;
}
dev->clk = devm_clk_get(&adev->dev, NULL);
dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
if (IS_ERR(dev->clk)) {
dev_err(&adev->dev, "could not get i2c clock\n");
ret = PTR_ERR(dev->clk);
goto err_no_mem;
}
ret = clk_prepare_enable(dev->clk);
if (ret) {
dev_err(&adev->dev, "can't prepare_enable clock\n");
goto err_no_mem;
dev_err(&adev->dev, "could enable i2c clock\n");
return PTR_ERR(dev->clk);
}
init_hw(dev);
@@ -1042,22 +1031,15 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
ret = i2c_add_adapter(adap);
if (ret)
goto err_no_adap;
return ret;
pm_runtime_put(&adev->dev);
return 0;
err_no_adap:
clk_disable_unprepare(dev->clk);
err_no_mem:
return ret;
}
static void nmk_i2c_remove(struct amba_device *adev)
{
struct resource *res = &adev->res;
struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
i2c_del_adapter(&dev->adap);
@@ -1066,8 +1048,6 @@ static void nmk_i2c_remove(struct amba_device *adev)
clear_all_interrupts(dev);
/* disable the controller */
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
release_mem_region(res->start, resource_size(res));
}
static struct i2c_vendor_data vendor_stn8815 = {

@@ -443,9 +443,8 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
goto out0;
}
id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
dev_err(&pdev->dev, "no mem for private data\n");
ret = -ENOMEM;
goto out0;
}

@@ -226,10 +226,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
if (!dev)
goto error;
}
dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;

@@ -796,7 +796,10 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
struct bnxt_re_dev *rdev = qp->rdev;
struct bnxt_qplib_nq *scq_nq = NULL;
struct bnxt_qplib_nq *rcq_nq = NULL;
unsigned int flags;
int rc;
@@ -830,6 +833,15 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
/* Flush all the entries of notification queue associated with
* given qp.
*/
scq_nq = qplib_qp->scq->nq;
rcq_nq = qplib_qp->rcq->nq;
bnxt_re_synchronize_nq(scq_nq);
if (scq_nq != rcq_nq)
bnxt_re_synchronize_nq(rcq_nq);
return 0;
}

@@ -387,6 +387,24 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
spin_unlock_bh(&hwq->lock);
}
/* bnxt_re_synchronize_nq - self polling notification queue.
* @nq - notification queue pointer
*
* This function will start polling entries of a given notification queue
* for all pending entries.
* This function is useful to synchronize notification entries while resources
* are going away.
*/
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
int budget = nq->budget;
nq->budget = nq->hwq.max_elements;
bnxt_qplib_service_nq(&nq->nq_tasklet);
nq->budget = budget;
}
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_nq *nq = dev_instance;

@@ -548,6 +548,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe,
int num_cqes);
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);
static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
{

@@ -2693,13 +2693,13 @@ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
*/
void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
{
if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
if (timeout->compl_cqp_cmds != completed_ops) {
timeout->compl_cqp_cmds = completed_ops;
timeout->count = 0;
} else {
if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
timeout->compl_cqp_cmds)
timeout->count++;
} else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
timeout->count++;
}
}
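
The reworked progress check only advances the stall counter when completions have not moved but requests are still outstanding. A standalone model of that logic (names and values invented, not the irdma structures):

#include <stdint.h>
#include <stdio.h>

struct timeout_state {
	uint64_t last_completed;
	int count;
};

static void check_progress(struct timeout_state *t,
			   uint64_t completed, uint64_t requested)
{
	if (t->last_completed != completed) {
		t->last_completed = completed;  /* forward progress: reset */
		t->count = 0;
	} else if (t->last_completed != requested) {
		t->count++;                     /* outstanding work, no progress */
	}
}

int main(void)
{
	struct timeout_state t = { 0, 0 };

	check_progress(&t, 1, 3);  /* progress          -> count 0 */
	check_progress(&t, 1, 3);  /* stalled           -> count 1 */
	check_progress(&t, 3, 3);  /* progress again    -> count 0 */
	check_progress(&t, 3, 3);  /* idle, all drained -> count stays 0 */
	printf("count = %d\n", t.count);
	return 0;
}
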
@@ -2742,7 +2742,7 @@ static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
if (newtail != tail) {
/* SUCCESS */
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
atomic64_inc(&cqp->completed_ops);
return 0;
}
udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3102,8 +3102,8 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
cqp->requested_ops = 0;
atomic64_set(&cqp->completed_ops, 0);
/* for the cqp commands backlog. */
INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
@@ -3255,7 +3255,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
if (ret_code)
return NULL;
cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
cqp->requested_ops++;
if (!*wqe_idx)
cqp->polarity = !cqp->polarity;
wqe = cqp->sq_base[*wqe_idx].elem;
@@ -3344,6 +3349,9 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
if (polarity != ccq->cq_uk.polarity)
return -ENOENT;
/* Ensure CEQE contents are read after valid bit is checked */
dma_rmb();
get_64bit_val(cqe, 8, &qp_ctx);
cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
@@ -3378,7 +3381,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
dma_wmb(); /* make sure shadow area is updated before moving tail */
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
atomic64_inc(&cqp->completed_ops);
return ret_code;
}
@@ -3990,13 +3993,17 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
u8 polarity;
aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
get_64bit_val(aeqe, 0, &compl_ctx);
get_64bit_val(aeqe, 8, &temp);
polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
if (aeq->polarity != polarity)
return -ENOENT;
/* Ensure AEQE contents are read after valid bit is checked */
dma_rmb();
get_64bit_val(aeqe, 0, &compl_ctx);
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
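
Both dma_rmb() additions follow the same descriptor-polling rule: the payload of a device-written entry may only be read after the valid/polarity bit has been checked, with a read barrier ordering the two loads. A generic kernel-style sketch of the pattern (the descriptor layout is illustrative, not an irdma CQE or AEQE):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/barrier.h>

struct hw_desc {
	u64 payload;
	u64 flags;  /* bit 63: valid/polarity, written last by the device */
};

static int read_desc(const volatile struct hw_desc *d, u64 expect_valid, u64 *out)
{
	if (((d->flags >> 63) & 1) != expect_valid)
		return -ENOENT;  /* entry not (yet) owned by the driver */

	dma_rmb();  /* order the payload read after the valid-bit check */

	*out = d->payload;
	return 0;
}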

@@ -190,32 +190,30 @@ enum irdma_cqp_op_type {
IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
IRDMA_OP_QUERY_FPM_VAL = 26,
IRDMA_OP_COMMIT_FPM_VAL = 27,
IRDMA_OP_REQ_CMDS = 28,
IRDMA_OP_CMPL_CMDS = 29,
IRDMA_OP_AH_CREATE = 30,
IRDMA_OP_AH_MODIFY = 31,
IRDMA_OP_AH_DESTROY = 32,
IRDMA_OP_MC_CREATE = 33,
IRDMA_OP_MC_DESTROY = 34,
IRDMA_OP_MC_MODIFY = 35,
IRDMA_OP_STATS_ALLOCATE = 36,
IRDMA_OP_STATS_FREE = 37,
IRDMA_OP_STATS_GATHER = 38,
IRDMA_OP_WS_ADD_NODE = 39,
IRDMA_OP_WS_MODIFY_NODE = 40,
IRDMA_OP_WS_DELETE_NODE = 41,
IRDMA_OP_WS_FAILOVER_START = 42,
IRDMA_OP_WS_FAILOVER_COMPLETE = 43,
IRDMA_OP_SET_UP_MAP = 44,
IRDMA_OP_GEN_AE = 45,
IRDMA_OP_QUERY_RDMA_FEATURES = 46,
IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47,
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48,
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49,
IRDMA_OP_CQ_MODIFY = 50,
IRDMA_OP_AH_CREATE = 28,
IRDMA_OP_AH_MODIFY = 29,
IRDMA_OP_AH_DESTROY = 30,
IRDMA_OP_MC_CREATE = 31,
IRDMA_OP_MC_DESTROY = 32,
IRDMA_OP_MC_MODIFY = 33,
IRDMA_OP_STATS_ALLOCATE = 34,
IRDMA_OP_STATS_FREE = 35,
IRDMA_OP_STATS_GATHER = 36,
IRDMA_OP_WS_ADD_NODE = 37,
IRDMA_OP_WS_MODIFY_NODE = 38,
IRDMA_OP_WS_DELETE_NODE = 39,
IRDMA_OP_WS_FAILOVER_START = 40,
IRDMA_OP_WS_FAILOVER_COMPLETE = 41,
IRDMA_OP_SET_UP_MAP = 42,
IRDMA_OP_GEN_AE = 43,
IRDMA_OP_QUERY_RDMA_FEATURES = 44,
IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45,
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
IRDMA_OP_CQ_MODIFY = 48,
/* Must be last entry*/
IRDMA_MAX_CQP_OPS = 51,
IRDMA_MAX_CQP_OPS = 49,
};
/* CQP SQ WQES */

@@ -191,6 +191,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
case IRDMA_AE_AMP_MWBIND_VALID_STAG:
qp->flush_code = FLUSH_MW_BIND_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
@@ -2068,7 +2069,7 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
cqp_request->compl_info.error = info.error;
if (cqp_request->waiting) {
cqp_request->request_done = true;
WRITE_ONCE(cqp_request->request_done, true);
wake_up(&cqp_request->waitq);
irdma_put_cqp_request(&rf->cqp, cqp_request);
} else {

@@ -159,8 +159,8 @@ struct irdma_cqp_request {
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool request_done:1;
bool dynamic:1;
};
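
request_done moves out of the bitfield because READ_ONCE()/WRITE_ONCE() need an addressable scalar, and a store to one bitfield member is a read-modify-write that can clobber concurrent updates to its neighbours. A sketch of the resulting pairing; the completion side matches the hunk above, while the waiter side (and its timeout parameter) is assumed for illustration:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* completion side, as in irdma_cqp_ce_handler() above */
static void complete_request(struct irdma_cqp_request *req)
{
	WRITE_ONCE(req->request_done, true);
	wake_up(&req->waitq);
}

/* hypothetical waiter side */
static int wait_for_request(struct irdma_cqp_request *req, unsigned int timeout_ms)
{
	long left = wait_event_timeout(req->waitq,
				       READ_ONCE(req->request_done),
				       msecs_to_jiffies(timeout_ms));

	return left ? 0 : -ETIMEDOUT;
}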

Some files were not shown because too many files have changed in this diff.