This is the 6.1.64 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmVmHpsACgkQONu9yGCS
 aT5uvw//SzcE0GImnHnfeN7iXtpFE9O0fhTxsjZCi8/HTXmGWPtQgWscd9y81bAd
 EHBVr456GXqd6KuIF+03g/r/FYinwWqK375meLfaybw1vSBP+fZttrEGqz6nTnYD
 yqOxw2bqgz8Xjp63UeNHD6mifpBvVtuAvzrfO1E2Ie/U1OU2uKdjRRv0iijKNeWN
 liOYTXaddIkVfZR0z6dVTl0hb5dPWsxNmF77kfVpKz4ALIHJcO13DlUuKtQz6Sb6
 0ElmJpuonHuUxHzb8e9LLsFy3IvbBqomSscwcd0tngtdUTzhMYFIZLjg2+WQ9Ovq
 raMGqvS/bKsoyoTBNKL83QB2NyXQb3vkfL0NgLsq9IwDl+r96mP9ctANYGwSjhND
 o/4sa/fbMFzeInA8Rzh7i56RCNstOBKApJPhBzWuY0f/6b1BZpvZaONyX3fFksWO
 dMeYT16GgO4lhQXnG3O6mtDT8eoZ1fLf7ZdGEZ2NktcOzXYelNc4aXJke7qdlIop
 CVxM+Ur+juj+DJymo59a6baXjEgIROdHq83N3CZwetGviPHneGqgYc0K7ETtA33H
 sH/0KGYAT8SzzjMlnXB0lpjp68WViJfzzo9Wxdf2aDZbL3SdI14GPKMUeDqqeSyU
 8bB2Hb4ItccRFW9RriiE3BPGnLGu7PDTkn5TgXDG/bDX54Cb5DQ=
 =YPzI
 -----END PGP SIGNATURE-----

Merge 6.1.64 into android14-6.1-lts

Changes in 6.1.64
	locking/ww_mutex/test: Fix potential workqueue corruption
	lib/generic-radix-tree.c: Don't overflow in peek()
	perf/core: Bail out early if the request AUX area is out of bound
	srcu: Fix srcu_struct node grpmask overflow on 64-bit systems
	selftests/lkdtm: Disable CONFIG_UBSAN_TRAP in test config
	clocksource/drivers/timer-imx-gpt: Fix potential memory leak
	clocksource/drivers/timer-atmel-tcb: Fix initialization on SAM9 hardware
	smp,csd: Throw an error if a CSD lock is stuck for too long
	cpu/hotplug: Don't offline the last non-isolated CPU
	workqueue: Provide one lock class key per work_on_cpu() callsite
	x86/mm: Drop the 4 MB restriction on minimal NUMA node memory size
	wifi: plfxlc: fix clang-specific fortify warning
	wifi: mac80211_hwsim: fix clang-specific fortify warning
	wifi: mac80211: don't return unset power in ieee80211_get_tx_power()
	atl1c: Work around the DMA RX overflow issue
	bpf: Detect IP == ksym.end as part of BPF program
	wifi: ath9k: fix clang-specific fortify warnings
	wifi: ath10k: fix clang-specific fortify warning
	net: annotate data-races around sk->sk_tx_queue_mapping
	net: annotate data-races around sk->sk_dst_pending_confirm
	wifi: ath10k: Don't touch the CE interrupt registers after power up
	vsock: read from socket's error queue
	bpf: Ensure proper register state printing for cond jumps
	Bluetooth: btusb: Add date->evt_skb is NULL check
	Bluetooth: Fix double free in hci_conn_cleanup
	ACPI: EC: Add quirk for HP 250 G7 Notebook PC
	tsnep: Fix tsnep_request_irq() format-overflow warning
	platform/chrome: kunit: initialize lock for fake ec_dev
	platform/x86: thinkpad_acpi: Add battery quirk for Thinkpad X120e
	drm/gma500: Fix call trace when psb_gem_mm_init() fails
	drm/komeda: drop all currently held locks if deadlock happens
	drm/amdgpu: not to save bo in the case of RAS err_event_athub
	drm/amdkfd: Fix a race condition of vram buffer unref in svm code
	drm/amd: Update `update_pcie_parameters` functions to use uint8_t arguments
	drm/amd/display: use full update for clip size increase of large plane source
	string.h: add array-wrappers for (v)memdup_user()
	kernel: kexec: copy user-array safely
	kernel: watch_queue: copy user-array safely
	drm_lease.c: copy user-array safely
	drm: vmwgfx_surface.c: copy user-array safely
	drm/msm/dp: skip validity check for DP CTS EDID checksum
	drm/amd: Fix UBSAN array-index-out-of-bounds for SMU7
	drm/amd: Fix UBSAN array-index-out-of-bounds for Polaris and Tonga
	drm/amdgpu: Fix potential null pointer derefernce
	drm/panel: fix a possible null pointer dereference
	drm/panel/panel-tpo-tpg110: fix a possible null pointer dereference
	drm/radeon: fix a possible null pointer dereference
	drm/amdgpu/vkms: fix a possible null pointer dereference
	drm/panel: st7703: Pick different reset sequence
	drm/amdkfd: Fix shift out-of-bounds issue
	drm/amdgpu: Fix a null pointer access when the smc_rreg pointer is NULL
	arm64: dts: ls208xa: use a pseudo-bus to constrain usb dma size
	selftests/efivarfs: create-read: fix a resource leak
	ASoC: soc-card: Add storage for PCI SSID
	ASoC: SOF: Pass PCI SSID to machine driver
	crypto: pcrypt - Fix hungtask for PADATA_RESET
	ASoC: SOF: ipc4: handle EXCEPTION_CAUGHT notification from firmware
	RDMA/hfi1: Use FIELD_GET() to extract Link Width
	scsi: hisi_sas: Set debugfs_dir pointer to NULL after removing debugfs
	scsi: ibmvfc: Remove BUG_ON in the case of an empty event pool
	fs/jfs: Add check for negative db_l2nbperpage
	fs/jfs: Add validity check for db_maxag and db_agpref
	jfs: fix array-index-out-of-bounds in dbFindLeaf
	jfs: fix array-index-out-of-bounds in diAlloc
	HID: lenovo: Detect quirk-free fw on cptkbd and stop applying workaround
	ARM: 9320/1: fix stack depot IRQ stack filter
	ALSA: hda: Fix possible null-ptr-deref when assigning a stream
	PCI: tegra194: Use FIELD_GET()/FIELD_PREP() with Link Width fields
	PCI: mvebu: Use FIELD_PREP() with Link Width
	atm: iphase: Do PCI error checks on own line
	PCI: Do error check on own line to split long "if" conditions
	scsi: libfc: Fix potential NULL pointer dereference in fc_lport_ptp_setup()
	PCI: Use FIELD_GET() to extract Link Width
	PCI: Extract ATS disabling to a helper function
	PCI: Disable ATS for specific Intel IPU E2000 devices
	misc: pci_endpoint_test: Add Device ID for R-Car S4-8 PCIe controller
	PCI: Use FIELD_GET() in Sapphire RX 5600 XT Pulse quirk
	ASoC: Intel: soc-acpi-cht: Add Lenovo Yoga Tab 3 Pro YT3-X90 quirk
	crypto: hisilicon/qm - prevent soft lockup in receive loop
	HID: Add quirk for Dell Pro Wireless Keyboard and Mouse KM5221W
	exfat: support handle zero-size directory
	mfd: intel-lpss: Add Intel Lunar Lake-M PCI IDs
	iio: adc: stm32-adc: harden against NULL pointer deref in stm32_adc_probe()
	thunderbolt: Apply USB 3.x bandwidth quirk only in software connection manager
	tty: vcc: Add check for kstrdup() in vcc_probe()
	usb: dwc3: core: configure TX/RX threshold for DWC3_IP
	soundwire: dmi-quirks: update HP Omen match
	f2fs: fix error handling of __get_node_page
	usb: gadget: f_ncm: Always set current gadget in ncm_bind()
	9p/trans_fd: Annotate data-racy writes to file::f_flags
	9p: v9fs_listxattr: fix %s null argument warning
	i3c: mipi-i3c-hci: Fix out of bounds access in hci_dma_irq_handler
	i2c: fix memleak in i2c_new_client_device()
	i2c: sun6i-p2wi: Prevent potential division by zero
	virtio-blk: fix implicit overflow on virtio_max_dma_size
	i3c: master: mipi-i3c-hci: Fix a kernel panic for accessing DAT_data.
	media: gspca: cpia1: shift-out-of-bounds in set_flicker
	media: vivid: avoid integer overflow
	gfs2: ignore negated quota changes
	gfs2: fix an oops in gfs2_permission
	media: cobalt: Use FIELD_GET() to extract Link Width
	media: ccs: Fix driver quirk struct documentation
	media: imon: fix access to invalid resource for the second interface
	drm/amd/display: Avoid NULL dereference of timing generator
	kgdb: Flush console before entering kgdb on panic
	i2c: dev: copy userspace array safely
	ASoC: ti: omap-mcbsp: Fix runtime PM underflow warnings
	drm/qxl: prevent memory leak
	ALSA: hda/realtek: Add quirk for ASUS UX7602ZM
	drm/amdgpu: fix software pci_unplug on some chips
	pwm: Fix double shift bug
	mtd: rawnand: tegra: add missing check for platform_get_irq()
	wifi: iwlwifi: Use FW rate for non-data frames
	sched/core: Optimize in_task() and in_interrupt() a bit
	SUNRPC: ECONNRESET might require a rebind
	mtd: rawnand: intel: check return value of devm_kasprintf()
	mtd: rawnand: meson: check return value of devm_kasprintf()
	NFSv4.1: fix handling NFS4ERR_DELAY when testing for session trunking
	SUNRPC: Add an IS_ERR() check back to where it was
	NFSv4.1: fix SP4_MACH_CRED protection for pnfs IO
	SUNRPC: Fix RPC client cleaned up the freed pipefs dentries
	gfs2: Silence "suspicious RCU usage in gfs2_permission" warning
	vhost-vdpa: fix use after free in vhost_vdpa_probe()
	net: set SOCK_RCU_FREE before inserting socket into hashtable
	ipvlan: add ipvlan_route_v6_outbound() helper
	tty: Fix uninit-value access in ppp_sync_receive()
	net: hns3: fix add VLAN fail issue
	net: hns3: add barrier in vf mailbox reply process
	net: hns3: fix incorrect capability bit display for copper port
	net: hns3: fix out-of-bounds access may occur when coalesce info is read via debugfs
	net: hns3: fix variable may not initialized problem in hns3_init_mac_addr()
	net: hns3: fix VF reset fail issue
	net: hns3: fix VF wrong speed and duplex issue
	tipc: Fix kernel-infoleak due to uninitialized TLV value
	net: mvneta: fix calls to page_pool_get_stats
	ppp: limit MRU to 64K
	xen/events: fix delayed eoi list handling
	ptp: annotate data-race around q->head and q->tail
	bonding: stop the device in bond_setup_by_slave()
	net: ethernet: cortina: Fix max RX frame define
	net: ethernet: cortina: Handle large frames
	net: ethernet: cortina: Fix MTU max setting
	af_unix: fix use-after-free in unix_stream_read_actor()
	netfilter: nf_conntrack_bridge: initialize err to 0
	netfilter: nf_tables: fix pointer math issue in nft_byteorder_eval()
	net: stmmac: fix rx budget limit check
	net: stmmac: avoid rx queue overrun
	net/mlx5e: fix double free of encap_header
	net/mlx5e: fix double free of encap_header in update funcs
	net/mlx5e: Fix pedit endianness
	net/mlx5e: Reduce the size of icosq_str
	net/mlx5e: Check return value of snprintf writing to fw_version buffer
	net/mlx5e: Check return value of snprintf writing to fw_version buffer for representors
	macvlan: Don't propagate promisc change to lower dev in passthru
	tools/power/turbostat: Fix a knl bug
	tools/power/turbostat: Enable the C-state Pre-wake printing
	cifs: spnego: add ';' in HOST_KEY_LEN
	cifs: fix check of rc in function generate_smb3signingkey
	i915/perf: Fix NULL deref bugs with drm_dbg() calls
	media: venus: hfi: add checks to perform sanity on queue pointers
	perf intel-pt: Fix async branch flags
	powerpc/perf: Fix disabling BHRB and instruction sampling
	randstruct: Fix gcc-plugin performance mode to stay in group
	bpf: Fix check_stack_write_fixed_off() to correctly spill imm
	bpf: Fix precision tracking for BPF_ALU | BPF_TO_BE | BPF_END
	scsi: mpt3sas: Fix loop logic
	scsi: megaraid_sas: Increase register read retry rount from 3 to 30 for selected registers
	scsi: qla2xxx: Fix system crash due to bad pointer access
	crypto: x86/sha - load modules based on CPU features
	x86/cpu/hygon: Fix the CPU topology evaluation for real
	KVM: x86: hyper-v: Don't auto-enable stimer on write from user-space
	KVM: x86: Ignore MSR_AMD64_TW_CFG access
	KVM: x86: Clear bit12 of ICR after APIC-write VM-exit
	audit: don't take task_lock() in audit_exe_compare() code path
	audit: don't WARN_ON_ONCE(!current->mm) in audit_exe_compare()
	proc: sysctl: prevent aliased sysctls from getting passed to init
	tty/sysrq: replace smp_processor_id() with get_cpu()
	tty: serial: meson: fix hard LOCKUP on crtscts mode
	hvc/xen: fix console unplug
	hvc/xen: fix error path in xen_hvc_init() to always register frontend driver
	hvc/xen: fix event channel handling for secondary consoles
	PCI/sysfs: Protect driver's D3cold preference from user space
	mm/damon/sysfs: remove requested targets when online-commit inputs
	mm/damon/sysfs: update monitoring target regions for online input commit
	watchdog: move softlockup_panic back to early_param
	mm/damon/lru_sort: avoid divide-by-zero in hot threshold calculation
	mm/damon/ops-common: avoid divide-by-zero during region hotness calculation
	mm/damon: implement a function for max nr_accesses safe calculation
	mm/damon/sysfs: check error from damon_sysfs_update_target()
	ACPI: resource: Do IRQ override on TongFang GMxXGxx
	regmap: Ensure range selector registers are updated after cache sync
	wifi: ath11k: fix temperature event locking
	wifi: ath11k: fix dfs radar event locking
	wifi: ath11k: fix htt pktlog locking
	wifi: ath11k: fix gtk offload status event locking
	mmc: meson-gx: Remove setting of CMD_CFG_ERROR
	genirq/generic_chip: Make irq_remove_generic_chip() irqdomain aware
	KEYS: trusted: tee: Refactor register SHM usage
	KEYS: trusted: Rollback init_trusted() consistently
	PCI: keystone: Don't discard .remove() callback
	PCI: keystone: Don't discard .probe() callback
	arm64: Restrict CPU_BIG_ENDIAN to GNU as or LLVM IAS 15.x or newer
	parisc/pdc: Add width field to struct pdc_model
	parisc/power: Add power soft-off when running on qemu
	clk: socfpga: Fix undefined behavior bug in struct stratix10_clock_data
	clk: qcom: ipq8074: drop the CLK_SET_RATE_PARENT flag from PLL clocks
	clk: qcom: ipq6018: drop the CLK_SET_RATE_PARENT flag from PLL clocks
	ksmbd: handle malformed smb1 message
	ksmbd: fix slab out of bounds write in smb_inherit_dacl()
	mmc: vub300: fix an error code
	mmc: sdhci_am654: fix start loop index for TAP value parsing
	mmc: Add quirk MMC_QUIRK_BROKEN_CACHE_FLUSH for Micron eMMC Q2J54A
	PCI/ASPM: Fix L1 substate handling in aspm_attr_store_common()
	PCI: kirin: Don't discard .remove() callback
	PCI: exynos: Don't discard .remove() callback
	wifi: wilc1000: use vmm_table as array in wilc struct
	svcrdma: Drop connection after an RDMA Read error
	rcu/tree: Defer setting of jiffies during stall reset
	arm64: dts: qcom: ipq6018: Fix hwlock index for SMEM
	PM: hibernate: Use __get_safe_page() rather than touching the list
	PM: hibernate: Clean up sync_read handling in snapshot_write_next()
	rcu: kmemleak: Ignore kmemleak false positives when RCU-freeing objects
	btrfs: don't arbitrarily slow down delalloc if we're committing
	arm64: dts: qcom: ipq8074: Fix hwlock index for SMEM
	firmware: qcom_scm: use 64-bit calling convention only when client is 64-bit
	ACPI: FPDT: properly handle invalid FPDT subtables
	arm64: dts: qcom: ipq6018: Fix tcsr_mutex register size
	mfd: qcom-spmi-pmic: Fix reference leaks in revid helper
	mfd: qcom-spmi-pmic: Fix revid implementation
	ima: annotate iint mutex to avoid lockdep false positive warnings
	ima: detect changes to the backing overlay file
	netfilter: nf_tables: remove catchall element in GC sync path
	netfilter: nf_tables: split async and sync catchall in two functions
	selftests/resctrl: Remove duplicate feature check from CMT test
	selftests/resctrl: Move _GNU_SOURCE define into Makefile
	selftests/resctrl: Reduce failures due to outliers in MBA/MBM tests
	hid: lenovo: Resend all settings on reset_resume for compact keyboards
	ASoC: codecs: wsa-macro: fix uninitialized stack variables with name prefix
	jbd2: fix potential data lost in recovering journal raced with synchronizing fs bdev
	quota: explicitly forbid quota files from being encrypted
	kernel/reboot: emergency_restart: Set correct system_state
	i2c: core: Run atomic i2c xfer when !preemptible
	tracing: Have the user copy of synthetic event address use correct context
	driver core: Release all resources during unbind before updating device links
	mcb: fix error handling for different scenarios when parsing
	dmaengine: stm32-mdma: correct desc prep when channel running
	s390/cmma: fix detection of DAT pages
	mm/cma: use nth_page() in place of direct struct page manipulation
	mm/memory_hotplug: use pfn math in place of direct struct page manipulation
	mtd: cfi_cmdset_0001: Byte swap OTP info
	i3c: master: cdns: Fix reading status register
	i3c: master: svc: fix race condition in ibi work thread
	i3c: master: svc: fix wrong data return when IBI happen during start frame
	i3c: master: svc: fix ibi may not return mandatory data byte
	i3c: master: svc: fix check wrong status register in irq handler
	i3c: master: svc: fix SDA keep low when polling IBIWON timeout happen
	parisc: Prevent booting 64-bit kernels on PA1.x machines
	parisc/pgtable: Do not drop upper 5 address bits of physical address
	parisc/power: Fix power soft-off when running on qemu
	xhci: Enable RPM on controllers that support low-power states
	fs: add ctime accessors infrastructure
	smb3: fix creating FIFOs when mounting with "sfu" mount option
	smb3: fix touch -h of symlink
	smb3: fix caching of ctime on setxattr
	smb: client: fix use-after-free bug in cifs_debug_data_proc_show()
	smb: client: fix potential deadlock when releasing mids
	cifs: reconnect helper should set reconnect for the right channel
	cifs: force interface update before a fresh session setup
	cifs: do not reset chan_max if multichannel is not supported at mount
	xfs: recovery should not clear di_flushiter unconditionally
	btrfs: zoned: wait for data BG to be finished on direct IO allocation
	ALSA: info: Fix potential deadlock at disconnection
	ALSA: hda/realtek: Enable Mute LED on HP 255 G8
	ALSA: hda/realtek - Add Dell ALC295 to pin fall back table
	ALSA: hda/realtek - Enable internal speaker of ASUS K6500ZC
	ALSA: hda/realtek: Enable Mute LED on HP 255 G10
	ALSA: hda/realtek: Add quirks for HP Laptops
	pmdomain: bcm: bcm2835-power: check if the ASB register is equal to enable
	pmdomain: imx: Make imx pgc power domain also set the fwnode
	cpufreq: stats: Fix buffer overflow detection in trans_stats()
	clk: visconti: remove unused visconti_pll_provider::regmap
	clk: visconti: Fix undefined behavior bug in struct visconti_pll_provider
	Bluetooth: btusb: Add Realtek RTL8852BE support ID 0x0cb8:0xc559
	bluetooth: Add device 0bda:887b to device tables
	bluetooth: Add device 13d3:3571 to device tables
	Bluetooth: btusb: Add RTW8852BE device 13d3:3570 to device tables
	Bluetooth: btusb: Add 0bda:b85b for Fn-Link RTL8852BE
	drm/amd/display: enable dsc_clk even if dsc_pg disabled
	cxl/region: Validate region mode vs decoder mode
	cxl/region: Cleanup target list on attach error
	cxl/region: Move region-position validation to a helper
	cxl/region: Do not try to cleanup after cxl_region_setup_targets() fails
	i3c: master: svc: add NACK check after start byte sent
	i3c: master: svc: fix random hot join failure since timeout error
	cxl: Unify debug messages when calling devm_cxl_add_port()
	cxl/mem: Move devm_cxl_add_endpoint() from cxl_core to cxl_mem
	tools/testing/cxl: Define a fixed volatile configuration to parse
	cxl/region: Fix x1 root-decoder granularity calculations
	Revert ncsi: Propagate carrier gain/loss events to the NCSI controller
	Revert "i2c: pxa: move to generic GPIO recovery"
	lsm: fix default return value for vm_enough_memory
	lsm: fix default return value for inode_getsecctx
	sbsa_gwdt: Calculate timeout with 64-bit math
	i2c: designware: Disable TX_EMPTY irq while waiting for block length byte
	s390/ap: fix AP bus crash on early config change callback invocation
	net: ethtool: Fix documentation of ethtool_sprintf()
	net: dsa: lan9303: consequently nested-lock physical MDIO
	net: phylink: initialize carrier state at creation
	i2c: i801: fix potential race in i801_block_transaction_byte_by_byte
	f2fs: do not return EFSCORRUPTED, but try to run online repair
	f2fs: avoid format-overflow warning
	media: lirc: drop trailing space from scancode transmit
	media: sharp: fix sharp encoding
	media: venus: hfi_parser: Add check to keep the number of codecs within range
	media: venus: hfi: fix the check to handle session buffer requirement
	media: venus: hfi: add checks to handle capabilities from firmware
	media: ccs: Correctly initialise try compose rectangle
	drm/mediatek/dp: fix memory leak on ->get_edid callback audio detection
	drm/mediatek/dp: fix memory leak on ->get_edid callback error path
	dm-verity: don't use blocking calls from tasklets
	nfsd: fix file memleak on client_opens_release
	LoongArch: Mark __percpu functions as always inline
	riscv: mm: Update the comment of CONFIG_PAGE_OFFSET
	riscv: correct pt_level name via pgtable_l5/4_enabled
	riscv: kprobes: allow writing to x0
	mmc: sdhci-pci-gli: A workaround to allow GL9750 to enter ASPM L1.2
	mm: fix for negative counter: nr_file_hugepages
	mm: kmem: drop __GFP_NOFAIL when allocating objcg vectors
	mptcp: deal with large GSO size
	mptcp: add validity check for sending RM_ADDR
	mptcp: fix setsockopt(IP_TOS) subflow locking
	r8169: fix network lost after resume on DASH systems
	r8169: add handling DASH when DASH is disabled
	mmc: sdhci-pci-gli: GL9750: Mask the replay timer timeout of AER
	media: qcom: camss: Fix pm_domain_on sequence in probe
	media: qcom: camss: Fix vfe_get() error jump
	media: qcom: camss: Fix VFE-17x vfe_disable_output()
	media: qcom: camss: Fix VFE-480 vfe_disable_output()
	media: qcom: camss: Fix missing vfe_lite clocks check
	media: qcom: camss: Fix invalid clock enable bit disjunction
	media: qcom: camss: Fix csid-gen2 for test pattern generator
	Revert "net: r8169: Disable multicast filter for RTL8168H and RTL8107E"
	ext4: apply umask if ACL support is disabled
	ext4: correct offset of gdb backup in non meta_bg group to update_backups
	ext4: mark buffer new if it is unwritten to avoid stale data exposure
	ext4: correct return value of ext4_convert_meta_bg
	ext4: correct the start block of counting reserved clusters
	ext4: remove gdb backup copy for meta bg in setup_new_flex_group_blocks
	ext4: add missed brelse in update_backups
	ext4: properly sync file size update after O_SYNC direct IO
	drm/amd/pm: Handle non-terminated overdrive commands.
	drm/i915: Bump GLK CDCLK frequency when driving multiple pipes
	drm/i915: Fix potential spectre vulnerability
	drm/amd/pm: Fix error of MACO flag setting code
	drm/amdgpu/smu13: drop compute workload workaround
	drm/amdgpu: don't use pci_is_thunderbolt_attached()
	drm/amdgpu: don't use ATRM for external devices
	drm/amdgpu: fix error handling in amdgpu_bo_list_get()
	drm/amdgpu: lower CS errors to debug severity
	drm/amd/display: fix a NULL pointer dereference in amdgpu_dm_i2c_xfer()
	drm/amd/display: Enable fast plane updates on DCN3.2 and above
	drm/amd/display: Change the DMCUB mailbox memory location from FB to inbox
	powerpc/powernv: Fix fortify source warnings in opal-prd.c
	tracing: Have trace_event_file have ref counters
	Input: xpad - add VID for Turtle Beach controllers
	mmc: sdhci-pci-gli: GL9755: Mask the replay timer timeout of AER
	cxl/port: Fix NULL pointer access in devm_cxl_add_port()
	RISC-V: drop error print from riscv_hartid_to_cpuid()
	Linux 6.1.64

Change-Id: I9284282aeae5d0f9da957a58147efe0114f8e60a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2023-12-12 18:40:53 +00:00
commit f1bc13cb9d
371 changed files with 3389 additions and 1481 deletions

View File

@@ -5708,6 +5708,13 @@
			This feature may be more efficiently disabled
			using the csdlock_debug- kernel parameter.

+	smp.panic_on_ipistall= [KNL]
+			If a csd_lock_timeout extends for more than
+			the specified number of milliseconds, panic the
+			system. By default, let CSD-lock acquisition
+			take as long as they take. Specifying 300,000
+			for this value provides a 5-minute timeout.
+
	smsc-ircc2.nopnp	[HW] Don't use PNP to discover SMC devices
	smsc-ircc2.ircc_cfg=	[HW] Device configuration I/O port
	smsc-ircc2.ircc_sir=	[HW] SIR base I/O port

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 63
+SUBLEVEL = 64
 EXTRAVERSION =
 NAME = Curry Ramen

View File

@@ -10,10 +10,6 @@

 #include <linux/interrupt.h>

-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define __exception_irq_entry	__irq_entry
-#else
-#define __exception_irq_entry
-#endif

 #endif /* __ASM_ARM_EXCEPTION_H */

View File

@@ -1297,6 +1297,8 @@ choice
 config CPU_BIG_ENDIAN
	bool "Build big-endian kernel"
	depends on !LD_IS_LLD || LLD_VERSION >= 130000
+	# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
+	depends on AS_IS_GNU || AS_VERSION >= 150000
	help
	  Say Y if you plan on running a kernel with a big-endian userspace.

View File

@@ -1186,26 +1186,34 @@ sata1: sata@3210000 {
			dma-coherent;
		};

-		usb0: usb@3100000 {
-			status = "disabled";
-			compatible = "snps,dwc3";
-			reg = <0x0 0x3100000 0x0 0x10000>;
-			interrupts = <0 80 0x4>; /* Level high type */
-			dr_mode = "host";
-			snps,quirk-frame-length-adjustment = <0x20>;
-			snps,dis_rxdet_inp3_quirk;
-			snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-		};
+		bus: bus {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "simple-bus";
+			ranges;
+			dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x00000000>;

-		usb1: usb@3110000 {
-			status = "disabled";
-			compatible = "snps,dwc3";
-			reg = <0x0 0x3110000 0x0 0x10000>;
-			interrupts = <0 81 0x4>; /* Level high type */
-			dr_mode = "host";
-			snps,quirk-frame-length-adjustment = <0x20>;
-			snps,dis_rxdet_inp3_quirk;
-			snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
-		};
+			usb0: usb@3100000 {
+				compatible = "snps,dwc3";
+				reg = <0x0 0x3100000 0x0 0x10000>;
+				interrupts = <0 80 0x4>; /* Level high type */
+				dr_mode = "host";
+				snps,quirk-frame-length-adjustment = <0x20>;
+				snps,dis_rxdet_inp3_quirk;
+				snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+				status = "disabled";
+			};
+
+			usb1: usb@3110000 {
+				compatible = "snps,dwc3";
+				reg = <0x0 0x3110000 0x0 0x10000>;
+				interrupts = <0 81 0x4>; /* Level high type */
+				dr_mode = "host";
+				snps,quirk-frame-length-adjustment = <0x20>;
+				snps,dis_rxdet_inp3_quirk;
+				snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+				status = "disabled";
+			};
+		};

		ccn@4000000 {

View File

@@ -169,7 +169,7 @@ q6_region: memory@4ab00000 {
	smem {
		compatible = "qcom,smem";
		memory-region = <&smem_region>;
-		hwlocks = <&tcsr_mutex 0>;
+		hwlocks = <&tcsr_mutex 3>;
	};

	soc: soc {
@@ -248,7 +248,7 @@ gcc: gcc@1800000 {
		tcsr_mutex: hwlock@1905000 {
			compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
-			reg = <0x0 0x01905000 0x0 0x1000>;
+			reg = <0x0 0x01905000 0x0 0x20000>;
			#hwlock-cells = <1>;
		};

View File

@@ -90,7 +90,7 @@ smem@4ab00000 {
			reg = <0x0 0x4ab00000 0x0 0x00100000>;
			no-map;

-			hwlocks = <&tcsr_mutex 0>;
+			hwlocks = <&tcsr_mutex 3>;
		};

		memory@4ac00000 {

View File

@@ -28,7 +28,7 @@ static inline void set_my_cpu_offset(unsigned long off)
 #define __my_cpu_offset __my_cpu_offset

 #define PERCPU_OP(op, asm_op, c_op)					\
-static inline unsigned long __percpu_##op(void *ptr,			\
+static __always_inline unsigned long __percpu_##op(void *ptr,		\
			unsigned long val, int size)			\
 {									\
	unsigned long ret;						\
@@ -59,7 +59,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP

-static inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void *ptr, int size)
 {
	unsigned long ret;

@@ -96,7 +96,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
	return ret;
 }

-static inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
	switch (size) {
	case 1:
@@ -128,8 +128,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
	}
 }

-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-						int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+						int size)
 {
	switch (size) {
	case 1:

View File

@@ -472,6 +472,7 @@ struct pdc_model {		/* for PDC_MODEL */
	unsigned long arch_rev;
	unsigned long pot_key;
	unsigned long curr_key;
+	unsigned long width;	/* default of PSW_W bit (1=enabled) */
 };

 struct pdc_cache_cf {		/* for PDC_CACHE (I/D-caches) */

View File

@@ -462,13 +462,13 @@
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
+	#define PFN_START_BIT	(63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro		convert_for_tlb_insert20 pte,tmp
 #ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
-	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+	extrd,u		\tmp,PFN_START_BIT,PFN_START_BIT+1,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
@@ -476,8 +476,7 @@
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
 #else /* Huge pages disabled */
-	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+	extrd,u		\pte,PFN_START_BIT,PFN_START_BIT+1,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
 #endif

View File

@@ -70,9 +70,8 @@ $bss_loop:
	stw,ma		%arg2,4(%r1)
	stw,ma		%arg3,4(%r1)

-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
-	/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
-	 * and halt kernel if we detect a PA1.x CPU. */
+#if defined(CONFIG_PA20)
+	/* check for 64-bit capable CPU as required by current kernel */
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0

View File

@@ -1371,8 +1371,7 @@ static void power_pmu_disable(struct pmu *pmu)
		/*
		 * Disable instruction sampling if it was enabled
		 */
-		if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
-			val &= ~MMCRA_SAMPLE_ENABLE;
+		val &= ~MMCRA_SAMPLE_ENABLE;

		/* Disable BHRB via mmcra (BHRBRD) for p10 */
		if (ppmu->flags & PPMU_ARCH_31)
@@ -1383,7 +1382,7 @@ static void power_pmu_disable(struct pmu *pmu)
		 * instruction sampling or BHRB.
		 */
		if (val != mmcra) {
-			mtspr(SPRN_MMCRA, mmcra);
+			mtspr(SPRN_MMCRA, val);
			mb();
			isync();
		}

View File

@@ -24,13 +24,20 @@

 #include <linux/uaccess.h>

+struct opal_prd_msg {
+	union {
+		struct opal_prd_msg_header header;
+		DECLARE_FLEX_ARRAY(u8, data);
+	};
+};
+
 /*
  * The msg member must be at the end of the struct, as it's followed by the
  * message data.
  */
 struct opal_prd_msg_queue_item {
	struct list_head		list;
-	struct opal_prd_msg_header	msg;
+	struct opal_prd_msg		msg;
 };

 static struct device_node *prd_node;
@@ -156,7 +163,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
	int rc;

	/* we need at least a header's worth of data */
-	if (count < sizeof(item->msg))
+	if (count < sizeof(item->msg.header))
		return -EINVAL;

	if (*ppos)
@@ -186,7 +193,7 @@ static ssize_t opal_prd_read(struct file *file, char __user *buf,
		return -EINTR;
	}

-	size = be16_to_cpu(item->msg.size);
+	size = be16_to_cpu(item->msg.header.size);
	if (size > count) {
		err = -EINVAL;
		goto err_requeue;
@@ -352,7 +359,7 @@ static int opal_prd_msg_notifier(struct notifier_block *nb,
	if (!item)
		return -ENOMEM;

-	memcpy(&item->msg, msg->params, msg_size);
+	memcpy(&item->msg.data, msg->params, msg_size);

	spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
	list_add_tail(&item->list, &opal_prd_msg_queue);

View File

@ -38,8 +38,8 @@
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif #endif
/* /*
* By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
* define the PAGE_OFFSET value for SV39. * define the PAGE_OFFSET value for SV48 and SV39.
*/ */
#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL) #define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL) #define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)

View File

@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
unsigned long val) unsigned long val)
{ {
if (index == 0) if (index == 0)
return false; return true;
else if (index <= 31) else if (index <= 31)
*((unsigned long *)regs + index) = val; *((unsigned long *)regs + index) = val;
else else

View File

@ -58,7 +58,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid)
if (cpuid_to_hartid_map(i) == hartid) if (cpuid_to_hartid_map(i) == hartid)
return i; return i;
pr_err("Couldn't find cpu id for hartid [%lu]\n", hartid);
return -ENOENT; return -ENOENT;
} }

View File

@ -384,6 +384,9 @@ static int __init ptdump_init(void)
kernel_ptd_info.base_addr = KERN_VIRT_START; kernel_ptd_info.base_addr = KERN_VIRT_START;
pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
for (i = 0; i < ARRAY_SIZE(pg_level); i++) for (i = 0; i < ARRAY_SIZE(pg_level); i++)
for (j = 0; j < ARRAY_SIZE(pte_bits); j++) for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
pg_level[i].mask |= pte_bits[j].mask; pg_level[i].mask |= pte_bits[j].mask;

View File

@ -132,7 +132,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
continue; continue;
if (!pud_folded(*pud)) { if (!pud_folded(*pud)) {
page = phys_to_page(pud_val(*pud)); page = phys_to_page(pud_val(*pud));
for (i = 0; i < 3; i++) for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags); set_bit(PG_arch_1, &page[i].flags);
} }
mark_kernel_pmd(pud, addr, next); mark_kernel_pmd(pud, addr, next);
@ -153,7 +153,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
continue; continue;
if (!p4d_folded(*p4d)) { if (!p4d_folded(*p4d)) {
page = phys_to_page(p4d_val(*p4d)); page = phys_to_page(p4d_val(*p4d));
for (i = 0; i < 3; i++) for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags); set_bit(PG_arch_1, &page[i].flags);
} }
mark_kernel_pud(p4d, addr, next); mark_kernel_pud(p4d, addr, next);
@ -175,7 +175,7 @@ static void mark_kernel_pgd(void)
continue; continue;
if (!pgd_folded(*pgd)) { if (!pgd_folded(*pgd)) {
page = phys_to_page(pgd_val(*pgd)); page = phys_to_page(pgd_val(*pgd));
for (i = 0; i < 3; i++) for (i = 0; i < 4; i++)
set_bit(PG_arch_1, &page[i].flags); set_bit(PG_arch_1, &page[i].flags);
} }
mark_kernel_p4d(pgd, addr, next); mark_kernel_p4d(pgd, addr, next);

View File

@ -24,8 +24,17 @@
#include <linux/types.h> #include <linux/types.h>
#include <crypto/sha1.h> #include <crypto/sha1.h>
#include <crypto/sha1_base.h> #include <crypto/sha1_base.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h> #include <asm/simd.h>
static const struct x86_cpu_id module_cpu_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static int sha1_update(struct shash_desc *desc, const u8 *data, static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha1_block_fn *sha1_xform) unsigned int len, sha1_block_fn *sha1_xform)
{ {
@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
static int __init sha1_ssse3_mod_init(void) static int __init sha1_ssse3_mod_init(void)
{ {
if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
if (register_sha1_ssse3()) if (register_sha1_ssse3())
goto fail; goto fail;

View File

@ -38,11 +38,20 @@
#include <crypto/sha2.h> #include <crypto/sha2.h>
#include <crypto/sha256_base.h> #include <crypto/sha256_base.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h> #include <asm/simd.h>
asmlinkage void sha256_transform_ssse3(struct sha256_state *state, asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
const u8 *data, int blocks); const u8 *data, int blocks);
static const struct x86_cpu_id module_cpu_ids[] = {
X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
static int _sha256_update(struct shash_desc *desc, const u8 *data, static int _sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len, sha256_block_fn *sha256_xform) unsigned int len, sha256_block_fn *sha256_xform)
{ {
@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
static int __init sha256_ssse3_mod_init(void) static int __init sha256_ssse3_mod_init(void)
{ {
if (!x86_match_cpu(module_cpu_ids))
return -ENODEV;
if (register_sha256_ssse3()) if (register_sha256_ssse3())
goto fail; goto fail;

View File

@ -551,6 +551,7 @@
#define MSR_AMD64_CPUID_FN_1 0xc0011004 #define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022 #define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_TW_CFG 0xc0011023
#define MSR_AMD64_DE_CFG 0xc0011029 #define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1 #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1

View File

@ -12,13 +12,6 @@
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
/*
* Too small node sizes may confuse the VM badly. Usually they
* result from BIOS bugs. So dont recognize nodes as standalone
* NUMA entities that have less than this amount of RAM listed:
*/
#define NODE_MIN_SIZE (4*1024*1024)
extern int numa_off; extern int numa_off;
/* /*

View File

@ -86,8 +86,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
if (!err) if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores); c->x86_coreid_bits = get_count_order(c->x86_max_cores);
/* Socket ID is ApicId[6] for these processors. */ /*
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; * Socket ID is ApicId[6] for the processors with model <= 0x3
* when running on host.
*/
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
cacheinfo_hygon_init_llc_id(c, cpu); cacheinfo_hygon_init_llc_id(c, cpu);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {

View File

@ -705,10 +705,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer_cleanup(stimer); stimer_cleanup(stimer);
stimer->count = count; stimer->count = count;
if (stimer->count == 0) if (!host) {
stimer->config.enable = 0; if (stimer->count == 0)
else if (stimer->config.auto_enable) stimer->config.enable = 0;
stimer->config.enable = 1; else if (stimer->config.auto_enable)
stimer->config.enable = 1;
}
if (stimer->config.enable) if (stimer->config.enable)
stimer_mark_pending(stimer, false); stimer_mark_pending(stimer, false);

View File

@ -2294,22 +2294,22 @@ EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
u64 val;
/* /*
* ICR is a single 64-bit register when x2APIC is enabled. For legacy * ICR is a single 64-bit register when x2APIC is enabled, all others
* xAPIC, ICR writes need to go down the common (slightly slower) path * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
* to get the upper half from ICR2. * go down the common path to get the upper half from ICR2.
*
* Note, using the write helpers may incur an unnecessary write to the
* virtual APIC state, but KVM needs to conditionally modify the value
* in certain cases, e.g. to clear the ICR busy bit. The cost of extra
* conditional branches is likely a wash relative to the cost of the
* maybe-unecessary write, and both are in the noise anyways.
*/ */
if (apic_x2apic_mode(apic) && offset == APIC_ICR) { if (apic_x2apic_mode(apic) && offset == APIC_ICR)
val = kvm_lapic_get_reg64(apic, APIC_ICR); kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32)); else
trace_kvm_apic_write(APIC_ICR, val); kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
} else {
/* TODO: optimize to just emulate side effect w/o one more write */
val = kvm_lapic_get_reg(apic, offset);
kvm_lapic_reg_write(apic, offset, (u32)val);
}
} }
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

View File

@ -3582,6 +3582,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2: case MSR_AMD64_BU_CFG2:
case MSR_AMD64_DC_CFG: case MSR_AMD64_DC_CFG:
case MSR_AMD64_TW_CFG:
case MSR_F15H_EX_CFG: case MSR_F15H_EX_CFG:
break; break;
@ -3982,6 +3983,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_BU_CFG2: case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL: case MSR_IA32_PERF_CTL:
case MSR_AMD64_DC_CFG: case MSR_AMD64_DC_CFG:
case MSR_AMD64_TW_CFG:
case MSR_F15H_EX_CFG: case MSR_F15H_EX_CFG:
/* /*
* Intel Sandy Bridge CPUs must support the RAPL (running average power * Intel Sandy Bridge CPUs must support the RAPL (running average power

View File

@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (start >= end) if (start >= end)
continue; continue;
/*
* Don't confuse VM with a node that doesn't have the
* minimum amount of memory:
*/
if (end && (end - start) < NODE_MIN_SIZE)
continue;
alloc_node_data(nid); alloc_node_data(nid);
} }

View File

@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err) if (!err)
return -EINPROGRESS; return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
return err; return err;
} }
@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err) if (!err)
return -EINPROGRESS; return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
return err; return err;
} }

View File

@ -194,12 +194,19 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_header = (void *)subtable_header + offset; record_header = (void *)subtable_header + offset;
offset += record_header->length; offset += record_header->length;
if (!record_header->length) {
pr_err(FW_BUG "Zero-length record found in FPTD.\n");
result = -EINVAL;
goto err;
}
switch (record_header->type) { switch (record_header->type) {
case RECORD_S3_RESUME: case RECORD_S3_RESUME:
if (subtable_type != SUBTABLE_S3PT) { if (subtable_type != SUBTABLE_S3PT) {
pr_err(FW_BUG "Invalid record %d for subtable %s\n", pr_err(FW_BUG "Invalid record %d for subtable %s\n",
record_header->type, signature); record_header->type, signature);
return -EINVAL; result = -EINVAL;
goto err;
} }
if (record_resume) { if (record_resume) {
pr_err("Duplicate resume performance record found.\n"); pr_err("Duplicate resume performance record found.\n");
@ -208,7 +215,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_resume = (struct resume_performance_record *)record_header; record_resume = (struct resume_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &resume_attr_group); result = sysfs_create_group(fpdt_kobj, &resume_attr_group);
if (result) if (result)
return result; goto err;
break; break;
case RECORD_S3_SUSPEND: case RECORD_S3_SUSPEND:
if (subtable_type != SUBTABLE_S3PT) { if (subtable_type != SUBTABLE_S3PT) {
@ -223,13 +230,14 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_suspend = (struct suspend_performance_record *)record_header; record_suspend = (struct suspend_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &suspend_attr_group); result = sysfs_create_group(fpdt_kobj, &suspend_attr_group);
if (result) if (result)
return result; goto err;
break; break;
case RECORD_BOOT: case RECORD_BOOT:
if (subtable_type != SUBTABLE_FBPT) { if (subtable_type != SUBTABLE_FBPT) {
pr_err(FW_BUG "Invalid %d for subtable %s\n", pr_err(FW_BUG "Invalid %d for subtable %s\n",
record_header->type, signature); record_header->type, signature);
return -EINVAL; result = -EINVAL;
goto err;
} }
if (record_boot) { if (record_boot) {
pr_err("Duplicate boot performance record found.\n"); pr_err("Duplicate boot performance record found.\n");
@ -238,7 +246,7 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
record_boot = (struct boot_performance_record *)record_header; record_boot = (struct boot_performance_record *)record_header;
result = sysfs_create_group(fpdt_kobj, &boot_attr_group); result = sysfs_create_group(fpdt_kobj, &boot_attr_group);
if (result) if (result)
return result; goto err;
break; break;
default: default:
@ -247,6 +255,18 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
} }
} }
return 0; return 0;
err:
if (record_boot)
sysfs_remove_group(fpdt_kobj, &boot_attr_group);
if (record_suspend)
sysfs_remove_group(fpdt_kobj, &suspend_attr_group);
if (record_resume)
sysfs_remove_group(fpdt_kobj, &resume_attr_group);
return result;
} }
static int __init acpi_init_fpdt(void) static int __init acpi_init_fpdt(void)
@ -255,6 +275,7 @@ static int __init acpi_init_fpdt(void)
struct acpi_table_header *header; struct acpi_table_header *header;
struct fpdt_subtable_entry *subtable; struct fpdt_subtable_entry *subtable;
u32 offset = sizeof(*header); u32 offset = sizeof(*header);
int result;
status = acpi_get_table(ACPI_SIG_FPDT, 0, &header); status = acpi_get_table(ACPI_SIG_FPDT, 0, &header);
@ -263,8 +284,8 @@ static int __init acpi_init_fpdt(void)
fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj); fpdt_kobj = kobject_create_and_add("fpdt", acpi_kobj);
if (!fpdt_kobj) { if (!fpdt_kobj) {
acpi_put_table(header); result = -ENOMEM;
return -ENOMEM; goto err_nomem;
} }
while (offset < header->length) { while (offset < header->length) {
@ -272,8 +293,10 @@ static int __init acpi_init_fpdt(void)
switch (subtable->type) { switch (subtable->type) {
case SUBTABLE_FBPT: case SUBTABLE_FBPT:
case SUBTABLE_S3PT: case SUBTABLE_S3PT:
fpdt_process_subtable(subtable->address, result = fpdt_process_subtable(subtable->address,
subtable->type); subtable->type);
if (result)
goto err_subtable;
break; break;
default: default:
/* Other types are reserved in ACPI 6.4 spec. */ /* Other types are reserved in ACPI 6.4 spec. */
@ -282,6 +305,12 @@ static int __init acpi_init_fpdt(void)
offset += sizeof(*subtable); offset += sizeof(*subtable);
} }
return 0; return 0;
err_subtable:
kobject_put(fpdt_kobj);
err_nomem:
acpi_put_table(header);
return result;
} }
fs_initcall(acpi_init_fpdt); fs_initcall(acpi_init_fpdt);

View File

@ -1897,6 +1897,16 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"), DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
}, },
}, },
{
/*
* HP 250 G7 Notebook PC
*/
.callback = ec_honor_dsdt_gpe,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"),
},
},
{ {
/* /*
* Samsung hardware * Samsung hardware

View File

@ -499,6 +499,18 @@ static const struct dmi_system_id maingear_laptop[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"), DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
} }
}, },
{
/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
},
},
{
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
},
},
{ {
.ident = "MAINGEAR Vector Pro 2 17", .ident = "MAINGEAR Vector Pro 2 17",
.matches = { .matches = {

View File

@ -2291,19 +2291,21 @@ static int get_esi(struct atm_dev *dev)
static int reset_sar(struct atm_dev *dev) static int reset_sar(struct atm_dev *dev)
{ {
IADEV *iadev; IADEV *iadev;
int i, error = 1; int i, error;
unsigned int pci[64]; unsigned int pci[64];
iadev = INPH_IA_DEV(dev); iadev = INPH_IA_DEV(dev);
for(i=0; i<64; i++) for (i = 0; i < 64; i++) {
if ((error = pci_read_config_dword(iadev->pci, error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
i*4, &pci[i])) != PCIBIOS_SUCCESSFUL) if (error != PCIBIOS_SUCCESSFUL)
return error; return error;
}
writel(0, iadev->reg+IPHASE5575_EXT_RESET); writel(0, iadev->reg+IPHASE5575_EXT_RESET);
for(i=0; i<64; i++) for (i = 0; i < 64; i++) {
if ((error = pci_write_config_dword(iadev->pci, error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
i*4, pci[i])) != PCIBIOS_SUCCESSFUL) if (error != PCIBIOS_SUCCESSFUL)
return error; return error;
}
udelay(5); udelay(5);
return 0; return 0;
} }

View File

@ -1285,8 +1285,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
if (dev->bus && dev->bus->dma_cleanup) if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev); dev->bus->dma_cleanup(dev);
device_links_driver_cleanup(dev);
device_unbind_cleanup(dev); device_unbind_cleanup(dev);
device_links_driver_cleanup(dev);
klist_remove(&dev->p->knode_driver); klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev); device_pm_check_callbacks(dev);

View File

@ -331,6 +331,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
return 0; return 0;
} }
static int rbtree_all(const void *key, const struct rb_node *node)
{
return 0;
}
/** /**
* regcache_sync - Sync the register cache with the hardware. * regcache_sync - Sync the register cache with the hardware.
* *
@ -348,6 +353,7 @@ int regcache_sync(struct regmap *map)
unsigned int i; unsigned int i;
const char *name; const char *name;
bool bypass; bool bypass;
struct rb_node *node;
if (WARN_ON(map->cache_type == REGCACHE_NONE)) if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL; return -EINVAL;
@ -392,6 +398,30 @@ int regcache_sync(struct regmap *map)
map->async = false; map->async = false;
map->cache_bypass = bypass; map->cache_bypass = bypass;
map->no_sync_defaults = false; map->no_sync_defaults = false;
/*
* If we did any paging with cache bypassed and a cached
* paging register then the register and cache state might
* have gone out of sync, force writes of all the paging
* registers.
*/
rb_for_each(node, 0, &map->range_tree, rbtree_all) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
/* If there's nothing in the cache there's nothing to sync */
ret = regcache_read(map, this->selector_reg, &i);
if (ret != 0)
continue;
ret = _regmap_write(map, this->selector_reg, i);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
this->selector_reg, i, ret);
break;
}
}
map->unlock(map->lock_arg); map->unlock(map->lock_arg);
regmap_async_complete(map); regmap_async_complete(map);

View File

@ -900,6 +900,7 @@ static int virtblk_probe(struct virtio_device *vdev)
u16 min_io_size; u16 min_io_size;
u8 physical_block_exp, alignment_offset; u8 physical_block_exp, alignment_offset;
unsigned int queue_depth; unsigned int queue_depth;
size_t max_dma_size;
if (!vdev->config->get) { if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n", dev_err(&vdev->dev, "%s failure: config access disabled\n",
@ -998,7 +999,8 @@ static int virtblk_probe(struct virtio_device *vdev)
/* No real sector limit. */ /* No real sector limit. */
blk_queue_max_hw_sectors(q, -1U); blk_queue_max_hw_sectors(q, -1U);
max_size = virtio_max_dma_size(vdev); max_dma_size = virtio_max_dma_size(vdev);
max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
/* Host can optionally specify maximum segment size and number of /* Host can optionally specify maximum segment size and number of
* segments. */ * segments. */

View File

@ -532,6 +532,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH }, BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */ /* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK }, .driver_info = BTUSB_REALTEK },
@ -2638,6 +2650,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
goto err_free_wc; goto err_free_wc;
} }
if (data->evt_skb == NULL)
goto err_free_wc;
/* Parse and handle the return WMT event */ /* Parse and handle the return WMT event */
wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
if (wmt_evt->whdr.op != hdr->op) { if (wmt_evt->whdr.op != hdr->op) {

View File

@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
&gpll0_main.clkr.hw }, &gpll0_main.clkr.hw },
.num_parents = 1, .num_parents = 1,
.ops = &clk_fixed_factor_ops, .ops = &clk_fixed_factor_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -89,7 +88,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
&gpll0_main.clkr.hw }, &gpll0_main.clkr.hw },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -164,7 +162,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
&gpll6_main.clkr.hw }, &gpll6_main.clkr.hw },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -195,7 +192,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
&gpll4_main.clkr.hw }, &gpll4_main.clkr.hw },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -246,7 +242,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
&gpll2_main.clkr.hw }, &gpll2_main.clkr.hw },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -277,7 +272,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
&nss_crypto_pll_main.clkr.hw }, &nss_crypto_pll_main.clkr.hw },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };

View File

@ -419,7 +419,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
}, },
.num_parents = 1, .num_parents = 1,
.ops = &clk_fixed_factor_ops, .ops = &clk_fixed_factor_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -466,7 +465,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
}, },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -499,7 +497,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
}, },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -533,7 +530,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
}, },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -547,7 +543,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
}, },
.num_parents = 1, .num_parents = 1,
.ops = &clk_fixed_factor_ops, .ops = &clk_fixed_factor_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };
@ -612,7 +607,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
}, },
.num_parents = 1, .num_parents = 1,
.ops = &clk_alpha_pll_postdiv_ro_ops, .ops = &clk_alpha_pll_postdiv_ro_ops,
.flags = CLK_SET_RATE_PARENT,
}, },
}; };

View File

@ -7,8 +7,10 @@
#define __STRATIX10_CLK_H #define __STRATIX10_CLK_H
struct stratix10_clock_data { struct stratix10_clock_data {
struct clk_hw_onecell_data clk_data;
void __iomem *base; void __iomem *base;
/* Must be last */
struct clk_hw_onecell_data clk_data;
}; };
struct stratix10_pll_clock { struct stratix10_pll_clock {

View File

@ -15,9 +15,10 @@
struct visconti_pll_provider { struct visconti_pll_provider {
void __iomem *reg_base; void __iomem *reg_base;
struct regmap *regmap;
struct clk_hw_onecell_data clk_data;
struct device_node *node; struct device_node *node;
/* Must be last */
struct clk_hw_onecell_data clk_data;
}; };
#define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \ #define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \

View File

@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
writel(mck_divisor_idx /* likely divide-by-8 */ writel(mck_divisor_idx /* likely divide-by-8 */
| ATMEL_TC_WAVE | ATMEL_TC_WAVE
| ATMEL_TC_WAVESEL_UP /* free-run */ | ATMEL_TC_WAVESEL_UP /* free-run */
| ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
| ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
| ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
tcaddr + ATMEL_TC_REG(0, CMR)); tcaddr + ATMEL_TC_REG(0, CMR));

View File

@ -454,12 +454,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
return -ENOMEM; return -ENOMEM;
imxtm->base = of_iomap(np, 0); imxtm->base = of_iomap(np, 0);
if (!imxtm->base) if (!imxtm->base) {
return -ENXIO; ret = -ENXIO;
goto err_kfree;
}
imxtm->irq = irq_of_parse_and_map(np, 0); imxtm->irq = irq_of_parse_and_map(np, 0);
if (imxtm->irq <= 0) if (imxtm->irq <= 0) {
return -EINVAL; ret = -EINVAL;
goto err_kfree;
}
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg"); imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
@ -472,11 +476,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
ret = _mxc_timer_init(imxtm); ret = _mxc_timer_init(imxtm);
if (ret) if (ret)
return ret; goto err_kfree;
initialized = 1; initialized = 1;
return 0; return 0;
err_kfree:
kfree(imxtm);
return ret;
} }
static int __init imx1_timer_init_dt(struct device_node *np) static int __init imx1_timer_init_dt(struct device_node *np)

View File

@ -131,25 +131,25 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n"); len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += scnprintf(buf + len, PAGE_SIZE - len, " : "); len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->state_num; i++) { for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE) if (len >= PAGE_SIZE - 1)
break; break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
stats->freq_table[i]); stats->freq_table[i]);
} }
if (len >= PAGE_SIZE) if (len >= PAGE_SIZE - 1)
return PAGE_SIZE; return PAGE_SIZE - 1;
len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
for (i = 0; i < stats->state_num; i++) { for (i = 0; i < stats->state_num; i++) {
if (len >= PAGE_SIZE) if (len >= PAGE_SIZE - 1)
break; break;
len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ", len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stats->freq_table[i]); stats->freq_table[i]);
for (j = 0; j < stats->state_num; j++) { for (j = 0; j < stats->state_num; j++) {
if (len >= PAGE_SIZE) if (len >= PAGE_SIZE - 1)
break; break;
if (pending) if (pending)
@ -159,12 +159,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count); len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
} }
if (len >= PAGE_SIZE) if (len >= PAGE_SIZE - 1)
break; break;
len += scnprintf(buf + len, PAGE_SIZE - len, "\n"); len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
} }
if (len >= PAGE_SIZE) { if (len >= PAGE_SIZE - 1) {
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n"); pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
return -EFBIG; return -EFBIG;
} }

View File

@ -841,6 +841,8 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
qp->qp_status.cq_head, 0); qp->qp_status.cq_head, 0);
atomic_dec(&qp->qp_status.used); atomic_dec(&qp->qp_status.used);
cond_resched();
} }
/* set c_flag */ /* set c_flag */

View File

@ -219,7 +219,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport); port = devm_cxl_add_port(host, match, dport->component_reg_phys, dport);
if (IS_ERR(port)) if (IS_ERR(port))
return PTR_ERR(port); return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
return 0; return 0;
} }
@ -465,7 +464,6 @@ static int cxl_acpi_probe(struct platform_device *pdev)
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port)) if (IS_ERR(root_port))
return PTR_ERR(root_port); return PTR_ERR(root_port);
dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port, rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport); add_host_bridge_dport);

View File

@ -56,17 +56,6 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled); resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
extern struct rw_semaphore cxl_dpa_rwsem; extern struct rw_semaphore cxl_dpa_rwsem;
bool is_switch_decoder(struct device *dev);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
if (!port)
return NULL;
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
int cxl_memdev_init(void); int cxl_memdev_init(void);
void cxl_memdev_exit(void); void cxl_memdev_exit(void);
void cxl_mbox_init(void); void cxl_mbox_init(void);

View File

@ -276,7 +276,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
return 0; return 0;
} }
static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len, resource_size_t base, resource_size_t len,
resource_size_t skipped) resource_size_t skipped)
{ {
@ -292,6 +292,7 @@ static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
} }
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled) resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{ {

View File

@ -455,6 +455,7 @@ bool is_switch_decoder(struct device *dev)
{ {
return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
} }
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev) struct cxl_decoder *to_cxl_decoder(struct device *dev)
{ {
@ -482,6 +483,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
return NULL; return NULL;
return container_of(dev, struct cxl_switch_decoder, cxld.dev); return container_of(dev, struct cxl_switch_decoder, cxld.dev);
} }
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL);
static void cxl_ep_release(struct cxl_ep *ep) static void cxl_ep_release(struct cxl_ep *ep)
{ {
@ -655,16 +657,10 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
return ERR_PTR(rc); return ERR_PTR(rc);
} }
/** static struct cxl_port *__devm_cxl_add_port(struct device *host,
* devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy struct device *uport,
* @host: host device for devm operations resource_size_t component_reg_phys,
* @uport: "physical" device implementing this upstream port struct cxl_dport *parent_dport)
* @component_reg_phys: (optional) for configurable cxl_port instances
* @parent_dport: next hop up in the CXL memory decode hierarchy
*/
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{ {
struct cxl_port *port; struct cxl_port *port;
struct device *dev; struct device *dev;
@ -702,6 +698,40 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
put_device(dev); put_device(dev);
return ERR_PTR(rc); return ERR_PTR(rc);
} }
/**
* devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
* @host: host device for devm operations
* @uport: "physical" device implementing this upstream port
* @component_reg_phys: (optional) for configurable cxl_port instances
* @parent_dport: next hop up in the CXL memory decode hierarchy
*/
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{
struct cxl_port *port, *parent_port;
port = __devm_cxl_add_port(host, uport, component_reg_phys,
parent_dport);
parent_port = parent_dport ? parent_dport->port : NULL;
if (IS_ERR(port)) {
dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
parent_port ? " port to " : "",
parent_port ? dev_name(&parent_port->dev) : "",
parent_port ? "" : " root port",
PTR_ERR(port));
} else {
dev_dbg(uport, "%s added%s%s%s\n",
dev_name(&port->dev),
parent_port ? " to " : "",
parent_port ? dev_name(&parent_port->dev) : "",
parent_port ? "" : " (root port)");
}
return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
@ -1122,47 +1152,6 @@ static void reap_dports(struct cxl_port *port)
} }
} }
int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
struct cxl_dport *parent_dport)
{
struct cxl_port *parent_port = parent_dport->port;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint, *iter, *down;
int rc;
/*
* Now that the path to the root is established record all the
* intervening ports in the chain.
*/
for (iter = parent_port, down = NULL; !is_cxl_root(iter);
down = iter, iter = to_cxl_port(iter->dev.parent)) {
struct cxl_ep *ep;
ep = cxl_ep_load(iter, cxlmd);
ep->next = down;
}
endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
cxlds->component_reg_phys, parent_dport);
if (IS_ERR(endpoint))
return PTR_ERR(endpoint);
dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev));
rc = cxl_endpoint_autoremove(cxlmd, endpoint);
if (rc)
return rc;
if (!endpoint->dev.driver) {
dev_err(&cxlmd->dev, "%s failed probe\n",
dev_name(&endpoint->dev));
return -ENXIO;
}
return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
static void cxl_detach_ep(void *data) static void cxl_detach_ep(void *data)
{ {
struct cxl_memdev *cxlmd = data; struct cxl_memdev *cxlmd = data;

View File

@ -1012,7 +1012,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
} }
if (is_cxl_root(parent_port)) { if (is_cxl_root(parent_port)) {
parent_ig = cxlrd->cxlsd.cxld.interleave_granularity; /*
* Root decoder IG is always set to value in CFMWS which
* may be different than this region's IG. We can use the
* region's IG here since interleave_granularity_store()
* does not allow interleaved host-bridges with
* root IG != region IG.
*/
parent_ig = p->interleave_granularity;
parent_iw = cxlrd->cxlsd.cxld.interleave_ways; parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
/* /*
* For purposes of address bit routing, use power-of-2 math for * For purposes of address bit routing, use power-of-2 math for
@ -1181,29 +1188,13 @@ static int cxl_region_setup_targets(struct cxl_region *cxlr)
return 0; return 0;
} }
static int cxl_region_attach(struct cxl_region *cxlr, static int cxl_region_validate_position(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos) struct cxl_endpoint_decoder *cxled,
int pos)
{ {
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *ep_port, *root_port, *iter;
struct cxl_region_params *p = &cxlr->params; struct cxl_region_params *p = &cxlr->params;
struct cxl_dport *dport; int i;
int i, rc = -ENXIO;
if (cxled->mode == CXL_DECODER_DEAD) {
dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
return -ENODEV;
}
/* all full of members, or interleave config not established? */
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
if (pos < 0 || pos >= p->interleave_ways) { if (pos < 0 || pos >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
@ -1242,6 +1233,77 @@ static int cxl_region_attach(struct cxl_region *cxlr,
} }
} }
return 0;
}
static int cxl_region_attach_position(struct cxl_region *cxlr,
struct cxl_root_decoder *cxlrd,
struct cxl_endpoint_decoder *cxled,
const struct cxl_dport *dport, int pos)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_port *iter;
int rc;
if (cxlrd->calc_hb(cxlrd, pos) != dport) {
dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
dev_name(&cxlrd->cxlsd.cxld.dev));
return -ENXIO;
}
for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent)) {
rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
if (rc)
goto err;
}
return 0;
err:
for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent))
cxl_port_detach_region(iter, cxlr, cxled);
return rc;
}
static int cxl_region_attach(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_region_params *p = &cxlr->params;
struct cxl_port *ep_port, *root_port;
struct cxl_dport *dport;
int rc = -ENXIO;
if (cxled->mode != cxlr->mode) {
dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
return -EINVAL;
}
if (cxled->mode == CXL_DECODER_DEAD) {
dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
return -ENODEV;
}
/* all full of members, or interleave config not established? */
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
return -EBUSY;
} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "interleave config missing\n");
return -ENXIO;
}
if (p->nr_targets >= p->interleave_ways) {
dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
p->nr_targets);
return -EINVAL;
}
ep_port = cxled_to_port(cxled); ep_port = cxled_to_port(cxled);
root_port = cxlrd_to_port(cxlrd); root_port = cxlrd_to_port(cxlrd);
dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
@ -1252,13 +1314,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO; return -ENXIO;
} }
if (cxlrd->calc_hb(cxlrd, pos) != dport) {
dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
dev_name(&cxlrd->cxlsd.cxld.dev));
return -ENXIO;
}
if (cxled->cxld.target_type != cxlr->type) { if (cxled->cxld.target_type != cxlr->type) {
dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
@ -1282,12 +1337,13 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -EINVAL; return -EINVAL;
} }
for (iter = ep_port; !is_cxl_root(iter); rc = cxl_region_validate_position(cxlr, cxled, pos);
iter = to_cxl_port(iter->dev.parent)) { if (rc)
rc = cxl_port_attach_region(iter, cxlr, cxled, pos); return rc;
if (rc)
goto err; rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
} if (rc)
return rc;
p->targets[pos] = cxled; p->targets[pos] = cxled;
cxled->pos = pos; cxled->pos = pos;
@ -1296,7 +1352,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
if (p->nr_targets == p->interleave_ways) { if (p->nr_targets == p->interleave_ways) {
rc = cxl_region_setup_targets(cxlr); rc = cxl_region_setup_targets(cxlr);
if (rc) if (rc)
goto err_decrement; return rc;
p->state = CXL_CONFIG_ACTIVE; p->state = CXL_CONFIG_ACTIVE;
} }
@ -1308,14 +1364,6 @@ static int cxl_region_attach(struct cxl_region *cxlr,
}; };
return 0; return 0;
err_decrement:
p->nr_targets--;
err:
for (iter = ep_port; !is_cxl_root(iter);
iter = to_cxl_port(iter->dev.parent))
cxl_port_detach_region(iter, cxlr, cxled);
return rc;
} }
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)

View File

@ -562,8 +562,6 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
resource_size_t component_reg_phys, resource_size_t component_reg_phys,
struct cxl_dport *parent_dport); struct cxl_dport *parent_dport);
int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct device *dev); struct cxl_port *find_cxl_root(struct device *dev);
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
int cxl_bus_rescan(void); int cxl_bus_rescan(void);
@ -577,8 +575,10 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
struct cxl_decoder *to_cxl_decoder(struct device *dev); struct cxl_decoder *to_cxl_decoder(struct device *dev);
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
bool is_root_decoder(struct device *dev); bool is_root_decoder(struct device *dev);
bool is_switch_decoder(struct device *dev);
bool is_endpoint_decoder(struct device *dev); bool is_endpoint_decoder(struct device *dev);
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets); unsigned int nr_targets);

View File

@ -75,6 +75,18 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
} }
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds); struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
if (!port)
return NULL;
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
/** /**
* struct cxl_mbox_cmd - A command to be submitted to hardware. * struct cxl_mbox_cmd - A command to be submitted to hardware.

View File

@ -45,6 +45,44 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data)
return 0; return 0;
} }
static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
struct cxl_dport *parent_dport)
{
struct cxl_port *parent_port = parent_dport->port;
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint, *iter, *down;
int rc;
/*
* Now that the path to the root is established record all the
* intervening ports in the chain.
*/
for (iter = parent_port, down = NULL; !is_cxl_root(iter);
down = iter, iter = to_cxl_port(iter->dev.parent)) {
struct cxl_ep *ep;
ep = cxl_ep_load(iter, cxlmd);
ep->next = down;
}
endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
cxlds->component_reg_phys, parent_dport);
if (IS_ERR(endpoint))
return PTR_ERR(endpoint);
rc = cxl_endpoint_autoremove(cxlmd, endpoint);
if (rc)
return rc;
if (!endpoint->dev.driver) {
dev_err(&cxlmd->dev, "%s failed probe\n",
dev_name(&endpoint->dev));
return -ENXIO;
}
return 0;
}
static int cxl_mem_probe(struct device *dev) static int cxl_mem_probe(struct device *dev)
{ {
struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

View File

@ -490,7 +490,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_maxburst = chan->dma_config.src_maxburst; src_maxburst = chan->dma_config.src_maxburst;
dst_maxburst = chan->dma_config.dst_maxburst; dst_maxburst = chan->dma_config.dst_maxburst;
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
@ -966,7 +966,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
if (!desc) if (!desc)
return NULL; return NULL;
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));

View File

@ -164,6 +164,12 @@ static enum qcom_scm_convention __get_convention(void)
if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN)) if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
return qcom_scm_convention; return qcom_scm_convention;
/*
* Per the "SMC calling convention specification", the 64-bit calling
* convention can only be used when the client is 64-bit, otherwise
* system will encounter the undefined behaviour.
*/
#if IS_ENABLED(CONFIG_ARM64)
/* /*
* Device isn't required as there is only one argument - no device * Device isn't required as there is only one argument - no device
* needed to dma_map_single to secure world * needed to dma_map_single to secure world
@ -184,6 +190,7 @@ static enum qcom_scm_convention __get_convention(void)
forced = true; forced = true;
goto found; goto found;
} }
#endif
probed_convention = SMC_CONVENTION_ARM_32; probed_convention = SMC_CONVENTION_ARM_32;
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true); ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);

View File

@ -29,6 +29,7 @@
#include "amdgpu.h" #include "amdgpu.h"
#include "atom.h" #include "atom.h"
#include <linux/device.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/acpi.h> #include <linux/acpi.h>
@ -289,6 +290,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
return false; return false;
/* ATRM is for on-platform devices only */
if (dev_is_removable(&adev->pdev->dev))
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = ACPI_HANDLE(&pdev->dev); dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle) if (!dhandle)

View File

@ -179,6 +179,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
} }
rcu_read_unlock(); rcu_read_unlock();
*result = NULL;
return -ENOENT; return -ENOENT;
} }

View File

@ -1391,7 +1391,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r == -ENOMEM) if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n"); DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN) else if (r != -ERESTARTSYS && r != -EAGAIN)
DRM_ERROR("Failed to process the buffer list %d!\n", r); DRM_DEBUG("Failed to process the buffer list %d!\n", r);
goto error_fini; goto error_fini;
} }

View File

@ -589,6 +589,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
ssize_t result = 0; ssize_t result = 0;
int r; int r;
if (!adev->smc_rreg)
return -EPERM;
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;
@ -645,6 +648,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
ssize_t result = 0; ssize_t result = 0;
int r; int r;
if (!adev->smc_wreg)
return -EPERM;
if (size & 0x3 || *pos & 0x3) if (size & 0x3 || *pos & 0x3)
return -EINVAL; return -EINVAL;

View File

@ -41,6 +41,7 @@
#include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h> #include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include <linux/device.h>
#include <linux/vgaarb.h> #include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h> #include <linux/vga_switcheroo.h>
#include <linux/efi.h> #include <linux/efi.h>
@ -2105,7 +2106,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
*/ */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{ {
struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent; struct pci_dev *parent;
int i, r; int i, r;
@ -2175,7 +2175,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
(amdgpu_is_atpx_hybrid() || (amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) && amdgpu_has_atpx_dgpu_power_cntl()) &&
((adev->flags & AMD_IS_APU) == 0) && ((adev->flags & AMD_IS_APU) == 0) &&
!pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) !dev_is_removable(&adev->pdev->dev))
adev->flags |= AMD_IS_PX; adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) { if (!(adev->flags & AMD_IS_APU)) {
@ -3968,7 +3968,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
px = amdgpu_device_supports_px(ddev); px = amdgpu_device_supports_px(ddev);
if (px || (!pci_is_thunderbolt_attached(adev->pdev) && if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL))) apple_gmux_detect(NULL, NULL)))
vga_switcheroo_register_client(adev->pdev, vga_switcheroo_register_client(adev->pdev,
&amdgpu_switcheroo_ops, px); &amdgpu_switcheroo_ops, px);
@ -4117,7 +4117,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
px = amdgpu_device_supports_px(adev_to_drm(adev)); px = amdgpu_device_supports_px(adev_to_drm(adev));
if (px || (!pci_is_thunderbolt_attached(adev->pdev) && if (px || (!dev_is_removable(&adev->pdev->dev) &&
apple_gmux_detect(NULL, NULL))) apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev); vga_switcheroo_unregister_client(adev->pdev);
@ -5330,7 +5330,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* Flush RAM to disk so that after reboot * Flush RAM to disk so that after reboot
* the user can read log and see why the system rebooted. * the user can read log and see why the system rebooted.
*/ */
if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) { if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
amdgpu_ras_get_context(adev)->reboot) {
DRM_WARN("Emergency reboot."); DRM_WARN("Emergency reboot.");
ksys_sync_helper(); ksys_sync_helper();

View File

@ -1273,7 +1273,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{ {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
sysfs_remove_file_from_group(&adev->dev->kobj, if (adev->dev->kobj.sd)
sysfs_remove_file_from_group(&adev->dev->kobj,
&con->badpages_attr.attr, &con->badpages_attr.attr,
RAS_FS_NAME); RAS_FS_NAME);
} }
@ -1290,7 +1291,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
.attrs = attrs, .attrs = attrs,
}; };
sysfs_remove_group(&adev->dev->kobj, &group); if (adev->dev->kobj.sd)
sysfs_remove_group(&adev->dev->kobj, &group);
return 0; return 0;
} }
@ -1337,7 +1339,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
if (!obj || !obj->attr_inuse) if (!obj || !obj->attr_inuse)
return -EINVAL; return -EINVAL;
sysfs_remove_file_from_group(&adev->dev->kobj, if (adev->dev->kobj.sd)
sysfs_remove_file_from_group(&adev->dev->kobj,
&obj->sysfs_attr.attr, &obj->sysfs_attr.attr,
RAS_FS_NAME); RAS_FS_NAME);
obj->attr_inuse = 0; obj->attr_inuse = 0;

View File

@ -391,8 +391,15 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
void *ptr; void *ptr;
int i, idx; int i, idx;
bool in_ras_intr = amdgpu_ras_intr_triggered();
cancel_delayed_work_sync(&adev->vcn.idle_work); cancel_delayed_work_sync(&adev->vcn.idle_work);
/* err_event_athub will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr)
return 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i)) if (adev->vcn.harvest_config & (1 << i))
continue; continue;

View File

@ -239,6 +239,8 @@ static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
for (i = 0; i < ARRAY_SIZE(common_modes); i++) { for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
continue;
drm_mode_probed_add(connector, mode); drm_mode_probed_add(connector, mode);
} }

View File

@ -28,6 +28,7 @@
#include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h" #include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h> #include <uapi/linux/kfd_ioctl.h>
#include <linux/device.h>
#include <linux/pci.h> #include <linux/pci.h>
#define smnPCIE_CONFIG_CNTL 0x11180044 #define smnPCIE_CONFIG_CNTL 0x11180044
@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev)) if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
def = data = RREG32_PCIE(smnPCIE_LC_CNTL); def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev)) if (dev_is_removable(&adev->pdev->dev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;

View File

@ -612,8 +612,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
void svm_range_vram_node_free(struct svm_range *prange) void svm_range_vram_node_free(struct svm_range *prange)
{ {
svm_range_bo_unref(prange->svm_bo); /* serialize prange->svm_bo unref */
prange->ttm_res = NULL; mutex_lock(&prange->lock);
/* prange->svm_bo has not been unref */
if (prange->ttm_res) {
prange->ttm_res = NULL;
mutex_unlock(&prange->lock);
svm_range_bo_unref(prange->svm_bo);
} else
mutex_unlock(&prange->lock);
} }
struct amdgpu_device * struct amdgpu_device *
@ -757,7 +764,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
prange->flags &= ~attrs[i].value; prange->flags &= ~attrs[i].value;
break; break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY: case KFD_IOCTL_SVM_ATTR_GRANULARITY:
prange->granularity = attrs[i].value; prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
break; break;
default: default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?"); WARN_ONCE(1, "svm_range_check_attrs wasn't called?");

View File

@ -2057,7 +2057,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_create_params create_params; struct dmub_srv_create_params create_params;
struct dmub_srv_region_params region_params; struct dmub_srv_region_params region_params;
struct dmub_srv_region_info region_info; struct dmub_srv_region_info region_info;
struct dmub_srv_fb_params fb_params; struct dmub_srv_memory_params memory_params;
struct dmub_srv_fb_info *fb_info; struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv; struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr; const struct dmcub_firmware_header_v1_0 *hdr;
@ -2188,6 +2188,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data + adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) + le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES; PSP_HEADER_BYTES;
region_params.is_mailbox_in_inbox = false;
status = dmub_srv_calc_region_info(dmub_srv, &region_params, status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info); &region_info);
@ -2209,10 +2210,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return r; return r;
/* Rebase the regions on the framebuffer address. */ /* Rebase the regions on the framebuffer address. */
memset(&fb_params, 0, sizeof(fb_params)); memset(&memory_params, 0, sizeof(memory_params));
fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
fb_params.region_info = &region_info; memory_params.region_info = &region_info;
adev->dm.dmub_fb_info = adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@ -2224,7 +2225,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return -ENOMEM; return -ENOMEM;
} }
status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) { if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error calculating DMUB FB info: %d\n", status); DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
return -EINVAL; return -EINVAL;
@ -7219,6 +7220,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i; int i;
int result = -EIO; int result = -EIO;
if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
if (!cmd.payloads) if (!cmd.payloads)
@ -9282,14 +9286,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
struct drm_plane *other; struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state; struct drm_plane_state *old_other_state, *new_other_state;
struct drm_crtc_state *new_crtc_state; struct drm_crtc_state *new_crtc_state;
struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i; int i;
/* /*
* TODO: Remove this hack once the checks below are sufficient * TODO: Remove this hack for all asics once it proves that the
* enough to determine when we need to reset all the planes on * fast updates works fine on DCN3.2+.
* the stream.
*/ */
if (state->allow_modeset) if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
return true; return true;
/* Exit early if we know that we're adding or removing the plane. */ /* Exit early if we know that we're adding or removing the plane. */

View File

@ -996,7 +996,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */ /* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0) if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
if (dc->caps.max_optimizable_video_width == 0)
dc->caps.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr) if (!dc->clk_mgr)
goto fail; goto fail;
@ -1805,7 +1806,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (dc->hwss.subvp_pipe_control_lock) if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
if (dc->debug.enable_double_buffered_dsc_pg_support) if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false); dc->hwss.update_dsc_pg(dc, context, false);
disable_dangling_plane(dc, context); disable_dangling_plane(dc, context);
@ -1904,7 +1905,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context); dc->hwss.optimize_bandwidth(dc, context);
} }
if (dc->debug.enable_double_buffered_dsc_pg_support) if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, true); dc->hwss.update_dsc_pg(dc, context, true);
if (dc->ctx->dce_version >= DCE_VERSION_MAX) if (dc->ctx->dce_version >= DCE_VERSION_MAX)
@ -2192,7 +2193,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context); dc->hwss.optimize_bandwidth(dc, context);
if (dc->debug.enable_double_buffered_dsc_pg_support) if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, true); dc->hwss.update_dsc_pg(dc, context, true);
} }
@ -2438,6 +2439,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
} }
static enum surface_update_type get_scaling_info_update_type( static enum surface_update_type get_scaling_info_update_type(
const struct dc *dc,
const struct dc_surface_update *u) const struct dc_surface_update *u)
{ {
union surface_update_flags *update_flags = &u->surface->update_flags; union surface_update_flags *update_flags = &u->surface->update_flags;
@ -2472,6 +2474,12 @@ static enum surface_update_type get_scaling_info_update_type(
update_flags->bits.clock_change = 1; update_flags->bits.clock_change = 1;
} }
if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
/* Changing clip size of a large surface may result in MPC slice count change */
update_flags->bits.bandwidth_change = 1;
if (u->scaling_info->src_rect.x != u->surface->src_rect.x if (u->scaling_info->src_rect.x != u->surface->src_rect.x
|| u->scaling_info->src_rect.y != u->surface->src_rect.y || u->scaling_info->src_rect.y != u->surface->src_rect.y
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
@ -2509,7 +2517,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_plane_info_update_type(u); type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type); elevate_update_type(&overall_type, type);
type = get_scaling_info_update_type(u); type = get_scaling_info_update_type(dc, u);
elevate_update_type(&overall_type, type); elevate_update_type(&overall_type, type);
if (u->flip_addr) { if (u->flip_addr) {
@ -3445,7 +3453,7 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0) if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context); dc->hwss.prepare_bandwidth(dc, context);
if (dc->debug.enable_double_buffered_dsc_pg_support) if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false); dc->hwss.update_dsc_pg(dc, context, false);
context_clock_trace(dc, context); context_clock_trace(dc, context);

View File

@ -567,7 +567,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
if (res_ctx->pipe_ctx[i].stream != stream) if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue; continue;
return tg->funcs->get_frame_count(tg); return tg->funcs->get_frame_count(tg);
@ -626,7 +626,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) { for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg; struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
if (res_ctx->pipe_ctx[i].stream != stream) if (res_ctx->pipe_ctx[i].stream != stream || !tg)
continue; continue;
tg->funcs->get_scanoutpos(tg, tg->funcs->get_scanoutpos(tg,

View File

@ -230,6 +230,11 @@ struct dc_caps {
uint32_t dmdata_alloc_size; uint32_t dmdata_alloc_size;
unsigned int max_cursor_size; unsigned int max_cursor_size;
unsigned int max_video_width; unsigned int max_video_width;
/*
* max video plane width that can be safely assumed to be always
* supported by single DPP pipe.
*/
unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period; unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment; int linear_pitch_alignment;
bool dcc_const_color; bool dcc_const_color;

View File

@ -79,6 +79,9 @@ void dcn32_dsc_pg_control(
if (hws->ctx->dc->debug.disable_dsc_power_gate) if (hws->ctx->dc->debug.disable_dsc_power_gate)
return; return;
if (!hws->ctx->dc->debug.enable_double_buffered_dsc_pg_support)
return;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0) if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

View File

@ -174,6 +174,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size; uint32_t vbios_size;
const uint8_t *fw_inst_const; const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data; const uint8_t *fw_bss_data;
bool is_mailbox_in_inbox;
}; };
/** /**
@ -193,20 +194,25 @@ struct dmub_srv_region_params {
*/ */
struct dmub_srv_region_info { struct dmub_srv_region_info {
uint32_t fb_size; uint32_t fb_size;
uint32_t inbox_size;
uint8_t num_regions; uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL]; struct dmub_region regions[DMUB_WINDOW_TOTAL];
}; };
/** /**
* struct dmub_srv_fb_params - parameters used for driver fb setup * struct dmub_srv_memory_params - parameters used for driver fb setup
* @region_info: region info calculated by dmub service * @region_info: region info calculated by dmub service
* @cpu_addr: base cpu address for the framebuffer * @cpu_fb_addr: base cpu address for the framebuffer
* @gpu_addr: base gpu virtual address for the framebuffer * @cpu_inbox_addr: base cpu address for the gart
* @gpu_fb_addr: base gpu virtual address for the framebuffer
* @gpu_inbox_addr: base gpu virtual address for the gart
*/ */
struct dmub_srv_fb_params { struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info; const struct dmub_srv_region_info *region_info;
void *cpu_addr; void *cpu_fb_addr;
uint64_t gpu_addr; void *cpu_inbox_addr;
uint64_t gpu_fb_addr;
uint64_t gpu_inbox_addr;
}; };
/** /**
@ -524,8 +530,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success * DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error * DMUB_STATUS_INVALID - unspecified error
*/ */
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_fb_params *params, const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out); struct dmub_srv_fb_info *out);
/** /**

View File

@ -384,7 +384,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
uint32_t fw_state_size = DMUB_FW_STATE_SIZE; uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE; uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
uint32_t previous_top = 0;
if (!dmub->sw_init) if (!dmub->sw_init)
return DMUB_STATUS_INVALID; return DMUB_STATUS_INVALID;
@ -409,8 +409,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
bios->base = dmub_align(stack->top, 256); bios->base = dmub_align(stack->top, 256);
bios->top = bios->base + params->vbios_size; bios->top = bios->base + params->vbios_size;
mail->base = dmub_align(bios->top, 256); if (params->is_mailbox_in_inbox) {
mail->top = mail->base + DMUB_MAILBOX_SIZE; mail->base = 0;
mail->top = mail->base + DMUB_MAILBOX_SIZE;
previous_top = bios->top;
} else {
mail->base = dmub_align(bios->top, 256);
mail->top = mail->base + DMUB_MAILBOX_SIZE;
previous_top = mail->top;
}
fw_info = dmub_get_fw_meta_info(params); fw_info = dmub_get_fw_meta_info(params);
@ -429,7 +436,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version; dmub->fw_version = fw_info->fw_version;
} }
trace_buff->base = dmub_align(mail->top, 256); trace_buff->base = dmub_align(previous_top, 256);
trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
fw_state->base = dmub_align(trace_buff->top, 256); fw_state->base = dmub_align(trace_buff->top, 256);
@ -440,11 +447,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
out->fb_size = dmub_align(scratch_mem->top, 4096); out->fb_size = dmub_align(scratch_mem->top, 4096);
if (params->is_mailbox_in_inbox)
out->inbox_size = dmub_align(mail->top, 4096);
return DMUB_STATUS_OK; return DMUB_STATUS_OK;
} }
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub, enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_fb_params *params, const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out) struct dmub_srv_fb_info *out)
{ {
uint8_t *cpu_base; uint8_t *cpu_base;
@ -459,8 +469,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS) if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID; return DMUB_STATUS_INVALID;
cpu_base = (uint8_t *)params->cpu_addr; cpu_base = (uint8_t *)params->cpu_fb_addr;
gpu_base = params->gpu_addr; gpu_base = params->gpu_fb_addr;
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) { for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg = const struct dmub_region *reg =
@ -468,6 +478,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
out->fb[i].cpu_addr = cpu_base + reg->base; out->fb[i].cpu_addr = cpu_base + reg->base;
out->fb[i].gpu_addr = gpu_base + reg->base; out->fb[i].gpu_addr = gpu_base + reg->base;
if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
}
out->fb[i].size = reg->top - reg->base; out->fb[i].size = reg->top - reg->base;
} }

View File

@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
typedef struct _ATOM_PPLIB_STATE typedef struct _ATOM_PPLIB_STATE
{ {
UCHAR ucNonClockStateIndex; UCHAR ucNonClockStateIndex;
UCHAR ucClockStateIndices[1]; // variable-sized UCHAR ucClockStateIndices[]; // variable-sized
} ATOM_PPLIB_STATE; } ATOM_PPLIB_STATE;
@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
/** /**
* Driver will read the first ucNumDPMLevels in this array * Driver will read the first ucNumDPMLevels in this array
*/ */
UCHAR clockInfoIndex[1]; UCHAR clockInfoIndex[];
} ATOM_PPLIB_STATE_V2; } ATOM_PPLIB_STATE_V2;
typedef struct _StateArray{ typedef struct _StateArray{

View File

@ -758,7 +758,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
if (adev->in_suspend && !adev->in_runpm) if (adev->in_suspend && !adev->in_runpm)
return -EPERM; return -EPERM;
if (count > 127) if (count > 127 || count == 0)
return -EINVAL; return -EINVAL;
if (*buf == 's') if (*buf == 's')
@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
else else
return -EINVAL; return -EINVAL;
memcpy(buf_cpy, buf, count+1); memcpy(buf_cpy, buf, count);
buf_cpy[count] = 0;
tmp_str = buf_cpy; tmp_str = buf_cpy;
@ -795,6 +796,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return -EINVAL; return -EINVAL;
parameter_size++; parameter_size++;
if (!tmp_str)
break;
while (isspace(*tmp_str)) while (isspace(*tmp_str))
tmp_str++; tmp_str++;
} }

View File

@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
typedef struct _ATOM_Tonga_State_Array { typedef struct _ATOM_Tonga_State_Array {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */ ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_State_Array; } ATOM_Tonga_State_Array;
typedef struct _ATOM_Tonga_MCLK_Dependency_Record { typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
typedef struct _ATOM_Tonga_MCLK_Dependency_Table { typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_MCLK_Dependency_Table; } ATOM_Tonga_MCLK_Dependency_Table;
typedef struct _ATOM_Tonga_SCLK_Dependency_Record { typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
typedef struct _ATOM_Tonga_SCLK_Dependency_Table { typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_SCLK_Dependency_Table; } ATOM_Tonga_SCLK_Dependency_Table;
typedef struct _ATOM_Polaris_SCLK_Dependency_Record { typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
typedef struct _ATOM_Polaris_SCLK_Dependency_Table { typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Polaris_SCLK_Dependency_Table; } ATOM_Polaris_SCLK_Dependency_Table;
typedef struct _ATOM_Tonga_PCIE_Record { typedef struct _ATOM_Tonga_PCIE_Record {
@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
typedef struct _ATOM_Tonga_PCIE_Table { typedef struct _ATOM_Tonga_PCIE_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_PCIE_Table; } ATOM_Tonga_PCIE_Table;
typedef struct _ATOM_Polaris10_PCIE_Record { typedef struct _ATOM_Polaris10_PCIE_Record {
@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
typedef struct _ATOM_Polaris10_PCIE_Table { typedef struct _ATOM_Polaris10_PCIE_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Polaris10_PCIE_Table; } ATOM_Polaris10_PCIE_Table;
@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
typedef struct _ATOM_Tonga_MM_Dependency_Table { typedef struct _ATOM_Tonga_MM_Dependency_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_MM_Dependency_Table; } ATOM_Tonga_MM_Dependency_Table;
typedef struct _ATOM_Tonga_Voltage_Lookup_Record { typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
typedef struct _ATOM_Tonga_Voltage_Lookup_Table { typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
UCHAR ucRevId; UCHAR ucRevId;
UCHAR ucNumEntries; /* Number of entries. */ UCHAR ucNumEntries; /* Number of entries. */
ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */ ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
} ATOM_Tonga_Voltage_Lookup_Table; } ATOM_Tonga_Voltage_Lookup_Table;
typedef struct _ATOM_Tonga_Fan_Table { typedef struct _ATOM_Tonga_Fan_Table {

View File

@ -1221,7 +1221,7 @@ static int smu_smc_hw_setup(struct smu_context *smu)
{ {
struct smu_feature *feature = &smu->smu_feature; struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
uint32_t pcie_gen = 0, pcie_width = 0; uint8_t pcie_gen = 0, pcie_width = 0;
uint64_t features_supported; uint64_t features_supported;
int ret = 0; int ret = 0;

View File

@ -844,7 +844,7 @@ struct pptable_funcs {
* &pcie_gen_cap: Maximum allowed PCIe generation. * &pcie_gen_cap: Maximum allowed PCIe generation.
* &pcie_width_cap: Maximum allowed PCIe width. * &pcie_width_cap: Maximum allowed PCIe width.
*/ */
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap); int (*update_pcie_parameters)(struct smu_context *smu, uint8_t pcie_gen_cap, uint8_t pcie_width_cap);
/** /**
* @i2c_init: Initialize i2c. * @i2c_init: Initialize i2c.

View File

@ -298,8 +298,8 @@ int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
uint32_t pptable_id); uint32_t pptable_id);
int smu_v13_0_update_pcie_parameters(struct smu_context *smu, int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap, uint8_t pcie_gen_cap,
uint32_t pcie_width_cap); uint8_t pcie_width_cap);
#endif #endif
#endif #endif

View File

@ -2368,8 +2368,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
} }
static int navi10_update_pcie_parameters(struct smu_context *smu, static int navi10_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap, uint8_t pcie_gen_cap,
uint32_t pcie_width_cap) uint8_t pcie_width_cap)
{ {
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
PPTable_t *pptable = smu->smu_table.driver_pptable; PPTable_t *pptable = smu->smu_table.driver_pptable;

View File

@ -2084,14 +2084,14 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
#define MAX(a, b) ((a) > (b) ? (a) : (b)) #define MAX(a, b) ((a) > (b) ? (a) : (b))
static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap, uint8_t pcie_gen_cap,
uint32_t pcie_width_cap) uint8_t pcie_width_cap)
{ {
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
uint8_t *table_member1, *table_member2; uint8_t *table_member1, *table_member2;
uint32_t min_gen_speed, max_gen_speed; uint8_t min_gen_speed, max_gen_speed;
uint32_t min_lane_width, max_lane_width; uint8_t min_lane_width, max_lane_width;
uint32_t smu_pcie_arg; uint32_t smu_pcie_arg;
int ret, i; int ret, i;

View File

@ -2486,8 +2486,8 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
} }
int smu_v13_0_update_pcie_parameters(struct smu_context *smu, int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
uint32_t pcie_gen_cap, uint8_t pcie_gen_cap,
uint32_t pcie_width_cap) uint8_t pcie_width_cap)
{ {
struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_13_0_pcie_table *pcie_table = struct smu_13_0_pcie_table *pcie_table =

View File

@ -324,12 +324,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC) if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true; smu->dc_controlled_by_gpio = true;
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO || if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true; smu_baco->platform_support = true;
if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO) if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->maco_support = true; smu_baco->maco_support = true;
}
table_context->thermal_controller_type = table_context->thermal_controller_type =
powerplay_table->thermal_controller_type; powerplay_table->thermal_controller_type;
@ -1645,38 +1645,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
} }
} }
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE && /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
(((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) || workload_type = smu_cmn_to_asic_specific_index(smu,
((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_COMPUTE_BIT,
(void *)(&activity_monitor_external),
false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_CUSTOM);
} else {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD, CMN2ASIC_MAPPING_WORKLOAD,
smu->power_profile_mode); smu->power_profile_mode);
}
if (workload_type < 0) if (workload_type < 0)
return -EINVAL; return -EINVAL;

View File

@ -326,12 +326,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC) if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true; smu->dc_controlled_by_gpio = true;
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO || if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true; smu_baco->platform_support = true;
if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled)) if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->maco_support = true; && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
smu_baco->maco_support = true;
}
table_context->thermal_controller_type = table_context->thermal_controller_type =
powerplay_table->thermal_controller_type; powerplay_table->thermal_controller_type;

View File

@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
return 0; return 0;
} }
static void static int
komeda_pipeline_unbound_components(struct komeda_pipeline *pipe, komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *new) struct komeda_pipeline_state *new)
{ {
@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
c = komeda_pipeline_get_component(pipe, id); c = komeda_pipeline_get_component(pipe, id);
c_st = komeda_component_get_state_and_set_user(c, c_st = komeda_component_get_state_and_set_user(c,
drm_st, NULL, new->crtc); drm_st, NULL, new->crtc);
if (PTR_ERR(c_st) == -EDEADLK)
return -EDEADLK;
WARN_ON(IS_ERR(c_st)); WARN_ON(IS_ERR(c_st));
} }
return 0;
} }
/* release unclaimed pipeline resource */ /* release unclaimed pipeline resource */
@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
if (WARN_ON(IS_ERR_OR_NULL(st))) if (WARN_ON(IS_ERR_OR_NULL(st)))
return -EINVAL; return -EINVAL;
komeda_pipeline_unbound_components(pipe, st); return komeda_pipeline_unbound_components(pipe, st);
return 0;
} }
/* Since standalone disabled components must be disabled separately and in the /* Since standalone disabled components must be disabled separately and in the

View File

@ -507,8 +507,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Handle leased objects, if any */ /* Handle leased objects, if any */
idr_init(&leases); idr_init(&leases);
if (object_count != 0) { if (object_count != 0) {
object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_ids = memdup_array_user(u64_to_user_ptr(cl->object_ids),
array_size(object_count, sizeof(__u32))); object_count, sizeof(__u32));
if (IS_ERR(object_ids)) { if (IS_ERR(object_ids)) {
ret = PTR_ERR(object_ids); ret = PTR_ERR(object_ids);
idr_destroy(&leases); idr_destroy(&leases);

View File

@ -426,6 +426,7 @@ struct drm_psb_private {
uint32_t pipestat[PSB_NUM_PIPE]; uint32_t pipestat[PSB_NUM_PIPE];
spinlock_t irqmask_lock; spinlock_t irqmask_lock;
bool irq_enabled;
/* Power */ /* Power */
bool pm_initialized; bool pm_initialized;

View File

@ -338,6 +338,8 @@ int gma_irq_install(struct drm_device *dev)
gma_irq_postinstall(dev); gma_irq_postinstall(dev);
dev_priv->irq_enabled = true;
return 0; return 0;
} }
@ -348,6 +350,9 @@ void gma_irq_uninstall(struct drm_device *dev)
unsigned long irqflags; unsigned long irqflags;
unsigned int i; unsigned int i;
if (!dev_priv->irq_enabled)
return;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
if (dev_priv->ops->hotplug_enable) if (dev_priv->ops->hotplug_enable)

View File

@ -2368,6 +2368,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
for_each_pipe(dev_priv, pipe) for_each_pipe(dev_priv, pipe)
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
/*
* Avoid glk_force_audio_cdclk() causing excessive screen
* blinking when multiple pipes are active by making sure
* CDCLK frequency is always high enough for audio. With a
* single active pipe we can always change CDCLK frequency
* by changing the cd2x divider (see glk_cdclk_table[]) and
* thus a full modeset won't be needed then.
*/
if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
!is_power_of_2(cdclk_state->active_pipes))
min_cdclk = max(2 * 96000, min_cdclk);
if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
drm_dbg_kms(&dev_priv->drm, drm_dbg_kms(&dev_priv->drm,
"required cdclk (%d kHz) exceeds max (%d kHz)\n", "required cdclk (%d kHz) exceeds max (%d kHz)\n",

View File

@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
if (idx >= pc->num_user_engines) if (idx >= pc->num_user_engines)
return -EINVAL; return -EINVAL;
idx = array_index_nospec(idx, pc->num_user_engines);
pe = &pc->user_engines[idx]; pe = &pc->user_engines[idx];
/* Only render engine supports RPCS configuration. */ /* Only render engine supports RPCS configuration. */

View File

@ -3809,11 +3809,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
u32 known_open_flags; u32 known_open_flags;
int ret; int ret;
if (!perf->i915) { if (!perf->i915)
drm_dbg(&perf->i915->drm,
"i915 perf interface not available for this system\n");
return -ENOTSUPP; return -ENOTSUPP;
}
known_open_flags = I915_PERF_FLAG_FD_CLOEXEC | known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
I915_PERF_FLAG_FD_NONBLOCK | I915_PERF_FLAG_FD_NONBLOCK |
@ -4140,11 +4137,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct i915_oa_reg *regs; struct i915_oa_reg *regs;
int err, id; int err, id;
if (!perf->i915) { if (!perf->i915)
drm_dbg(&perf->i915->drm,
"i915 perf interface not available for this system\n");
return -ENOTSUPP; return -ENOTSUPP;
}
if (!perf->metrics_kobj) { if (!perf->metrics_kobj) {
drm_dbg(&perf->i915->drm, drm_dbg(&perf->i915->drm,
@ -4306,11 +4300,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct i915_oa_config *oa_config; struct i915_oa_config *oa_config;
int ret; int ret;
if (!perf->i915) { if (!perf->i915)
drm_dbg(&perf->i915->drm,
"i915 perf interface not available for this system\n");
return -ENOTSUPP; return -ENOTSUPP;
}
if (i915_perf_stream_paranoid && !perfmon_capable()) { if (i915_perf_stream_paranoid && !perfmon_capable()) {
drm_dbg(&perf->i915->drm, drm_dbg(&perf->i915->drm,

View File

@ -1983,7 +1983,6 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
bool enabled = mtk_dp->enabled; bool enabled = mtk_dp->enabled;
struct edid *new_edid = NULL; struct edid *new_edid = NULL;
struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg; struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
struct cea_sad *sads;
if (!enabled) { if (!enabled) {
drm_bridge_chain_pre_enable(bridge); drm_bridge_chain_pre_enable(bridge);
@ -2006,11 +2005,16 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
*/ */
if (mtk_dp_parse_capabilities(mtk_dp)) { if (mtk_dp_parse_capabilities(mtk_dp)) {
drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n"); drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
kfree(new_edid);
new_edid = NULL; new_edid = NULL;
} }
if (new_edid) { if (new_edid) {
struct cea_sad *sads;
audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads); audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
kfree(sads);
audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid); audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
} }

View File

@ -266,26 +266,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
static u8 dp_panel_get_edid_checksum(struct edid *edid) static u8 dp_panel_get_edid_checksum(struct edid *edid)
{ {
struct edid *last_block; edid += edid->extensions;
u8 *raw_edid;
bool is_edid_corrupt = false;
if (!edid) { return edid->checksum;
DRM_ERROR("invalid edid input\n");
return 0;
}
raw_edid = (u8 *)edid;
raw_edid += (edid->extensions * EDID_LENGTH);
last_block = (struct edid *)raw_edid;
/* block type extension */
drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
if (!is_edid_corrupt)
return last_block->checksum;
DRM_ERROR("Invalid block, no checksum\n");
return 0;
} }
void dp_panel_handle_sink_request(struct dp_panel *dp_panel) void dp_panel_handle_sink_request(struct dp_panel *dp_panel)

View File

@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
connector->display_info.bus_flags = vpanel->panel_type->bus_flags; connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode); mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
if (!mode)
return -ENOMEM;
drm_mode_set_name(mode); drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

View File

@ -428,29 +428,30 @@ static int st7703_prepare(struct drm_panel *panel)
return 0; return 0;
dev_dbg(ctx->dev, "Resetting the panel\n"); dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vcc); gpiod_set_value_cansleep(ctx->reset_gpio, 1);
if (ret < 0) {
dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
return ret;
}
ret = regulator_enable(ctx->iovcc); ret = regulator_enable(ctx->iovcc);
if (ret < 0) { if (ret < 0) {
dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
goto disable_vcc; return ret;
} }
gpiod_set_value_cansleep(ctx->reset_gpio, 1); ret = regulator_enable(ctx->vcc);
usleep_range(20, 40); if (ret < 0) {
dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
regulator_disable(ctx->iovcc);
return ret;
}
/* Give power supplies time to stabilize before deasserting reset. */
usleep_range(10000, 20000);
gpiod_set_value_cansleep(ctx->reset_gpio, 0); gpiod_set_value_cansleep(ctx->reset_gpio, 0);
msleep(20); usleep_range(15000, 20000);
ctx->prepared = true; ctx->prepared = true;
return 0; return 0;
disable_vcc:
regulator_disable(ctx->vcc);
return ret;
} }
static const u32 mantix_bus_formats[] = { static const u32 mantix_bus_formats[] = {

View File

@ -379,6 +379,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
connector->display_info.bus_flags = tpg->panel_mode->bus_flags; connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode); mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
if (!mode)
return -ENOMEM;
drm_mode_set_name(mode); drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

View File

@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
if (!qdev->monitors_config_bo) if (!qdev->monitors_config_bo)
return 0; return 0;
kfree(qdev->dumb_heads);
qdev->dumb_heads = NULL;
qdev->monitors_config = NULL; qdev->monitors_config = NULL;
qdev->ram_header->monitors_config = 0; qdev->ram_header->monitors_config = 0;

View File

@ -1122,6 +1122,8 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
else { else {
/* only 800x600 is supported right now on pre-avivo chips */ /* only 800x600 is supported right now on pre-avivo chips */
tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false); tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
if (!tv_mode)
return 0;
tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, tv_mode); drm_mode_probed_add(connector, tv_mode);
} }

View File

@ -774,9 +774,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
sizeof(metadata->mip_levels)); sizeof(metadata->mip_levels));
metadata->num_sizes = num_sizes; metadata->num_sizes = num_sizes;
metadata->sizes = metadata->sizes =
memdup_user((struct drm_vmw_size __user *)(unsigned long) memdup_array_user((struct drm_vmw_size __user *)(unsigned long)
req->size_addr, req->size_addr,
sizeof(*metadata->sizes) * metadata->num_sizes); metadata->num_sizes, sizeof(*metadata->sizes));
if (IS_ERR(metadata->sizes)) { if (IS_ERR(metadata->sizes)) {
ret = PTR_ERR(metadata->sizes); ret = PTR_ERR(metadata->sizes);
goto out_no_sizes; goto out_no_sizes;

View File

@ -365,6 +365,7 @@
#define USB_VENDOR_ID_DELL 0x413c #define USB_VENDOR_ID_DELL 0x413c
#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
#define USB_VENDOR_ID_DELORME 0x1163 #define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100

View File

@ -51,7 +51,12 @@ struct lenovo_drvdata {
int select_right; int select_right;
int sensitivity; int sensitivity;
int press_speed; int press_speed;
u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */ /* 0: Up
* 1: Down (undecided)
* 2: Scrolling
* 3: Patched firmware, disable workaround
*/
u8 middlebutton_state;
bool fn_lock; bool fn_lock;
}; };
@ -521,6 +526,19 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
int ret; int ret;
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
/*
* Tell the keyboard a driver understands it, and turn F7, F9, F11 into
* regular keys
*/
ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
if (ret)
hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
/* Switch middle button to native mode */
ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
if (ret)
hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock); ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
if (ret) if (ret)
hid_err(hdev, "Fn-lock setting failed: %d\n", ret); hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
@ -668,31 +686,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
{ {
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
/* "wheel" scroll events */ if (cptkbd_data->middlebutton_state != 3) {
if (usage->type == EV_REL && (usage->code == REL_WHEEL || /* REL_X and REL_Y events during middle button pressed
usage->code == REL_HWHEEL)) { * are only possible on patched, bug-free firmware
/* Scroll events disable middle-click event */ * so set middlebutton_state to 3
cptkbd_data->middlebutton_state = 2; * to never apply workaround anymore
return 0; */
} if (cptkbd_data->middlebutton_state == 1 &&
usage->type == EV_REL &&
/* Middle click events */ (usage->code == REL_X || usage->code == REL_Y)) {
if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) { cptkbd_data->middlebutton_state = 3;
if (value == 1) { /* send middle button press which was hold before */
cptkbd_data->middlebutton_state = 1; input_event(field->hidinput->input,
} else if (value == 0) { EV_KEY, BTN_MIDDLE, 1);
if (cptkbd_data->middlebutton_state == 1) { input_sync(field->hidinput->input);
/* No scrolling inbetween, send middle-click */ }
input_event(field->hidinput->input,
EV_KEY, BTN_MIDDLE, 1); /* "wheel" scroll events */
input_sync(field->hidinput->input); if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
input_event(field->hidinput->input, usage->code == REL_HWHEEL)) {
EV_KEY, BTN_MIDDLE, 0); /* Scroll events disable middle-click event */
input_sync(field->hidinput->input); cptkbd_data->middlebutton_state = 2;
} return 0;
cptkbd_data->middlebutton_state = 0; }
/* Middle click events */
if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
if (value == 1) {
cptkbd_data->middlebutton_state = 1;
} else if (value == 0) {
if (cptkbd_data->middlebutton_state == 1) {
/* No scrolling inbetween, send middle-click */
input_event(field->hidinput->input,
EV_KEY, BTN_MIDDLE, 1);
input_sync(field->hidinput->input);
input_event(field->hidinput->input,
EV_KEY, BTN_MIDDLE, 0);
input_sync(field->hidinput->input);
}
cptkbd_data->middlebutton_state = 0;
}
return 1;
} }
return 1;
} }
if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) { if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) {
@ -1126,22 +1161,6 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
} }
hid_set_drvdata(hdev, cptkbd_data); hid_set_drvdata(hdev, cptkbd_data);
/*
* Tell the keyboard a driver understands it, and turn F7, F9, F11 into
* regular keys (Compact only)
*/
if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
if (ret)
hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
}
/* Switch middle button to native mode */
ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
if (ret)
hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
/* Set keyboard settings to known state */ /* Set keyboard settings to known state */
cptkbd_data->middlebutton_state = 0; cptkbd_data->middlebutton_state = 0;
cptkbd_data->fn_lock = true; cptkbd_data->fn_lock = true;
@ -1264,6 +1283,24 @@ static int lenovo_probe(struct hid_device *hdev,
return ret; return ret;
} }
#ifdef CONFIG_PM
static int lenovo_reset_resume(struct hid_device *hdev)
{
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
case USB_DEVICE_ID_LENOVO_TPIIUSBKBD:
if (hdev->type == HID_TYPE_USBMOUSE)
lenovo_features_set_cptkbd(hdev);
break;
default:
break;
}
return 0;
}
#endif
static void lenovo_remove_tpkbd(struct hid_device *hdev) static void lenovo_remove_tpkbd(struct hid_device *hdev)
{ {
struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
@ -1380,6 +1417,9 @@ static struct hid_driver lenovo_driver = {
.raw_event = lenovo_raw_event, .raw_event = lenovo_raw_event,
.event = lenovo_event, .event = lenovo_event,
.report_fixup = lenovo_report_fixup, .report_fixup = lenovo_report_fixup,
#ifdef CONFIG_PM
.reset_resume = lenovo_reset_resume,
#endif
}; };
module_hid_driver(lenovo_driver); module_hid_driver(lenovo_driver);

View File

@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },

Some files were not shown because too many files have changed in this diff Show More