This is the 5.10.51 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmD1LZUACgkQONu9yGCS
 aT7gERAArjYemBSkD4/nq5HvxoVu7ueEyqI2orJCyB6b5npPrBZjlKna3SuYuNUF
 CmcX5Y2Ynxd3gWJvYFEJdAAQAEtKAzPdPv0QJ+KiLSP2bEZ4Q5grEJXfzcgxcB4L
 fUfaCZnUwjoII5xzW1+U2zCJ7YtGx5hZySLKMxSEc0IzawDlfMx4HdwlohXzczsA
 Zq3/sTJcW30PWSp6MSuMOH//lPh7sAoCnksAv4Yb8MZYZjC8JNnKFn+IwRUGWEMZ
 sFtNbq51sMgGq4TjYBIdO6wBElP4dgWJhYc4cO0667cDvgp6iod/bKlOLJSiwIVX
 uPWkyPihH9ZUvNVY0TxjjnxS1rnM0QhH+cEXNGn+SE7KaNmzsI2MaR+DVA2yWcFr
 9edqTq5x2CJJ0R/oXHP4nYFtsvlV/QcirlrF0OZHYwz84b16f37Ac7tHOQpr3kNO
 N29AW0l5XmpxbfHgo1Iaoi02seouLC47vRkvjTpS9mValGUC0ciXTb97CK4FremE
 34rskxIRWBU8HECYioFOeHTAi0+xb/9tOj87BnB5CJ28CD2Md27TTdorsISnqPb0
 /ER89QfVtlJLi17wGB0rlAm8fDF3Cy6BnA57QIql1z1NbJGc1cxenEAFiIOnbQ5G
 t6SLs3mgqpQizsKUFYimCWa04ZzY4Bg8H9bAI+M+9w7J6yujQzI=
 =dGMa
 -----END PGP SIGNATURE-----

Merge 5.10.51 into android12-5.10-lts

Changes in 5.10.51
	drm/mxsfb: Don't select DRM_KMS_FB_HELPER
	drm/zte: Don't select DRM_KMS_FB_HELPER
	drm/ast: Fixed CVE for DP501
	drm/amd/display: fix HDCP reset sequence on reinitialize
	drm/amd/amdgpu/sriov disable all ip hw status by default
	drm/vc4: fix argument ordering in vc4_crtc_get_margins()
	drm/bridge: nwl-dsi: Force a full modeset when crtc_state->active is changed to be true
	net: pch_gbe: Use proper accessors to BE data in pch_ptp_match()
	drm/amd/display: fix use_max_lb flag for 420 pixel formats
	clk: renesas: rcar-usb2-clock-sel: Fix error handling in .probe()
	hugetlb: clear huge pte during flush function on mips platform
	atm: iphase: fix possible use-after-free in ia_module_exit()
	mISDN: fix possible use-after-free in HFC_cleanup()
	atm: nicstar: Fix possible use-after-free in nicstar_cleanup()
	net: Treat __napi_schedule_irqoff() as __napi_schedule() on PREEMPT_RT
	drm/mediatek: Fix PM reference leak in mtk_crtc_ddp_hw_init()
	net: mdio: ipq8064: add regmap config to disable REGCACHE
	drm/bridge: lt9611: Add missing MODULE_DEVICE_TABLE
	reiserfs: add check for invalid 1st journal block
	drm/virtio: Fix double free on probe failure
	net: mdio: provide shim implementation of devm_of_mdiobus_register
	net/sched: cls_api: increase max_reclassify_loop
	pinctrl: equilibrium: Add missing MODULE_DEVICE_TABLE
	drm/scheduler: Fix hang when sched_entity released
	drm/sched: Avoid data corruptions
	udf: Fix NULL pointer dereference in udf_symlink function
	drm/vc4: Fix clock source for VEC PixelValve on BCM2711
	drm/vc4: hdmi: Fix PM reference leak in vc4_hdmi_encoder_pre_crtc_co()
	e100: handle eeprom as little endian
	igb: handle vlan types with checker enabled
	igb: fix assignment on big endian machines
	drm/bridge: cdns: Fix PM reference leak in cdns_dsi_transfer()
	clk: renesas: r8a77995: Add ZA2 clock
	net/mlx5e: IPsec/rep_tc: Fix rep_tc_update_skb drops IPsec packet
	net/mlx5: Fix lag port remapping logic
	drm: rockchip: add missing registers for RK3188
	drm: rockchip: add missing registers for RK3066
	net: stmmac: the XPCS obscures a potential "PHY not found" error
	RDMA/rtrs: Change MAX_SESS_QUEUE_DEPTH
	clk: tegra: Fix refcounting of gate clocks
	clk: tegra: Ensure that PLLU configuration is applied properly
	drm: bridge: cdns-mhdp8546: Fix PM reference leak in
	virtio-net: Add validation for used length
	ipv6: use prandom_u32() for ID generation
	MIPS: cpu-probe: Fix FPU detection on Ingenic JZ4760(B)
	MIPS: ingenic: Select CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
	drm/amd/display: Avoid HDCP over-read and corruption
	drm/amdgpu: remove unsafe optimization to drop preamble ib
	net: tcp better handling of reordering then loss cases
	RDMA/cxgb4: Fix missing error code in create_qp()
	dm space maps: don't reset space map allocation cursor when committing
	dm writecache: don't split bios when overwriting contiguous cache content
	dm: Fix dm_accept_partial_bio() relative to zone management commands
	net: bridge: mrp: Update ring transitions.
	pinctrl: mcp23s08: fix race condition in irq handler
	ice: set the value of global config lock timeout longer
	ice: fix clang warning regarding deadcode.DeadStores
	virtio_net: Remove BUG() to avoid machine dead
	net: mscc: ocelot: check return value after calling platform_get_resource()
	net: bcmgenet: check return value after calling platform_get_resource()
	net: mvpp2: check return value after calling platform_get_resource()
	net: micrel: check return value after calling platform_get_resource()
	net: moxa: Use devm_platform_get_and_ioremap_resource()
	drm/amd/display: Fix DCN 3.01 DSCCLK validation
	drm/amd/display: Update scaling settings on modeset
	drm/amd/display: Release MST resources on switch from MST to SST
	drm/amd/display: Set DISPCLK_MAX_ERRDET_CYCLES to 7
	drm/amd/display: Fix off-by-one error in DML
	net: phy: realtek: add delay to fix RXC generation issue
	selftests: Clean forgotten resources as part of cleanup()
	net: sgi: ioc3-eth: check return value after calling platform_get_resource()
	drm/amdkfd: use allowed domain for vmbo validation
	fjes: check return value after calling platform_get_resource()
	selinux: use __GFP_NOWARN with GFP_NOWAIT in the AVC
	r8169: avoid link-up interrupt issue on RTL8106e if user enables ASPM
	drm/amd/display: Verify Gamma & Degamma LUT sizes in amdgpu_dm_atomic_check
	xfrm: Fix error reporting in xfrm_state_construct.
	dm writecache: commit just one block, not a full page
	wlcore/wl12xx: Fix wl12xx get_mac error if device is in ELP
	wl1251: Fix possible buffer overflow in wl1251_cmd_scan
	cw1200: add missing MODULE_DEVICE_TABLE
	drm/amdkfd: fix circular locking on get_wave_state
	drm/amdkfd: Fix circular lock in nocpsch path
	bpf: Fix up register-based shifts in interpreter to silence KUBSAN
	ice: fix incorrect payload indicator on PTYPE
	ice: mark PTYPE 2 as reserved
	mt76: mt7615: fix fixed-rate tx status reporting
	net: fix mistake path for netdev_features_strings
	net: ipa: Add missing of_node_put() in ipa_firmware_load()
	net: sched: fix error return code in tcf_del_walker()
	io_uring: fix false WARN_ONCE
	drm/amdgpu: fix bad address translation for sienna_cichlid
	drm/amdkfd: Walk through list with dqm lock hold
	mt76: mt7915: fix IEEE80211_HE_PHY_CAP7_MAX_NC for station mode
	rtl8xxxu: Fix device info for RTL8192EU devices
	MIPS: add PMD table accounting into MIPS'pmd_alloc_one
	net: fec: add ndo_select_queue to fix TX bandwidth fluctuations
	atm: nicstar: use 'dma_free_coherent' instead of 'kfree'
	atm: nicstar: register the interrupt handler in the right place
	vsock: notify server to shutdown when client has pending signal
	RDMA/rxe: Don't overwrite errno from ib_umem_get()
	iwlwifi: mvm: don't change band on bound PHY contexts
	iwlwifi: mvm: fix error print when session protection ends
	iwlwifi: pcie: free IML DMA memory allocation
	iwlwifi: pcie: fix context info freeing
	sfc: avoid double pci_remove of VFs
	sfc: error code if SRIOV cannot be disabled
	wireless: wext-spy: Fix out-of-bounds warning
	cfg80211: fix default HE tx bitrate mask in 2G band
	mac80211: consider per-CPU statistics if present
	mac80211_hwsim: add concurrent channels scanning support over virtio
	IB/isert: Align target max I/O size to initiator size
	media, bpf: Do not copy more entries than user space requested
	net: ip: avoid OOM kills with large UDP sends over loopback
	RDMA/cma: Fix rdma_resolve_route() memory leak
	Bluetooth: btusb: Fixed too many in-token issue for Mediatek Chip.
	Bluetooth: Fix the HCI to MGMT status conversion table
	Bluetooth: Fix alt settings for incoming SCO with transparent coding format
	Bluetooth: Shutdown controller after workqueues are flushed or cancelled
	Bluetooth: btusb: Add a new QCA_ROME device (0cf3:e500)
	Bluetooth: L2CAP: Fix invalid access if ECRED Reconfigure fails
	Bluetooth: L2CAP: Fix invalid access on ECRED Connection response
	Bluetooth: btusb: Add support USB ALT 3 for WBS
	Bluetooth: mgmt: Fix the command returns garbage parameter value
	Bluetooth: btusb: fix bt fiwmare downloading failure issue for qca btsoc.
	sched/fair: Ensure _sum and _avg values stay consistent
	bpf: Fix false positive kmemleak report in bpf_ringbuf_area_alloc()
	flow_offload: action should not be NULL when it is referenced
	sctp: validate from_addr_param return
	sctp: add size validation when walking chunks
	MIPS: loongsoon64: Reserve memory below starting pfn to prevent Oops
	MIPS: set mips32r5 for virt extensions
	selftests/resctrl: Fix incorrect parsing of option "-t"
	MIPS: MT extensions are not available on MIPS32r1
	ath11k: unlock on error path in ath11k_mac_op_add_interface()
	arm64: dts: rockchip: add rk3328 dwc3 usb controller node
	arm64: dts: rockchip: Enable USB3 for rk3328 Rock64
	loop: fix I/O error on fsync() in detached loop devices
	mm,hwpoison: return -EBUSY when migration fails
	io_uring: simplify io_remove_personalities()
	io_uring: Convert personality_idr to XArray
	io_uring: convert io_buffer_idr to XArray
	scsi: iscsi: Fix race condition between login and sync thread
	scsi: iscsi: Fix iSCSI cls conn state
	powerpc/mm: Fix lockup on kernel exec fault
	powerpc/barrier: Avoid collision with clang's __lwsync macro
	powerpc/powernv/vas: Release reference to tgid during window close
	drm/amdgpu: Update NV SIMD-per-CU to 2
	drm/amdgpu: enable sdma0 tmz for Raven/Renoir(V2)
	drm/radeon: Add the missed drm_gem_object_put() in radeon_user_framebuffer_create()
	drm/radeon: Call radeon_suspend_kms() in radeon_pci_shutdown() for Loongson64
	drm/vc4: txp: Properly set the possible_crtcs mask
	drm/vc4: crtc: Skip the TXP
	drm/vc4: hdmi: Prevent clock unbalance
	drm/dp: Handle zeroed port counts in drm_dp_read_downstream_info()
	drm/rockchip: dsi: remove extra component_del() call
	drm/amd/display: fix incorrrect valid irq check
	pinctrl/amd: Add device HID for new AMD GPIO controller
	drm/amd/display: Reject non-zero src_y and src_x for video planes
	drm/tegra: Don't set allow_fb_modifiers explicitly
	drm/msm/mdp4: Fix modifier support enabling
	drm/arm/malidp: Always list modifiers
	drm/nouveau: Don't set allow_fb_modifiers explicitly
	drm/i915/display: Do not zero past infoframes.vsc
	mmc: sdhci-acpi: Disable write protect detection on Toshiba Encore 2 WT8-B
	mmc: sdhci: Fix warning message when accessing RPMB in HS400 mode
	mmc: core: clear flags before allowing to retune
	mmc: core: Allow UHS-I voltage switch for SDSC cards if supported
	ata: ahci_sunxi: Disable DIPM
	arm64: tlb: fix the TTL value of tlb_get_level
	cpu/hotplug: Cure the cpusets trainwreck
	clocksource/arm_arch_timer: Improve Allwinner A64 timer workaround
	fpga: stratix10-soc: Add missing fpga_mgr_free() call
	ASoC: tegra: Set driver_name=tegra for all machine drivers
	i40e: fix PTP on 5Gb links
	qemu_fw_cfg: Make fw_cfg_rev_attr a proper kobj_attribute
	ipmi/watchdog: Stop watchdog timer when the current action is 'none'
	thermal/drivers/int340x/processor_thermal: Fix tcc setting
	ubifs: Fix races between xattr_{set|get} and listxattr operations
	power: supply: ab8500: Fix an old bug
	mfd: syscon: Free the allocated name field of struct regmap_config
	nvmem: core: add a missing of_node_put
	lkdtm/bugs: XFAIL UNALIGNED_LOAD_STORE_WRITE
	selftests/lkdtm: Fix expected text for CR4 pinning
	extcon: intel-mrfld: Sync hardware and software state on init
	seq_buf: Fix overflow in seq_buf_putmem_hex()
	rq-qos: fix missed wake-ups in rq_qos_throttle try two
	tracing: Simplify & fix saved_tgids logic
	tracing: Resize tgid_map to pid_max, not PID_MAX_DEFAULT
	ipack/carriers/tpci200: Fix a double free in tpci200_pci_probe
	coresight: Propagate symlink failure
	coresight: tmc-etf: Fix global-out-of-bounds in tmc_update_etf_buffer()
	dm zoned: check zone capacity
	dm writecache: flush origin device when writing and cache is full
	dm btree remove: assign new_root only when removal succeeds
	PCI: Leave Apple Thunderbolt controllers on for s2idle or standby
	PCI: aardvark: Fix checking for PIO Non-posted Request
	PCI: aardvark: Implement workaround for the readback value of VEND_ID
	media: subdev: disallow ioctl for saa6588/davinci
	media: dtv5100: fix control-request directions
	media: zr364xx: fix memory leak in zr364xx_start_readpipe
	media: gspca/sq905: fix control-request direction
	media: gspca/sunplus: fix zero-length control requests
	media: uvcvideo: Fix pixel format change for Elgato Cam Link 4K
	io_uring: fix clear IORING_SETUP_R_DISABLED in wrong function
	dm writecache: write at least 4k when committing
	pinctrl: mcp23s08: Fix missing unlock on error in mcp23s08_irq()
	drm/ast: Remove reference to struct drm_device.pdev
	jfs: fix GPF in diFree
	smackfs: restrict bytes count in smk_set_cipso()
	ext4: fix memory leak in ext4_fill_super
	f2fs: fix to avoid racing on fsync_entry_slab by multi filesystem instances
	Linux 5.10.51

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Icb10fed733a0050848ecc23db13ae3d134895acd

commit 8db62be3c3
Greg Kroah-Hartman, 2021-07-19 10:23:41 +02:00
227 changed files with 1702 additions and 877 deletions

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 50
SUBLEVEL = 51
EXTRAVERSION =
NAME = Dare mighty things

@@ -384,6 +384,11 @@ &usb20_otg {
status = "okay";
};
&usbdrd3 {
dr_mode = "host";
status = "okay";
};
&usb_host0_ehci {
status = "okay";
};

@@ -984,6 +984,25 @@ usb_host0_ohci: usb@ff5d0000 {
status = "disabled";
};
usbdrd3: usb@ff600000 {
compatible = "rockchip,rk3328-dwc3", "snps,dwc3";
reg = <0x0 0xff600000 0x0 0x100000>;
interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_USB3OTG_REF>, <&cru SCLK_USB3OTG_SUSPEND>,
<&cru ACLK_USB3OTG>;
clock-names = "ref_clk", "suspend_clk",
"bus_clk";
dr_mode = "otg";
phy_type = "utmi_wide";
snps,dis-del-phy-power-chg-quirk;
snps,dis_enblslpm_quirk;
snps,dis-tx-ipgap-linecheck-quirk;
snps,dis-u2-freeclk-exists-quirk;
snps,dis_u2_susphy_quirk;
snps,dis_u3_susphy_quirk;
status = "disabled";
};
gic: interrupt-controller@ff811000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;

@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
*/
static inline int tlb_get_level(struct mmu_gather *tlb)
{
/* The TTL field is only valid for the leaf entry. */
if (tlb->freed_tables)
return 0;
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
tlb->cleared_puds ||
tlb->cleared_p4ds))

@@ -418,6 +418,8 @@ config MACH_INGENIC_SOC
select MIPS_GENERIC
select MACH_INGENIC
select SYS_SUPPORTS_ZBOOT_UART16550
select CPU_SUPPORTS_CPUFREQ
select MIPS_EXTERNAL_TIMER
config LANTIQ
bool "Lantiq based platforms"

@@ -64,6 +64,8 @@
((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
#define __isa_range_or_flag(ge, lt, flag) \
(__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
#define __isa_range_and_ase(ge, lt, ase) \
(__isa_range(ge, lt) && __ase(ase))
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
@@ -423,7 +425,7 @@
#endif
#ifndef cpu_has_mipsmt
#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
#define cpu_has_mipsmt __isa_range_and_ase(2, 6, MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_vp

@@ -46,7 +46,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_tlb_page(vma, addr);
}
#define __HAVE_ARCH_HUGE_PTE_NONE

@@ -2073,7 +2073,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ int __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32r2\n\t" \
".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2086,7 +2086,7 @@
({ unsigned long long __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64r2\n\t" \
".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2099,7 +2099,7 @@
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32r2\n\t" \
".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
@@ -2111,7 +2111,7 @@
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64r2\n\t" \
".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \

@@ -59,11 +59,15 @@ do { \
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;
pmd_t *pmd = NULL;
struct page *pg;
pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
if (pg) {
pgtable_pmd_page_ctor(pg);
pmd = (pmd_t *)page_address(pg);
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
}
return pmd;
}

@@ -1827,6 +1827,11 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
/* FPU is not properly detected on JZ4760(B). */
if (c->processor_id == 0x2ed0024f)
c->options |= MIPS_CPU_FPU;
fallthrough;
/*

@@ -182,6 +182,9 @@ static void __init node_mem_init(unsigned int node)
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
/* Reserve pfn range 0~node[0]->node_start_pfn */
memblock_reserve(0, PAGE_SIZE * start_pfn);
}
}

@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif
/* clang defines this macro for a builtin, which will not work with runtime patching */
#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

@@ -198,9 +198,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
{
int is_exec = TRAP(regs) == 0x400;
/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
DSISR_PROTFAULT))) {
if (is_exec) {
pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
address >= TASK_SIZE ? "exec-protected" : "user",
address,

@@ -1093,9 +1093,9 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
/*
* Process closes window during exit. In the case of
* multithread application, the child thread can open
* window and can exit without closing it. Expects parent
* thread to use and close the window. So do not need
* to take pid reference for parent thread.
* window and can exit without closing it. so takes tgid
* reference until window closed to make sure tgid is not
* reused.
*/
txwin->tgid = find_get_pid(task_tgid_vnr(current));
/*
@@ -1339,8 +1339,9 @@ int vas_win_close(struct vas_window *window)
/* if send window, drop reference to matching receive window */
if (window->tx_win) {
if (window->user_win) {
/* Drop references to pid and mm */
/* Drop references to pid. tgid and mm */
put_pid(window->pid);
put_pid(window->tgid);
if (window->mm) {
mm_context_remove_vas_window(window->mm);
mmdrop(window->mm);

@@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
return;
prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
has_sleeper = !wq_has_single_sleeper(&rqw->wait);
has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
TASK_UNINTERRUPTIBLE);
do {
/* The memory barrier in set_task_state saves us here. */
if (data.got_token)

@@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
}
static const struct ata_port_info ahci_sunxi_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,

@@ -3279,7 +3279,7 @@
{
pci_unregister_driver(&ia_driver);
del_timer(&ia_timer);
del_timer_sync(&ia_timer);
}
module_init(ia_module_init);
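
Both ATM timer fixes in this release (iphase above, nicstar below) apply the same rule; a minimal sketch of the pattern, with hypothetical names:

	/* del_timer() only deactivates a pending timer; its handler may
	 * still be running on another CPU when the module is torn down.
	 * del_timer_sync() additionally waits for a running handler to
	 * finish, closing the use-after-free window on unload.
	 */
	static struct timer_list demo_timer;		/* hypothetical */
	static struct pci_driver demo_driver;		/* hypothetical */

	static void __exit demo_exit(void)
	{
		pci_unregister_driver(&demo_driver);
		del_timer_sync(&demo_timer);	/* wait, don't just cancel */
	}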

@@ -297,7 +297,7 @@ static void __exit nicstar_cleanup(void)
{
XPRINTK("nicstar: nicstar_cleanup() called.\n");
del_timer(&ns_timer);
del_timer_sync(&ns_timer);
pci_unregister_driver(&nicstar_driver);
@@ -525,6 +525,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
writel(0x00000000, card->membase + VPM);
card->intcnt = 0;
if (request_irq
(pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
error = 9;
ns_init_card_error(card, error);
return error;
}
/* Initialize TSQ */
card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@@ -751,15 +760,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->efbie = 1;
card->intcnt = 0;
if (request_irq
(pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
error = 9;
ns_init_card_error(card, error);
return error;
}
/* Register device */
card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
-1, NULL);
@@ -837,10 +837,12 @@ static void ns_init_card_error(ns_dev *card, int error)
dev_kfree_skb_any(hb);
}
if (error >= 12) {
kfree(card->rsq.org);
dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
card->rsq.org, card->rsq.dma);
}
if (error >= 11) {
kfree(card->tsq.org);
dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
card->tsq.org, card->tsq.dma);
}
if (error >= 10) {
free_irq(card->pcidev->irq, card);
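
The rsq/tsq changes above restore allocator pairing: memory obtained from dma_alloc_coherent() was being released with kfree(). A short sketch of the rule, with a hypothetical size constant:

	/* dma_alloc_coherent() memory is not a slab object; it must be
	 * returned through dma_free_coherent() with the same size and
	 * DMA handle, otherwise allocator state is corrupted and the
	 * DMA mapping leaks.
	 */
	void *org;
	dma_addr_t dma;

	org = dma_alloc_coherent(&pdev->dev, DEMO_SIZE, &dma, GFP_KERNEL);
	if (!org)
		return -ENOMEM;
	/* ... use the buffer ... */
	dma_free_coherent(&pdev->dev, DEMO_SIZE, org, dma);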

@@ -1224,6 +1224,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
goto out_unlock;
}
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false);
/* freeze request queue during the transition */
blk_mq_freeze_queue(lo->lo_queue);

@@ -269,6 +269,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0cf3, 0xe500), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME |
@@ -1719,6 +1721,13 @@ static void btusb_work(struct work_struct *work)
* which work with WBS at all.
*/
new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
/* Because mSBC frames do not need to be aligned to the
* SCO packet boundary. If support the Alt 3, use the
* Alt 3 for HCI payload >= 60 Bytes let air packet
* data satisfy 60 bytes.
*/
if (new_alts == 1 && btusb_find_altsetting(data, 3))
new_alts = 3;
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -2963,11 +2972,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_wmt_hdr *hdr;
int err;
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
if (err < 0)
return err;
/* Send the WMT command and wait until the WMT event returns */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
@@ -2989,6 +2993,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
return err;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
if (err < 0)
return err;
/* The vendor specific WMT commands are all answered by a vendor
* specific event and will have the Command Status or Command
* Complete as with usual HCI command flow control.
@@ -3549,6 +3558,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
sent += size;
count -= size;
/* ep2 need time to switch from function acl to function dfu,
* so we add 20ms delay here.
*/
msleep(20);
while (count) {
size = min_t(size_t, count, QCA_DFU_PACKET_LEN);

@@ -371,16 +371,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
data[0] = 0;
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
if ((ipmi_version_major > 1)
|| ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
/* This is an IPMI 1.5-only feature. */
data[0] |= WDOG_DONT_STOP_ON_SET;
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/*
* In ipmi 1.0, setting the timer stops the watchdog, we
* need to start it back up again.
*/
hbnow = 1;
if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
if ((ipmi_version_major > 1) ||
((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
/* This is an IPMI 1.5-only feature. */
data[0] |= WDOG_DONT_STOP_ON_SET;
} else {
/*
* In ipmi 1.0, setting the timer stops the watchdog, we
* need to start it back up again.
*/
hbnow = 1;
}
}
data[1] = 0;

@@ -75,6 +75,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
/* Core Clock Outputs */
DEF_FIXED("za2", R8A77995_CLK_ZA2, CLK_PLL0D3, 2, 1),
DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1),

@@ -128,10 +128,8 @@ static int rcar_usb2_clock_sel_resume(struct device *dev)
static int rcar_usb2_clock_sel_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct usb2_clock_sel_priv *priv = platform_get_drvdata(pdev);
of_clk_del_provider(dev->of_node);
clk_hw_unregister(&priv->hw);
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -164,9 +162,6 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
if (IS_ERR(priv->rsts))
return PTR_ERR(priv->rsts);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
clk = devm_clk_get(dev, "usb_extal");
if (!IS_ERR(clk) && !clk_prepare_enable(clk)) {
priv->extal = !!clk_get_rate(clk);
@@ -183,6 +178,8 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
return -ENOENT;
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
platform_set_drvdata(pdev, priv);
dev_set_drvdata(dev, priv);
@@ -193,11 +190,20 @@
init.num_parents = 0;
priv->hw.init = &init;
clk = clk_register(NULL, &priv->hw);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = devm_clk_hw_register(NULL, &priv->hw);
if (ret)
goto pm_put;
return of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &priv->hw);
if (ret)
goto pm_put;
return 0;
pm_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
static const struct dev_pm_ops rcar_usb2_clock_sel_pm_ops = {

@@ -48,18 +48,9 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
return state;
}
static int clk_periph_enable(struct clk_hw *hw)
static void clk_periph_enable_locked(struct clk_hw *hw)
{
struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
unsigned long flags = 0;
spin_lock_irqsave(&periph_ref_lock, flags);
gate->enable_refcnt[gate->clk_num]++;
if (gate->enable_refcnt[gate->clk_num] > 1) {
spin_unlock_irqrestore(&periph_ref_lock, flags);
return 0;
}
write_enb_set(periph_clk_to_bit(gate), gate);
udelay(2);
@@ -78,6 +69,32 @@ static int clk_periph_enable(struct clk_hw *hw)
udelay(1);
writel_relaxed(0, gate->clk_base + LVL2_CLK_GATE_OVRE);
}
}
static void clk_periph_disable_locked(struct clk_hw *hw)
{
struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
/*
* If peripheral is in the APB bus then read the APB bus to
* flush the write operation in apb bus. This will avoid the
* peripheral access after disabling clock
*/
if (gate->flags & TEGRA_PERIPH_ON_APB)
tegra_read_chipid();
write_enb_clr(periph_clk_to_bit(gate), gate);
}
static int clk_periph_enable(struct clk_hw *hw)
{
struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
unsigned long flags = 0;
spin_lock_irqsave(&periph_ref_lock, flags);
if (!gate->enable_refcnt[gate->clk_num]++)
clk_periph_enable_locked(hw);
spin_unlock_irqrestore(&periph_ref_lock, flags);
@@ -91,21 +108,28 @@ static void clk_periph_disable(struct clk_hw *hw)
spin_lock_irqsave(&periph_ref_lock, flags);
gate->enable_refcnt[gate->clk_num]--;
if (gate->enable_refcnt[gate->clk_num] > 0) {
spin_unlock_irqrestore(&periph_ref_lock, flags);
return;
}
WARN_ON(!gate->enable_refcnt[gate->clk_num]);
if (--gate->enable_refcnt[gate->clk_num] == 0)
clk_periph_disable_locked(hw);
spin_unlock_irqrestore(&periph_ref_lock, flags);
}
static void clk_periph_disable_unused(struct clk_hw *hw)
{
struct tegra_clk_periph_gate *gate = to_clk_periph_gate(hw);
unsigned long flags = 0;
spin_lock_irqsave(&periph_ref_lock, flags);
/*
* If peripheral is in the APB bus then read the APB bus to
* flush the write operation in apb bus. This will avoid the
* peripheral access after disabling clock
* Some clocks are duplicated and some of them are marked as critical,
* like fuse and fuse_burn for example, thus the enable_refcnt will
* be non-zero here if the "unused" duplicate is disabled by CCF.
*/
if (gate->flags & TEGRA_PERIPH_ON_APB)
tegra_read_chipid();
write_enb_clr(periph_clk_to_bit(gate), gate);
if (!gate->enable_refcnt[gate->clk_num])
clk_periph_disable_locked(hw);
spin_unlock_irqrestore(&periph_ref_lock, flags);
}
@@ -114,6 +138,7 @@ const struct clk_ops tegra_clk_periph_gate_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
.disable_unused = clk_periph_disable_unused,
};
struct clk *tegra_clk_register_periph_gate(const char *name,
@@ -148,9 +173,6 @@ struct clk *tegra_clk_register_periph_gate(const char *name,
gate->enable_refcnt = enable_refcnt;
gate->regs = pregs;
if (read_enb(gate) & periph_clk_to_bit(gate))
enable_refcnt[clk_num]++;
/* Data in .init is copied by clk_register(), so stack variable OK */
gate->hw.init = &init;
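
The Tegra rework above reduces both paths to one refcount discipline, and the new .disable_unused hook lets the clock framework gate truly unclaimed clocks while duplicated critical clocks (fuse and fuse_burn in the comment above) survive because their refcount is non-zero. A simplified sketch of that discipline, with hypothetical types:

	/* Hardware is touched only on the 0->1 and 1->0 transitions,
	 * always under the spinlock that guards the refcount.
	 */
	static int demo_gate_enable(struct demo_gate *g)
	{
		spin_lock(&g->lock);
		if (!g->refcnt++)		/* first user */
			demo_hw_enable(g);
		spin_unlock(&g->lock);
		return 0;
	}

	static void demo_gate_disable(struct demo_gate *g)
	{
		spin_lock(&g->lock);
		WARN_ON(!g->refcnt);		/* unbalanced disable */
		if (--g->refcnt == 0)		/* last user */
			demo_hw_disable(g);
		spin_unlock(&g->lock);
	}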

@@ -100,6 +100,15 @@ static void clk_periph_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
static void clk_periph_disable_unused(struct clk_hw *hw)
{
struct tegra_clk_periph *periph = to_clk_periph(hw);
const struct clk_ops *gate_ops = periph->gate_ops;
struct clk_hw *gate_hw = &periph->gate.hw;
gate_ops->disable_unused(gate_hw);
}
static void clk_periph_restore_context(struct clk_hw *hw)
{
struct tegra_clk_periph *periph = to_clk_periph(hw);
@@ -126,6 +135,7 @@ const struct clk_ops tegra_clk_periph_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
.disable_unused = clk_periph_disable_unused,
.restore_context = clk_periph_restore_context,
};
@@ -135,6 +145,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
.disable_unused = clk_periph_disable_unused,
.restore_context = clk_periph_restore_context,
};

@@ -1131,7 +1131,8 @@ static int clk_pllu_enable(struct clk_hw *hw)
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
_clk_pll_enable(hw);
if (!clk_pll_is_enabled(hw))
_clk_pll_enable(hw);
ret = clk_pll_wait_for_lock(pll);
if (ret < 0)
@@ -1748,15 +1749,13 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
return -EINVAL;
}
if (clk_pll_is_enabled(hw))
return 0;
input_rate = clk_hw_get_rate(__clk_get_hw(osc));
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
_clk_pll_enable(hw);
if (!clk_pll_is_enabled(hw))
_clk_pll_enable(hw);
ret = clk_pll_wait_for_lock(pll);
if (ret < 0)

@@ -352,7 +352,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
do { \
_val = read_sysreg(reg); \
_retries--; \
} while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
\
WARN_ON_ONCE(!_retries); \
_val; \

@@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
struct regmap *regmap = pmic->regmap;
struct mrfld_extcon_data *data;
unsigned int status;
unsigned int id;
int irq, ret;
@@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
/* Get initial state */
mrfld_extcon_role_detect(data);
/*
* Cached status value is used for cable detection, see comments
* in mrfld_extcon_cable_detect(), we need to sync cached value
* with a real state of the hardware.
*/
regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
data->status = status;
mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);

@@ -299,15 +299,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
return 0;
}
static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
char *buf)
{
return sprintf(buf, "%u\n", fw_cfg_rev);
}
static const struct {
struct attribute attr;
ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
} fw_cfg_rev_attr = {
static const struct kobj_attribute fw_cfg_rev_attr = {
.attr = { .name = "rev", .mode = S_IRUSR },
.show = fw_cfg_showrev,
};
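
The open-coded attribute struct removed above merely mimicked struct kobj_attribute's layout, while the sysfs core dispatches through the real type; the mismatch breaks type-based control-flow checks such as CFI. A sketch of the conventional form, assuming the standard sysfs macros:

	static ssize_t rev_show(struct kobject *k, struct kobj_attribute *a,
				char *buf)
	{
		return sprintf(buf, "%u\n", fw_cfg_rev);
	}

	/* __ATTR() fills .attr.name, .attr.mode and the show() hook with
	 * the kobj_attribute-typed callback that sysfs actually invokes.
	 */
	static const struct kobj_attribute rev_attr =
		__ATTR(rev, 0400, rev_show, NULL);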

@@ -454,6 +454,7 @@ static int s10_remove(struct platform_device *pdev)
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
fpga_mgr_free(mgr);
stratix10_svc_free_channel(priv->chan);
return 0;

@@ -48,12 +48,6 @@ static struct {
spinlock_t mem_limit_lock;
} kfd_mem_limit;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
uint32_t domain;
bool wait;
};
static const char * const domain_bit_to_string[] = {
"CPU",
"GTT",
@@ -337,11 +331,9 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
return ret;
}
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
struct amdgpu_vm_parser *p = param;
return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
@@ -355,20 +347,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
struct amdgpu_vm_parser param;
int ret;
param.domain = AMDGPU_GEM_DOMAIN_VRAM;
param.wait = false;
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
&param);
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("failed to validate PT BOs\n");
return ret;
}
ret = amdgpu_amdkfd_validate(&param, pd);
ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
if (ret) {
pr_err("failed to validate PD\n");
return ret;

@@ -2760,7 +2760,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_IH,
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
for (i = 0; i < adev->num_ip_blocks; i++) {
int j;
struct amdgpu_ip_block *block;

@@ -128,7 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct dma_fence *tmp = NULL;
bool skip_preamble, need_ctx_switch;
bool need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
@@ -221,7 +221,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
skip_preamble = ring->current_ctx == fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
status |= job->preamble_status;
status |= job->preemption_status;
@@ -239,14 +238,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
/* drop preamble IBs if we don't have a context switch */
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
skip_preamble &&
!(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
!amdgpu_mcbp &&
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
if (job && ring->funcs->emit_frame_cntl) {
if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
amdgpu_ring_emit_frame_cntl(ring, false, secure);

@@ -21,6 +21,11 @@
#ifndef __AMDGPU_UMC_H__
#define __AMDGPU_UMC_H__
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
*/
#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
/*
* (addr / 256) * 8192, the higher 26 bits in ErrorAddr
* is the index of 8KB block

@@ -143,7 +143,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
@@ -269,7 +269,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};

@@ -233,7 +233,7 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);
/* translate umc channel address to soc pa, 3 parts are included */
retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);

@@ -486,9 +486,6 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
if (retval == -ETIME)
qpd->reset_wavefronts = true;
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
if (qpd->reset_wavefronts) {
@@ -523,6 +520,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
uint64_t sdma_val = 0;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct mqd_manager *mqd_mgr =
dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
@@ -540,6 +539,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
pdd->sdma_past_activity_counter += sdma_val;
dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
return retval;
}
@@ -1632,7 +1633,7 @@ static int set_trap_handler(struct device_queue_manager *dqm,
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q, *next;
struct queue *q;
struct device_process_node *cur, *next_dpn;
int retval = 0;
bool found = false;
@@ -1640,12 +1641,19 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
dqm_lock(dqm);
/* Clear all user mode queues */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
while (!list_empty(&qpd->queues_list)) {
struct mqd_manager *mqd_mgr;
int ret;
q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
if (ret)
retval = ret;
dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
dqm_lock(dqm);
}
/* Unregister process */
@@ -1677,36 +1685,34 @@ static int get_wave_state(struct device_queue_manager *dqm,
u32 *save_area_used_size)
{
struct mqd_manager *mqd_mgr;
int r;
dqm_lock(dqm);
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
q->properties.is_active || !q->device->cwsr_enabled) {
r = -EINVAL;
goto dqm_unlock;
}
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
goto dqm_unlock;
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
q->properties.is_active || !q->device->cwsr_enabled ||
!mqd_mgr->get_wave_state) {
dqm_unlock(dqm);
return -EINVAL;
}
r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
ctl_stack_used_size, save_area_used_size);
dqm_unlock:
dqm_unlock(dqm);
return r;
/*
* get_wave_state is outside the dqm lock to prevent circular locking
* and the queue should be protected against destruction by the process
* lock.
*/
return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
ctl_stack_used_size, save_area_used_size);
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
struct queue *q, *next;
struct queue *q;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
@@ -1763,6 +1769,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = false;
}
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
*/
while (!list_empty(&qpd->queues_list)) {
q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
dqm_lock(dqm);
}
dqm_unlock(dqm);
/* Outside the DQM lock because under the DQM lock we can't do
@@ -1771,17 +1790,6 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
if (found)
kfd_dec_compute_active(dqm->dev);
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
*/
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
}
return retval;
}

@@ -3685,6 +3685,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
/*
* For reasons we don't (yet) fully understand a non-zero
* src_y coordinate into an NV12 buffer can cause a
* system hang. To avoid hangs (and maybe be overly cautious)
* let's reject both non-zero src_x and src_y.
*
* We currently know of only one use-case to reproduce a
* scenario with non-zero src_x and src_y for NV12, which
* is to gesture the YouTube Android app into full screen
* on ChromeOS.
*/
if (state->fb &&
state->fb->format->format == DRM_FORMAT_NV12 &&
(scaling_info->src_rect.x != 0 ||
scaling_info->src_rect.y != 0))
return -EINVAL;
/*
* For reasons we don't (yet) fully understand a non-zero
* src_y coordinate into an NV12 buffer can cause a
@@ -8291,7 +8308,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
BUG_ON(dm_new_crtc_state->stream == NULL);
/* Scaling or underscan settings */
if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
drm_atomic_crtc_needs_modeset(new_crtc_state))
update_stream_scaling_settings(
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
@@ -8744,6 +8762,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
continue;
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
if (ret)
goto fail;
if (!new_crtc_state->enable)
continue;

@@ -498,6 +498,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);

@@ -284,6 +284,37 @@ static int __set_input_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
/**
* Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
* the expected size.
* Returns 0 on success.
*/
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
{
const struct drm_color_lut *lut = NULL;
uint32_t size = 0;
lut = __extract_blob_lut(crtc_state->degamma_lut, &size);
if (lut && size != MAX_COLOR_LUT_ENTRIES) {
DRM_DEBUG_DRIVER(
"Invalid Degamma LUT size. Should be %u but got %u.\n",
MAX_COLOR_LUT_ENTRIES, size);
return -EINVAL;
}
lut = __extract_blob_lut(crtc_state->gamma_lut, &size);
if (lut && size != MAX_COLOR_LUT_ENTRIES &&
size != MAX_COLOR_LEGACY_LUT_ENTRIES) {
DRM_DEBUG_DRIVER(
"Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n",
MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES,
size);
return -EINVAL;
}
return 0;
}
/**
* amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
* @crtc: amdgpu_dm crtc state
@@ -317,14 +348,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
bool is_legacy;
int r;
degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES)
return -EINVAL;
r = amdgpu_dm_verify_lut_sizes(&crtc->base);
if (r)
return r;
degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES &&
regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES)
return -EINVAL;
has_degamma =
degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);

@@ -1704,6 +1704,8 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)
link->type = dc_connection_single;
link->local_sink = link->remote_sinks[0];
link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
dc_sink_retain(link->local_sink);
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
} else if (mst_enable == true &&
link->type == dc_connection_single &&
link->remote_sinks[0] != NULL) {

@@ -482,10 +482,13 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d
int vtaps_c = scl_data->taps.v_taps_c;
int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert);
int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c);
enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
if (dpp->base.ctx->dc->debug.use_max_lb)
return mem_cfg;
if (dpp->base.ctx->dc->debug.use_max_lb) {
if (scl_data->format == PIXEL_FORMAT_420BPP8
|| scl_data->format == PIXEL_FORMAT_420BPP10)
return LB_MEMORY_CONFIG_3;
return LB_MEMORY_CONFIG_0;
}
dpp->base.caps->dscl_calc_lb_num_partitions(
scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);

@@ -243,7 +243,7 @@ void dcn20_dccg_init(struct dce_hwseq *hws)
REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
/* This value is dependent on the hardware pipeline delay so set once per SOC */
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}
void dcn20_disable_vga(

@@ -64,6 +64,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN30_MAX_DSC_IMAGE_WIDTH 5184
#define DCN30_MAX_FMT_420_BUFFER_WIDTH 4096
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -2052,7 +2053,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->DISPCLKWithoutRamping,
v->DISPCLKDPPCLKVCOSpeed);
v->MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
v->soc.clock_limits[mode_lib->soc.num_states].dispclk_mhz,
v->soc.clock_limits[mode_lib->soc.num_states - 1].dispclk_mhz,
v->DISPCLKDPPCLKVCOSpeed);
if (v->DISPCLKWithoutRampingRoundedToDFSGranularity
> v->MaxDispclkRoundedToDFSGranularity) {
@@ -3957,20 +3958,20 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1.0 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1.0 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
if ((v->PlaneRequiredDISPCLKWithoutODMCombine >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithoutODMCombine = v->PixelClock[k] * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
if ((v->PlaneRequiredDISPCLKWithODMCombine2To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine2To1 = v->PixelClock[k] / 2 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0)
* (1 + v->DISPCLKRampingMargin / 100.0);
if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states])) {
if ((v->PlaneRequiredDISPCLKWithODMCombine4To1 >= v->MaxDispclk[i] && v->MaxDispclk[i] == v->MaxDispclk[mode_lib->soc.num_states - 1]
&& v->MaxDppclk[i] == v->MaxDppclk[mode_lib->soc.num_states - 1])) {
v->PlaneRequiredDISPCLKWithODMCombine4To1 = v->PixelClock[k] / 4 * (1 + v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
}
@@ -3987,19 +3988,30 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
} else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine;
/*420 format workaround*/
if (v->HActive[k] > 4096 && v->OutputFormat[k] == dm_420) {
}
if (v->DSCEnabled[k] && v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->HActive[k] / 2 > DCN30_MAX_DSC_IMAGE_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->OutputFormat[k] == dm_420 && v->HActive[k] > DCN30_MAX_FMT_420_BUFFER_WIDTH
&& v->ODMCombineEnablePerState[i][k] != dm_odm_combine_mode_4to1) {
if (v->HActive[k] / 2 > DCN30_MAX_FMT_420_BUFFER_WIDTH) {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_4to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine4To1;
} else {
v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1;
}
}
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
v->MPCCombine[i][j][k] = false;
v->NoOfDPP[i][j][k] = 4;
@@ -4281,42 +4293,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
for (i = 0; i < v->soc.num_states; i++) {
v->DSCCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->BlendingAndTiming[k] == k) {
if (v->Output[k] == dm_dp || v->Output[k] == dm_edp) {
if (v->OutputFormat[k] == dm_420) {
v->DSCFormatFactor = 2;
} else if (v->OutputFormat[k] == dm_444) {
v->DSCFormatFactor = 1;
} else if (v->OutputFormat[k] == dm_n422) {
v->DSCFormatFactor = 2;
} else {
v->DSCFormatFactor = 1;
}
if (v->RequiresDSC[i][k] == true) {
if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1) {
if (v->PixelClockBackEnd[k] / 12.0 / v->DSCFormatFactor
> (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
v->DSCCLKRequiredMoreThanSupported[i] = true;
}
} else if (v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1) {
if (v->PixelClockBackEnd[k] / 6.0 / v->DSCFormatFactor
> (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
v->DSCCLKRequiredMoreThanSupported[i] = true;
}
} else {
if (v->PixelClockBackEnd[k] / 3.0 / v->DSCFormatFactor
> (1.0 - v->DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * v->MaxDSCCLK[i]) {
v->DSCCLKRequiredMoreThanSupported[i] = true;
}
}
}
}
}
}
}
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
for (i = 0; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
@@ -5319,7 +5297,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
&& v->NotEnoughDSCUnits[i] == 0 && v->DSCCLKRequiredMoreThanSupported[i] == 0
&& v->NotEnoughDSCUnits[i] == 0
&& v->DTBCLKRequiredMoreThanSupported[i] == 0
&& v->ROBSupport[i][j] == 1 && v->DISPCLK_DPPCLK_Support[i][j] == 1 && v->TotalAvailablePipesSupport[i][j] == 1
&& EnoughWritebackUnits == 1 && WritebackModeSupport == 1

@@ -163,7 +163,7 @@ enum irq_type
};
#define DAL_VALID_IRQ_SRC_NUM(src) \
((src) <= DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
/* Number of Page Flip IRQ Sources. */
#define DAL_PFLIP_IRQ_SRC_NUM \

@@ -260,7 +260,6 @@ enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
struct mod_hdcp_output output;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
memset(hdcp, 0, sizeof(struct mod_hdcp));
memset(&output, 0, sizeof(output));
hdcp->config = *config;
HDCP_TOP_INTERFACE_TRACE(hdcp);

@@ -29,8 +29,10 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
u8 bksv[sizeof(n)] = { };
memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
n = *(uint64_t *)bksv;
while (n) {
count++;
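
For context: HDCP 1.x deems a BKSV valid only if its 40 bits contain exactly 20 ones, which is what the loop above counts. The fix widens the 5-byte message field into a zeroed 8-byte buffer first, so reading it as a u64 cannot overrun the array. A condensed sketch with a hypothetical helper:

	static bool bksv_is_valid(const u8 bksv[5])
	{
		u64 v = 0;

		memcpy(&v, bksv, 5);		/* stay within the 5 bytes */
		return hweight64(v) == 20;	/* exactly 20 ones required */
	}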

@@ -430,7 +430,7 @@
*/
typedef enum ENUM_NUM_SIMD_PER_CU {
NUM_SIMD_PER_CU = 0x00000004,
NUM_SIMD_PER_CU = 0x00000002,
} ENUM_NUM_SIMD_PER_CU;
/*

@@ -922,6 +922,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
.atomic_disable = malidp_de_plane_disable,
};
static const uint64_t linear_only_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -985,8 +990,8 @@
*/
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
&malidp_de_plane_funcs, formats, n,
(id == DE_SMART) ? NULL : modifiers, plane_type,
NULL);
(id == DE_SMART) ? linear_only_modifiers : modifiers,
plane_type, NULL);
if (ret < 0)
goto cleanup;

@@ -189,6 +189,9 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
u32 i, data;
u32 boot_address;
if (ast->config_mode != ast_use_p2a)
return false;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (data) {
boot_address = get_fw_base(ast);
@@ -207,6 +210,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
u8 *fw_addr = NULL;
u8 jreg;
if (ast->config_mode != ast_use_p2a)
return false;
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (!data) {
@@ -271,25 +277,55 @@ u8 ast_get_dp501_max_clk(struct drm_device *dev)
struct ast_private *ast = to_ast_private(dev);
u32 boot_address, offset, data;
u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
u32 *plinkcap;
boot_address = get_fw_base(ast);
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
/* validate FW version */
offset = 0xf000;
data = ast_mindwm(ast, boot_address + offset);
if ((data & 0xf0) != 0x10) /* version: 1x */
return maxclk;
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = ast_mindwm(ast, boot_address + offset);
if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
return maxclk;
/* Read Link Capability */
offset = 0xf014;
*(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
if (linkcap[2] == 0) {
linkrate = linkcap[0];
linklanes = linkcap[1];
data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
if (data > 0xff)
data = 0xff;
maxclk = (u8)data;
/* Read Link Capability */
offset = AST_DP501_LINKRATE;
plinkcap = (u32 *)linkcap;
*plinkcap = ast_mindwm(ast, boot_address + offset);
if (linkcap[2] == 0) {
linkrate = linkcap[0];
linklanes = linkcap[1];
data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
if (data > 0xff)
data = 0xff;
maxclk = (u8)data;
}
} else {
if (!ast->dp501_fw_buf)
return AST_DP501_DEFAULT_DCLK; /* 1024x768 as default */
/* dummy read */
offset = 0x0000;
data = readl(ast->dp501_fw_buf + offset);
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = readl(ast->dp501_fw_buf + offset);
if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
return maxclk;
/* Read Link Capability */
offset = AST_DP501_LINKRATE;
plinkcap = (u32 *)linkcap;
*plinkcap = readl(ast->dp501_fw_buf + offset);
if (linkcap[2] == 0) {
linkrate = linkcap[0];
linklanes = linkcap[1];
data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
if (data > 0xff)
data = 0xff;
maxclk = (u8)data;
}
}
return maxclk;
}
@@ -298,26 +334,57 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
boot_address = get_fw_base(ast);
if (ast->config_mode == ast_use_p2a) {
boot_address = get_fw_base(ast);
/* validate FW version */
offset = 0xf000;
data = ast_mindwm(ast, boot_address + offset);
if ((data & 0xf0) != 0x10)
return false;
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = ast_mindwm(ast, boot_address + offset);
if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
return false;
/* validate PnP Monitor */
offset = 0xf010;
data = ast_mindwm(ast, boot_address + offset);
if (!(data & 0x01))
return false;
/* validate PnP Monitor */
offset = AST_DP501_PNPMONITOR;
data = ast_mindwm(ast, boot_address + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
/* Read EDID */
offset = 0xf020;
for (i = 0; i < 128; i += 4) {
data = ast_mindwm(ast, boot_address + offset + i);
*(u32 *)(ediddata + i) = data;
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
data = ast_mindwm(ast, boot_address + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
}
} else {
if (!ast->dp501_fw_buf)
return false;
/* dummy read */
offset = 0x0000;
data = readl(ast->dp501_fw_buf + offset);
/* validate FW version */
offset = AST_DP501_GBL_VERSION;
data = readl(ast->dp501_fw_buf + offset);
if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
return false;
/* validate PnP Monitor */
offset = AST_DP501_PNPMONITOR;
data = readl(ast->dp501_fw_buf + offset);
if (!(data & AST_DP501_PNP_CONNECTED))
return false;
/* Read EDID */
offset = AST_DP501_EDID_DATA;
for (i = 0; i < 128; i += 4) {
data = readl(ast->dp501_fw_buf + offset + i);
pEDIDidx = (u32 *)(ediddata + i);
*pEDIDidx = data;
}
}
return true;

@@ -120,6 +120,7 @@ struct ast_private {
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
enum ast_chip chip;
bool vga2_clone;
@@ -298,6 +299,17 @@ int ast_mode_config_init(struct ast_private *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
#define AST_DP501_FW_VERSION_MASK GENMASK(7, 4)
#define AST_DP501_FW_VERSION_1 BIT(4)
#define AST_DP501_PNP_CONNECTED BIT(1)
#define AST_DP501_DEFAULT_DCLK 65
#define AST_DP501_GBL_VERSION 0xf000
#define AST_DP501_PNPMONITOR 0xf010
#define AST_DP501_LINKRATE 0xf014
#define AST_DP501_EDID_DATA 0xf020
int ast_mm_init(struct ast_private *ast);
/* ast post */

@@ -98,7 +98,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/* Double check it's actually working */
data = ast_read32(ast, 0xf004);
if (data != 0xFFFFFFFF) {
if ((data != 0xFFFFFFFF) && (data != 0x00)) {
/* P2A works, grab silicon revision */
ast->config_mode = ast_use_p2a;
@@ -406,7 +406,6 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
return ast;
dev = &ast->base;
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
ast->regs = pcim_iomap(pdev, 1, 0);
@@ -446,6 +445,14 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);
/* map reserved buffer */
ast->dp501_fw_buf = NULL;
if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
ast->dp501_fw_buf = pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
if (!ast->dp501_fw_buf)
drm_info(dev, "failed to map reserved buffer!\n");
}
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);

@@ -2369,9 +2369,9 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
dev_err(dev, "pm_runtime_get_sync failed\n");
dev_err(dev, "pm_runtime_resume_and_get failed\n");
pm_runtime_disable(dev);
goto clk_disable;
}

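This hunk, like the cdns-dsi, mtk-crtc and vc4-hdmi hunks further down, applies the same conversion: pm_runtime_get_sync() increments the device usage counter even when the resume fails, so every error path needed a matching pm_runtime_put_noidle() that was easy to omit, leaking a PM reference. pm_runtime_resume_and_get() rebalances the counter itself on failure. The two shapes, side by side:

    /* old: the counter must be dropped by hand on failure */
    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);   /* forgetting this leaks a reference */
            return ret;
    }

    /* new: a failed resume leaves the counter balanced */
    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            return ret;
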
@@ -1028,7 +1028,7 @@ static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet;
int ret, i, tx_len, rx_len;
ret = pm_runtime_get_sync(host->dev);
ret = pm_runtime_resume_and_get(host->dev);
if (ret < 0)
return ret;

@@ -1209,6 +1209,7 @@ static struct i2c_device_id lt9611_id[] = {
{ "lontium,lt9611", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, lt9611_id);
static const struct of_device_id lt9611_match_table[] = {
{ .compatible = "lontium,lt9611" },

@@ -21,6 +21,7 @@
#include <linux/sys_soc.h>
#include <linux/time64.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -742,7 +743,9 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
return 0;
}
static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
static void
nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -803,17 +806,6 @@ static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
return 0;
}
static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* At least LCDIF + NWL needs active high sync */
adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
return true;
}
static enum drm_mode_status
nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
@@ -831,6 +823,24 @@ nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
/* At least LCDIF + NWL needs active high sync */
adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
/* Do a full modeset if crtc_state->active is changed to be true. */
if (crtc_state->active_changed && crtc_state->active)
crtc_state->mode_changed = true;
return 0;
}
static void
nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -862,7 +872,9 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
drm_mode_debug_printmodeline(adjusted_mode);
}
static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
static void
nwl_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -897,7 +909,9 @@ static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
}
}
static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
static void
nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -942,14 +956,17 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
.pre_enable = nwl_dsi_bridge_pre_enable,
.enable = nwl_dsi_bridge_enable,
.disable = nwl_dsi_bridge_disable,
.mode_fixup = nwl_dsi_bridge_mode_fixup,
.mode_set = nwl_dsi_bridge_mode_set,
.mode_valid = nwl_dsi_bridge_mode_valid,
.attach = nwl_dsi_bridge_attach,
.detach = nwl_dsi_bridge_detach,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_check = nwl_dsi_bridge_atomic_check,
.atomic_pre_enable = nwl_dsi_bridge_atomic_pre_enable,
.atomic_enable = nwl_dsi_bridge_atomic_enable,
.atomic_disable = nwl_dsi_bridge_atomic_disable,
.mode_set = nwl_dsi_bridge_mode_set,
.mode_valid = nwl_dsi_bridge_mode_valid,
.attach = nwl_dsi_bridge_attach,
.detach = nwl_dsi_bridge_detach,
};
static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)

@@ -602,7 +602,14 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return 0;
/* Some branches advertise having 0 downstream ports, despite also advertising they have a
* downstream port present. The DP spec isn't clear on whether this is allowed, but since
* some branches do it we need to handle it regardless.
*/
len = drm_dp_downstream_port_count(dpcd);
if (!len)
return 0;
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;

@@ -5080,7 +5080,7 @@ static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
if (size < sizeof(struct dp_sdp))
return -EINVAL;
memset(vsc, 0, size);
memset(vsc, 0, sizeof(*vsc));
if (sdp->sdp_header.HB0 != 0)
return -EINVAL;

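The memset above was bounded by the caller-supplied buffer length, which the preceding check only guarantees to be at least sizeof(struct dp_sdp); whenever that length exceeds sizeof(*vsc), the old code zeroed memory past the end of the output structure. The rule the fix applies:

    /* BAD:  memset(vsc, 0, size);   "size" describes the input buffer  */
    /* GOOD: bound the write by the destination object itself           */
    memset(vsc, 0, sizeof(*vsc));
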
@@ -274,7 +274,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
drm_connector_list_iter_end(&conn_iter);
}
ret = pm_runtime_get_sync(crtc->dev->dev);
ret = pm_runtime_resume_and_get(crtc->dev->dev);
if (ret < 0) {
DRM_ERROR("Failed to enable power domain: %d\n", ret);
return ret;

@@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
dev->mode_config.allow_fb_modifiers = true;
out:
pm_runtime_put_sync(dev->dev);

@@ -347,6 +347,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
return mdp4_plane->pipe;
}
static const uint64_t supported_format_modifiers[] = {
DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane)
@@ -375,7 +381,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
NULL, type, NULL);
supported_format_modifiers, type, NULL);
if (ret)
goto fail;

@@ -10,7 +10,6 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE

@@ -700,7 +700,6 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.allow_fb_modifiers = true;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;

@@ -1334,6 +1334,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
if (obj->import_attach) {
DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}

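The added drm_gem_object_put() closes a reference leak: the handle lookup that produced obj took a reference, and the early -EINVAL return dropped the pointer without releasing it. In context (the lookup line is paraphrased from the surrounding function):

    obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
    if (obj == NULL)
            return ERR_PTR(-ENOENT);

    if (obj->import_attach) {
            DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
            drm_gem_object_put(obj);      /* drop the lookup reference */
            return ERR_PTR(-EINVAL);
    }
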
@@ -416,13 +416,13 @@ radeon_pci_shutdown(struct pci_dev *pdev)
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
#ifdef CONFIG_PPC64
#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
* during kexec.
* Make this power specific because it breaks
* some non-power boards.
* during kexec, shutdown or reboot.
* Make this power and Loongson specific because
* it breaks some other boards.
*/
radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif

@@ -243,7 +243,6 @@ struct dw_mipi_dsi_rockchip {
struct dw_mipi_dsi *dmd;
const struct rockchip_dw_dsi_chip_data *cdata;
struct dw_mipi_dsi_plat_data pdata;
int devcnt;
};
struct dphy_pll_parameter_map {
@@ -1141,9 +1140,6 @@ static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
{
struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
if (dsi->devcnt == 0)
component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
dw_mipi_dsi_remove(dsi->dmd);
return 0;

@@ -349,8 +349,8 @@ static const struct vop_win_phy rk3066_win0_data = {
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 4),
.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 19),
.act_info = VOP_REG(RK3066_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN0_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -361,13 +360,12 @@ static const struct vop_win_phy rk3066_win0_data = {
};
static const struct vop_win_phy rk3066_win1_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 7),
.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 23),
.act_info = VOP_REG(RK3066_WIN1_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3066_WIN1_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN1_DSP_ST, 0x1fff1fff, 0),
@@ -382,8 +381,8 @@ static const struct vop_win_phy rk3066_win2_data = {
.nformats = ARRAY_SIZE(formats_win_lite),
.format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
.format = VOP_REG(RK3066_SYS_CTRL1, 0x7, 10),
.rb_swap = VOP_REG(RK3066_SYS_CTRL1, 0x1, 27),
.dsp_info = VOP_REG(RK3066_WIN2_DSP_INFO, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3066_WIN2_DSP_ST, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3066_WIN2_MST, 0xffffffff, 0),
@@ -408,6 +407,9 @@ static const struct vop_common rk3066_common = {
.dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
.dither_up = VOP_REG(RK3066_DSP_CTRL0, 0x1, 9),
.dsp_lut_en = VOP_REG(RK3066_SYS_CTRL1, 0x1, 31),
.data_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 25),
};
static const struct vop_win_data rk3066_vop_win_data[] = {
@@ -505,7 +507,10 @@ static const struct vop_common rk3188_common = {
.dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
.dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
.dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 24),
.dither_up = VOP_REG(RK3188_DSP_CTRL0, 0x1, 9),
.dsp_lut_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 28),
.data_blank = VOP_REG(RK3188_DSP_CTRL1, 0x1, 25),
};
static const struct vop_win_data rk3188_vop_win_data[] = {

@@ -113,7 +113,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
rmb(); /* for list_empty to work without lock */
if (list_empty(&entity->list) ||
spsc_queue_count(&entity->job_queue) == 0)
spsc_queue_count(&entity->job_queue) == 0 ||
entity->stopped)
return true;
return false;
@@ -218,11 +219,16 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
struct dma_fence *f;
int r;
while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
while ((f = job->sched->ops->dependency(job, entity)))
dma_fence_wait(f, false);
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);

@@ -887,9 +887,33 @@ EXPORT_SYMBOL(drm_sched_init);
*/
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
struct drm_sched_entity *s_entity;
int i;
if (sched->thread)
kthread_stop(sched->thread);
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];
if (!rq)
continue;
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list)
/*
* Prevents reinsertion and marks job_queue as idle,
* it will be removed from rq in drm_sched_entity_fini
* eventually
*/
s_entity->stopped = true;
spin_unlock(&rq->lock);
}
/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
wake_up_all(&sched->job_scheduled);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&sched->work_tdr);

@@ -947,6 +947,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.atomic_disable = tegra_cursor_atomic_disable,
};
static const uint64_t linear_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
@@ -975,7 +980,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, NULL,
num_formats, linear_modifiers,
DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
kfree(plane);
@@ -1094,7 +1099,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
num_formats, NULL, type, NULL);
num_formats, linear_modifiers,
type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);

@@ -1127,8 +1127,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;

@@ -1001,7 +1001,7 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
[0] = VC4_ENCODER_TYPE_VEC,
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
},
};
@@ -1042,6 +1042,9 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
struct vc4_encoder *vc4_encoder;
int i;
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
continue;
vc4_encoder = to_vc4_encoder(encoder);
for (i = 0; i < ARRAY_SIZE(pv_data->encoder_types); i++) {
if (vc4_encoder->type == encoder_types[i]) {

@@ -837,7 +837,7 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *right, unsigned int *left,
unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);
/* vc4_debugfs.c */

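This prototype fix is worth a note because the compiler cannot catch what it prevents: parameter names in a declaration are never checked against the definition, and all four pointers share a type, so a caller written against the old (right, left) order builds cleanly and simply swaps the two margins at runtime. An illustration:

    unsigned int left, right, top, bottom;

    /* trusting the old, wrong header order: no diagnostic,
     * but left and right come back exchanged */
    vc4_crtc_get_margins(state, &right, &left, &top, &bottom);
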
@@ -627,7 +627,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder)
unsigned long pixel_rate, hsm_rate;
int ret;
ret = pm_runtime_get_sync(&vc4_hdmi->pdev->dev);
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
return;
@@ -1807,6 +1807,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (vc4_hdmi->variant->reset)
vc4_hdmi->variant->reset(vc4_hdmi);
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
HDMI_READ(HDMI_VID_CTL) & VC4_HD_VID_CTL_ENABLE) {
clk_prepare_enable(vc4_hdmi->pixel_clock);
clk_prepare_enable(vc4_hdmi->hsm_clock);
clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
}
pm_runtime_enable(dev);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);

@@ -503,7 +503,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
return ret;
encoder = &txp->connector.encoder;
encoder->possible_crtcs |= drm_crtc_mask(crtc);
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
dev_name(dev), txp);

@@ -209,6 +209,7 @@ int virtio_gpu_init(struct drm_device *dev)
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
dev->dev_private = NULL;
kfree(vgdev);
return ret;
}

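Without the added line, vgdev is freed here while dev->dev_private still points at it, and the later release path frees the same allocation again; nulling the back-pointer is what lets that path tell probe already cleaned up. A sketch of the release-side guard this relies on (assuming the release path re-derives vgdev from dev_private):

    struct virtio_gpu_device *vgdev = dev->dev_private;

    if (!vgdev)
            return;   /* probe failed and already freed everything */
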
@@ -3,7 +3,6 @@ config DRM_ZTE
tristate "DRM Support for ZTE SoCs"
depends on DRM && ARCH_ZX
select DRM_KMS_CMA_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
select SND_SOC_HDMI_CODEC if SND_SOC
select VIDEOMODE_HELPERS

@@ -1386,7 +1386,7 @@ static int coresight_fixup_device_conns(struct coresight_device *csdev)
}
}
return 0;
return ret;
}
static int coresight_remove_match(struct device *dev, void *data)

@@ -530,7 +530,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
if (lost && *barrier) {
if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
*buf_ptr = *barrier;
barrier++;
}

@@ -2793,7 +2793,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
cma_init_resolve_route_work(work, id_priv);
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec)
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
goto err1;

@@ -295,6 +295,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
ret = -EINVAL;
goto free_dma;
}

@@ -139,7 +139,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
err = -EINVAL;
err = PTR_ERR(umem);
goto err1;
}

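ib_umem_get() reports failure through an error pointer, and the old code flattened every failure into -EINVAL. Propagating PTR_ERR(umem) preserves the real cause (-ENOMEM, -EFAULT, ...). The standard idiom:

    umem = ib_umem_get(...);              /* arguments elided */
    if (IS_ERR(umem)) {
            err = PTR_ERR(umem);          /* propagate, don't overwrite */
            goto err1;
    }
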
@@ -35,10 +35,10 @@ static const struct kernel_param_ops sg_tablesize_ops = {
.get = param_get_int,
};
static int isert_sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE;
static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
MODULE_PARM_DESC(sg_tablesize,
"Number of gather/scatter entries in a single scsi command, should >= 128 (default: 256, max: 4096)");
"Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);

@@ -65,9 +65,6 @@
*/
#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
/* Default I/O size is 1MB */
#define ISCSI_ISER_DEF_SG_TABLESIZE 256
/* Minimum I/O size is 512KB */
#define ISCSI_ISER_MIN_SG_TABLESIZE 128

@@ -47,12 +47,15 @@ enum {
MAX_PATHS_NUM = 128,
/*
* With the size of struct rtrs_permit allocated on the client, 4K
* is the maximum number of rtrs_permits we can allocate. This number is
* also used on the client to allocate the IU for the user connection
* to receive the RDMA addresses from the server.
* Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
* and the minimum chunk size is 4096 (2^12).
* So the maximum sess_queue_depth is 65536 (2^16) in theory.
* But mempool_create, create_qp and ib_post_send fail with
* "cannot allocate memory" error if sess_queue_depth is too big.
* Therefore the practical max value of sess_queue_depth is
* somewhere between 1 and 65536 and it depends on the system.
*/
MAX_SESS_QUEUE_DEPTH = 4096,
MAX_SESS_QUEUE_DEPTH = 65536,
RTRS_HB_INTERVAL_MS = 5000,
RTRS_HB_MISSED_MAX = 5,

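The new limit is the arithmetic from the comment made explicit: 2^28 bytes of immediate payload divided by the minimum 2^12-byte chunk gives 2^28 / 2^12 = 2^16 = 65536 outstanding chunks, so 65536 is the theoretical ceiling for MAX_SESS_QUEUE_DEPTH; the old 4096 sat arbitrarily below it, and as the comment notes, the practical limit is lower and system-dependent.
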
@@ -596,8 +596,11 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
out_err_bus_register:
tpci200_uninstall(tpci200);
/* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
tpci200->info->cfg_regs = NULL;
out_err_install:
iounmap(tpci200->info->cfg_regs);
if (tpci200->info->cfg_regs)
iounmap(tpci200->info->cfg_regs);
out_err_ioremap:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
out_err_pci_request:

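The relabelled unwind restores the goto-ladder invariant that each label releases exactly what was set up before the failing step. Here tpci200_uninstall() unmaps cfg_regs itself, so clearing the pointer afterwards turns the later iounmap into a no-op and each path unmaps the registers exactly once:

    out_err_bus_register:
            tpci200_uninstall(tpci200);
            tpci200->info->cfg_regs = NULL;      /* already unmapped above */
    out_err_install:
            if (tpci200->info->cfg_regs)         /* only if still mapped */
                    iounmap(tpci200->info->cfg_regs);
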
@@ -2342,7 +2342,7 @@ static void __exit
HFC_cleanup(void)
{
if (timer_pending(&hfc_tl))
del_timer(&hfc_tl);
del_timer_sync(&hfc_tl);
pci_unregister_driver(&hfc_driver);
}

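del_timer() only deactivates a timer that has not fired yet; if the callback is already running on another CPU it keeps running, and on a module-exit path it can then execute after the module's text and data are gone. del_timer_sync() additionally waits for a concurrently executing handler to finish, which is what an __exit routine needs. The patched function, reassembled from the hunk:

    static void __exit
    HFC_cleanup(void)
    {
            if (timer_pending(&hfc_tl))
                    del_timer_sync(&hfc_tl);   /* waits out a running callback */

            pci_unregister_driver(&hfc_driver);
    }
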
@@ -532,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
region.bdev = wc->ssd_dev->bdev;
region.sector = 0;
region.count = PAGE_SIZE >> SECTOR_SHIFT;
region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;
if (unlikely(region.sector + region.count > wc->metadata_sectors))
region.count = wc->metadata_sectors - region.sector;
@@ -1301,8 +1301,12 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
writecache_flush(wc);
if (writecache_has_error(wc))
goto unlock_error;
if (unlikely(wc->cleaner))
goto unlock_remap_origin;
goto unlock_submit;
} else {
if (dm_bio_get_target_bio_nr(bio))
goto unlock_remap_origin;
writecache_offload_bio(wc, bio);
goto unlock_return;
}
@@ -1360,14 +1364,18 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
} else {
do {
bool found_entry = false;
bool search_used = false;
if (writecache_has_error(wc))
goto unlock_error;
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
if (e) {
if (!writecache_entry_is_committed(wc, e))
if (!writecache_entry_is_committed(wc, e)) {
search_used = true;
goto bio_copy;
}
if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
wc->overwrote_committed = true;
search_used = true;
goto bio_copy;
}
found_entry = true;
@@ -1377,7 +1385,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
}
e = writecache_pop_from_freelist(wc, (sector_t)-1);
if (unlikely(!e)) {
if (!found_entry) {
if (!WC_MODE_PMEM(wc) && !found_entry) {
direct_write:
e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
if (e) {
@@ -1404,13 +1412,31 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
while (bio_size < bio->bi_iter.bi_size) {
struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
if (!f)
break;
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
(bio_size >> SECTOR_SHIFT), wc->seq_count);
writecache_insert_entry(wc, f);
wc->uncommitted_blocks++;
if (!search_used) {
struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
if (!f)
break;
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
(bio_size >> SECTOR_SHIFT), wc->seq_count);
writecache_insert_entry(wc, f);
wc->uncommitted_blocks++;
} else {
struct wc_entry *f;
struct rb_node *next = rb_next(&e->rb_node);
if (!next)
break;
f = container_of(next, struct wc_entry, rb_node);
if (f != e + 1)
break;
if (read_original_sector(wc, f) !=
read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
break;
if (unlikely(f->write_in_progress))
break;
if (writecache_entry_is_committed(wc, f))
wc->overwrote_committed = true;
e = f;
}
bio_size += wc->block_size;
current_cache_sec += wc->block_size >> SECTOR_SHIFT;
}
@@ -2465,7 +2491,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ti->num_flush_bios = 1;
ti->num_flush_bios = WC_MODE_PMEM(wc) ? 1 : 2;
ti->flush_supported = true;
ti->num_discard_bios = 1;

@@ -1390,6 +1390,13 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
return -ENXIO;
}
/*
* Devices that have zones with a capacity smaller than the zone size
* (e.g. NVMe zoned namespaces) are not supported.
*/
if (blkz->capacity != blkz->len)
return -ENXIO;
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);

@@ -1231,8 +1231,8 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
* REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE and REQ_OP_ZONE_FINISH.
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
* operations and REQ_OP_ZONE_APPEND (zone append writes).
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1262,9 +1262,13 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
BUG_ON(op_is_zone_mgmt(bio_op(bio)));
BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}

@@ -549,7 +549,8 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
delete_at(n, index);
}
*new_root = shadow_root(&spine);
if (!r)
*new_root = shadow_root(&spine);
exit_shadow_spine(&spine);
return r;

@@ -171,6 +171,14 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
* Any block we allocate has to be free in both the old and current ll.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
if (r == -ENOSPC) {
/*
* There's no free block between smd->begin and the end of the metadata device.
* We search before smd->begin in case something has been freed.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
}
if (r)
return r;
@@ -199,7 +207,6 @@ static int sm_disk_commit(struct dm_space_map *sm)
return r;
memcpy(&smd->old_ll, &smd->ll, sizeof(smd->old_ll));
smd->begin = 0;
smd->nr_allocated_this_transaction = 0;
r = sm_disk_get_nr_free(sm, &nr_free);

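This hunk and the sm_metadata one below are the same fix: allocation scans forward from the begin cursor, so near the end of the device a full disk could be reported even though blocks earlier in the transaction had been freed. On -ENOSPC the search now wraps around to [0, begin); the companion change in the commit path stops resetting begin to 0, which the wrap-around makes unnecessary. The retry reads:

    r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll,
                                     smd->begin, smd->ll.nr_blocks, b);
    if (r == -ENOSPC)
            /* nothing free in the tail; look before the cursor */
            r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll,
                                             0, smd->begin, b);
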
@@ -452,6 +452,14 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
* Any block we allocate has to be free in both the old and current ll.
*/
r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b);
if (r == -ENOSPC) {
/*
* There's no free block between smm->begin and the end of the metadata device.
* We search before smm->begin in case something has been freed.
*/
r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, 0, smm->begin, b);
}
if (r)
return r;
@@ -503,7 +511,6 @@ static int sm_metadata_commit(struct dm_space_map *sm)
return r;
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
smm->begin = 0;
smm->allocated_this_transaction = 0;
return 0;

@@ -380,7 +380,7 @@ static void saa6588_configure(struct saa6588 *s)
/* ---------------------------------------------------------------------- */
static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
static long saa6588_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct saa6588 *s = to_saa6588(sd);
struct saa6588_command *a = arg;
@@ -433,7 +433,7 @@ static int saa6588_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops saa6588_core_ops = {
.ioctl = saa6588_ioctl,
.command = saa6588_command,
};
static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = {

@@ -3187,7 +3187,7 @@ static int radio_release(struct file *file)
btv->radio_user--;
bttv_call_all(btv, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);
if (btv->radio_user == 0)
btv->has_radio_tuner = 0;
@@ -3268,7 +3268,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
cmd.result = -ENODEV;
radio_enable(btv);
bttv_call_all(btv, core, ioctl, SAA6588_CMD_READ, &cmd);
bttv_call_all(btv, core, command, SAA6588_CMD_READ, &cmd);
return cmd.result;
}
@@ -3289,7 +3289,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
cmd.instance = file;
cmd.event_list = wait;
cmd.poll_mask = res;
bttv_call_all(btv, core, ioctl, SAA6588_CMD_POLL, &cmd);
bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);
return cmd.poll_mask;
}

@@ -1178,7 +1178,7 @@ static int video_release(struct file *file)
saa_call_all(dev, tuner, standby);
if (vdev->vfl_type == VFL_TYPE_RADIO)
saa_call_all(dev, core, ioctl, SAA6588_CMD_CLOSE, &cmd);
saa_call_all(dev, core, command, SAA6588_CMD_CLOSE, &cmd);
mutex_unlock(&dev->lock);
return 0;
@@ -1197,7 +1197,7 @@ static ssize_t radio_read(struct file *file, char __user *data,
cmd.result = -ENODEV;
mutex_lock(&dev->lock);
saa_call_all(dev, core, ioctl, SAA6588_CMD_READ, &cmd);
saa_call_all(dev, core, command, SAA6588_CMD_READ, &cmd);
mutex_unlock(&dev->lock);
return cmd.result;
@@ -1213,7 +1213,7 @@ static __poll_t radio_poll(struct file *file, poll_table *wait)
cmd.event_list = wait;
cmd.poll_mask = 0;
mutex_lock(&dev->lock);
saa_call_all(dev, core, ioctl, SAA6588_CMD_POLL, &cmd);
saa_call_all(dev, core, command, SAA6588_CMD_POLL, &cmd);
mutex_unlock(&dev->lock);
return rc | cmd.poll_mask;

@@ -47,7 +47,7 @@ static int venc_is_second_field(struct vpbe_display *disp_dev)
ret = v4l2_subdev_call(vpbe_dev->venc,
core,
ioctl,
command,
VENC_GET_FLD,
&val);
if (ret < 0) {

@@ -521,9 +521,7 @@ static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
return ret;
}
static long venc_ioctl(struct v4l2_subdev *sd,
unsigned int cmd,
void *arg)
static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
u32 val;
@@ -542,7 +540,7 @@ static long venc_ioctl(struct v4l2_subdev *sd,
}
static const struct v4l2_subdev_core_ops venc_core_ops = {
.ioctl = venc_ioctl,
.command = venc_command,
};
static const struct v4l2_subdev_video_ops venc_video_ops = {

@@ -326,7 +326,8 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
}
if (attr->query.prog_cnt != 0 && prog_ids && cnt)
ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);
ret = bpf_prog_array_copy_to_user(progs, prog_ids,
attr->query.prog_cnt);
unlock:
mutex_unlock(&ir_raw_handler_lock);

@@ -26,6 +26,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
struct dtv5100_state *st = d->priv;
unsigned int pipe;
u8 request;
u8 type;
u16 value;
@@ -34,6 +35,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
switch (wlen) {
case 1:
/* write { reg }, read { value } */
pipe = usb_rcvctrlpipe(d->udev, 0);
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_READ :
DTV5100_TUNER_READ);
type = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -41,6 +43,7 @@
break;
case 2:
/* write { reg, value } */
pipe = usb_sndctrlpipe(d->udev, 0);
request = (addr == DTV5100_DEMOD_ADDR ? DTV5100_DEMOD_WRITE :
DTV5100_TUNER_WRITE);
type = USB_TYPE_VENDOR | USB_DIR_OUT;
@@ -54,7 +57,7 @@ static int dtv5100_i2c_msg(struct dvb_usb_device *d, u8 addr,
memcpy(st->data, rbuf, rlen);
msleep(1); /* avoid I2C errors */
return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), request,
return usb_control_msg(d->udev, pipe, request,
type, value, index, st->data, rlen,
DTV5100_USB_TIMEOUT);
}
@@ -141,7 +144,7 @@ static int dtv5100_probe(struct usb_interface *intf,
/* initialize non qt1010/zl10353 part? */
for (i = 0; dtv5100_init[i].request; i++) {
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
dtv5100_init[i].request,
USB_TYPE_VENDOR | USB_DIR_OUT,
dtv5100_init[i].value,

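Both dtv5100 hunks enforce the same USB rule: the direction encoded in the pipe must agree with the USB_DIR_IN/USB_DIR_OUT bit of the request type, otherwise usb_control_msg() drives the transfer in the wrong direction (the USB core can warn about such bogus control directions). A compact restatement of the fix:

    if (type & USB_DIR_IN)
            pipe = usb_rcvctrlpipe(d->udev, 0);   /* device-to-host read */
    else
            pipe = usb_sndctrlpipe(d->udev, 0);   /* host-to-device write */

    ret = usb_control_msg(d->udev, pipe, request, type,
                          value, index, st->data, rlen, DTV5100_USB_TIMEOUT);
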
Some files were not shown because too many files have changed in this diff.