This is the 5.4.73 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl+ahE8ACgkQONu9yGCS
 aT4j1A/9HzkKKoqZ2vXYQ1/uEnUqZech9ly1KxpNTBrSZYAtx3MaWY7tGDEx2BqD
 y6iw9x4MymhHEbpwLg6YmmdWuMQLNNYJGoyLiPJgWhkE4c7zHadhNz1DcPEI8F7z
 bSlUJ3Oebr8gzv0FvUmeVXw7Z2EuOqM1zGgTAZfnKY3DkYHbLnrzUJ4AiI8TNeba
 pPIhjfIJ1TvhF+s5ggf2m8OtSWLZ0doCWCPmCFe2WyERX2WYCzPgsm0yL7L7oXME
 ZqWpOcClBsiYekBNcZ4kxozhJtArCnv24n9VoXJ/YJIlWKvCA6uC8r527nGN/z08
 dfFelj1nDs7/VrCSP4+109EjxLQnSYGgIWP0g0OsC+9wOmrQsYJ1azP1eNjm+NuC
 hPa8uYVEZxwVyJuEfu4ZB4NMZBlD2qnHoskvBKbyZ8yaVnbvlMp552XMwsmJBpCs
 8wArzabrJEz396LUUIYG829D7NBDuRav1Miu+FTzlbn+xZ/Y/S8OmhoG2stWa4wV
 y5x0M0DWgrqiZ9rMkz9A03UNnCInQVTfIBoMl63xFitW4/0vLsln3+CjzlKm7H46
 rD/tKACUoCDjR5DN+JwQzmTdL9zBb4p1cXwWjWb6rON3BkXmO0JVAxzurxI9PfX0
 ZWDydZ3HNmrm0d3J12zf3kTX56PfPFAGWUsEc4Ntb5zdWXSQJsE=
 =fZ3T
 -----END PGP SIGNATURE-----

Merge 5.4.73 into android11-5.4-lts

Changes in 5.4.73
	ibmveth: Switch order of ibmveth_helper calls.
	ibmveth: Identify ingress large send packets.
	ipv4: Restore flowi4_oif update before call to xfrm_lookup_route
	mlx4: handle non-napi callers to napi_poll
	net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()
	net: fec: Fix PHY init after phy_reset_after_clk_enable()
	net: fix pos incrementment in ipv6_route_seq_next
	net/smc: fix valid DMBE buffer sizes
	net/tls: sendfile fails with ktls offload
	net: usb: qmi_wwan: add Cellient MPL200 card
	tipc: fix the skb_unshare() in tipc_buf_append()
	socket: fix option SO_TIMESTAMPING_NEW
	can: m_can_platform: don't call m_can_class_suspend in runtime suspend
	can: j1935: j1939_tp_tx_dat_new(): fix missing initialization of skbcnt
	net: j1939: j1939_session_fresh_new(): fix missing initialization of skbcnt
	net/ipv4: always honour route mtu during forwarding
	net_sched: remove a redundant goto chain check
	r8169: fix data corruption issue on RTL8402
	cxgb4: handle 4-tuple PEDIT to NAT mode translation
	binder: fix UAF when releasing todo list
	ALSA: bebob: potential info leak in hwdep_read()
	ALSA: hda: fix jack detection with Realtek codecs when in D3
	ALSA: hda/hdmi: fix incorrect locking in hdmi_pcm_close
	nvme-pci: disable the write zeros command for Intel 600P/P3100
	chelsio/chtls: fix socket lock
	chelsio/chtls: correct netdevice for vlan interface
	chelsio/chtls: correct function return and return type
	ibmvnic: save changed mac address to adapter->mac_addr
	net: ftgmac100: Fix Aspeed ast2600 TX hang issue
	net: hdlc: In hdlc_rcv, check to make sure dev is an HDLC device
	net: hdlc_raw_eth: Clear the IFF_TX_SKB_SHARING flag after calling ether_setup
	net: Properly typecast int values to set sk_max_pacing_rate
	net/sched: act_tunnel_key: fix OOB write in case of IPv6 ERSPAN tunnels
	nexthop: Fix performance regression in nexthop deletion
	nfc: Ensure presence of NFC_ATTR_FIRMWARE_NAME attribute in nfc_genl_fw_download()
	r8169: fix operation under forced interrupt threading
	selftests: forwarding: Add missing 'rp_filter' configuration
	selftests: rtnetlink: load fou module for kci_test_encap_fou() test
	tcp: fix to update snd_wl1 in bulk receiver fast path
	icmp: randomize the global rate limiter
	ALSA: hda/realtek - The front Mic on a HP machine doesn't work
	ALSA: hda/realtek - set mic to auto detect on a HP AIO machine
	ALSA: hda/realtek - Add mute Led support for HP Elitebook 845 G7
	ALSA: hda/realtek: Enable audio jacks of ASUS D700SA with ALC887
	cifs: remove bogus debug code
	cifs: Return the error from crypt_message when enc/dec key not found.
	SMB3: Resolve data corruption of TCP server info fields
	KVM: nVMX: Reset the segment cache when stuffing guest segs
	KVM: nVMX: Reload vmcs01 if getting vmcs12's pages fails
	KVM: x86/mmu: Commit zap of remaining invalid pages when recovering lpages
	KVM: SVM: Initialize prev_ga_tag before use
	ima: Don't ignore errors from crypto_shash_update()
	crypto: algif_aead - Do not set MAY_BACKLOG on the async path
	crypto: caam/qi - add fallback for XTS with more than 8B IV
	EDAC/i5100: Fix error handling order in i5100_init_one()
	EDAC/aspeed: Fix handling of platform_get_irq() error
	EDAC/ti: Fix handling of platform_get_irq() error
	perf/x86/intel/ds: Fix x86_pmu_stop warning for large PEBS
	x86/fpu: Allow multiple bits in clearcpuid= parameter
	drivers/perf: xgene_pmu: Fix uninitialized resource struct
	drivers/perf: thunderx2_pmu: Fix memory resource error handling
	sched/fair: Fix wrong cpu selecting from isolated domain
	perf/x86/intel/uncore: Update Ice Lake uncore units
	perf/x86/intel/uncore: Reduce the number of CBOX counters
	x86/nmi: Fix nmi_handle() duration miscalculation
	x86/events/amd/iommu: Fix sizeof mismatch
	crypto: algif_skcipher - EBUSY on aio should be an error
	crypto: mediatek - Fix wrong return value in mtk_desc_ring_alloc()
	crypto: ixp4xx - Fix the size used in a 'dma_free_coherent()' call
	crypto: picoxcell - Fix potential race condition bug
	media: tuner-simple: fix regression in simple_set_radio_freq
	media: Revert "media: exynos4-is: Add missed check for pinctrl_lookup_state()"
	media: ov5640: Correct Bit Div register in clock tree diagram
	media: m5mols: Check function pointer in m5mols_sensor_power
	media: uvcvideo: Set media controller entity functions
	media: uvcvideo: Silence shift-out-of-bounds warning
	media: staging/intel-ipu3: css: Correctly reset some memory
	media: omap3isp: Fix memleak in isp_probe
	media: i2c: ov5640: Remain in power down for DVP mode unless streaming
	media: i2c: ov5640: Separate out mipi configuration from s_power
	media: i2c: ov5640: Enable data pins on poweron for DVP mode
	media: rcar_drif: Fix fwnode reference leak when parsing DT
	media: rcar_drif: Allocate v4l2_async_subdev dynamically
	media: rcar-csi2: Allocate v4l2_async_subdev dynamically
	crypto: omap-sham - fix digcnt register handling with export/import
	hwmon: (pmbus/max34440) Fix status register reads for MAX344{51,60,61}
	cypto: mediatek - fix leaks in mtk_desc_ring_alloc
	media: mx2_emmaprp: Fix memleak in emmaprp_probe
	media: tc358743: initialize variable
	media: tc358743: cleanup tc358743_cec_isr
	media: rcar-vin: Fix a reference count leak.
	media: rockchip/rga: Fix a reference count leak.
	media: platform: fcp: Fix a reference count leak.
	media: camss: Fix a reference count leak.
	media: s5p-mfc: Fix a reference count leak
	media: stm32-dcmi: Fix a reference count leak
	media: ti-vpe: Fix a missing check and reference count leak
	regulator: resolve supply after creating regulator
	pinctrl: bcm: fix kconfig dependency warning when !GPIOLIB
	spi: spi-s3c64xx: swap s3c64xx_spi_set_cs() and s3c64xx_enable_datapath()
	spi: spi-s3c64xx: Check return values
	blk-mq: move cancel of hctx->run_work to the front of blk_exit_queue
	ath10k: provide survey info as accumulated data
	drm/vkms: fix xrgb on compute crc
	Bluetooth: hci_uart: Cancel init work before unregistering
	drm/amd/display: Fix wrong return value in dm_update_plane_state()
	drm: panel: Fix bus format for OrtusTech COM43H4M85ULC panel
	ath6kl: prevent potential array overflow in ath6kl_add_new_sta()
	ath9k: Fix potential out of bounds in ath9k_htc_txcompletion_cb()
	ath10k: Fix the size used in a 'dma_free_coherent()' call in an error handling path
	wcn36xx: Fix reported 802.11n rx_highest rate wcn3660/wcn3680
	ASoC: qcom: lpass-platform: fix memory leak
	ASoC: qcom: lpass-cpu: fix concurrency issue
	brcmfmac: check ndev pointer
	mwifiex: Do not use GFP_KERNEL in atomic context
	staging: rtl8192u: Do not use GFP_KERNEL in atomic context
	drm/gma500: fix error check
	scsi: qla4xxx: Fix an error handling path in 'qla4xxx_get_host_stats()'
	scsi: qla2xxx: Fix wrong return value in qlt_chk_unresolv_exchg()
	scsi: qla2xxx: Fix wrong return value in qla_nvme_register_hba()
	scsi: csiostor: Fix wrong return value in csio_hw_prep_fw()
	backlight: sky81452-backlight: Fix refcount imbalance on error
	staging: emxx_udc: Fix passing of NULL to dma_alloc_coherent()
	VMCI: check return value of get_user_pages_fast() for errors
	mm/error_inject: Fix allow_error_inject function signatures.
	drm: panel: Fix bpc for OrtusTech COM43H4M85ULC panel
	drm/crc-debugfs: Fix memleak in crc_control_write
	binder: Remove bogus warning on failed same-process transaction
	tty: serial: earlycon dependency
	tty: hvcs: Don't NULL tty->driver_data until hvcs_cleanup()
	pty: do tty_flip_buffer_push without port->lock in pty_write
	pwm: lpss: Fix off by one error in base_unit math in pwm_lpss_prepare()
	pwm: lpss: Add range limit check for the base_unit register value
	drivers/virt/fsl_hypervisor: Fix error handling path
	video: fbdev: vga16fb: fix setting of pixclock because a pass-by-value error
	video: fbdev: sis: fix null ptr dereference
	video: fbdev: radeon: Fix memleak in radeonfb_pci_register
	ASoC: fsl: imx-es8328: add missing put_device() call in imx_es8328_probe()
	HID: roccat: add bounds checking in kone_sysfs_write_settings()
	drm/msm: Avoid div-by-zero in dpu_crtc_atomic_check()
	drm/panfrost: Ensure GPU quirks are always initialised
	iomap: Clear page error before beginning a write
	pinctrl: mcp23s08: Fix mcp23x17_regmap initialiser
	pinctrl: mcp23s08: Fix mcp23x17 precious range
	net/mlx5: Don't call timecounter cyc2time directly from 1PPS flow
	scsi: mpt3sas: Fix sync irqs
	net: stmmac: use netif_tx_start|stop_all_queues() function
	cpufreq: armada-37xx: Add missing MODULE_DEVICE_TABLE
	drm: mxsfb: check framebuffer pitch
	coresight: etm4x: Handle unreachable sink in perf mode
	xhci: don't create endpoint debugfs entry before ring buffer is set.
	net: dsa: rtl8366: Check validity of passed VLANs
	net: dsa: rtl8366: Refactor VLAN/PVID init
	net: dsa: rtl8366: Skip PVID setting if not requested
	net: wilc1000: clean up resource in error path of init mon interface
	ASoC: tlv320aic32x4: Fix bdiv clock rate derivation
	net: dsa: rtl8366rb: Support all 4096 VLANs
	spi: omap2-mcspi: Improve performance waiting for CHSTAT
	ath6kl: wmi: prevent a shift wrapping bug in ath6kl_wmi_delete_pstream_cmd()
	dmaengine: dmatest: Check list for emptiness before access its last entry
	misc: mic: scif: Fix error handling path
	ALSA: seq: oss: Avoid mutex lock for a long-time ioctl
	usb: dwc2: Fix parameter type in function pointer prototype
	quota: clear padding in v2r1_mem2diskdqb()
	slimbus: core: check get_addr before removing laddr ida
	slimbus: core: do not enter to clock pause mode in core
	slimbus: qcom-ngd-ctrl: disable ngd in qmi server down callback
	ASoC: fsl_sai: Instantiate snd_soc_dai_driver
	HID: hid-input: fix stylus battery reporting
	nvmem: core: fix possibly memleak when use nvmem_cell_info_to_nvmem_cell()
	nl80211: fix OBSS PD min and max offset validation
	coresight: etm: perf: Fix warning caused by etm_setup_aux failure
	ibmvnic: set up 200GBPS speed
	qtnfmac: fix resource leaks on unsupported iftype error return path
	iio: adc: stm32-adc: fix runtime autosuspend delay when slow polling
	net: enic: Cure the enic api locking trainwreck
	mfd: sm501: Fix leaks in probe()
	iwlwifi: mvm: split a print to avoid a WARNING in ROC
	usb: gadget: f_ncm: fix ncm_bitrate for SuperSpeed and above.
	usb: gadget: u_ether: enable qmult on SuperSpeed Plus as well
	nl80211: fix non-split wiphy information
	usb: dwc2: Fix INTR OUT transfers in DDMA mode.
	scsi: target: tcmu: Fix warning: 'page' may be used uninitialized
	scsi: be2iscsi: Fix a theoretical leak in beiscsi_create_eqs()
	ipmi_si: Fix wrong return value in try_smi_init()
	platform/x86: mlx-platform: Remove PSU EEPROM configuration
	mwifiex: fix double free
	ipvs: clear skb->tstamp in forwarding path
	net: korina: fix kfree of rx/tx descriptor array
	netfilter: nf_log: missing vlan offload tag and proto
	mm/swapfile.c: fix potential memory leak in sys_swapon
	mm/memcg: fix device private memcg accounting
	mm, oom_adj: don't loop through tasks in __set_oom_adj when not necessary
	fs: fix NULL dereference due to data race in prepend_path()
	selftests/ftrace: Change synthetic event name for inter-event-combined test
	i3c: master add i3c_master_attach_boardinfo to preserve boardinfo
	IB/mlx4: Fix starvation in paravirt mux/demux
	IB/mlx4: Adjust delayed work when a dup is observed
	powerpc/pseries: Fix missing of_node_put() in rng_init()
	powerpc/icp-hv: Fix missing of_node_put() in success path
	RDMA/ucma: Fix locking for ctx->events_reported
	RDMA/ucma: Add missing locking around rdma_leave_multicast()
	mtd: lpddr: fix excessive stack usage with clang
	RDMA/hns: Add a check for current state before modifying QP
	RDMA/umem: Fix signature of stub ib_umem_find_best_pgsz()
	powerpc/pseries: explicitly reschedule during drmem_lmb list traversal
	pseries/drmem: don't cache node id in drmem_lmb struct
	RDMA/mlx5: Fix potential race between destroy and CQE poll
	mtd: mtdoops: Don't write panic data twice
	ARM: 9007/1: l2c: fix prefetch bits init in L2X0_AUX_CTRL using DT values
	arc: plat-hsdk: fix kconfig dependency warning when !RESET_CONTROLLER
	ida: Free allocated bitmap in error path
	xfs: limit entries returned when counting fsmap records
	xfs: fix deadlock and streamline xfs_getfsmap performance
	xfs: fix high key handling in the rt allocator's query_range function
	RDMA/umem: Fix ib_umem_find_best_pgsz() for mappings that cross a page boundary
	RDMA/umem: Prevent small pages from being returned by ib_umem_find_best_pgsz()
	RDMA/qedr: Fix qp structure memory leak
	RDMA/qedr: Fix use of uninitialized field
	RDMA/qedr: Fix return code if accept is called on a destroyed qp
	RDMA/qedr: Fix inline size returned for iWARP
	powerpc/book3s64/hash/4k: Support large linear mapping range with 4K
	powerpc/tau: Use appropriate temperature sample interval
	powerpc/tau: Convert from timer to workqueue
	powerpc/tau: Remove duplicated set_thresholds() call
	powerpc/tau: Check processor type before enabling TAU interrupt
	powerpc/tau: Disable TAU between measurements
	powerpc/64s/radix: Fix mm_cpumask trimming race vs kthread_use_mm
	RDMA/cma: Remove dead code for kernel rdmacm multicast
	RDMA/cma: Consolidate the destruction of a cma_multicast in one place
	perf intel-pt: Fix "context_switch event has no tid" error
	RDMA/hns: Set the unsupported wr opcode
	RDMA/mlx5: Disable IB_DEVICE_MEM_MGT_EXTENSIONS if IB_WR_REG_MR can't work
	i40iw: Add support to make destroy QP synchronous
	perf stat: Skip duration_time in setup_system_wide
	RDMA/hns: Fix the wrong value of rnr_retry when querying qp
	RDMA/hns: Fix missing sq_sig_type when querying QP
	mtd: rawnand: vf610: disable clk on error handling path in probe
	mtd: spinand: gigadevice: Only one dummy byte in QUADIO
	mtd: spinand: gigadevice: Add QE Bit
	kdb: Fix pager search for multi-line strings
	overflow: Include header file with SIZE_MAX declaration
	RDMA/ipoib: Set rtnl_link_ops for ipoib interfaces
	powerpc/perf: Exclude pmc5/6 from the irrelevant PMU group constraints
	powerpc/perf/hv-gpci: Fix starting index value
	i3c: master: Fix error return in cdns_i3c_master_probe()
	cpufreq: powernv: Fix frame-size-overflow in powernv_cpufreq_reboot_notifier
	IB/rdmavt: Fix sizeof mismatch
	RDMA/rxe: Fix skb lifetime in rxe_rcv_mcast_pkt()
	maiblox: mediatek: Fix handling of platform_get_irq() error
	selftests/powerpc: Fix eeh-basic.sh exit codes
	f2fs: wait for sysfs kobject removal before freeing f2fs_sb_info
	RDMA/rxe: Handle skb_clone() failure in rxe_recv.c
	mm/page_owner: change split_page_owner to take a count
	lib/crc32.c: fix trivial typo in preprocessor condition
	ramfs: fix nommu mmap with gaps in the page cache
	rapidio: fix error handling path
	rapidio: fix the missed put_device() for rio_mport_add_riodev
	mailbox: avoid timer start from callback
	i2c: rcar: Auto select RESET_CONTROLLER
	clk: meson: g12a: mark fclk_div2 as critical
	PCI: aardvark: Check for errors from pci_bridge_emul_init() call
	PCI: iproc: Set affinity mask on MSI interrupts
	rpmsg: smd: Fix a kobj leak in in qcom_smd_parse_edge()
	PCI/IOV: Mark VFs as not implementing PCI_COMMAND_MEMORY
	vfio/pci: Decouple PCI_COMMAND_MEMORY bit checks from is_virtfn
	clk: qcom: gcc-sdm660: Fix wrong parent_map
	clk: keystone: sci-clk: fix parsing assigned-clock data during probe
	pwm: img: Fix null pointer access in probe
	clk: rockchip: Initialize hw to error to avoid undefined behavior
	clk: mediatek: add UART0 clock support
	module: statically initialize init section freeing data
	clk: at91: clk-main: update key before writing AT91_CKGR_MOR
	clk: bcm2835: add missing release if devm_clk_hw_register fails
	watchdog: Fix memleak in watchdog_cdev_register
	watchdog: Use put_device on error
	watchdog: sp5100: Fix definition of EFCH_PM_DECODEEN3
	svcrdma: fix bounce buffers for unaligned offsets and multiple pages
	ext4: limit entries returned when counting fsmap records
	vfio/pci: Clear token on bypass registration failure
	vfio iommu type1: Fix memory leak in vfio_iommu_type1_pin_pages
	clk: imx8mq: Fix usdhc parents order
	SUNRPC: fix copying of multiple pages in gss_read_proxy_verf()
	Input: imx6ul_tsc - clean up some errors in imx6ul_tsc_resume()
	Input: stmfts - fix a & vs && typo
	Input: ep93xx_keypad - fix handling of platform_get_irq() error
	Input: omap4-keypad - fix handling of platform_get_irq() error
	Input: twl4030_keypad - fix handling of platform_get_irq() error
	Input: sun4i-ps2 - fix handling of platform_get_irq() error
	KVM: x86: emulating RDPID failure shall return #UD rather than #GP
	scsi: bfa: Fix error return in bfad_pci_init()
	netfilter: conntrack: connection timeout after re-register
	netfilter: ebtables: Fixes dropping of small packets in bridge nat
	netfilter: nf_fwd_netdev: clear timestamp in forwarding path
	arm64: dts: meson: vim3: correct led polarity
	ARM: dts: imx6sl: fix rng node
	ARM: at91: pm: of_node_put() after its usage
	ARM: s3c24xx: fix mmc gpio lookup tables
	ARM: dts: sun8i: r40: bananapi-m2-ultra: Fix dcdc1 regulator
	arm64: dts: allwinner: h5: remove Mali GPU PMU module
	memory: omap-gpmc: Fix a couple off by ones
	memory: omap-gpmc: Fix build error without CONFIG_OF
	memory: fsl-corenet-cf: Fix handling of platform_get_irq() error
	arm64: dts: imx8mq: Add missing interrupts to GPC
	arm64: dts: qcom: msm8916: Remove one more thermal trip point unit name
	arm64: dts: qcom: pm8916: Remove invalid reg size from wcd_codec
	arm64: dts: qcom: msm8916: Fix MDP/DSI interrupts
	arm64: dts: renesas: r8a77990: Fix MSIOF1 DMA channels
	arm64: dts: renesas: r8a774c0: Fix MSIOF1 DMA channels
	arm64: dts: actions: limit address range for pinctrl node
	ARM: dts: owl-s500: Fix incorrect PPI interrupt specifiers
	soc: fsl: qbman: Fix return value on success
	ARM: OMAP2+: Restore MPU power domain if cpu_cluster_pm_enter() fails
	arm64: dts: zynqmp: Remove additional compatible string for i2c IPs
	ARM: dts: meson8: remove two invalid interrupt lines from the GPU node
	lightnvm: fix out-of-bounds write to array devices->info[]
	powerpc/powernv/dump: Fix race while processing OPAL dump
	powerpc/pseries: Avoid using addr_to_pfn in real mode
	nvmet: fix uninitialized work for zero kato
	NTB: hw: amd: fix an issue about leak system resources
	sched/features: Fix !CONFIG_JUMP_LABEL case
	perf: correct SNOOPX field offset
	i2c: core: Restore acpi_walk_dep_device_list() getting called after registering the ACPI i2c devs
	md/bitmap: fix memory leak of temporary bitmap
	block: ratelimit handle_bad_sector() message
	crypto: ccp - fix error handling
	x86/asm: Replace __force_order with a memory clobber
	x86/mce: Add Skylake quirk for patrol scrub reported errors
	media: firewire: fix memory leak
	media: ati_remote: sanity check for both endpoints
	media: st-delta: Fix reference count leak in delta_run_work
	media: sti: Fix reference count leaks
	media: exynos4-is: Fix several reference count leaks due to pm_runtime_get_sync
	media: exynos4-is: Fix a reference count leak due to pm_runtime_get_sync
	media: exynos4-is: Fix a reference count leak
	media: vsp1: Fix runtime PM imbalance on error
	media: platform: s3c-camif: Fix runtime PM imbalance on error
	media: platform: sti: hva: Fix runtime PM imbalance on error
	media: bdisp: Fix runtime PM imbalance on error
	media: media/pci: prevent memory leak in bttv_probe
	x86/mce: Make mce_rdmsrl() panic on an inaccessible MSR
	media: uvcvideo: Ensure all probed info is returned to v4l2
	mmc: sdio: Check for CISTPL_VERS_1 buffer size
	media: saa7134: avoid a shift overflow
	media: venus: fixes for list corruption
	fs: dlm: fix configfs memory leak
	media: venus: core: Fix runtime PM imbalance in venus_probe
	ntfs: add check for mft record size in superblock
	ip_gre: set dev->hard_header_len and dev->needed_headroom properly
	mac80211: handle lack of sband->bitrates in rates
	PM: hibernate: remove the bogus call to get_gendisk() in software_resume()
	scsi: mvumi: Fix error return in mvumi_io_attach()
	scsi: target: core: Add CONTROL field for trace events
	mic: vop: copy data to kernel space then write to io memory
	misc: vop: add round_up(x,4) for vring_size to avoid kernel panic
	usb: dwc3: Add splitdisable quirk for Hisilicon Kirin Soc
	usb: gadget: function: printer: fix use-after-free in __lock_acquire
	udf: Limit sparing table size
	udf: Avoid accessing uninitialized data on failed inode read
	rtw88: increse the size of rx buffer size
	USB: cdc-acm: handle broken union descriptors
	usb: dwc3: simple: add support for Hikey 970
	can: flexcan: flexcan_chip_stop(): add error handling and propagate error value
	ath9k: hif_usb: fix race condition between usb_get_urb() and usb_kill_anchored_urbs()
	drm/panfrost: add amlogic reset quirk callback
	bpf: Limit caller's stack depth 256 for subprogs with tailcalls
	misc: rtsx: Fix memory leak in rtsx_pci_probe
	reiserfs: only call unlock_new_inode() if I_NEW
	opp: Prevent memory leak in dev_pm_opp_attach_genpd()
	xfs: make sure the rt allocator doesn't run off the end
	usb: ohci: Default to per-port over-current protection
	Bluetooth: Only mark socket zapped after unlocking
	drm/msm/a6xx: fix a potential overflow issue
	iomap: fix WARN_ON_ONCE() from unprivileged users
	scsi: ibmvfc: Fix error return in ibmvfc_probe()
	scsi: qla2xxx: Warn if done() or free() are called on an already freed srb
	selftests/bpf: Fix test_sysctl_loop{1, 2} failure due to clang change
	brcmsmac: fix memory leak in wlc_phy_attach_lcnphy
	rtl8xxxu: prevent potential memory leak
	Fix use after free in get_capset_info callback.
	HID: ite: Add USB id match for Acer One S1003 keyboard dock
	scsi: qedf: Return SUCCESS if stale rport is encountered
	scsi: qedi: Protect active command list to avoid list corruption
	scsi: qedi: Fix list_del corruption while removing active I/O
	fbmem: add margin check to fb_check_caps()
	tty: ipwireless: fix error handling
	Bluetooth: btusb: Fix memleak in btusb_mtk_submit_wmt_recv_urb
	ipvs: Fix uninit-value in do_ip_vs_set_ctl()
	reiserfs: Fix memory leak in reiserfs_parse_options()
	mwifiex: don't call del_timer_sync() on uninitialized timer
	ALSA: hda/ca0132 - Add AE-7 microphone selection commands.
	ALSA: hda/ca0132 - Add new quirk ID for SoundBlaster AE-7.
	scsi: smartpqi: Avoid crashing kernel for controller issues
	brcm80211: fix possible memleak in brcmf_proto_msgbuf_attach
	usb: core: Solve race condition in anchor cleanup functions
	scsi: ufs: ufs-qcom: Fix race conditions caused by ufs_qcom_testbus_config()
	dmaengine: dw: Add DMA-channels mask cell support
	dmaengine: dw: Activate FIFO-mode for memory peripherals only
	ath10k: check idx validity in __ath10k_htt_rx_ring_fill_n()
	net: korina: cast KSEG0 address to pointer in kfree
	s390/qeth: don't let HW override the configured port role
	tty: serial: lpuart: fix lpuart32_write usage
	tty: serial: fsl_lpuart: fix lpuart32_poll_get_char
	usb: cdc-acm: add quirk to blacklist ETAS ES58X devices
	USB: cdc-wdm: Make wdm_flush() interruptible and add wdm_fsync().
	usb: cdns3: gadget: free interrupt after gadget has deleted
	eeprom: at25: set minimum read/write access stride to 1
	usb: gadget: f_ncm: allow using NCM in SuperSpeed Plus gadgets.
	Linux 5.4.73

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I3245a6f313462f8b4ea408c7657a1027ab95b78c
Committer: Greg Kroah-Hartman, 2020-10-29 11:38:08 +01:00
Commit:    7ee5d73d3e
Diffstat:  422 changed files, 3275 additions, 1706 deletions

@ -567,7 +567,7 @@
loops can be debugged more effectively on production
systems.
clearcpuid=BITNUM [X86]
clearcpuid=BITNUM[,BITNUM...] [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
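For illustration of the extended clearcpuid= syntax, a comma-separated list of bit numbers can now be passed on the kernel command line; the numbers below are hypothetical placeholders, and real values must be taken from arch/x86/include/asm/cpufeatures.h:
	clearcpuid=154,155,200
Passing a single BITNUM continues to work as before.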

@ -1013,12 +1013,14 @@ icmp_ratelimit - INTEGER
icmp_msgs_per_sec - INTEGER
Limit maximal number of ICMP packets sent per second from this host.
Only messages whose type matches icmp_ratemask (see below) are
controlled by this limit.
controlled by this limit. For security reasons, the precise count
of messages per second is randomized.
Default: 1000
icmp_msgs_burst - INTEGER
icmp_msgs_per_sec controls number of ICMP packets sent per second,
while icmp_msgs_burst controls the burst size of these packets.
For security reasons, the precise burst size is randomized.
Default: 50
icmp_ratemask - INTEGER
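As a usage sketch for icmp_msgs_per_sec and icmp_msgs_burst described above: both knobs are exposed under net.ipv4 and can be tuned at runtime with sysctl; the values shown are simply the documented defaults, not a recommendation:
	sysctl -w net.ipv4.icmp_msgs_per_sec=1000
	sysctl -w net.ipv4.icmp_msgs_burst=50
The randomization described above is applied on top of whatever limits are configured this way.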

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 72
SUBLEVEL = 73
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@ -8,5 +8,6 @@ menuconfig ARC_SOC_HSDK
select ARC_HAS_ACCL_REGS
select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK
select RESET_CONTROLLER
select RESET_HSDK
select HAVE_PCI

@ -936,8 +936,10 @@
};
rngb: rngb@21b4000 {
compatible = "fsl,imx6sl-rngb", "fsl,imx25-rngb";
reg = <0x021b4000 0x4000>;
interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_DUMMY>;
};
weim: weim@21b8000 {

@ -230,8 +230,6 @@
<GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,

@ -84,21 +84,21 @@
global_timer: timer@b0020200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xb0020200 0x100>;
interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
twd_timer: timer@b0020600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xb0020600 0x20>;
interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
twd_wdt: wdt@b0020620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0xb0020620 0xe0>;
interrupts = <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};

@ -223,16 +223,16 @@
};
&reg_dc1sw {
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-name = "vcc-gmac-phy";
};
&reg_dcdc1 {
regulator-always-on;
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-name = "vcc-3v0";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-name = "vcc-3v3";
};
&reg_dcdc2 {

@ -777,6 +777,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
soc_pm.data.pmc = of_iomap(pmc_np, 0);
of_node_put(pmc_np);
if (!soc_pm.data.pmc) {
pr_err("AT91: PM not supported, PMC not found\n");
return;

@ -174,8 +174,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
*/
if (mpuss_can_lose_context) {
error = cpu_cluster_pm_enter();
if (error)
if (error) {
omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
goto cpu_cluster_pm_out;
}
}
}

@ -143,7 +143,7 @@ static struct gpiod_lookup_table at2440evb_mci_gpio_table = {
.dev_id = "s3c2410-sdi",
.table = {
/* Card detect S3C2410_GPG(10) */
GPIO_LOOKUP("GPG", 10, "cd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOG", 10, "cd", GPIO_ACTIVE_LOW),
{ },
},
};

@ -468,9 +468,9 @@ static struct gpiod_lookup_table h1940_mmc_gpio_table = {
.dev_id = "s3c2410-sdi",
.table = {
/* Card detect S3C2410_GPF(5) */
GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
/* Write protect S3C2410_GPH(8) */
GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
{ },
},
};

@ -244,9 +244,9 @@ static struct gpiod_lookup_table mini2440_mmc_gpio_table = {
.dev_id = "s3c2410-sdi",
.table = {
/* Card detect S3C2410_GPG(8) */
GPIO_LOOKUP("GPG", 8, "cd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOG", 8, "cd", GPIO_ACTIVE_LOW),
/* Write protect S3C2410_GPH(8) */
GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_HIGH),
{ },
},
};

@ -359,9 +359,9 @@ static struct gpiod_lookup_table n30_mci_gpio_table = {
.dev_id = "s3c2410-sdi",
.table = {
/* Card detect S3C2410_GPF(1) */
GPIO_LOOKUP("GPF", 1, "cd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOF", 1, "cd", GPIO_ACTIVE_LOW),
/* Write protect S3C2410_GPG(10) */
GPIO_LOOKUP("GPG", 10, "wp", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOG", 10, "wp", GPIO_ACTIVE_LOW),
{ },
},
};

@ -567,9 +567,9 @@ static struct gpiod_lookup_table rx1950_mmc_gpio_table = {
.dev_id = "s3c2410-sdi",
.table = {
/* Card detect S3C2410_GPF(5) */
GPIO_LOOKUP("GPF", 5, "cd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOF", 5, "cd", GPIO_ACTIVE_LOW),
/* Write protect S3C2410_GPH(8) */
GPIO_LOOKUP("GPH", 8, "wp", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("GPIOH", 8, "wp", GPIO_ACTIVE_LOW),
{ },
},
};

@ -1249,20 +1249,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
ret = of_property_read_u32(np, "prefetch-data", &val);
if (ret == 0) {
if (val)
if (val) {
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
else
*aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
} else {
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
}
*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-data property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-instr", &val);
if (ret == 0) {
if (val)
if (val) {
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
else
*aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else {
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
}
*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-instr property value is missing\n");
}

@ -231,7 +231,7 @@
pinctrl: pinctrl@e01b0000 {
compatible = "actions,s700-pinctrl";
reg = <0x0 0xe01b0000 0x0 0x1000>;
reg = <0x0 0xe01b0000 0x0 0x100>;
clocks = <&cmu CLK_GPIO>;
gpio-controller;
gpio-ranges = <&pinctrl 0 0 136>;

@ -155,8 +155,7 @@
<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "gp",
"gpmmu",
"pp",
@ -167,8 +166,7 @@
"pp2",
"ppmmu2",
"pp3",
"ppmmu3",
"pmu";
"ppmmu3";
clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
clock-names = "bus", "core";
resets = <&ccu RST_BUS_GPU>;

@ -43,13 +43,13 @@
white {
label = "vim3:white:sys";
gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
red {
label = "vim3:red";
gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
};
};

@ -516,6 +516,7 @@
gpc: gpc@303a0000 {
compatible = "fsl,imx8mq-gpc";
reg = <0x303a0000 0x10000>;
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
interrupt-controller;
#interrupt-cells = <3>;

@ -175,14 +175,14 @@
};
thermal-zones {
cpu0_1-thermal {
cpu0-1-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
trips {
cpu0_1_alert0: trip-point@0 {
cpu0_1_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@ -205,7 +205,7 @@
};
};
cpu2_3-thermal {
cpu2-3-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
@ -934,7 +934,7 @@
reg-names = "mdp_phys";
interrupt-parent = <&mdss>;
interrupts = <0 0>;
interrupts = <0>;
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
@ -966,7 +966,7 @@
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <4 0>;
interrupts = <4>;
assigned-clocks = <&gcc BYTE0_CLK_SRC>,
<&gcc PCLK0_CLK_SRC>;

@ -113,7 +113,7 @@
wcd_codec: codec@f000 {
compatible = "qcom,pm8916-wcd-analog-codec";
reg = <0xf000 0x200>;
reg = <0xf000>;
reg-names = "pmic-codec-core";
clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
clock-names = "mclk";

@ -1212,9 +1212,8 @@
reg = <0 0xe6ea0000 0 0x0064>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 210>;
dmas = <&dmac1 0x43>, <&dmac1 0x42>,
<&dmac2 0x43>, <&dmac2 0x42>;
dma-names = "tx", "rx", "tx", "rx";
dmas = <&dmac0 0x43>, <&dmac0 0x42>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 210>;
#address-cells = <1>;

@ -1168,9 +1168,8 @@
reg = <0 0xe6ea0000 0 0x0064>;
interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 210>;
dmas = <&dmac1 0x43>, <&dmac1 0x42>,
<&dmac2 0x43>, <&dmac2 0x42>;
dma-names = "tx", "rx", "tx", "rx";
dmas = <&dmac0 0x43>, <&dmac0 0x42>;
dma-names = "tx", "rx";
power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
resets = <&cpg 210>;
#address-cells = <1>;

@ -419,7 +419,7 @@
};
i2c0: i2c@ff020000 {
compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
compatible = "cdns,i2c-r1p14";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@ -429,7 +429,7 @@
};
i2c1: i2c@ff030000 {
compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
compatible = "cdns,i2c-r1p14";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;

@ -13,20 +13,19 @@
*/
#define MAX_EA_BITS_PER_CONTEXT 46
#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
/*
* Our page table limit us to 64TB. Hence for the kernel mapping,
* each MAP area is limited to 16 TB.
* The four map areas are: linear mapping, vmap, IO and vmemmap
* Our page table limit us to 64TB. For 64TB physical memory, we only need 64GB
* of vmemmap space. To better support sparse memory layout, we use 61TB
* linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmememmap.
*/
#define REGION_SHIFT (40)
#define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
/*
* Define the address range of the kernel non-linear virtual area
* 16TB
* Define the address range of the kernel non-linear virtual area (61TB)
*/
#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)

@ -8,14 +8,13 @@
#ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H
#include <linux/sched.h>
struct drmem_lmb {
u64 base_addr;
u32 drc_index;
u32 aa_index;
u32 flags;
#ifdef CONFIG_MEMORY_HOTPLUG
int nid;
#endif
};
struct drmem_lmb_info {
@ -26,8 +25,22 @@ struct drmem_lmb_info {
extern struct drmem_lmb_info *drmem_info;
static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
const struct drmem_lmb *start)
{
/*
* DLPAR code paths can take several milliseconds per element
* when interacting with firmware. Ensure that we don't
* unfairly monopolize the CPU.
*/
if (((++lmb - start) % 16) == 0)
cond_resched();
return lmb;
}
#define for_each_drmem_lmb_in_range(lmb, start, end) \
for ((lmb) = (start); (lmb) < (end); (lmb)++)
for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
@ -103,22 +116,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
lmb->aa_index = 0xffffffff;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static inline void lmb_set_nid(struct drmem_lmb *lmb)
{
lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
}
static inline void lmb_clear_nid(struct drmem_lmb *lmb)
{
lmb->nid = -1;
}
#else
static inline void lmb_set_nid(struct drmem_lmb *lmb)
{
}
static inline void lmb_clear_nid(struct drmem_lmb *lmb)
{
}
#endif
#endif /* _ASM_POWERPC_LMB_H */

@ -796,7 +796,7 @@
#define THRM1_TIN (1 << 31)
#define THRM1_TIV (1 << 30)
#define THRM1_THRES(x) ((x&0x7f)<<23)
#define THRM3_SITV(x) ((x&0x3fff)<<1)
#define THRM3_SITV(x) ((x & 0x1fff) << 1)
#define THRM1_TID (1<<2)
#define THRM1_TIE (1<<1)
#define THRM1_V (1<<0)

@ -67,19 +67,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
return false;
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
static inline void mm_reset_thread_local(struct mm_struct *mm)
{
WARN_ON(atomic_read(&mm->context.copros) > 0);
/*
* It's possible for mm_access to take a reference on mm_users to
* access the remote mm from another thread, but it's not allowed
* to set mm_cpumask, so mm_users may be > 1 here.
*/
WARN_ON(current->mm != mm);
atomic_set(&mm->context.active_cpus, 1);
cpumask_clear(mm_cpumask(mm));
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{

@ -13,13 +13,14 @@
*/
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <asm/io.h>
#include <asm/reg.h>
@ -39,9 +40,7 @@ static struct tau_temp
unsigned char grew;
} tau[NR_CPUS];
struct timer_list tau_timer;
#undef DEBUG
static bool tau_int_enable;
/* TODO: put these in a /proc interface, with some sanity checks, and maybe
* dynamic adjustment to minimize # of interrupts */
@ -50,72 +49,49 @@ struct timer_list tau_timer;
#define step_size 2 /* step size when temp goes out of range */
#define window_expand 1 /* expand the window by this much */
/* configurable values for shrinking the window */
#define shrink_timer 2*HZ /* period between shrinking the window */
#define shrink_timer 2000 /* period between shrinking the window */
#define min_window 2 /* minimum window size, degrees C */
static void set_thresholds(unsigned long cpu)
{
#ifdef CONFIG_TAU_INT
/*
* setup THRM1,
* threshold, valid bit, enable interrupts, interrupt when below threshold
*/
mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
/* setup THRM2,
* threshold, valid bit, enable interrupts, interrupt when above threshold
*/
mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
#else
/* same thing but don't enable interrupts */
mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
#endif
/* setup THRM1, threshold, valid bit, interrupt when below threshold */
mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
/* setup THRM2, threshold, valid bit, interrupt when above threshold */
mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
}
static void TAUupdate(int cpu)
{
unsigned thrm;
#ifdef DEBUG
printk("TAUupdate ");
#endif
u32 thrm;
u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
/* if both thresholds are crossed, the step_sizes cancel out
* and the window winds up getting expanded twice. */
if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
if(thrm & THRM1_TIN){ /* crossed low threshold */
if (tau[cpu].low >= step_size){
tau[cpu].low -= step_size;
tau[cpu].high -= (step_size - window_expand);
}
tau[cpu].grew = 1;
#ifdef DEBUG
printk("low threshold crossed ");
#endif
thrm = mfspr(SPRN_THRM1);
if ((thrm & bits) == bits) {
mtspr(SPRN_THRM1, 0);
if (tau[cpu].low >= step_size) {
tau[cpu].low -= step_size;
tau[cpu].high -= (step_size - window_expand);
}
tau[cpu].grew = 1;
pr_debug("%s: low threshold crossed\n", __func__);
}
if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
if(thrm & THRM1_TIN){ /* crossed high threshold */
if (tau[cpu].high <= 127-step_size){
tau[cpu].low += (step_size - window_expand);
tau[cpu].high += step_size;
}
tau[cpu].grew = 1;
#ifdef DEBUG
printk("high threshold crossed ");
#endif
thrm = mfspr(SPRN_THRM2);
if ((thrm & bits) == bits) {
mtspr(SPRN_THRM2, 0);
if (tau[cpu].high <= 127 - step_size) {
tau[cpu].low += (step_size - window_expand);
tau[cpu].high += step_size;
}
tau[cpu].grew = 1;
pr_debug("%s: high threshold crossed\n", __func__);
}
#ifdef DEBUG
printk("grew = %d\n", tau[cpu].grew);
#endif
#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
set_thresholds(cpu);
#endif
}
#ifdef CONFIG_TAU_INT
@ -140,17 +116,16 @@ void TAUException(struct pt_regs * regs)
static void tau_timeout(void * info)
{
int cpu;
unsigned long flags;
int size;
int shrink;
/* disabling interrupts *should* be okay */
local_irq_save(flags);
cpu = smp_processor_id();
#ifndef CONFIG_TAU_INT
TAUupdate(cpu);
#endif
if (!tau_int_enable)
TAUupdate(cpu);
/* Stop thermal sensor comparisons and interrupts */
mtspr(SPRN_THRM3, 0);
size = tau[cpu].high - tau[cpu].low;
if (size > min_window && ! tau[cpu].grew) {
@ -173,32 +148,26 @@ static void tau_timeout(void * info)
set_thresholds(cpu);
/*
* Do the enable every time, since otherwise a bunch of (relatively)
* complex sleep code needs to be added. One mtspr every time
* tau_timeout is called is probably not a big deal.
*
* Enable thermal sensor and set up sample interval timer
* need 20 us to do the compare.. until a nice 'cpu_speed' function
* call is implemented, just assume a 500 mhz clock. It doesn't really
* matter if we take too long for a compare since it's all interrupt
* driven anyway.
*
* use a extra long time.. (60 us @ 500 mhz)
/* Restart thermal sensor comparisons and interrupts.
* The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
* recommends that "the maximum value be set in THRM3 under all
* conditions."
*/
mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
local_irq_restore(flags);
mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
}
static void tau_timeout_smp(struct timer_list *unused)
static struct workqueue_struct *tau_workq;
static void tau_work_func(struct work_struct *work)
{
/* schedule ourselves to be run again */
mod_timer(&tau_timer, jiffies + shrink_timer) ;
msleep(shrink_timer);
on_each_cpu(tau_timeout, NULL, 0);
/* schedule ourselves to be run again */
queue_work(tau_workq, work);
}
DECLARE_WORK(tau_work, tau_work_func);
/*
* setup the TAU
*
@ -231,21 +200,19 @@ static int __init TAU_init(void)
return 1;
}
tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
!strcmp(cur_cpu_spec->platform, "ppc750");
/* first, set up the window shrinking timer */
timer_setup(&tau_timer, tau_timeout_smp, 0);
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
if (!tau_workq)
return -ENOMEM;
on_each_cpu(TAU_init_smp, NULL, 0);
printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
printk("using interrupts, ");
#else
printk("using timers, ");
#endif
printk("shrink_timer: %d jiffies\n", shrink_timer);
queue_work(tau_workq, &tau_work);
pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
tau_initialized = 1;
return 0;

@ -639,19 +639,29 @@ static void do_exit_flush_lazy_tlb(void *arg)
struct mm_struct *mm = arg;
unsigned long pid = mm->context.id;
/*
* A kthread could have done a mmget_not_zero() after the flushing CPU
* checked mm_is_singlethreaded, and be in the process of
* kthread_use_mm when interrupted here. In that case, current->mm will
* be set to mm, because kthread_use_mm() setting ->mm and switching to
* the mm is done with interrupts off.
*/
if (current->mm == mm)
return; /* Local CPU */
goto out_flush;
if (current->active_mm == mm) {
/*
* Must be a kernel thread because sender is single-threaded.
*/
BUG_ON(current->mm);
WARN_ON_ONCE(current->mm != NULL);
/* Is a kernel thread and is using mm as the lazy tlb */
mmgrab(&init_mm);
switch_mm(mm, &init_mm, current);
current->active_mm = &init_mm;
switch_mm_irqs_off(mm, &init_mm, current);
mmdrop(mm);
}
atomic_dec(&mm->context.active_cpus);
cpumask_clear_cpu(smp_processor_id(), mm_cpumask(mm));
out_flush:
_tlbiel_pid(pid, RIC_FLUSH_ALL);
}
@ -666,7 +676,6 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
*/
smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
(void *)mm, 1);
mm_reset_thread_local(mm);
}
void radix__flush_tlb_mm(struct mm_struct *mm)

@ -362,10 +362,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
if (!drmem_info->lmbs)
return;
for_each_drmem_lmb(lmb) {
for_each_drmem_lmb(lmb)
read_drconf_v1_cell(lmb, &prop);
lmb_set_nid(lmb);
}
}
static void __init init_drmem_v2_lmbs(const __be32 *prop)
@ -410,8 +408,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
lmb->aa_index = dr_cell.aa_index;
lmb->flags = dr_cell.flags;
lmb_set_nid(lmb);
}
}
}

@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
#define REQUEST_NAME system_performance_capabilities
#define REQUEST_NUM 0x40
#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__field(0, 1, perf_collect_privileged)
__field(0x1, 1, capability_mask)
@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
#define REQUEST_NAME system_hypervisor_times
#define REQUEST_NUM 0xF0
#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
__count(0x8, 8, time_spent_processing_virtual_processor_timers)
@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
#define REQUEST_NAME system_tlbie_count_and_time
#define REQUEST_NUM 0xF4
#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, tlbie_instructions_issued)
/*

@ -269,6 +269,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
mask |= CNST_PMC_MASK(pmc);
value |= CNST_PMC_VAL(pmc);
/*
* PMC5 and PMC6 are used to count cycles and instructions and
* they do not support most of the constraint bits. Add a check
* to exclude PMC5/6 from most of the constraints except for
* EBB/BHRB.
*/
if (pmc >= 5)
goto ebb_bhrb;
}
if (pmc <= 4) {
@ -335,6 +344,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
}
ebb_bhrb:
if (!pmc && ebb)
/* EBB events must specify the PMC */
return -1;

@ -219,12 +219,11 @@ config TAU
temperature within 2-4 degrees Celsius. This option shows the current
on-die temperature in /proc/cpuinfo if the cpu supports it.
Unfortunately, on some chip revisions, this sensor is very inaccurate
and in many cases, does not work at all, so don't assume the cpu
temp is actually what /proc/cpuinfo says it is.
Unfortunately, this sensor is very inaccurate when uncalibrated, so
don't assume the cpu temp is actually what /proc/cpuinfo says it is.
config TAU_INT
bool "Interrupt driven TAU driver (DANGEROUS)"
bool "Interrupt driven TAU driver (EXPERIMENTAL)"
depends on TAU
---help---
The TAU supports an interrupt driven mode which causes an interrupt
@ -232,12 +231,7 @@ config TAU_INT
to get notified the temp has exceeded a range. With this option off,
a timer is used to re-check the temperature periodically.
However, on some cpus it appears that the TAU interrupt hardware
is buggy and can cause a situation which would lead unexplained hard
lockups.
Unless you are extending the TAU driver, or enjoy kernel/hardware
debugging, leave this option off.
If in doubt, say N here.
config TAU_AVERAGE
bool "Average high and low temp"

@ -318,15 +318,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
return count;
}
static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
uint32_t type)
static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
{
struct dump_obj *dump;
int rc;
dump = kzalloc(sizeof(*dump), GFP_KERNEL);
if (!dump)
return NULL;
return;
dump->kobj.kset = dump_kset;
@ -346,21 +345,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
if (rc) {
kobject_put(&dump->kobj);
return NULL;
return;
}
/*
* As soon as the sysfs file for this dump is created/activated there is
* a chance the opal_errd daemon (or any userspace) might read and
* acknowledge the dump before kobject_uevent() is called. If that
* happens then there is a potential race between
* dump_ack_store->kobject_put() and kobject_uevent() which leads to a
* use-after-free of a kernfs object resulting in a kernel crash.
*
* To avoid that, we need to take a reference on behalf of the bin file,
* so that our reference remains valid while we call kobject_uevent().
* We then drop our reference before exiting the function, leaving the
* bin file to drop the last reference (if it hasn't already).
*/
/* Take a reference for the bin file */
kobject_get(&dump->kobj);
rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
if (rc) {
if (rc == 0) {
kobject_uevent(&dump->kobj, KOBJ_ADD);
pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
__func__, dump->id, dump->size);
} else {
/* Drop reference count taken for bin file */
kobject_put(&dump->kobj);
return NULL;
}
pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
__func__, dump->id, dump->size);
kobject_uevent(&dump->kobj, KOBJ_ADD);
return dump;
/* Drop our reference */
kobject_put(&dump->kobj);
return;
}
static irqreturn_t process_dump(int irq, void *data)

@ -376,25 +376,32 @@ static int dlpar_add_lmb(struct drmem_lmb *);
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
struct memory_block *mem_block;
unsigned long block_sz;
int rc;
if (!lmb_is_removable(lmb))
return -EINVAL;
mem_block = lmb_to_memblock(lmb);
if (mem_block == NULL)
return -EINVAL;
rc = dlpar_offline_lmb(lmb);
if (rc)
if (rc) {
put_device(&mem_block->dev);
return rc;
}
block_sz = pseries_memory_block_size();
__remove_memory(lmb->nid, lmb->base_addr, block_sz);
__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
put_device(&mem_block->dev);
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
lmb_clear_nid(lmb);
lmb->flags &= ~DRCONF_MEM_ASSIGNED;
return 0;
@ -651,7 +658,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
unsigned long block_sz;
int rc;
int nid, rc;
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
@ -662,11 +669,13 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
return rc;
}
lmb_set_nid(lmb);
block_sz = memory_block_size_bytes();
/* Find the node id for this address. */
nid = memory_add_physaddr_to_nid(lmb->base_addr);
/* Add the memory */
rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
rc = __add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;
@ -674,9 +683,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = dlpar_online_lmb(lmb);
if (rc) {
__remove_memory(lmb->nid, lmb->base_addr, block_sz);
__remove_memory(nid, lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
lmb_clear_nid(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
}

@ -494,18 +494,55 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
return 0; /* need to perform reset */
}
static int mce_handle_err_realmode(int disposition, u8 error_type)
{
#ifdef CONFIG_PPC_BOOK3S_64
if (disposition == RTAS_DISP_NOT_RECOVERED) {
switch (error_type) {
case MC_ERROR_TYPE_SLB:
case MC_ERROR_TYPE_ERAT:
/*
* Store the old slb content in paca before flushing.
* Print this when we go to virtual mode.
* There are chances that we may hit MCE again if there
* is a parity error on the SLB entry we trying to read
* for saving. Hence limit the slb saving to single
* level of recursion.
*/
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
flush_and_reload_slb();
disposition = RTAS_DISP_FULLY_RECOVERED;
break;
default:
break;
}
} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
/* Platform corrected itself but could be degraded */
pr_err("MCE: limited recovery, system may be degraded\n");
disposition = RTAS_DISP_FULLY_RECOVERED;
}
#endif
return disposition;
}
static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
static int mce_handle_err_virtmode(struct pt_regs *regs,
struct rtas_error_log *errp,
struct pseries_mc_errorlog *mce_log,
int disposition)
{
struct mce_error_info mce_err = { 0 };
unsigned long eaddr = 0, paddr = 0;
struct pseries_errorlog *pseries_log;
struct pseries_mc_errorlog *mce_log;
int disposition = rtas_error_disposition(errp);
int initiator = rtas_error_initiator(errp);
int severity = rtas_error_severity(errp);
unsigned long eaddr = 0, paddr = 0;
u8 error_type, err_sub_type;
if (!mce_log)
goto out;
error_type = mce_log->error_type;
err_sub_type = rtas_mc_error_sub_type(mce_log);
if (initiator == RTAS_INITIATOR_UNKNOWN)
mce_err.initiator = MCE_INITIATOR_UNKNOWN;
else if (initiator == RTAS_INITIATOR_CPU)
@ -544,18 +581,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
mce_err.error_class = MCE_ECLASS_UNKNOWN;
if (!rtas_error_extended(errp))
goto out;
pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
if (pseries_log == NULL)
goto out;
mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
error_type = mce_log->error_type;
err_sub_type = rtas_mc_error_sub_type(mce_log);
switch (mce_log->error_type) {
switch (error_type) {
case MC_ERROR_TYPE_UE:
mce_err.error_type = MCE_ERROR_TYPE_UE;
switch (err_sub_type) {
@ -652,37 +678,31 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
break;
}
#ifdef CONFIG_PPC_BOOK3S_64
if (disposition == RTAS_DISP_NOT_RECOVERED) {
switch (error_type) {
case MC_ERROR_TYPE_SLB:
case MC_ERROR_TYPE_ERAT:
/*
* Store the old slb content in paca before flushing.
* Print this when we go to virtual mode.
* There are chances that we may hit MCE again if there
* is a parity error on the SLB entry we trying to read
* for saving. Hence limit the slb saving to single
* level of recursion.
*/
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
flush_and_reload_slb();
disposition = RTAS_DISP_FULLY_RECOVERED;
break;
default:
break;
}
} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
/* Platform corrected itself but could be degraded */
printk(KERN_ERR "MCE: limited recovery, system may "
"be degraded\n");
disposition = RTAS_DISP_FULLY_RECOVERED;
}
#endif
out:
save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
&mce_err, regs->nip, eaddr, paddr);
return disposition;
}
static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
{
struct pseries_errorlog *pseries_log;
struct pseries_mc_errorlog *mce_log = NULL;
int disposition = rtas_error_disposition(errp);
u8 error_type;
if (!rtas_error_extended(errp))
goto out;
pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
if (!pseries_log)
goto out;
mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
error_type = mce_log->error_type;
disposition = mce_handle_err_realmode(disposition, error_type);
/*
* Enable translation as we will be accessing per-cpu variables
* in save_mce_event() which may fall outside RMO region, also
@ -693,10 +713,10 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
* Note: All the realmode handling like flushing SLB entries for
* SLB multihit is done by now.
*/
out:
mtmsr(mfmsr() | MSR_IR | MSR_DR);
save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
&mce_err, regs->nip, eaddr, paddr);
disposition = mce_handle_err_virtmode(regs, errp, mce_log,
disposition);
return disposition;
}

@ -36,6 +36,7 @@ static __init int rng_init(void)
ppc_md.get_random_seed = pseries_get_random_long;
of_node_put(dn);
return 0;
}
machine_subsys_initcall(pseries, rng_init);

@ -174,6 +174,7 @@ int icp_hv_init(void)
icp_ops = &icp_hv_ops;
of_node_put(np);
return 0;
}

View File

@ -5,15 +5,6 @@
#include "pgtable.h"
#include "../string.h"
/*
* __force_order is used by special_insns.h asm code to force instruction
* serialization.
*
* It is not referenced from the code, but GCC < 5 with -fPIE would fail
* due to an undefined symbol. Define it to make these ancient GCCs work.
*/
unsigned long __force_order;
#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */


@ -379,7 +379,7 @@ static __init int _init_events_attrs(void)
while (amd_iommu_v2_event_descs[i].attr.attr.name)
i++;
attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL);
attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;


@ -669,9 +669,7 @@ int intel_pmu_drain_bts_buffer(void)
static inline void intel_pmu_drain_pebs_buffer(void)
{
struct pt_regs regs;
x86_pmu.drain_pebs(&regs);
x86_pmu.drain_pebs(NULL);
}
/*
@ -1736,6 +1734,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
struct x86_perf_regs perf_regs;
struct pt_regs *regs = &perf_regs.regs;
void *at = get_next_pebs_record_by_bit(base, top, bit);
struct pt_regs dummy_iregs;
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
/*
@ -1748,6 +1747,9 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
} else if (!intel_pmu_save_and_restart(event))
return;
if (!iregs)
iregs = &dummy_iregs;
while (count > 1) {
setup_sample(event, iregs, at, &data, regs);
perf_event_output(event, &data, regs);
@ -1757,16 +1759,22 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
}
setup_sample(event, iregs, at, &data, regs);
/*
* All but the last records are processed.
* The last one is left to be able to call the overflow handler.
*/
if (perf_event_overflow(event, &data, regs)) {
x86_pmu_stop(event, 0);
return;
if (iregs == &dummy_iregs) {
/*
* The PEBS records may be drained in the non-overflow context,
* e.g., large PEBS + context switch. Perf should treat the
* last record the same as other PEBS records, and doesn't
* invoke the generic overflow handler.
*/
perf_event_output(event, &data, regs);
} else {
/*
* All but the last records are processed.
* The last one is left to be able to call the overflow handler.
*/
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
}
}
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)


@ -110,6 +110,10 @@
#define ICL_UNC_CBO_0_PER_CTR0 0x702
#define ICL_UNC_CBO_MSR_OFFSET 0x8
/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR 0x3b1
#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@ -297,15 +301,21 @@ void skl_uncore_cpu_init(void)
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
static struct intel_uncore_ops icl_uncore_msr_ops = {
.disable_event = snb_uncore_msr_disable_event,
.enable_event = snb_uncore_msr_enable_event,
.read_counter = uncore_msr_read_counter,
};
static struct intel_uncore_type icl_uncore_cbox = {
.name = "cbox",
.num_counters = 4,
.num_counters = 2,
.perf_ctr_bits = 44,
.perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
.event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
.event_mask = SNB_UNC_RAW_EVENT_MASK,
.msr_offset = ICL_UNC_CBO_MSR_OFFSET,
.ops = &skl_uncore_msr_ops,
.ops = &icl_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
};
@ -334,13 +344,25 @@ static struct intel_uncore_type icl_uncore_clockbox = {
.single_fixed = 1,
.event_mask = SNB_UNC_CTL_EV_SEL_MASK,
.format_group = &icl_uncore_clock_format_group,
.ops = &skl_uncore_msr_ops,
.ops = &icl_uncore_msr_ops,
.event_descs = icl_uncore_events,
};
static struct intel_uncore_type icl_uncore_arb = {
.name = "arb",
.num_counters = 1,
.num_boxes = 1,
.perf_ctr_bits = 44,
.perf_ctr = ICL_UNC_ARB_PER_CTR,
.event_ctl = ICL_UNC_ARB_PERFEVTSEL,
.event_mask = SNB_UNC_RAW_EVENT_MASK,
.ops = &icl_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
};
static struct intel_uncore_type *icl_msr_uncores[] = {
&icl_uncore_cbox,
&snb_uncore_arb,
&icl_uncore_arb,
&icl_uncore_clockbox,
NULL,
};
@ -358,7 +380,6 @@ void icl_uncore_cpu_init(void)
{
uncore_msr_uncores = icl_msr_uncores;
icl_uncore_cbox.num_boxes = icl_get_cbox_num();
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
enum {


@ -10,45 +10,47 @@
#include <linux/jump_label.h>
/*
* Volatile isn't enough to prevent the compiler from reordering the
* read/write functions for the control registers and messing everything up.
* A memory clobber would solve the problem, but would prevent reordering of
* all loads stores around it, which can hurt performance. Solution is to
* use a variable and mimic reads and writes to it to enforce serialization
* The compiler should not reorder volatile asm statements with respect to each
* other: they should execute in program order. However GCC 4.9.x and 5.x have
* a bug (which was fixed in 8.1, 7.3 and 6.5) where they might reorder
* volatile asm. The write functions are not affected since they have memory
* clobbers preventing reordering. To prevent reads from being reordered with
* respect to writes, use a dummy memory operand.
*/
extern unsigned long __force_order;
#define __FORCE_ORDER "m"(*(unsigned int *)0x1000UL)
void native_write_cr0(unsigned long val);
static inline unsigned long native_read_cr0(void)
{
unsigned long val;
asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
asm volatile("mov %%cr0,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
static inline unsigned long native_read_cr2(void)
{
unsigned long val;
asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
asm volatile("mov %%cr2,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
static inline void native_write_cr2(unsigned long val)
{
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}
static inline unsigned long __native_read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
static inline void native_write_cr3(unsigned long val)
{
asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}
static inline unsigned long native_read_cr4(void)
@ -63,10 +65,10 @@ static inline unsigned long native_read_cr4(void)
asm volatile("1: mov %%cr4, %0\n"
"2:\n"
_ASM_EXTABLE(1b, 2b)
: "=r" (val), "=m" (__force_order) : "0" (0));
: "=r" (val) : "0" (0), __FORCE_ORDER);
#else
/* CR4 always exists on x86_64. */
asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
asm volatile("mov %%cr4,%0\n\t" : "=r" (val) : __FORCE_ORDER);
#endif
return val;
}
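For readers unfamiliar with the constraint trick in the hunk above, here is a minimal stand-alone sketch of the same pattern, not taken from the patch: the read side consumes a dummy memory operand so the compiler cannot hoist it above earlier control-register writes, while the write side relies on a plain "memory" clobber. Function names below are illustrative only.

/* Illustrative sketch; mirrors the __FORCE_ORDER pattern used above. */
static inline unsigned long sketch_read_cr(void)
{
	unsigned long val;

	/*
	 * The "m" input is never actually loaded; it only tells the
	 * compiler this asm depends on memory, so it cannot be moved
	 * above a preceding asm statement that clobbers memory.
	 */
	asm volatile("mov %%cr3,%0" : "=r" (val)
		     : "m" (*(unsigned int *)0x1000UL));
	return val;
}

static inline void sketch_write_cr(unsigned long val)
{
	/* The "memory" clobber keeps later reads from moving above this store. */
	asm volatile("mov %0,%%cr3" : : "r" (val) : "memory");
}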


@ -377,7 +377,7 @@ void native_write_cr0(unsigned long val)
unsigned long bits_missing = 0;
set_register:
asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));
asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
if (static_branch_likely(&cr_pinning)) {
if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
@ -396,7 +396,7 @@ void native_write_cr4(unsigned long val)
unsigned long bits_changed = 0;
set_register:
asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
if (static_branch_likely(&cr_pinning)) {
if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {


@ -388,10 +388,28 @@ static int msr_to_offset(u32 msr)
return -1;
}
__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr)
{
pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip);
show_stack_regs(regs);
panic("MCA architectural violation!\n");
while (true)
cpu_relax();
return true;
}
/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
u64 v;
DECLARE_ARGS(val, low, high);
if (__this_cpu_read(injectm.finished)) {
int offset = msr_to_offset(msr);
@ -401,21 +419,43 @@ static u64 mce_rdmsrl(u32 msr)
return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
}
if (rdmsrl_safe(msr, &v)) {
WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
/*
* Return zero in case the access faulted. This should
* not happen normally but can happen if the CPU does
* something weird, or if the code is buggy.
*/
v = 0;
}
/*
* RDMSR on MCA MSRs should not fault. If they do, this is very much an
* architectural violation and needs to be reported to hw vendor. Panic
* the box to not allow any further progress.
*/
asm volatile("1: rdmsr\n"
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_fault)
: EAX_EDX_RET(val, low, high) : "c" (msr));
return v;
return EAX_EDX_VAL(val, low, high);
}
__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr)
{
pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
regs->ip, (void *)regs->ip);
show_stack_regs(regs);
panic("MCA architectural violation!\n");
while (true)
cpu_relax();
return true;
}
static void mce_wrmsrl(u32 msr, u64 v)
{
u32 low, high;
if (__this_cpu_read(injectm.finished)) {
int offset = msr_to_offset(msr);
@ -423,7 +463,15 @@ static void mce_wrmsrl(u32 msr, u64 v)
*(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
return;
}
wrmsrl(msr, v);
low = (u32)v;
high = (u32)(v >> 32);
/* See comment in mce_rdmsrl() */
asm volatile("1: wrmsr\n"
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_fault)
: : "c" (msr), "a"(low), "d" (high) : "memory");
}
/*


@ -172,4 +172,14 @@ extern bool amd_filter_mce(struct mce *m);
static inline bool amd_filter_mce(struct mce *m) { return false; };
#endif
__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr);
__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
unsigned long fault_addr);
#endif /* __X86_MCE_INTERNAL_H__ */


@ -9,9 +9,11 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <asm/mce.h>
#include <linux/uaccess.h>
#include <asm/mce.h>
#include <asm/intel-family.h>
#include "internal.h"
/*
@ -40,9 +42,14 @@ static struct severity {
unsigned char context;
unsigned char excp;
unsigned char covered;
unsigned char cpu_model;
unsigned char cpu_minstepping;
unsigned char bank_lo, bank_hi;
char *msg;
} severities[] = {
#define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c }
#define BANK_RANGE(l, h) .bank_lo = l, .bank_hi = h
#define MODEL_STEPPING(m, s) .cpu_model = m, .cpu_minstepping = s
#define KERNEL .context = IN_KERNEL
#define USER .context = IN_USER
#define KERNEL_RECOV .context = IN_KERNEL_RECOV
@ -97,7 +104,6 @@ static struct severity {
KEEP, "Corrected error",
NOSER, BITCLR(MCI_STATUS_UC)
),
/*
* known AO MCACODs reported via MCE or CMC:
*
@ -113,6 +119,18 @@ static struct severity {
AO, "Action optional: last level cache writeback error",
SER, MASK(MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
),
/*
* Quirk for Skylake/Cascade Lake. Patrol scrubber may be configured
* to report uncorrected errors using CMCI with a special signature.
* UC=0, MSCOD=0x0010, MCACOD=binary(000X 0000 1100 XXXX) reported
* in one of the memory controller banks.
* Set severity to "AO" for same action as normal patrol scrub error.
*/
MCESEV(
AO, "Uncorrected Patrol Scrub Error",
SER, MASK(MCI_STATUS_UC|MCI_ADDR|0xffffeff0, MCI_ADDR|0x001000c0),
MODEL_STEPPING(INTEL_FAM6_SKYLAKE_X, 4), BANK_RANGE(13, 18)
),
/* ignore OVER for UCNA */
MCESEV(
@ -320,6 +338,12 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
continue;
if (s->excp && excp != s->excp)
continue;
if (s->cpu_model && boot_cpu_data.x86_model != s->cpu_model)
continue;
if (s->cpu_minstepping && boot_cpu_data.x86_stepping < s->cpu_minstepping)
continue;
if (s->bank_lo && (m->bank < s->bank_lo || m->bank > s->bank_hi))
continue;
if (msg)
*msg = s->msg;
s->covered = 1;
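As a worked example (not part of the patch) of how the new patrol-scrub entry matches, assuming the usual severity-table comparison (m->status & s->mask) == s->result used elsewhere in mce/severity.c:

/*
 * MASK(MCI_STATUS_UC | MCI_ADDR | 0xffffeff0, MCI_ADDR | 0x001000c0)
 *
 * Low 32 bits of MCi_STATUS:
 *   MSCOD  (bits 31:16): mask 0xffff, value 0x0010 -> MSCOD must be 0x0010
 *   MCACOD (bits 15:0) : mask 0xeff0, value 0x00c0 -> 000X 0000 1100 XXXX
 *                        (bit 12 and bits 3:0 are don't-care)
 * High bits:
 *   MCI_STATUS_UC in the mask but not in the value -> UC must be 0
 *   MCI_ADDR in both                               -> address info must be valid
 */
static bool is_uc_patrol_scrub_signature(u64 status)	/* name is illustrative */
{
	const u64 mask  = MCI_STATUS_UC | MCI_ADDR | 0xffffeff0ULL;
	const u64 value = MCI_ADDR | 0x001000c0ULL;

	return (status & mask) == value;
}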


@ -242,9 +242,9 @@ static void __init fpu__init_system_ctx_switch(void)
*/
static void __init fpu__init_parse_early_param(void)
{
char arg[32];
char arg[128];
char *argptr = arg;
int bit;
int arglen, res, bit;
#ifdef CONFIG_X86_32
if (cmdline_find_option_bool(boot_command_line, "no387"))
@ -267,12 +267,26 @@ static void __init fpu__init_parse_early_param(void)
if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
if (cmdline_find_option(boot_command_line, "clearcpuid", arg,
sizeof(arg)) &&
get_option(&argptr, &bit) &&
bit >= 0 &&
bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
if (arglen <= 0)
return;
pr_info("Clearing CPUID bits:");
do {
res = get_option(&argptr, &bit);
if (res == 0 || res == 3)
break;
/* If the argument was too long, the last bit may be cut off */
if (res == 1 && arglen >= sizeof(arg))
break;
if (bit >= 0 && bit < NCAPINTS * 32) {
pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
setup_clear_cpu_cap(bit);
}
} while (res == 2);
pr_cont("\n");
}
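The loop above leans on get_option()'s return convention from lib/cmdline.c (as I read it: 0 means no number was parsed, 1 a number with nothing after it, 2 a number followed by a comma, 3 a number followed by a hyphen, i.e. a range), which is why res == 2 continues the loop while 0 and 3 stop it. A condensed sketch of the same shape, with illustrative naming:

/* Sketch: consume a comma-separated bit list such as "14,290". */
static void sketch_clear_bits(char *argptr)
{
	int bit, res;

	do {
		res = get_option(&argptr, &bit);	/* parse one integer */
		if (res == 0 || res == 3)		/* nothing parsed, or a range */
			break;
		if (bit >= 0 && bit < NCAPINTS * 32)
			setup_clear_cpu_cap(bit);
	} while (res == 2);				/* a comma follows, keep going */
}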
/*


@ -106,7 +106,6 @@ fs_initcall(nmi_warning_debugfs);
static void nmi_check_duration(struct nmiaction *action, u64 duration)
{
u64 whole_msecs = READ_ONCE(action->max_duration);
int remainder_ns, decimal_msecs;
if (duration < nmi_longest_ns || duration < action->max_duration)
@ -114,12 +113,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
action->max_duration = duration;
remainder_ns = do_div(whole_msecs, (1000 * 1000));
remainder_ns = do_div(duration, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
printk_ratelimited(KERN_INFO
"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
action->handler, whole_msecs, decimal_msecs);
action->handler, duration, decimal_msecs);
}
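The fix above depends on do_div()'s in-place semantics: the macro divides the 64-bit dividend in place, leaving the quotient in it, and returns the remainder, so after the call duration already holds whole milliseconds. With illustrative values:

u64 duration = 12345678;			/* nanoseconds */
u32 remainder_ns, decimal_msecs;

remainder_ns  = do_div(duration, 1000 * 1000);	/* duration becomes 12, returns 345678 */
decimal_msecs = remainder_ns / 1000;		/* 345 */
/* printed as "12.345 msecs" */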
static int nmi_handle(unsigned int type, struct pt_regs *regs)


@ -3617,7 +3617,7 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt)
u64 tsc_aux = 0;
if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
return emulate_gp(ctxt, 0);
return emulate_ud(ctxt);
ctxt->dst.val = tsc_aux;
return X86EMUL_CONTINUE;
}


@ -6453,6 +6453,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
cond_resched_lock(&kvm->mmu_lock);
}
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx);


@ -5383,6 +5383,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
* - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data.
*/
pi.prev_ga_tag = 0;
pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi);


@ -2231,6 +2231,8 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
vmx->segment_cache.bitmask = 0;
}
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
@ -3094,8 +3096,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
prepare_vmcs02_early(vmx, vmcs12);
if (from_vmentry) {
if (unlikely(!nested_get_vmcs12_pages(vcpu)))
if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
}
if (nested_vmx_check_vmentry_hw(vcpu)) {
vmx_switch_vmcs(vcpu, &vmx->vmcs01);


@ -747,11 +747,10 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
bio_devname(bio, b), bio->bi_opf,
(unsigned long long)bio_end_sector(bio),
(long long)maxsector);
pr_info_ratelimited("attempt to access beyond end of device\n"
"%s: rw=%d, want=%llu, limit=%llu\n",
bio_devname(bio, b), bio->bi_opf,
bio_end_sector(bio), maxsector);
}
#ifdef CONFIG_FAIL_MAKE_REQUEST


@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
kobj);
cancel_delayed_work_sync(&hctx->run_work);
if (hctx->flags & BLK_MQ_F_BLOCKING)
cleanup_srcu_struct(hctx->srcu);
blk_free_flush_queue(hctx->fq);


@ -891,9 +891,16 @@ static void __blk_release_queue(struct work_struct *work)
blk_free_queue_stats(q->stats);
if (queue_is_mq(q))
if (queue_is_mq(q)) {
struct blk_mq_hw_ctx *hctx;
int i;
cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
}
blk_exit_queue(q);
blk_queue_free_zone_bitmaps(q);


@ -78,7 +78,7 @@ static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_sync_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@ -291,19 +291,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
areq->outlen = outlen;
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
/* AIO operation in progress */
if (err == -EINPROGRESS || err == -EBUSY)
if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?


@ -123,7 +123,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
/* AIO operation in progress */
if (err == -EINPROGRESS || err == -EBUSY)
if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);


@ -2483,8 +2483,6 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* file is done when the transaction is torn
* down.
*/
WARN_ON(failed_at &&
proc->tsk == current->group_leader);
} break;
case BINDER_TYPE_PTR:
/*


@ -2664,6 +2664,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
kfree(dr);
usb_free_urb(urb);
return -ENOMEM;
}


@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
percpu_up_write(&hu->proto_lock);
cancel_work_sync(&hu->init_ready);
cancel_work_sync(&hu->write_work);
if (hdev) {


@ -357,6 +357,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
cancel_work_sync(&hu->init_ready);
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
hci_free_dev(hdev);


@ -1977,7 +1977,7 @@ static int try_smi_init(struct smi_info *new_smi)
/* Do this early so it's available for logs. */
if (!new_smi->io.dev) {
pr_err("IPMI interface added with no device\n");
rv = EIO;
rv = -EIO;
goto out_err;
}


@ -437,12 +437,17 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
return -EINVAL;
regmap_read(regmap, AT91_CKGR_MOR, &tmp);
tmp &= ~MOR_KEY_MASK;
if (index && !(tmp & AT91_PMC_MOSCSEL))
regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
tmp = AT91_PMC_MOSCSEL;
else if (!index && (tmp & AT91_PMC_MOSCSEL))
regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
tmp = 0;
else
return 0;
regmap_update_bits(regmap, AT91_CKGR_MOR,
AT91_PMC_MOSCSEL | MOR_KEY_MASK,
tmp | AT91_PMC_KEY);
while (!clk_sam9x5_main_ready(regmap))
cpu_relax();


@ -1336,8 +1336,10 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
pll->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &pll->hw);
if (ret)
if (ret) {
kfree(pll);
return NULL;
}
return &pll->hw;
}


@ -157,10 +157,10 @@ static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys
"audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
"sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
"audio_pll2_out", "sys1_pll_266m", "sys3_pll_out", "sys1_pll_100m", };
"sys3_pll_out", "sys1_pll_266m", "audio_pll2_out", "sys1_pll_100m", };
static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll_out", "audio_pll1_out",
"video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };


@ -522,7 +522,7 @@ static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider)
np = of_find_node_with_property(np, *clk_name);
if (!np) {
clk_name++;
break;
continue;
}
if (!of_device_is_available(np))


@ -919,6 +919,8 @@ static const struct mtk_gate infra_clks[] = {
"pwm_sel", 19),
GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
"pwm_sel", 21),
GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
"uart_sel", 22),
GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
"uart_sel", 23),
GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",


@ -298,6 +298,17 @@ static struct clk_regmap g12a_fclk_div2 = {
&g12a_fclk_div2_div.hw
},
.num_parents = 1,
/*
* Similar to fclk_div3, it seems that this clock is used by
* the resident firmware and is required by the platform to
* operate correctly.
* Until the following condition are met, we need this clock to
* be marked as critical:
* a) Mark the clock used by a firmware resource, if possible
* b) CCF has a clock hand-off mechanism to make the sure the
* clock stays on until the proper driver comes along
*/
.flags = CLK_IS_CRITICAL,
},
};


@ -666,7 +666,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.cmd_rcgr = 0x48044,
.mnd_width = 0,
.hid_width = 5,
.parent_map = gcc_parent_map_xo_gpll0_gpll0_early_div,
.parent_map = gcc_parent_map_xo_gpll0,
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",


@ -167,7 +167,7 @@ struct clk *rockchip_clk_register_halfdiv(const char *name,
unsigned long flags,
spinlock_t *lock)
{
struct clk *clk;
struct clk *clk = ERR_PTR(-ENOMEM);
struct clk_mux *mux = NULL;
struct clk_gate *gate = NULL;
struct clk_divider *div = NULL;


@ -484,6 +484,12 @@ static int __init armada37xx_cpufreq_driver_init(void)
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
{ .compatible = "marvell,armada-3700-nb-pm" },
{ },
};
MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL");


@ -884,12 +884,15 @@ static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
int cpu;
struct cpufreq_policy cpu_policy;
struct cpufreq_policy *cpu_policy;
rebooting = true;
for_each_online_cpu(cpu) {
cpufreq_get_policy(&cpu_policy, cpu);
powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
cpu_policy = cpufreq_cpu_get(cpu);
if (!cpu_policy)
continue;
powernv_cpufreq_target_index(cpu_policy, get_nominal_index());
cpufreq_cpu_put(cpu_policy);
}
return NOTIFY_DONE;


@ -112,6 +112,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_DES
select CRYPTO_XTS
help
Selecting this will use CAAM Queue Interface (QI) for sending
& receiving crypto jobs to/from CAAM. This gives better performance


@ -18,6 +18,7 @@
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
#include <asm/unaligned.h>
/*
* crypto alg
@ -67,6 +68,11 @@ struct caam_ctx {
struct device *qidev;
spinlock_t lock; /* Protects multiple init of driver context */
struct caam_drv_ctx *drv_ctx[NUM_OP];
struct crypto_skcipher *fallback;
};
struct caam_skcipher_req_ctx {
struct skcipher_request fallback_req;
};
static int aead_set_sh_desc(struct crypto_aead *aead)
@ -745,12 +751,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
int err;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
dev_err(jrdev, "key size mismatch\n");
goto badkey;
}
err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (err)
return err;
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
@ -1395,6 +1406,14 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
return edesc;
}
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
struct skcipher_edesc *edesc;
@ -1405,6 +1424,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
if (!req->cryptlen)
return 0;
if (ctx->fallback && xts_skcipher_ivsize(req)) {
struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
skcipher_request_set_callback(&rctx->fallback_req,
req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(&rctx->fallback_req, req->src,
req->dst, req->cryptlen, req->iv);
return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
crypto_skcipher_decrypt(&rctx->fallback_req);
}
if (unlikely(caam_congested))
return -EAGAIN;
@ -1529,6 +1563,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam-qi",
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
},
.setkey = xts_skcipher_setkey,
@ -2462,9 +2497,32 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
container_of(alg, typeof(*caam_alg), skcipher);
struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
false);
if (alg_aai == OP_ALG_AAI_XTS) {
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
struct crypto_skcipher *fallback;
fallback = crypto_alloc_skcipher(tfm_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
dev_err(ctx->jrdev, "Failed to allocate %s fallback: %ld\n",
tfm_name, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
ctx->fallback = fallback;
crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
crypto_skcipher_reqsize(fallback));
}
ret = caam_init_common(ctx, &caam_alg->caam, false);
if (ret && ctx->fallback)
crypto_free_skcipher(ctx->fallback);
return ret;
}
static int caam_aead_init(struct crypto_aead *tfm)
@ -2490,7 +2548,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
static void caam_cra_exit(struct crypto_skcipher *tfm)
{
caam_exit_common(crypto_skcipher_ctx(tfm));
struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->fallback)
crypto_free_skcipher(ctx->fallback);
caam_exit_common(ctx);
}
static void caam_aead_exit(struct crypto_aead *tfm)
@ -2524,7 +2586,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;


@ -1746,7 +1746,7 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
break;
default:
ret = -EINVAL;
goto e_ctx;
goto e_data;
}
} else {
/* Stash the context */


@ -1053,6 +1053,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ndev = n->dev;
if (!ndev)
goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);


@ -910,9 +910,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
return (__force int)cpu_to_be16(thdr->length);
}
static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{
return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
}
static int csk_wait_memory(struct chtls_dev *cdev,
@ -1210,6 +1210,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
copied = 0;
csk = rcu_dereference_sk_user_data(sk);
cdev = csk->cdev;
lock_sock(sk);
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
err = sk_stream_wait_connect(sk, &timeo);


@ -527,7 +527,7 @@ static void release_ixp_crypto(struct device *dev)
if (crypt_virt) {
dma_free_coherent(dev,
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys);
}
}


@ -442,7 +442,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
struct mtk_ring **ring = cryp->ring;
int i, err = ENOMEM;
int i;
for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@ -469,14 +469,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
return 0;
err_cleanup:
for (; i--; ) {
do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]);
}
return err;
} while (i--);
return -ENOMEM;
}
static int mtk_crypto_probe(struct platform_device *pdev)


@ -453,6 +453,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val, mask;
if (likely(ctx->digcnt))
omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
/*
* Setting ALGO_CONST only for the first iteration and
* CLOSE_HASH only for the last one. Note that flags mode bits


@ -1697,11 +1697,6 @@ static int spacc_probe(struct platform_device *pdev)
goto err_clk_put;
}
ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
if (ret)
goto err_clk_disable;
/*
* Use an IRQ threshold of 50% as a default. This seems to be a
* reasonable trade off of latency against throughput but can be
@ -1709,6 +1704,10 @@ static int spacc_probe(struct platform_device *pdev)
*/
engine->stat_irq_thresh = (engine->fifo_sz / 2);
ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
if (ret)
goto err_clk_disable;
/*
* Configure the interrupts. We only use the STAT_CNT interrupt as we
* only submit a new packet for processing when we complete another in


@ -1218,15 +1218,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
add_threaded_test(info);
/* Check if channel was added successfully */
dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
if (dtc->chan) {
if (!list_empty(&info->channels)) {
/*
* if new channel was not successfully added, revert the
* "test_channel" string to the name of the last successfully
* added channel. exception for when users issues empty string
* to channel parameter.
*/
dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;


@ -772,6 +772,10 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
if (dws->dma_dev != chan->device->dev)
return false;
/* permit channels in accordance with the channels mask */
if (dws->channels && !(dws->channels & dwc->mask))
return false;
/* We have to copy data since dws can be temporary storage */
memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));


@ -14,7 +14,7 @@
static void dw_dma_initialize_chan(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfghi = is_slave_direction(dwc->direction) ? 0 : DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
bool hs_polarity = dwc->dws.hs_polarity;


@ -22,18 +22,21 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
};
dma_cap_mask_t cap;
if (dma_spec->args_count != 3)
if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
return NULL;
slave.src_id = dma_spec->args[0];
slave.dst_id = dma_spec->args[0];
slave.m_master = dma_spec->args[1];
slave.p_master = dma_spec->args[2];
if (dma_spec->args_count >= 4)
slave.channels = dma_spec->args[3];
if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.m_master >= dw->pdata->nr_masters ||
slave.p_master >= dw->pdata->nr_masters))
slave.p_master >= dw->pdata->nr_masters ||
slave.channels >= BIT(dw->pdata->nr_channels)))
return NULL;
dma_cap_zero(cap);


@ -209,8 +209,8 @@ static int config_irq(void *ctx, struct platform_device *pdev)
/* register interrupt handler */
irq = platform_get_irq(pdev, 0);
dev_dbg(&pdev->dev, "got irq %d\n", irq);
if (!irq)
return -ENODEV;
if (irq < 0)
return irq;
rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
DRV_NAME, ctx);


@ -1074,16 +1074,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
PCI_DEVICE_ID_INTEL_5100_19, 0);
if (!einj) {
ret = -ENODEV;
goto bail_einj;
goto bail_mc_free;
}
rc = pci_enable_device(einj);
if (rc < 0) {
ret = rc;
goto bail_disable_einj;
goto bail_einj;
}
mci->pdev = &pdev->dev;
priv = mci->pvt_info;
@ -1149,14 +1148,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
bail_scrub:
priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_einj:
pci_disable_device(einj);
bail_einj:
pci_dev_put(einj);
bail_mc_free:
edac_mc_free(mci);
bail_disable_ch1:
pci_disable_device(ch1mm);


@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
/* add EMIF ECC error handler */
error_irq = platform_get_irq(pdev, 0);
if (!error_irq) {
if (error_irq < 0) {
ret = error_irq;
edac_printk(KERN_ERR, EDAC_MOD_NAME,
"EMIF irq number not defined.\n");
goto err;


@ -6984,8 +6984,7 @@ static int dm_update_plane_state(struct dc *dc,
dm_old_plane_state->dc_state,
dm_state->context)) {
ret = EINVAL;
return ret;
return -EINVAL;
}


@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
source[len - 1] = '\0';
ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
if (ret)
if (ret) {
kfree(source);
return ret;
}
spin_lock_irq(&crc->lock);


@ -2120,7 +2120,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
cdv_intel_edp_panel_vdd_off(gma_encoder);
if (ret == 0) {
if (ret <= 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
cdv_intel_dp_encoder_destroy(encoder);


@ -834,7 +834,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
int i;
a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
sizeof(a6xx_state->indexed_regs));
sizeof(*a6xx_state->indexed_regs));
if (!a6xx_state->indexed_regs)
return;


@ -819,7 +819,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane *plane;
struct drm_display_mode *mode;
int cnt = 0, rc = 0, mixer_width, i, z_pos;
int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
int multirect_count = 0;
@ -852,9 +852,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
mixer_width = mode->hdisplay / cstate->num_mixers;
if (cstate->num_mixers) {
mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
_dpu_crtc_setup_lm_bounds(crtc, state);
}
crtc_rect.x2 = mode->hdisplay;
crtc_rect.y2 = mode->vdisplay;


@ -26,6 +26,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
@ -87,8 +88,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_axi);
}
static struct drm_framebuffer *
mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info;
info = drm_get_format_info(dev, mode_cmd);
if (!info)
return ERR_PTR(-EINVAL);
if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
return ERR_PTR(-EINVAL);
}
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.fb_create = mxsfb_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
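As a worked example of the new pitch check (values here are illustrative): for a 1024-pixel-wide XRGB8888 buffer, info->cpp[0] is 4, so the only pitches[0] mxsfb_fb_create() now accepts is 1024 * 4 = 4096 bytes; any padded or otherwise mismatched pitch is rejected with -EINVAL before the framebuffer is created.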


@ -2382,12 +2382,12 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
static const struct panel_desc ortustech_com43h4m85ulc = {
.modes = &ortustech_com43h4m85ulc_mode,
.num_modes = 1,
.bpc = 8,
.bpc = 6,
.size = {
.width = 56,
.height = 93,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};


@ -75,6 +75,17 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
return 0;
}
void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
/*
* The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 needs
* these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
* to operate correctly.
*/
gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
u32 quirks = 0;
@ -304,6 +315,8 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
int ret;
u32 val;
panfrost_gpu_init_quirks(pfdev);
/* Just turn on everything for now */
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
@ -357,7 +370,6 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return err;
}
panfrost_gpu_init_quirks(pfdev);
panfrost_gpu_power_on(pfdev);
return 0;


@ -16,4 +16,6 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev);
#endif
