Merge 5.10.37 into android12-5.10

Changes in 5.10.37
	Bluetooth: verify AMP hci_chan before amp_destroy
	bluetooth: eliminate the potential race condition when removing the HCI controller
	net/nfc: fix use-after-free llcp_sock_bind/connect
	io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers
	Revert "USB: cdc-acm: fix rounding error in TIOCSSERIAL"
	usb: roles: Call try_module_get() from usb_role_switch_find_by_fwnode()
	tty: moxa: fix TIOCSSERIAL jiffies conversions
	tty: amiserial: fix TIOCSSERIAL permission check
	USB: serial: usb_wwan: fix TIOCSSERIAL jiffies conversions
	staging: greybus: uart: fix TIOCSSERIAL jiffies conversions
	USB: serial: ti_usb_3410_5052: fix TIOCSSERIAL permission check
	staging: fwserial: fix TIOCSSERIAL jiffies conversions
	tty: moxa: fix TIOCSSERIAL permission check
	staging: fwserial: fix TIOCSSERIAL permission check
	drm: bridge: fix LONTIUM use of mipi_dsi_() functions
	usb: typec: tcpm: Address incorrect values of tcpm psy for fixed supply
	usb: typec: tcpm: Address incorrect values of tcpm psy for pps supply
	usb: typec: tcpm: update power supply once partner accepts
	usb: xhci-mtk: remove or operator for setting schedule parameters
	usb: xhci-mtk: improve bandwidth scheduling with TT
	ASoC: samsung: tm2_wm5110: check of of_parse return value
	ASoC: Intel: kbl_da7219_max98927: Fix kabylake_ssp_fixup function
	ASoC: tlv320aic32x4: Register clocks before registering component
	ASoC: tlv320aic32x4: Increase maximum register in regmap
	MIPS: pci-mt7620: fix PLL lock check
	MIPS: pci-rt2880: fix slot 0 configuration
	FDDI: defxx: Bail out gracefully with unassigned PCI resource for CSR
	PCI: Allow VPD access for QLogic ISP2722
	KVM: x86: Defer the MMU unload to the normal path on an global INVPCID
	PCI: xgene: Fix cfg resource mapping
	PCI: keystone: Let AM65 use the pci_ops defined in pcie-designware-host.c
	PM / devfreq: Unlock mutex and free devfreq struct in error path
	soc/tegra: regulators: Fix locking up when voltage-spread is out of range
	iio: inv_mpu6050: Fully validate gyro and accel scale writes
	iio:accel:adis16201: Fix wrong axis assignment that prevents loading
	iio:adc:ad7476: Fix remove handling
	sc16is7xx: Defer probe if device read fails
	phy: cadence: Sierra: Fix PHY power_on sequence
	misc: lis3lv02d: Fix false-positive WARN on various HP models
	phy: ti: j721e-wiz: Invoke wiz_init() before of_platform_device_create()
	misc: vmw_vmci: explicitly initialize vmci_notify_bm_set_msg struct
	misc: vmw_vmci: explicitly initialize vmci_datagram payload
	selinux: add proper NULL termination to the secclass_map permissions
	x86, sched: Treat Intel SNC topology as default, COD as exception
	async_xor: increase src_offs when dropping destination page
	md/bitmap: wait for external bitmap writes to complete during tear down
	md-cluster: fix use-after-free issue when removing rdev
	md: split mddev_find
	md: factor out a mddev_find_locked helper from mddev_find
	md: md_open returns -EBUSY when entering racing area
	md: Fix missing unused status line of /proc/mdstat
	mt76: mt7615: use ieee80211_free_txskb() in mt7615_tx_token_put()
	ipw2x00: potential buffer overflow in libipw_wx_set_encodeext()
	cfg80211: scan: drop entry from hidden_list on overflow
	rtw88: Fix array overrun in rtw_get_tx_power_params()
	mt76: fix potential DMA mapping leak
	FDDI: defxx: Make MMIO the configuration default except for EISA
	drm/i915/gvt: Fix virtual display setup for BXT/APL
	drm/i915/gvt: Fix vfio_edid issue for BXT/APL
	drm/qxl: use ttm bo priorities
	drm/panfrost: Clear MMU irqs before handling the fault
	drm/panfrost: Don't try to map pages that are already mapped
	drm/radeon: fix copy of uninitialized variable back to userspace
	drm/dp_mst: Revise broadcast msg lct & lcr
	drm/dp_mst: Set CLEAR_PAYLOAD_ID_TABLE as broadcast
	drm: bridge/panel: Cleanup connector on bridge detach
	drm/amd/display: Reject non-zero src_y and src_x for video planes
	drm/amdgpu: fix concurrent VM flushes on Vega/Navi v2
	ALSA: hda/realtek: Re-order ALC882 Acer quirk table entries
	ALSA: hda/realtek: Re-order ALC882 Sony quirk table entries
	ALSA: hda/realtek: Re-order ALC882 Clevo quirk table entries
	ALSA: hda/realtek: Re-order ALC269 HP quirk table entries
	ALSA: hda/realtek: Re-order ALC269 Acer quirk table entries
	ALSA: hda/realtek: Re-order ALC269 Dell quirk table entries
	ALSA: hda/realtek: Re-order ALC269 ASUS quirk table entries
	ALSA: hda/realtek: Re-order ALC269 Sony quirk table entries
	ALSA: hda/realtek: Re-order ALC269 Lenovo quirk table entries
	ALSA: hda/realtek: Re-order remaining ALC269 quirk table entries
	ALSA: hda/realtek: Re-order ALC662 quirk table entries
	ALSA: hda/realtek: Remove redundant entry for ALC861 Haier/Uniwill devices
	ALSA: hda/realtek: ALC285 Thinkpad jack pin quirk is unreachable
	ALSA: hda/realtek: Fix speaker amp on HP Envy AiO 32
	KVM: s390: VSIE: correctly handle MVPG when in VSIE
	KVM: s390: split kvm_s390_logical_to_effective
	KVM: s390: fix guarded storage control register handling
	s390: fix detection of vector enhancements facility 1 vs. vector packed decimal facility
	KVM: s390: VSIE: fix MVPG handling for prefixing and MSO
	KVM: s390: split kvm_s390_real_to_abs
	KVM: s390: extend kvm_s390_shadow_fault to return entry pointer
	KVM: x86/mmu: Alloc page for PDPTEs when shadowing 32-bit NPT with 64-bit
	KVM: x86: Remove emulator's broken checks on CR0/CR3/CR4 loads
	KVM: nSVM: Set the shadow root level to the TDP level for nested NPT
	KVM: SVM: Don't strip the C-bit from CR2 on #PF interception
	KVM: SVM: Do not allow SEV/SEV-ES initialization after vCPUs are created
	KVM: SVM: Inject #GP on guest MSR_TSC_AUX accesses if RDTSCP unsupported
	KVM: nVMX: Defer the MMU reload to the normal path on an EPTP switch
	KVM: nVMX: Truncate bits 63:32 of VMCS field on nested check in !64-bit
	KVM: nVMX: Truncate base/index GPR value on address calc in !64-bit
	KVM: arm/arm64: Fix KVM_VGIC_V3_ADDR_TYPE_REDIST read
	KVM: Destroy I/O bus devices on unregister failure _after_ sync'ing SRCU
	KVM: Stop looking for coalesced MMIO zones if the bus is destroyed
	KVM: arm64: Fully zero the vcpu state on reset
	KVM: arm64: Fix KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION read
	Revert "drivers/net/wan/hdlc_fr: Fix a double free in pvc_xmit"
	Revert "i3c master: fix missing destroy_workqueue() on error in i3c_master_register"
	ovl: fix missing revert_creds() on error path
	Revert "drm/qxl: do not run release if qxl failed to init"
	usb: gadget: pch_udc: Revert d3cb25a121 completely
	Revert "tools/power turbostat: adjust for temperature offset"
	firmware: xilinx: Fix dereferencing freed memory
	firmware: xilinx: Add a blank line after function declaration
	firmware: xilinx: Remove zynqmp_pm_get_eemi_ops() in IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
	fpga: fpga-mgr: xilinx-spi: fix error messages on -EPROBE_DEFER
	crypto: sun8i-ss - fix result memory leak on error path
	memory: gpmc: fix out of bounds read and dereference on gpmc_cs[]
	ARM: dts: exynos: correct fuel gauge interrupt trigger level on GT-I9100
	ARM: dts: exynos: correct fuel gauge interrupt trigger level on Midas family
	ARM: dts: exynos: correct MUIC interrupt trigger level on Midas family
	ARM: dts: exynos: correct PMIC interrupt trigger level on Midas family
	ARM: dts: exynos: correct PMIC interrupt trigger level on Odroid X/U3 family
	ARM: dts: exynos: correct PMIC interrupt trigger level on SMDK5250
	ARM: dts: exynos: correct PMIC interrupt trigger level on Snow
	ARM: dts: s5pv210: correct fuel gauge interrupt trigger level on Fascinate family
	ARM: dts: renesas: Add mmc aliases into R-Car Gen2 board dts files
	arm64: dts: renesas: Add mmc aliases into board dts files
	x86/platform/uv: Set section block size for hubless architectures
	serial: stm32: fix code cleaning warnings and checks
	serial: stm32: add "_usart" prefix in functions name
	serial: stm32: fix probe and remove order for dma
	serial: stm32: Use of_device_get_match_data()
	serial: stm32: fix startup by enabling usart for reception
	serial: stm32: fix incorrect characters on console
	serial: stm32: fix TX and RX FIFO thresholds
	serial: stm32: fix a deadlock condition with wakeup event
	serial: stm32: fix wake-up flag handling
	serial: stm32: fix a deadlock in set_termios
	serial: stm32: fix tx dma completion, release channel
	serial: stm32: call stm32_transmit_chars locked
	serial: stm32: fix FIFO flush in startup and set_termios
	serial: stm32: add FIFO flush when port is closed
	serial: stm32: fix tx_empty condition
	usb: typec: tcpci: Check ROLE_CONTROL while interpreting CC_STATUS
	usb: typec: tps6598x: Fix return value check in tps6598x_probe()
	usb: typec: stusb160x: fix return value check in stusb160x_probe()
	regmap: set debugfs_name to NULL after it is freed
	spi: rockchip: avoid objtool warning
	mtd: rawnand: fsmc: Fix error code in fsmc_nand_probe()
	mtd: rawnand: brcmnand: fix OOB R/W with Hamming ECC
	mtd: Handle possible -EPROBE_DEFER from parse_mtd_partitions()
	mtd: rawnand: qcom: Return actual error code instead of -ENODEV
	mtd: don't lock when recursively deleting partitions
	mtd: maps: fix error return code of physmap_flash_remove()
	ARM: dts: stm32: fix usart 2 & 3 pinconf to wake up with flow control
	arm64: dts: qcom: sm8250: Fix level triggered PMU interrupt polarity
	arm64: dts: qcom: sm8250: Fix timer interrupt to specify EL2 physical timer
	arm64: dts: qcom: sdm845: fix number of pins in 'gpio-ranges'
	arm64: dts: qcom: sm8150: fix number of pins in 'gpio-ranges'
	arm64: dts: qcom: sm8250: fix number of pins in 'gpio-ranges'
	arm64: dts: qcom: db845c: fix correct powerdown pin for WSA881x
	crypto: sun8i-ss - Fix memory leak of object d when dma_iv fails to map
	spi: stm32: drop devres version of spi_register_master
	regulator: bd9576: Fix return from bd957x_probe()
	arm64: dts: renesas: r8a77980: Fix vin4-7 endpoint binding
	spi: stm32: Fix use-after-free on unbind
	x86/microcode: Check for offline CPUs before requesting new microcode
	devtmpfs: fix placement of complete() call
	usb: gadget: pch_udc: Replace cpu_to_le32() by lower_32_bits()
	usb: gadget: pch_udc: Check if driver is present before calling ->setup()
	usb: gadget: pch_udc: Check for DMA mapping error
	usb: gadget: pch_udc: Initialize device pointer before use
	usb: gadget: pch_udc: Provide a GPIO line used on Intel Minnowboard (v1)
	crypto: ccp - fix command queuing to TEE ring buffer
	crypto: qat - don't release uninitialized resources
	crypto: qat - ADF_STATUS_PF_RUNNING should be set after adf_dev_init
	fotg210-udc: Fix DMA on EP0 for length > max packet size
	fotg210-udc: Fix EP0 IN requests bigger than two packets
	fotg210-udc: Remove a dubious condition leading to fotg210_done
	fotg210-udc: Mask GRP2 interrupts we don't handle
	fotg210-udc: Don't DMA more than the buffer can take
	fotg210-udc: Complete OUT requests on short packets
	usb: gadget: s3c: Fix incorrect resources releasing
	usb: gadget: s3c: Fix the error handling path in 's3c2410_udc_probe()'
	dt-bindings: serial: stm32: Use 'type: object' instead of false for 'additionalProperties'
	mtd: require write permissions for locking and badblock ioctls
	arm64: dts: renesas: r8a779a0: Fix PMU interrupt
	bus: qcom: Put child node before return
	soundwire: bus: Fix device found flag correctly
	phy: ti: j721e-wiz: Delete "clk_div_sel" clk provider during cleanup
	phy: marvell: ARMADA375_USBCLUSTER_PHY should not default to y, unconditionally
	arm64: dts: mediatek: fix reset GPIO level on pumpkin
	NFSD: Fix sparse warning in nfs4proc.c
	NFSv4.2: fix copy stateid copying for the async copy
	crypto: poly1305 - fix poly1305_core_setkey() declaration
	crypto: qat - fix error path in adf_isr_resource_alloc()
	usb: gadget: aspeed: fix dma map failure
	USB: gadget: udc: fix wrong pointer passed to IS_ERR() and PTR_ERR()
	drivers: nvmem: Fix voltage settings for QTI qfprom-efuse
	driver core: platform: Declare early_platform_cleanup() prototype
	memory: pl353: fix mask of ECC page_size config register
	soundwire: stream: fix memory leak in stream config error path
	m68k: mvme147,mvme16x: Don't wipe PCC timer config bits
	firmware: qcom_scm: Make __qcom_scm_is_call_available() return bool
	firmware: qcom_scm: Reduce locking section for __get_convention()
	firmware: qcom_scm: Workaround lack of "is available" call on SC7180
	iio: adc: Kconfig: make AD9467 depend on ADI_AXI_ADC symbol
	mtd: rawnand: gpmi: Fix a double free in gpmi_nand_init
	irqchip/gic-v3: Fix OF_BAD_ADDR error handling
	staging: comedi: tests: ni_routes_test: Fix compilation error
	staging: rtl8192u: Fix potential infinite loop
	staging: fwserial: fix TIOCSSERIAL implementation
	staging: fwserial: fix TIOCGSERIAL implementation
	staging: greybus: uart: fix unprivileged TIOCCSERIAL
	soc: qcom: pdr: Fix error return code in pdr_register_listener
	PM / devfreq: Use more accurate returned new_freq as resume_freq
	clocksource/drivers/timer-ti-dm: Fix posted mode status check order
	clocksource/drivers/timer-ti-dm: Add missing set_state_oneshot_stopped
	clocksource/drivers/ingenic_ost: Fix return value check in ingenic_ost_probe()
	spi: Fix use-after-free with devm_spi_alloc_*
	spi: fsl: add missing iounmap() on error in of_fsl_spi_probe()
	soc: qcom: mdt_loader: Validate that p_filesz < p_memsz
	soc: qcom: mdt_loader: Detect truncated read of segments
	PM: runtime: Replace inline function pm_runtime_callbacks_present()
	cpuidle: Fix ARM_QCOM_SPM_CPUIDLE configuration
	ACPI: CPPC: Replace cppc_attr with kobj_attribute
	crypto: allwinner - add missing CRYPTO_ prefix
	crypto: sun8i-ss - Fix memory leak of pad
	crypto: sa2ul - Fix memory leak of rxd
	crypto: qat - Fix a double free in adf_create_ring
	cpufreq: armada-37xx: Fix setting TBG parent for load levels
	clk: mvebu: armada-37xx-periph: remove .set_parent method for CPU PM clock
	cpufreq: armada-37xx: Fix the AVS value for load L1
	clk: mvebu: armada-37xx-periph: Fix switching CPU freq from 250 Mhz to 1 GHz
	clk: mvebu: armada-37xx-periph: Fix workaround for switching from L1 to L0
	cpufreq: armada-37xx: Fix driver cleanup when registration failed
	cpufreq: armada-37xx: Fix determining base CPU frequency
	spi: spi-zynqmp-gqspi: use wait_for_completion_timeout to make zynqmp_qspi_exec_op not interruptible
	spi: spi-zynqmp-gqspi: add mutex locking for exec_op
	spi: spi-zynqmp-gqspi: transmit dummy circles by using the controller's internal functionality
	spi: spi-zynqmp-gqspi: fix incorrect operating mode in zynqmp_qspi_read_op
	spi: fsl-lpspi: Fix PM reference leak in lpspi_prepare_xfer_hardware()
	usb: gadget: r8a66597: Add missing null check on return from platform_get_resource
	USB: cdc-acm: fix unprivileged TIOCCSERIAL
	USB: cdc-acm: fix TIOCGSERIAL implementation
	tty: actually undefine superseded ASYNC flags
	tty: fix return value for unsupported ioctls
	tty: Remove dead termiox code
	tty: fix return value for unsupported termiox ioctls
	serial: core: return early on unsupported ioctls
	firmware: qcom-scm: Fix QCOM_SCM configuration
	node: fix device cleanups in error handling code
	crypto: chelsio - Read rxchannel-id from firmware
	usbip: vudc: fix missing unlock on error in usbip_sockfd_store()
	m68k: Add missing mmap_read_lock() to sys_cacheflush()
	spi: spi-zynqmp-gqspi: Fix missing unlock on error in zynqmp_qspi_exec_op()
	memory: renesas-rpc-if: fix possible NULL pointer dereference of resource
	memory: samsung: exynos5422-dmc: handle clk_set_parent() failure
	security: keys: trusted: fix TPM2 authorizations
	platform/x86: pmc_atom: Match all Beckhoff Automation baytrail boards with critclk_systems DMI table
	ARM: dts: aspeed: Rainier: Fix humidity sensor bus address
	Drivers: hv: vmbus: Use after free in __vmbus_open()
	spi: spi-zynqmp-gqspi: fix clk_enable/disable imbalance issue
	spi: spi-zynqmp-gqspi: fix hang issue when suspend/resume
	spi: spi-zynqmp-gqspi: fix use-after-free in zynqmp_qspi_exec_op
	spi: spi-zynqmp-gqspi: return -ENOMEM if dma_map_single fails
	x86/platform/uv: Fix !KEXEC build failure
	hwmon: (pmbus/pxe1610) don't bail out when not all pages are active
	Drivers: hv: vmbus: Increase wait time for VMbus unload
	PM: hibernate: x86: Use crc32 instead of md5 for hibernation e820 integrity check
	usb: dwc2: Fix host mode hibernation exit with remote wakeup flow.
	usb: dwc2: Fix hibernation between host and device modes.
	ttyprintk: Add TTY hangup callback.
	serial: omap: don't disable rs485 if rts gpio is missing
	serial: omap: fix rs485 half-duplex filtering
	xen-blkback: fix compatibility bug with single page rings
	soc: aspeed: fix a ternary sign expansion bug
	drm/tilcdc: send vblank event when disabling crtc
	drm/stm: Fix bus_flags handling
	drm/amd/display: Fix off by one in hdmi_14_process_transaction()
	drm/mcde/panel: Inverse misunderstood flag
	sched/fair: Fix shift-out-of-bounds in load_balance()
	afs: Fix updating of i_mode due to 3rd party change
	rcu: Remove spurious instrumentation_end() in rcu_nmi_enter()
	media: vivid: fix assignment of dev->fbuf_out_flags
	media: saa7134: use sg_dma_len when building pgtable
	media: saa7146: use sg_dma_len when building pgtable
	media: omap4iss: return error code when omap4iss_get() failed
	media: rkisp1: rsz: crash fix when setting src format
	media: aspeed: fix clock handling logic
	drm/probe-helper: Check epoch counter in output_poll_execute()
	media: venus: core: Fix some resource leaks in the error path of 'venus_probe()'
	media: platform: sunxi: sun6i-csi: fix error return code of sun6i_video_start_streaming()
	media: m88ds3103: fix return value check in m88ds3103_probe()
	media: docs: Fix data organization of MEDIA_BUS_FMT_RGB101010_1X30
	media: [next] staging: media: atomisp: fix memory leak of object flash
	media: atomisp: Fixed error handling path
	media: m88rs6000t: avoid potential out-of-bounds reads on arrays
	media: atomisp: Fix use after free in atomisp_alloc_css_stat_bufs()
	drm/amdkfd: fix build error with AMD_IOMMU_V2=m
	of: overlay: fix for_each_child.cocci warnings
	x86/kprobes: Fix to check non boostable prefixes correctly
	selftests: fix prepending $(OUTPUT) to $(TEST_PROGS)
	pata_arasan_cf: fix IRQ check
	pata_ipx4xx_cf: fix IRQ check
	sata_mv: add IRQ checks
	ata: libahci_platform: fix IRQ check
	seccomp: Fix CONFIG tests for Seccomp_filters
	nvme-tcp: block BH in sk state_change sk callback
	nvmet-tcp: fix incorrect locking in state_change sk callback
	clk: imx: Fix reparenting of UARTs not associated with stdout
	power: supply: bq25980: Move props from battery node
	nvme: retrigger ANA log update if group descriptor isn't found
	media: i2c: imx219: Move out locking/unlocking of vflip and hflip controls from imx219_set_stream
	media: i2c: imx219: Balance runtime PM use-count
	media: v4l2-ctrls.c: fix race condition in hdl->requests list
	vfio/fsl-mc: Re-order vfio_fsl_mc_probe()
	vfio/pci: Move VGA and VF initialization to functions
	vfio/pci: Re-order vfio_pci_probe()
	vfio/mdev: Do not allow a mdev_type to have a NULL parent pointer
	clk: zynqmp: move zynqmp_pll_set_mode out of round_rate callback
	clk: zynqmp: pll: add set_pll_mode to check condition in zynqmp_pll_enable
	drm: xlnx: zynqmp: fix a memset in zynqmp_dp_train()
	clk: qcom: a53-pll: Add missing MODULE_DEVICE_TABLE
	clk: qcom: apss-ipq-pll: Add missing MODULE_DEVICE_TABLE
	drm/amd/display: use GFP_ATOMIC in dcn20_resource_construct
	drm/radeon: Fix a missing check bug in radeon_dp_mst_detect()
	clk: uniphier: Fix potential infinite loop
	scsi: pm80xx: Increase timeout for pm80xx mpi_uninit_check()
	scsi: pm80xx: Fix potential infinite loop
	scsi: ufs: ufshcd-pltfrm: Fix deferred probing
	scsi: hisi_sas: Fix IRQ checks
	scsi: jazz_esp: Add IRQ check
	scsi: sun3x_esp: Add IRQ check
	scsi: sni_53c710: Add IRQ check
	scsi: ibmvfc: Fix invalid state machine BUG_ON()
	mailbox: sprd: Introduce refcnt when clients requests/free channels
	mfd: stm32-timers: Avoid clearing auto reload register
	nvmet-tcp: fix a segmentation fault during io parsing error
	nvme-pci: don't simple map sgl when sgls are disabled
	media: cedrus: Fix H265 status definitions
	HSI: core: fix resource leaks in hsi_add_client_from_dt()
	x86/events/amd/iommu: Fix sysfs type mismatch
	perf/amd/uncore: Fix sysfs type mismatch
	io_uring: fix overflows checks in provide buffers
	sched/debug: Fix cgroup_path[] serialization
	drivers/block/null_blk/main: Fix a double free in null_init.
	xsk: Respect device's headroom and tailroom on generic xmit path
	HID: plantronics: Workaround for double volume key presses
	perf symbols: Fix dso__fprintf_symbols_by_name() to return the number of printed chars
	ASoC: Intel: boards: sof-wm8804: add check for PLL setting
	ASoC: Intel: Skylake: Compile when any configuration is selected
	RDMA/mlx5: Fix mlx5 rates to IB rates map
	wilc1000: write value to WILC_INTR2_ENABLE register
	KVM: x86/mmu: Retry page faults that hit an invalid memslot
	Bluetooth: avoid deadlock between hci_dev->lock and socket lock
	net: lapbether: Prevent racing when checking whether the netif is running
	libbpf: Add explicit padding to bpf_xdp_set_link_opts
	bpftool: Fix maybe-uninitialized warnings
	iommu: Check dev->iommu in iommu_dev_xxx functions
	iommu/vt-d: Reject unsupported page request modes
	selftests/bpf: Re-generate vmlinux.h and BPF skeletons if bpftool changed
	libbpf: Add explicit padding to btf_dump_emit_type_decl_opts
	powerpc/fadump: Mark fadump_calculate_reserve_size as __init
	powerpc/prom: Mark identical_pvr_fixup as __init
	MIPS: fix local_irq_{disable,enable} in asmmacro.h
	ima: Fix the error code for restoring the PCR value
	inet: use bigger hash table for IP ID generation
	pinctrl: pinctrl-single: remove unused parameter
	pinctrl: pinctrl-single: fix pcs_pin_dbg_show() when bits_per_mux is not zero
	MIPS: loongson64: fix bug when PAGE_SIZE > 16KB
	ASoC: wm8960: Remove bitclk relax condition in wm8960_configure_sysclk
	iommu/arm-smmu-v3: add bit field SFM into GERROR_ERR_MASK
	RDMA/mlx5: Fix drop packet rule in egress table
	IB/isert: Fix a use after free in isert_connect_request
	powerpc: Fix HAVE_HARDLOCKUP_DETECTOR_ARCH build configuration
	MIPS/bpf: Enable bpf_probe_read{, str}() on MIPS again
	gpio: guard gpiochip_irqchip_add_domain() with GPIOLIB_IRQCHIP
	ALSA: core: remove redundant spin_lock pair in snd_card_disconnect
	net: phy: lan87xx: fix access to wrong register of LAN87xx
	udp: never accept GSO_FRAGLIST packets
	powerpc/pseries: Only register vio drivers if vio bus exists
	net/tipc: fix missing destroy_workqueue() on error in tipc_crypto_start()
	bug: Remove redundant condition check in report_bug
	RDMA/core: Fix corrupted SL on passive side
	nfc: pn533: prevent potential memory corruption
	net: hns3: Limiting the scope of vector_ring_chain variable
	mips: bmips: fix syscon-reboot nodes
	iommu/vt-d: Don't set then clear private data in prq_event_thread()
	iommu: Fix a boundary issue to avoid performance drop
	iommu/vt-d: Report right snoop capability when using FL for IOVA
	iommu/vt-d: Report the right page fault address
	iommu/vt-d: Preset Access/Dirty bits for IOVA over FL
	iommu/vt-d: Remove WO permissions on second-level paging entries
	iommu/vt-d: Invalidate PASID cache when root/context entry changed
	ALSA: usb-audio: Add error checks for usb_driver_claim_interface() calls
	HID: lenovo: Use brightness_set_blocking callback for setting LEDs brightness
	HID: lenovo: Fix lenovo_led_set_tp10ubkbd() error handling
	HID: lenovo: Check hid_get_drvdata() returns non NULL in lenovo_event()
	HID: lenovo: Map mic-mute button to KEY_F20 instead of KEY_MICMUTE
	KVM: arm64: Initialize VCPU mdcr_el2 before loading it
	ASoC: simple-card: fix possible uninitialized single_cpu local variable
	liquidio: Fix unintented sign extension of a left shift of a u16
	IB/hfi1: Use kzalloc() for mmu_rb_handler allocation
	powerpc/64s: Fix pte update for kernel memory on radix
	powerpc/perf: Fix PMU constraint check for EBB events
	powerpc: iommu: fix build when neither PCI or IBMVIO is set
	mac80211: bail out if cipher schemes are invalid
	perf vendor events amd: Fix broken L2 Cache Hits from L2 HWPF metric
	xfs: fix return of uninitialized value in variable error
	rtw88: Fix an error code in rtw_debugfs_set_rsvd_page()
	mt7601u: fix always true expression
	mt76: mt7615: fix tx skb dma unmap
	mt76: mt7915: fix tx skb dma unmap
	mt76: mt7915: fix aggr len debugfs node
	mt76: mt7615: fix mib stats counter reporting to mac80211
	mt76: mt7915: fix mib stats counter reporting to mac80211
	mt76: mt7663s: make all of packets 4-bytes aligned in sdio tx aggregation
	mt76: mt7663s: fix the possible device hang in high traffic
	KVM: PPC: Book3S HV P9: Restore host CTRL SPR after guest exit
	ovl: invalidate readdir cache on changes to dir with origin
	RDMA/qedr: Fix error return code in qedr_iw_connect()
	IB/hfi1: Fix error return code in parse_platform_config()
	RDMA/bnxt_re: Fix error return code in bnxt_qplib_cq_process_terminal()
	cxgb4: Fix unintentional sign extension issues
	net: thunderx: Fix unintentional sign extension issue
	RDMA/srpt: Fix error return code in srpt_cm_req_recv()
	RDMA/rtrs-clt: destroy sysfs after removing session from active list
	i2c: cadence: fix reference leak when pm_runtime_get_sync fails
	i2c: img-scb: fix reference leak when pm_runtime_get_sync fails
	i2c: imx-lpi2c: fix reference leak when pm_runtime_get_sync fails
	i2c: imx: fix reference leak when pm_runtime_get_sync fails
	i2c: omap: fix reference leak when pm_runtime_get_sync fails
	i2c: sprd: fix reference leak when pm_runtime_get_sync fails
	i2c: stm32f7: fix reference leak when pm_runtime_get_sync fails
	i2c: xiic: fix reference leak when pm_runtime_get_sync fails
	i2c: cadence: add IRQ check
	i2c: emev2: add IRQ check
	i2c: jz4780: add IRQ check
	i2c: mlxbf: add IRQ check
	i2c: rcar: make sure irq is not threaded on Gen2 and earlier
	i2c: rcar: protect against supurious interrupts on V3U
	i2c: rcar: add IRQ check
	i2c: sh7760: add IRQ check
	powerpc/xive: Drop check on irq_data in xive_core_debug_show()
	powerpc/xive: Fix xmon command "dxi"
	ASoC: ak5558: correct reset polarity
	net/mlx5: Fix bit-wise and with zero
	net/packet: make packet_fanout.arr size configurable up to 64K
	net/packet: remove data races in fanout operations
	drm/i915/gvt: Fix error code in intel_gvt_init_device()
	iommu/amd: Put newline after closing bracket in warning
	perf beauty: Fix fsconfig generator
	drm/amd/pm: fix error code in smu_set_power_limit()
	MIPS: pci-legacy: stop using of_pci_range_to_resource
	powerpc/pseries: extract host bridge from pci_bus prior to bus removal
	powerpc/smp: Reintroduce cpu_core_mask
	KVM: x86: dump_vmcs should not assume GUEST_IA32_EFER is valid
	rtlwifi: 8821ae: upgrade PHY and RF parameters
	wlcore: fix overlapping snprintf arguments in debugfs
	i2c: sh7760: fix IRQ error path
	i2c: mediatek: Fix wrong dma sync flag
	mwl8k: Fix a double Free in mwl8k_probe_hw
	netfilter: nft_payload: fix C-VLAN offload support
	netfilter: nftables_offload: VLAN id needs host byteorder in flow dissector
	netfilter: nftables_offload: special ethertype handling for VLAN
	vsock/vmci: log once the failed queue pair allocation
	libbpf: Initialize the bpf_seq_printf parameters array field by field
	net: ethernet: ixp4xx: Set the DMA masks explicitly
	gro: fix napi_gro_frags() Fast GRO breakage due to IP alignment check
	RDMA/cxgb4: add missing qpid increment
	RDMA/i40iw: Fix error unwinding when i40iw_hmc_sd_one fails
	ALSA: usb: midi: don't return -ENOMEM when usb_urb_ep_type_check fails
	sfc: ef10: fix TX queue lookup in TX event handling
	vsock/virtio: free queued packets when closing socket
	net: marvell: prestera: fix port event handling on init
	net: davinci_emac: Fix incorrect masking of tx and rx error channel
	mt76: mt7615: fix memleak when mt7615_unregister_device()
	crypto: ccp: Detect and reject "invalid" addresses destined for PSP
	nfp: devlink: initialize the devlink port attribute "lanes"
	net: stmmac: fix TSO and TBS feature enabling during driver open
	net: renesas: ravb: Fix a stuck issue when a lot of frames are received
	net: phy: intel-xway: enable integrated led functions
	RDMA/rxe: Fix a bug in rxe_fill_ip_info()
	RDMA/core: Add CM to restrack after successful attachment to a device
	powerpc/64: Fix the definition of the fixmap area
	ath9k: Fix error check in ath9k_hw_read_revisions() for PCI devices
	ath10k: Fix a use after free in ath10k_htc_send_bundle
	ath10k: Fix ath10k_wmi_tlv_op_pull_peer_stats_info() unlock without lock
	wlcore: Fix buffer overrun by snprintf due to incorrect buffer size
	powerpc/perf: Fix the threshold event selection for memory events in power10
	powerpc/52xx: Fix an invalid ASM expression ('addi' used instead of 'add')
	net: phy: marvell: fix m88e1011_set_downshift
	net: phy: marvell: fix m88e1111_set_downshift
	net: enetc: fix link error again
	bnxt_en: fix ternary sign extension bug in bnxt_show_temp()
	ARM: dts: uniphier: Change phy-mode to RGMII-ID to enable delay pins for RTL8211E
	arm64: dts: uniphier: Change phy-mode to RGMII-ID to enable delay pins for RTL8211E
	net: geneve: modify IP header check in geneve6_xmit_skb and geneve_xmit_skb
	selftests: net: mirror_gre_vlan_bridge_1q: Make an FDB entry static
	selftests: mlxsw: Remove a redundant if statement in tc_flower_scale test
	bnxt_en: Fix RX consumer index logic in the error path.
	KVM: VMX: Intercept FS/GS_BASE MSR accesses for 32-bit KVM
	net:emac/emac-mac: Fix a use after free in emac_mac_tx_buf_send
	selftests/bpf: Fix BPF_CORE_READ_BITFIELD() macro
	selftests/bpf: Fix field existence CO-RE reloc tests
	selftests/bpf: Fix core_reloc test runner
	bpf: Fix propagation of 32 bit unsigned bounds from 64 bit bounds
	RDMA/siw: Fix a use after free in siw_alloc_mr
	RDMA/bnxt_re: Fix a double free in bnxt_qplib_alloc_res
	net: bridge: mcast: fix broken length + header check for MRDv6 Adv.
	net:nfc:digital: Fix a double free in digital_tg_recv_dep_req
	perf tools: Change fields type in perf_record_time_conv
	perf jit: Let convert_timestamp() to be backwards-compatible
	perf session: Add swap operation for event TIME_CONV
	ia64: fix EFI_DEBUG build
	kfifo: fix ternary sign extension bugs
	mm/sl?b.c: remove ctor argument from kmem_cache_flags
	mm: memcontrol: slab: fix obtain a reference to a freeing memcg
	mm/sparse: add the missing sparse_buffer_fini() in error branch
	mm/memory-failure: unnecessary amount of unmapping
	afs: Fix speculative status fetches
	bpf: Fix alu32 const subreg bound tracking on bitwise operations
	bpf, ringbuf: Deny reserve of buffers larger than ringbuf
	bpf: Prevent writable memory-mapping of read-only ringbuf pages
	arm64: Remove arm64_dma32_phys_limit and its uses
	net: Only allow init netns to set default tcp cong to a restricted algo
	smp: Fix smp_call_function_single_async prototype
	Revert "net/sctp: fix race condition in sctp_destroy_sock"
	sctp: delay auto_asconf init until binding the first addr
	Linux 5.10.37

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5bee89c285d9dd72de967b0e70d96951ae4e06ae
Greg Kroah-Hartman committed 2021-05-14 10:40:17 +02:00
commit e054456ced
536 files changed, 5005 insertions(+), 3155 deletions(-)

@@ -77,7 +77,8 @@ required:
 - interrupts
 - clocks
-additionalProperties: false
+additionalProperties:
+  type: object
 examples:
 - |

@@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
 device to communicate with a power management controller (PMC) on a
 device to issue or respond to power management requests.
-EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
-The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
-structure. Any driver who want to communicate with PMC using EEMI APIs
-can call zynqmp_pm_get_eemi_ops().
-Example of EEMI ops::
-	/* zynqmp-firmware driver maintain all EEMI APIs */
-	struct zynqmp_eemi_ops {
-		int (*get_api_version)(u32 *version);
-		int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
-	};
-	static const struct zynqmp_eemi_ops eemi_ops = {
-		.get_api_version = zynqmp_pm_get_api_version,
-		.query_data = zynqmp_pm_query_data,
-	};
-Example of EEMI ops usage::
-	static const struct zynqmp_eemi_ops *eemi_ops;
-	u32 ret_payload[PAYLOAD_ARG_CNT];
-	int ret;
-	eemi_ops = zynqmp_pm_get_eemi_ops();
-	if (IS_ERR(eemi_ops))
-		return PTR_ERR(eemi_ops);
-	ret = eemi_ops->query_data(qdata, ret_payload);
+Any driver who wants to communicate with PMC using EEMI APIs use the
+functions provided for each function.
 IOCTL
 ------

@@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
- MEDIA_BUS_FMT_RGB101010_1X30
- 0x1018
-
- 0
- 0
-
-
- r\ :sub:`9`
- r\ :sub:`8`
- r\ :sub:`7`

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 36
+SUBLEVEL = 37
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -707,9 +707,9 @@ &i2c7 {
 multi-master;
 status = "okay";
-si7021-a20@20 {
+si7021-a20@40 {
 compatible = "silabs,si7020";
-reg = <0x20>;
+reg = <0x40>;
 };
 tmp275@48 {

@@ -136,7 +136,7 @@ battery@36 {
 compatible = "maxim,max17042";
 interrupt-parent = <&gpx2>;
-interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-0 = <&max17042_fuel_irq>;
 pinctrl-names = "default";

@@ -174,7 +174,7 @@ i2c_max77693: i2c-gpio-1 {
 max77693@66 {
 compatible = "maxim,max77693";
 interrupt-parent = <&gpx1>;
-interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&max77693_irq>;
 reg = <0x66>;
@@ -223,7 +223,7 @@ i2c_max77693_fuel: i2c-gpio-3 {
 max77693-fuel-gauge@36 {
 compatible = "maxim,max17047";
 interrupt-parent = <&gpx2>;
-interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&max77693_fuel_irq>;
 reg = <0x36>;
@@ -668,7 +668,7 @@ &i2c_7 {
 max77686: max77686_pmic@9 {
 compatible = "maxim,max77686";
 interrupt-parent = <&gpx0>;
-interrupts = <7 IRQ_TYPE_NONE>;
+interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-0 = <&max77686_irq>;
 pinctrl-names = "default";
 reg = <0x09>;

@@ -279,7 +279,7 @@ usb3503: usb3503@8 {
 max77686: pmic@9 {
 compatible = "maxim,max77686";
 interrupt-parent = <&gpx3>;
-interrupts = <2 IRQ_TYPE_NONE>;
+interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&max77686_irq>;
 reg = <0x09>;

@@ -134,7 +134,7 @@ max77686: pmic@9 {
 compatible = "maxim,max77686";
 reg = <0x09>;
 interrupt-parent = <&gpx3>;
-interrupts = <2 IRQ_TYPE_NONE>;
+interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&max77686_irq>;
 #clock-cells = <1>;

@@ -292,7 +292,7 @@ &i2c_0 {
 max77686: max77686@9 {
 compatible = "maxim,max77686";
 interrupt-parent = <&gpx3>;
-interrupts = <2 IRQ_TYPE_NONE>;
+interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&max77686_irq>;
 wakeup-source;

@@ -53,6 +53,9 @@ aliases {
 i2c11 = &i2cexio1;
 i2c12 = &i2chdmi;
 i2c13 = &i2cpwr;
+mmc0 = &mmcif1;
+mmc1 = &sdhi0;
+mmc2 = &sdhi2;
 };
 chosen {

@@ -53,6 +53,9 @@ aliases {
 i2c12 = &i2cexio1;
 i2c13 = &i2chdmi;
 i2c14 = &i2cexio4;
+mmc0 = &sdhi0;
+mmc1 = &sdhi1;
+mmc2 = &sdhi2;
 };
 chosen {

@@ -28,6 +28,8 @@ aliases {
 serial0 = &scif0;
 i2c9 = &gpioi2c2;
 i2c10 = &i2chdmi;
+mmc0 = &sdhi0;
+mmc1 = &sdhi2;
 };
 chosen {

@@ -49,6 +49,9 @@ aliases {
 i2c10 = &gpioi2c4;
 i2c11 = &i2chdmi;
 i2c12 = &i2cexio4;
+mmc0 = &sdhi0;
+mmc1 = &sdhi1;
+mmc2 = &sdhi2;
 };
 chosen {

@@ -19,6 +19,9 @@ aliases {
 i2c10 = &gpioi2c4;
 i2c11 = &i2chdmi;
 i2c12 = &i2cexio4;
+mmc0 = &mmcif0;
+mmc1 = &sdhi0;
+mmc2 = &sdhi1;
 };
 chosen {

@@ -31,6 +31,8 @@ aliases {
 serial0 = &scif2;
 i2c9 = &gpioi2c1;
 i2c10 = &i2chdmi;
+mmc0 = &mmcif0;
+mmc1 = &sdhi1;
 };
 chosen {

@@ -115,7 +115,7 @@ &fg {
 compatible = "maxim,max77836-battery";
 interrupt-parent = <&gph3>;
-interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&fg_irq>;

@@ -1806,10 +1806,15 @@ pins2 {
 usart2_idle_pins_c: usart2-idle-2 {
 pins1 {
 pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
-<STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
 <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
 };
 pins2 {
+pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
+bias-disable;
+drive-push-pull;
+slew-rate = <3>;
+};
+pins3 {
 pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
 bias-disable;
 };
@@ -1855,10 +1860,15 @@ pins2 {
 usart3_idle_pins_b: usart3-idle-1 {
 pins1 {
 pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
-<STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
 <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
 };
 pins2 {
+pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
+bias-disable;
+drive-push-pull;
+slew-rate = <0>;
+};
+pins3 {
 pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
 bias-disable;
 };
@@ -1891,10 +1901,15 @@ pins2 {
 usart3_idle_pins_c: usart3-idle-2 {
 pins1 {
 pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
-<STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
 <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
 };
 pins2 {
+pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
+bias-disable;
+drive-push-pull;
+slew-rate = <0>;
+};
+pins3 {
 pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
 bias-disable;
 };

@@ -583,7 +583,7 @@ eth: ethernet@65000000 {
 clocks = <&sys_clk 6>;
 reset-names = "ether";
 resets = <&sys_rst 6>;
-phy-mode = "rgmii";
+phy-mode = "rgmii-id";
 local-mac-address = [00 00 00 00 00 00];
 socionext,syscon-phy-mode = <&soc_glue 0>;

@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
 poly1305_init_arm(&dctx->h, key);
 dctx->s[0] = get_unaligned_le32(key + 16);

@@ -56,7 +56,7 @@ &i2c0 {
 tca6416: gpio@20 {
 compatible = "ti,tca6416";
 reg = <0x20>;
-reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
+reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
 pinctrl-names = "default";
 pinctrl-0 = <&tca6416_pins>;

@@ -1015,7 +1015,7 @@ swm: swm@c85 {
 left_spkr: wsa8810-left{
 compatible = "sdw10217201000";
 reg = <0 1>;
-powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
 #thermal-sensor-cells = <0>;
 sound-name-prefix = "SpkrLeft";
 #sound-dai-cells = <0>;
@@ -1023,7 +1023,7 @@ left_spkr: wsa8810-left{
 right_spkr: wsa8810-right{
 compatible = "sdw10217201000";
-powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
 reg = <0 2>;
 #thermal-sensor-cells = <0>;
 sound-name-prefix = "SpkrRight";

@@ -2192,7 +2192,7 @@ tlmm: pinctrl@3400000 {
 #gpio-cells = <2>;
 interrupt-controller;
 #interrupt-cells = <2>;
-gpio-ranges = <&tlmm 0 0 150>;
+gpio-ranges = <&tlmm 0 0 151>;
 wakeup-parent = <&pdc_intc>;
 cci0_default: cci0-default {

@@ -748,7 +748,7 @@ tlmm: pinctrl@3100000 {
 <0x0 0x03D00000 0x0 0x300000>;
 reg-names = "west", "east", "north", "south";
 interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
-gpio-ranges = <&tlmm 0 0 175>;
+gpio-ranges = <&tlmm 0 0 176>;
 gpio-controller;
 #gpio-cells = <2>;
 interrupt-controller;

@@ -216,7 +216,7 @@ memory@80000000 {
 pmu {
 compatible = "arm,armv8-pmuv3";
-interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
 };
 psci {
@@ -1555,7 +1555,7 @@ tlmm: pinctrl@f100000 {
 #gpio-cells = <2>;
 interrupt-controller;
 #interrupt-cells = <2>;
-gpio-ranges = <&tlmm 0 0 180>;
+gpio-ranges = <&tlmm 0 0 181>;
 wakeup-parent = <&pdc>;
 qup_i2c0_default: qup-i2c0-default {
@@ -2379,7 +2379,7 @@ timer {
 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
 <GIC_PPI 11
 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
-<GIC_PPI 12
+<GIC_PPI 10
 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
 };

@@ -12,6 +12,9 @@ / {
 aliases {
 serial0 = &scif2;
 serial1 = &hscif0;
+mmc0 = &sdhi3;
+mmc1 = &sdhi0;
+mmc2 = &sdhi2;
 };
 chosen {

@@ -21,6 +21,9 @@ aliases {
 serial4 = &hscif2;
 serial5 = &scif5;
 ethernet0 = &avb;
+mmc0 = &sdhi3;
+mmc1 = &sdhi0;
+mmc2 = &sdhi2;
 };
 chosen {

@@ -17,6 +17,8 @@ / {
 aliases {
 serial0 = &scif2;
 serial1 = &hscif2;
+mmc0 = &sdhi0;
+mmc1 = &sdhi3;
 };
 chosen {

@@ -990,8 +990,8 @@ port@1 {
 reg = <1>;
-vin4csi41: endpoint@2 {
-reg = <2>;
+vin4csi41: endpoint@3 {
+reg = <3>;
 remote-endpoint = <&csi41vin4>;
 };
 };
@@ -1018,8 +1018,8 @@ port@1 {
 reg = <1>;
-vin5csi41: endpoint@2 {
-reg = <2>;
+vin5csi41: endpoint@3 {
+reg = <3>;
 remote-endpoint = <&csi41vin5>;
 };
 };
@@ -1046,8 +1046,8 @@ port@1 {
 reg = <1>;
-vin6csi41: endpoint@2 {
-reg = <2>;
+vin6csi41: endpoint@3 {
+reg = <3>;
 remote-endpoint = <&csi41vin6>;
 };
 };
@@ -1074,8 +1074,8 @@ port@1 {
 reg = <1>;
-vin7csi41: endpoint@2 {
-reg = <2>;
+vin7csi41: endpoint@3 {
+reg = <3>;
 remote-endpoint = <&csi41vin7>;
 };
 };

@@ -16,6 +16,9 @@ / {
 aliases {
 serial0 = &scif2;
 ethernet0 = &avb;
+mmc0 = &sdhi3;
+mmc1 = &sdhi0;
+mmc2 = &sdhi1;
 };
 chosen {

@@ -50,10 +50,7 @@ extalr_clk: extalr {
 pmu_a76 {
 compatible = "arm,cortex-a76-pmu";
-interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
-<&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
-<&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
-<&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
 };
 /* External SCIF clock - to be overridden by boards that provide it */

@@ -36,6 +36,9 @@ aliases {
 serial0 = &scif2;
 serial1 = &hscif1;
 ethernet0 = &avb;
+mmc0 = &sdhi2;
+mmc1 = &sdhi0;
+mmc2 = &sdhi3;
 };
 chosen {

@@ -16,6 +16,7 @@ / {
 aliases {
 serial1 = &hscif0;
 serial2 = &scif1;
+mmc2 = &sdhi3;
 };
 clksndsel: clksndsel {

@@ -23,6 +23,8 @@ / {
 aliases {
 serial0 = &scif2;
 ethernet0 = &avb;
+mmc0 = &sdhi2;
+mmc1 = &sdhi0;
 };
 chosen {

@@ -734,7 +734,7 @@ eth: ethernet@65000000 {
 clocks = <&sys_clk 6>;
 reset-names = "ether";
 resets = <&sys_rst 6>;
-phy-mode = "rgmii";
+phy-mode = "rgmii-id";
 local-mac-address = [00 00 00 00 00 00];
 socionext,syscon-phy-mode = <&soc_glue 0>;

@@ -564,7 +564,7 @@ eth0: ethernet@65000000 {
 clocks = <&sys_clk 6>;
 reset-names = "ether";
 resets = <&sys_rst 6>;
-phy-mode = "rgmii";
+phy-mode = "rgmii-id";
 local-mac-address = [00 00 00 00 00 00];
 socionext,syscon-phy-mode = <&soc_glue 0>;
@@ -585,7 +585,7 @@ eth1: ethernet@65200000 {
 clocks = <&sys_clk 7>;
 reset-names = "ether";
 resets = <&sys_rst 7>;
-phy-mode = "rgmii";
+phy-mode = "rgmii-id";
 local-mac-address = [00 00 00 00 00 00];
 socionext,syscon-phy-mode = <&soc_glue 1>;

@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
 poly1305_init_arm64(&dctx->h, key);
 dctx->s[0] = get_unaligned_le32(key + 16);

@@ -711,6 +711,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 void kvm_arm_init_debug(void);
+void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

@@ -97,8 +97,7 @@
 #endif /* CONFIG_ARM64_FORCE_52BIT */
 extern phys_addr_t arm64_dma_phys_limit;
-extern phys_addr_t arm64_dma32_phys_limit;
-#define ARCH_LOW_ADDRESS_LIMIT ((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
+#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
 struct debug_info {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT

@@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 vcpu->arch.has_run_once = true;
+kvm_arm_vcpu_init_debug(vcpu);
+
 if (likely(irqchip_in_kernel(kvm))) {
 /*
 * Map the VGIC hardware resources before running a vcpu the

@@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}
/**
* kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
*
* @vcpu: the vcpu pointer
*
* This ensures we will trap access to:
* - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
* - Debug ROM Address (MDCR_EL2_TDRA)
* - OS related registers (MDCR_EL2_TDOSA)
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
* - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
*/
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
/*
* This also clears MDCR_EL2_E2PB_MASK to disable guest access
* to the profiling buffer.
*/
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
MDCR_EL2_TTRF |
MDCR_EL2_TPMCR |
MDCR_EL2_TDRA |
MDCR_EL2_TDOSA);
/* Is the VM being debugged by userspace? */
if (vcpu->guest_debug)
/* Route all software debug exceptions to EL2 */
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
/*
* Trap debug register access when one of the following is true:
* - Userspace is using the hardware to debug the guest
* (KVM_GUESTDBG_USE_HW is set).
* - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
*/
if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}
/**
* kvm_arm_vcpu_init_debug - setup vcpu debug traps
*
* @vcpu: the vcpu pointer
*
* Set vcpu initial mdcr_el2 value.
*/
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
preempt_disable();
kvm_arm_setup_mdcr_el2(vcpu);
preempt_enable();
}
/**
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
*/
@@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* @vcpu: the vcpu pointer
*
* This is called before each entry into the hypervisor to setup any
* debug related registers. Currently this just ensures we will trap
* access to:
* - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
* - Debug ROM Address (MDCR_EL2_TDRA)
* - OS related registers (MDCR_EL2_TDOSA)
* - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
* - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
* debug related registers.
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
/*
* This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
* to disable guest access to the profiling and trace buffers
*/
vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
MDCR_EL2_TPMS |
MDCR_EL2_TTRF |
MDCR_EL2_TPMCR |
MDCR_EL2_TDRA |
MDCR_EL2_TDOSA);
kvm_arm_setup_mdcr_el2(vcpu);
/* Is Guest debugging in effect? */
if (vcpu->guest_debug) {
/* Route all software debug exceptions to EL2 */
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
/* Save guest debug state */
save_guest_debug_regs(vcpu);
@@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
trap_debug = true;
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
&vcpu->arch.debug_ptr->dbg_bcr[0],
@@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
BUG_ON(!vcpu->guest_debug &&
vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
/* Trap debug register access */
if (trap_debug)
vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
@@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

@@ -238,6 +238,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 /* Reset core registers */
 memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
+vcpu->arch.ctxt.spsr_abt = 0;
+vcpu->arch.ctxt.spsr_und = 0;
+vcpu->arch.ctxt.spsr_irq = 0;
+vcpu->arch.ctxt.spsr_fiq = 0;
 vcpu_gp_regs(vcpu)->pstate = pstate;
 /* Reset system registers */

@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
 r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
 goto out;
 }
-rdreg = list_first_entry(&vgic->rd_regions,
-struct vgic_redist_region, list);
+rdreg = list_first_entry_or_null(&vgic->rd_regions,
+struct vgic_redist_region, list);
 if (!rdreg)
 addr_ptr = &undef_value;
 else
@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
 u64 addr;
 unsigned long type = (unsigned long)attr->attr;
+if (copy_from_user(&addr, uaddr, sizeof(addr)))
+return -EFAULT;
+
 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
 if (r)
 return (r == -ENODEV) ? -ENXIO : r;

@@ -54,13 +54,13 @@ s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
/*
* We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
* memory as some devices, namely the Raspberry Pi 4, have peripherals with
* this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
* bit addressable memory area.
* If the corresponding config options are enabled, we create both ZONE_DMA
* and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
* unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
* In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
* otherwise it is empty.
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
@@ -85,7 +85,7 @@ static void __init reserve_crashkernel(void)
if (crash_base == 0) {
/* Current arm64 boot protocol requires 2MB alignment */
crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
crash_size, SZ_2M);
if (crash_base == 0) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -190,6 +190,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
unsigned int __maybe_unused dt_zone_dma_bits;
phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
#ifdef CONFIG_ZONE_DMA
acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
@@ -199,8 +200,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = dma32_phys_limit;
#endif
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = PHYS_MASK + 1;
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init(max_zone_pfns);
@@ -397,16 +402,9 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma32_phys_limit = max_zone_phys(32);
else
arm64_dma32_phys_limit = PHYS_MASK + 1;
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -443,6 +441,11 @@ void __init bootmem_init(void)
sparse_init();
zone_sizes_init(min, max);
/*
* Reserve the CMA area after arm64_dma_phys_limit was initialised.
*/
dma_contiguous_reserve(arm64_dma_phys_limit);
/*
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
@@ -525,7 +528,7 @@ static void __init free_unused_memmap(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
else
swiotlb_force = SWIOTLB_NO_FORCE;

@@ -413,10 +413,10 @@ efi_get_pal_addr (void)
 mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
 printk(KERN_INFO "CPU %d: mapping PAL code "
-"[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
-smp_processor_id(), md->phys_addr,
-md->phys_addr + efi_md_size(md),
-vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+"[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
+smp_processor_id(), md->phys_addr,
+md->phys_addr + efi_md_size(md),
+vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
 #endif
 return __va(md->phys_addr);
 }
@@ -558,6 +558,7 @@ efi_init (void)
 {
 efi_memory_desc_t *md;
 void *p;
+unsigned int i;
 for (i = 0, p = efi_map_start; p < efi_map_end;
 ++i, p += efi_desc_size)
@@ -584,7 +585,7 @@ efi_init (void)
 }
 printk("mem%02d: %s "
-"range=[0x%016lx-0x%016lx) (%4lu%s)\n",
+"range=[0x%016llx-0x%016llx) (%4lu%s)\n",
 i, efi_md_typeattr_format(buf, sizeof(buf), md),
 md->phys_addr,
 md->phys_addr + efi_md_size(md), size, unit);

@@ -66,6 +66,9 @@ struct pcc_regs {
 #define PCC_INT_ENAB 0x08
 #define PCC_TIMER_INT_CLR 0x80
+#define PCC_TIMER_TIC_EN 0x01
+#define PCC_TIMER_COC_EN 0x02
+#define PCC_TIMER_CLR_OVF 0x04
 #define PCC_LEVEL_ABORT 0x07

@@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 ret = -EPERM;
 if (!capable(CAP_SYS_ADMIN))
 goto out;
+
+mmap_read_lock(current->mm);
 } else {
 struct vm_area_struct *vma;

@@ -116,8 +116,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
 unsigned long flags;
 local_irq_save(flags);
-m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
-m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
+m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
+PCC_TIMER_TIC_EN;
+m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
+PCC_LEVEL_TIMER1;
 clk_total += PCC_TIMER_CYCLES;
 timer_routine(0, NULL);
 local_irq_restore(flags);
@@ -135,10 +137,10 @@ void mvme147_sched_init (irq_handler_t timer_routine)
 /* Init the clock with a value */
 /* The clock counter increments until 0xFFFF then reloads */
 m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
-m147_pcc->t1_cntrl = 0x0; /* clear timer */
-m147_pcc->t1_cntrl = 0x3; /* start timer */
-m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */
-m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
+PCC_TIMER_TIC_EN;
+m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
+PCC_LEVEL_TIMER1;
 clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
 }

@@ -367,6 +367,7 @@ static u32 clk_total;
 #define PCCTOVR1_COC_EN 0x02
 #define PCCTOVR1_OVR_CLR 0x04
+#define PCCTIC1_INT_LEVEL 6
 #define PCCTIC1_INT_CLR 0x08
 #define PCCTIC1_INT_EN 0x10
@@ -376,8 +377,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
 unsigned long flags;
 local_irq_save(flags);
-out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
-out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
+out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
 clk_total += PCC_TIMER_CYCLES;
 timer_routine(0, NULL);
 local_irq_restore(flags);
@@ -391,14 +392,15 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
 int irq;
 /* Using PCCchip2 or MC2 chip tick timer 1 */
-out_be32(PCCTCNT1, 0);
-out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
-out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
-out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
 if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
 timer_routine))
 panic ("Couldn't register timer int");
+out_be32(PCCTCNT1, 0);
+out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
 clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
 if (brdno == 0x0162 || brdno == 0x172)

@@ -6,6 +6,7 @@ config MIPS
 select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
 select ARCH_HAS_FORTIFY_SOURCE
 select ARCH_HAS_KCOV
+select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
 select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 select ARCH_HAS_UBSAN_SANITIZE_ALL

@@ -59,7 +59,7 @@ clkctl: clock-controller@fff8c004 {
 periph_cntl: syscon@fff8c008 {
 compatible = "syscon";
-reg = <0xfff8c000 0x4>;
+reg = <0xfff8c008 0x4>;
 native-endian;
 };

@@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
 periph_cntl: syscon@10000008 {
 compatible = "syscon";
-reg = <0x10000000 0xc>;
+reg = <0x10000008 0x4>;
 native-endian;
 };


@ -59,7 +59,7 @@ clkctl: clock-controller@fffe0004 {
periph_cntl: syscon@fffe0008 {
compatible = "syscon";
reg = <0xfffe0000 0x4>;
reg = <0xfffe0008 0x4>;
native-endian;
};


@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
periph_cntl: syscon@10000008 {
compatible = "syscon";
reg = <0x10000000 0xc>;
reg = <0x10000008 0x4>;
native-endian;
};


@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
periph_cntl: syscon@100000008 {
compatible = "syscon";
reg = <0x10000000 0xc>;
reg = <0x10000008 0x4>;
native-endian;
};


@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_mips(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);
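
Note: the hunk above only tightens the init prototype to a fixed-size key array; callers normally reach these arch routines through the lib/crypto wrappers. Below is a minimal usage sketch, assuming the usual include/crypto/poly1305.h helpers (poly1305_init/poly1305_update/poly1305_final) and a caller that already holds a 32-byte key; the function name is invented for illustration.

    #include <crypto/poly1305.h>

    /* Hedged sketch: one-shot MAC over a buffer through the library
     * interface; poly1305_init() dispatches to poly1305_init_arch()
     * when an arch backend such as the MIPS one above is available. */
    static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
                                     const u8 *data, unsigned int len,
                                     u8 digest[POLY1305_DIGEST_SIZE])
    {
            struct poly1305_desc_ctx desc;

            poly1305_init(&desc, key);
            poly1305_update(&desc, data, len);
            poly1305_final(&desc, digest);
    }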


@ -44,8 +44,7 @@
.endm
#endif
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
defined(CONFIG_CPU_MIPSR6)
#ifdef CONFIG_CPU_HAS_DIEI
.macro local_irq_enable reg=t0
ei
irq_enable_hazard


@ -82,7 +82,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
return -ENOMEM;
range->fwnode = fwnode;
range->size = size;
range->size = size = round_up(size, PAGE_SIZE);
range->hw_start = hw_start;
range->flags = LOGIC_PIO_CPU_MMIO;


@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
res = hose->mem_resource;
break;
}
if (res != NULL)
of_pci_range_to_resource(&range, node, res);
if (res != NULL) {
res->name = node->full_name;
res->flags = range.flags;
res->start = range.cpu_addr;
res->end = range.cpu_addr + range.size - 1;
res->parent = res->child = res->sibling = NULL;
}
}
}


@ -30,6 +30,7 @@
#define RALINK_GPIOMODE 0x60
#define PPLL_CFG1 0x9c
#define PPLL_LD BIT(23)
#define PPLL_DRV 0xa0
#define PDRV_SW_SET BIT(31)
@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
mdelay(100);
if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
reset_control_assert(rstpcie0);
rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
return -1;


@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u16 cmd;
int irq = -1;
if (dev->bus->number != 0)
@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
switch (PCI_SLOT(dev->devfn)) {
case 0x00:
rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
(void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
break;
case 0x11:
irq = RT288X_CPU_IRQ_PCI;
@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
break;
}
pci_write_config_byte((struct pci_dev *) dev,
PCI_CACHE_LINE_SIZE, 0x14);
pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
dev->irq);
return irq;
}
@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
int pcibios_plat_dev_init(struct pci_dev *dev)
{
static bool slot0_init;
/*
* Nobody seems to initialize slot 0, but this platform requires it, so
* do it once when some other slot is being enabled. The PCI subsystem
* should configure other slots properly, so no need to do anything
* special for those.
*/
if (!slot0_init && dev->bus->number == 0) {
u16 cmd;
u32 bar0;
slot0_init = true;
pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
0x08000000);
pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
&bar0);
pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
}
return 0;
}


@ -222,7 +222,7 @@ config PPC
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP
select HAVE_OPROFILE
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS


@ -352,6 +352,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
config FAIL_IOMMU
bool "Fault-injection capability for IOMMU"
depends on FAULT_INJECTION
depends on PCI || IBMVIO
help
Provide fault-injection capability for IOMMU. Each device can
be selectively enabled via the fail_iommu property.


@ -7,6 +7,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#include <linux/sizes.h>
#endif
/*
@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END)
#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP


@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
* from ptesync, it should probably go into update_mmu_cache, rather
* than set_pte_at (which is used to set ptes unrelated to faults).
*
* Spurious faults to vmalloc region are not tolerated, so there is
* a ptesync in flush_cache_vmap.
* Spurious faults from the kernel memory are not tolerated, so there
* is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
* the pte update sequence from ISA Book III 6.10 Translation Table
* Update Synchronization Requirements.
*/
}


@ -23,12 +23,17 @@
#include <asm/kmap_types.h>
#endif
#ifdef CONFIG_PPC64
#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
#else
#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
#endif
/*
* Here we define all the compile-time 'special' virtual
@ -50,6 +55,7 @@
*/
enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_PPC32
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
@ -72,6 +78,7 @@ enum fixed_addresses {
FIX_IMMR_SIZE,
#endif
/* FIX_PCIE_MCFG, */
#endif /* CONFIG_PPC32 */
__end_of_permanent_fixed_addresses,
#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
@ -98,6 +105,8 @@ enum fixed_addresses {
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
if (__builtin_constant_p(idx))
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses))


@ -6,6 +6,8 @@
* the ppc64 non-hashed page table.
*/
#include <linux/sizes.h>
#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
@ -54,7 +56,8 @@
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE SZ_32M
/*


@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
return per_cpu(cpu_sibling_map, cpu);
}
static inline struct cpumask *cpu_core_mask(int cpu)
{
return per_cpu(cpu_core_map, cpu);
}
static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
return per_cpu(cpu_l2_cache_map, cpu);


@ -292,7 +292,7 @@ static void fadump_show_config(void)
* that is required for a kernel to boot successfully.
*
*/
static inline u64 fadump_calculate_reserve_size(void)
static __init u64 fadump_calculate_reserve_size(void)
{
u64 base, size, bootmem_min;
int ret;


@ -268,7 +268,7 @@ static struct feature_property {
};
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
static __init void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
const char *model = of_get_flat_dt_prop(node, "model", NULL);


@ -975,17 +975,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
local_memory_node(numa_cpu_lookup_table[cpu]));
}
#endif
/*
* cpu_core_map is now more updated and exists only since
* its been exported for long. It only will have a snapshot
* of cpu_cpu_mask.
*/
cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
}
/* Init the cpumasks so the boot CPU is related to itself */
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (has_coregroup_support())
cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
@ -1304,6 +1299,9 @@ static void remove_cpu_from_masks(int cpu)
set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
}
for_each_cpu(i, cpu_core_mask(cpu))
set_cpus_unrelated(cpu, i, cpu_core_mask);
if (has_coregroup_support()) {
for_each_cpu(i, cpu_coregroup_mask(cpu))
set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
@ -1364,8 +1362,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
static void add_cpu_to_masks(int cpu)
{
struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
int first_thread = cpu_first_thread_sibling(cpu);
int chip_id = cpu_to_chip_id(cpu);
cpumask_var_t mask;
bool ret;
int i;
/*
@ -1381,12 +1382,36 @@ static void add_cpu_to_masks(int cpu)
add_cpu_to_smallcore_masks(cpu);
/* In CPU-hotplug path, hence use GFP_ATOMIC */
alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
update_mask_by_l2(cpu, &mask);
if (has_coregroup_support())
update_coregroup_mask(cpu, &mask);
if (chip_id == -1 || !ret) {
cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
goto out;
}
if (shared_caches)
submask_fn = cpu_l2_cache_mask;
/* Update core_mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
/* Skip all CPUs already part of current CPU core mask */
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
for_each_cpu(i, mask) {
if (chip_id == cpu_to_chip_id(i)) {
or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
cpumask_andnot(mask, mask, submask_fn(i));
} else {
cpumask_andnot(mask, mask, cpu_core_mask(i));
}
}
out:
free_cpumask_var(mask);
}
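
The loop added above builds cpu_core_mask incrementally: start from the sibling (or, with shared caches, the L2) submask, then OR in the submask of every online CPU on the same chip, skipping whole groups that are already covered. The following is a simplified standalone model of that accumulation, using plain 64-bit masks and invented topology data; it is an illustration only, not the kernel code, and it only updates the mask of the CPU being brought up, whereas the kernel's or_cpumasks_related() also keeps the other CPUs' masks in sync.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CPUS 8                      /* toy system: 2 chips x 4 CPUs */

    static const int chip_id_of[MAX_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
    static uint64_t submask[MAX_CPUS];      /* stand-in for sibling/L2 masks */
    static uint64_t core_mask[MAX_CPUS];

    static void add_cpu_to_core_mask(int cpu, uint64_t online)
    {
            uint64_t todo;
            int i;

            core_mask[cpu] |= submask[cpu];
            todo = online & ~core_mask[cpu];        /* skip CPUs already covered */

            for (i = 0; i < MAX_CPUS; i++) {
                    if (!(todo & (1ULL << i)))
                            continue;
                    if (chip_id_of[i] == chip_id_of[cpu]) {
                            core_mask[cpu] |= submask[i];
                            todo &= ~submask[i];    /* whole submask handled at once */
                    } else {
                            todo &= ~core_mask[i];  /* other chip: drop its core mask */
                    }
            }
    }

    int main(void)
    {
            uint64_t online = (1ULL << MAX_CPUS) - 1;
            int i;

            for (i = 0; i < MAX_CPUS; i++)          /* submask: the CPU and its pair */
                    submask[i] = 3ULL << (i & ~1);

            for (i = 0; i < MAX_CPUS; i++)
                    add_cpu_to_core_mask(i, online);

            for (i = 0; i < MAX_CPUS; i++)
                    printf("cpu%d core_mask=0x%02llx\n", i,
                           (unsigned long long)core_mask[i]);
            return 0;
    }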


@ -3697,7 +3697,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.dec_expires = dec + tb;
vcpu->cpu = -1;
vcpu->arch.thread_cpu = -1;
/* Save guest CTRL register, set runlatch to 1 */
vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
if (!(vcpu->arch.ctrl & 1))
mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
vcpu->arch.iamr = mfspr(SPRN_IAMR);
vcpu->arch.pspb = mfspr(SPRN_PSPB);


@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
smp_wmb();
asm volatile("ptesync": : :"memory");
return 0;
}
@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
smp_wmb();
asm volatile("ptesync": : :"memory");
return 0;
}


@ -400,8 +400,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
* EBB events are pinned & exclusive, so this should never actually
* hit, but we leave it as a fallback in case.
*/
mask |= CNST_EBB_VAL(ebb);
value |= CNST_EBB_MASK;
mask |= CNST_EBB_MASK;
value |= CNST_EBB_VAL(ebb);
*maskp = mask;
*valp = value;


@ -66,5 +66,5 @@ EVENT(PM_RUN_INST_CMPL_ALT, 0x00002);
* thresh end (TE)
*/
EVENT(MEM_LOADS, 0x34340401e0);
EVENT(MEM_STORES, 0x343c0401e0);
EVENT(MEM_LOADS, 0x35340401e0);
EVENT(MEM_STORES, 0x353c0401e0);


@ -181,7 +181,7 @@ sram_code:
udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
mullw r12, r12, r11
mftb r13 /* start */
addi r12, r13, r12 /* end */
add r12, r13, r12 /* end */
1:
mftb r13 /* current */
cmp cr0, r13, r12


@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
int remove_phb_dynamic(struct pci_controller *phb)
{
struct pci_bus *b = phb->bus;
struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
struct resource *res;
int rc, i;
@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
/* Remove the PCI bus and unregister the bridge device from sysfs */
phb->bus = NULL;
pci_remove_bus(b);
device_unregister(b->bridge);
host_bridge->bus = NULL;
device_unregister(&host_bridge->dev);
/* Now release the IO resource */
if (res->flags & IORESOURCE_IO)


@ -1286,6 +1286,10 @@ static int vio_bus_remove(struct device *dev)
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
{
// vio_bus_type is only initialised for pseries
if (!machine_is(pseries))
return -ENODEV;
pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
/* fill in 'struct driver' fields */


@ -261,17 +261,20 @@ notrace void xmon_xive_do_dump(int cpu)
xmon_printf("\n");
}
static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
return irq ? irq_get_irq_data(irq) : NULL;
}
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
struct irq_chip *chip = irq_data_get_irq_chip(d);
int rc;
u32 target;
u8 prio;
u32 lirq;
if (!is_xive_irq(chip))
return -EINVAL;
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
@ -281,6 +284,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
if (!d)
d = xive_get_irq_data(hw_irq);
if (d) {
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
u64 val = xive_esb_read(xd, XIVE_ESB_GET);
@ -1606,6 +1612,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
u32 target;
u8 prio;
u32 lirq;
struct xive_irq_data *xd;
u64 val;
if (!is_xive_irq(chip))
return;
@ -1619,17 +1627,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
if (d) {
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
u64 val = xive_esb_read(xd, XIVE_ESB_GET);
seq_printf(m, "flags=%c%c%c PQ=%c%c",
xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
val & XIVE_ESB_VAL_P ? 'P' : '-',
val & XIVE_ESB_VAL_Q ? 'Q' : '-');
}
xd = irq_data_get_irq_handler_data(d);
val = xive_esb_read(xd, XIVE_ESB_GET);
seq_printf(m, "flags=%c%c%c PQ=%c%c",
xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
val & XIVE_ESB_VAL_P ? 'P' : '-',
val & XIVE_ESB_VAL_Q ? 'Q' : '-');
seq_puts(m, "\n");
}


@ -925,9 +925,9 @@ static int __init setup_hwcaps(void)
if (MACHINE_HAS_VX) {
elf_hwcap |= HWCAP_S390_VXRS;
if (test_facility(134))
elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_BCD;
if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(148))
elf_hwcap |= HWCAP_S390_VXRS_EXT2;
if (test_facility(152))


@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
* kvm_s390_shadow_tables - walk the guest page table and create shadow tables
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @pgt: pointer to the page table address result
* @pgt: pointer to the beginning of the page table for the given address if
* successful (return value 0), or to the first invalid DAT entry in
* case of exceptions (return value > 0)
* @fake: pgt references contiguous guest memory block, not a pgtable
*/
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
rfte.val = ptr;
goto shadow_r2t;
}
*pgt = ptr + vaddr.rfx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
if (rc)
return rc;
@ -1060,6 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
rste.val = ptr;
goto shadow_r3t;
}
*pgt = ptr + vaddr.rsx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
if (rc)
return rc;
@ -1087,6 +1091,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
rtte.val = ptr;
goto shadow_sgt;
}
*pgt = ptr + vaddr.rtx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
if (rc)
return rc;
@ -1123,6 +1128,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
ste.val = ptr;
goto shadow_pgt;
}
*pgt = ptr + vaddr.sx * 8;
rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
if (rc)
return rc;
@ -1157,6 +1163,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
* @vcpu: virtual cpu
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @datptr: will contain the address of the faulting DAT table entry, or of
* the valid leaf, plus some flags
*
* Returns: - 0 if the shadow fault was successfully resolved
* - > 0 (pgm exception code) on exceptions while faulting
@ -1165,11 +1173,11 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
* - -ENOMEM if out of memory
*/
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
unsigned long saddr)
unsigned long saddr, unsigned long *datptr)
{
union vaddress vaddr;
union page_table_entry pte;
unsigned long pgt;
unsigned long pgt = 0;
int dat_protection, fake;
int rc;
@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
pte.val = pgt + vaddr.px * PAGE_SIZE;
goto shadow_page;
}
if (!rc)
rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
switch (rc) {
case PGM_SEGMENT_TRANSLATION:
case PGM_REGION_THIRD_TRANS:
case PGM_REGION_SECOND_TRANS:
case PGM_REGION_FIRST_TRANS:
pgt |= PEI_NOT_PTE;
break;
case 0:
pgt += vaddr.px * 8;
rc = gmap_read_table(sg->parent, pgt, &pte.val);
}
if (datptr)
*datptr = pgt | dat_protection * PEI_DAT_PROT;
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
if (!rc && pte.z)


@ -16,6 +16,23 @@
#include <linux/ptrace.h>
#include "kvm-s390.h"
/**
* kvm_s390_real_to_abs - convert guest real address to guest absolute address
* @prefix - guest prefix
* @gra - guest real address
*
* Returns the guest absolute address that corresponds to the passed guest real
* address @gra of by applying the given prefix.
*/
static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
{
if (gra < 2 * PAGE_SIZE)
gra += prefix;
else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
gra -= prefix;
return gra;
}
/**
* kvm_s390_real_to_abs - convert guest real address to guest absolute address
* @vcpu - guest virtual cpu
@ -27,13 +44,30 @@
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
unsigned long gra)
{
unsigned long prefix = kvm_s390_get_prefix(vcpu);
return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
}
if (gra < 2 * PAGE_SIZE)
gra += prefix;
else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
gra -= prefix;
return gra;
/**
* _kvm_s390_logical_to_effective - convert guest logical to effective address
* @psw: psw of the guest
* @ga: guest logical address
*
* Convert a guest logical address to an effective address by applying the
* rules of the addressing mode defined by bits 31 and 32 of the given PSW
* (extendended/basic addressing mode).
*
* Depending on the addressing mode, the upper 40 bits (24 bit addressing
* mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
* mode) of @ga will be zeroed and the remaining bits will be returned.
*/
static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
unsigned long ga)
{
if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
return ga;
if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
return ga & ((1UL << 31) - 1);
return ga & ((1UL << 24) - 1);
}
/**
@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
unsigned long ga)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
return ga;
if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
return ga & ((1UL << 31) - 1);
return ga & ((1UL << 24) - 1);
return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
}
/*
@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
/* MVPG PEI indication bits */
#define PEI_DAT_PROT 2
#define PEI_NOT_PTE 4
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
unsigned long saddr);
unsigned long saddr, unsigned long *datptr);
#endif /* __KVM_S390_GACCESS_H */
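
The two private helpers introduced above are pure address arithmetic, so their effect is easy to check in isolation. Below is a small standalone sketch that mirrors the prefix-swap rule of _kvm_s390_real_to_abs() and the addressing-mode masking of _kvm_s390_logical_to_effective(); it is a userspace illustration, and the 4 KiB page size, helper names and sample values are assumptions made for the example.

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL      /* assumption: 4 KiB pages, as on s390 */

    /* Mirror of the prefix-swap rule: the first 8 KiB of real storage and
     * the 8 KiB block at the prefix address trade places in the absolute
     * view; everything else is unchanged. */
    static unsigned long real_to_abs(unsigned long prefix, unsigned long gra)
    {
            if (gra < 2 * PAGE_SIZE)
                    gra += prefix;
            else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
                    gra -= prefix;
            return gra;
    }

    /* Mirror of the addressing-mode masking: 64-bit keeps the address,
     * 31-bit keeps the low 31 bits, 24-bit keeps the low 24 bits. */
    static unsigned long logical_to_effective(int amode_bits, unsigned long ga)
    {
            if (amode_bits == 64)
                    return ga;
            if (amode_bits == 31)
                    return ga & ((1UL << 31) - 1);
            return ga & ((1UL << 24) - 1);
    }

    int main(void)
    {
            unsigned long prefix = 0x20000;    /* example prefix */

            assert(real_to_abs(prefix, 0x00000) == 0x20000); /* low pages -> prefix area */
            assert(real_to_abs(prefix, 0x20000) == 0x00000); /* prefix area -> low pages */
            assert(real_to_abs(prefix, 0x50000) == 0x50000); /* everything else unchanged */

            assert(logical_to_effective(24, 0x12345678UL) == 0x345678UL);
            assert(logical_to_effective(31, 0xABCDEF1234UL) == 0x4DEF1234UL);
            assert(logical_to_effective(64, 0xABCDEF1234UL) == 0xABCDEF1234UL);

            puts("address-conversion sketch: all checks passed");
            return 0;
    }

The swap is symmetric: only the low 8 KiB of real storage and the 8 KiB block starting at the prefix trade places, while every other address passes through unchanged.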


@ -4308,16 +4308,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
if (MACHINE_HAS_GS) {
preempt_disable();
__ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
preempt_disable();
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
preempt_enable();
if (!vcpu->arch.host_gscb)
__ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
preempt_enable();
}
/* SIE will save etoken directly into SDNX and therefore kvm_run */
}


@ -416,11 +416,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
memcpy((void *)((u64)scb_o + 0xc0),
(void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
break;
case ICPT_PARTEXEC:
/* MVPG only */
memcpy((void *)((u64)scb_o + 0xc0),
(void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
break;
}
if (scb_s->ihcpu != 0xffffU)
@ -619,10 +614,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* with mso/msl, the prefix lies at offset *mso* */
prefix += scb_s->mso;
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
if (!rc && (scb_s->ecb & ECB_TE))
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
prefix + PAGE_SIZE);
prefix + PAGE_SIZE, NULL);
/*
* We don't have to mprotect, we will be called for all unshadows.
* SIE will detect if protection applies and trigger a validity.
@ -913,7 +908,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
current->thread.gmap_addr, 1);
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
current->thread.gmap_addr);
current->thread.gmap_addr, NULL);
if (rc > 0) {
rc = inject_fault(vcpu, rc,
current->thread.gmap_addr,
@ -935,7 +930,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
{
if (vsie_page->fault_addr)
kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
vsie_page->fault_addr);
vsie_page->fault_addr, NULL);
vsie_page->fault_addr = 0;
}
@ -982,6 +977,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return 0;
}
/*
* Get a register for a nested guest.
* @vcpu the vcpu of the guest
* @vsie_page the vsie_page for the nested guest
* @reg the register number, the upper 4 bits are ignored.
* returns: the value of the register.
*/
static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
{
/* no need to validate the parameter and/or perform error handling */
reg &= 0xf;
switch (reg) {
case 15:
return vsie_page->scb_s.gg15;
case 14:
return vsie_page->scb_s.gg14;
default:
return vcpu->run->s.regs.gprs[reg];
}
}
static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
unsigned long pei_dest, pei_src, src, dest, mask, prefix;
u64 *pei_block = &vsie_page->scb_o->mcic;
int edat, rc_dest, rc_src;
union ctlreg0 cr0;
cr0.val = vcpu->arch.sie_block->gcr[0];
edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
/*
* Either everything went well, or something non-critical went wrong
* e.g. because of a race. In either case, simply retry.
*/
if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
retry_vsie_icpt(vsie_page);
return -EAGAIN;
}
/* Something more serious went wrong, propagate the error */
if (rc_dest < 0)
return rc_dest;
if (rc_src < 0)
return rc_src;
/* The only possible suppressing exception: just deliver it */
if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
clear_vsie_icpt(vsie_page);
rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
WARN_ON_ONCE(rc_dest);
return 1;
}
/*
* Forward the PEI intercept to the guest if it was a page fault, or
* also for segment and region table faults if EDAT applies.
*/
if (edat) {
rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
} else {
rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
}
if (!rc_dest && !rc_src) {
pei_block[0] = pei_dest;
pei_block[1] = pei_src;
return 1;
}
retry_vsie_icpt(vsie_page);
/*
* The host has edat, and the guest does not, or it was an ASCE type
* exception. The host needs to inject the appropriate DAT interrupts
* into the guest.
*/
if (rc_dest)
return inject_fault(vcpu, rc_dest, dest, 1);
return inject_fault(vcpu, rc_src, src, 0);
}
/*
* Run the vsie on a shadow scb and a shadow gmap, without any further
* sanity checks, handling SIE faults.
@ -1068,6 +1155,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if ((scb_s->ipa & 0xf000) != 0xf000)
scb_s->ipa += 0x1000;
break;
case ICPT_PARTEXEC:
if (scb_s->ipa == 0xb254)
rc = vsie_handle_mvpg(vcpu, vsie_page);
break;
}
return rc;
}


@ -569,6 +569,7 @@ config X86_UV
depends on X86_EXTENDED_PLATFORM
depends on NUMA
depends on EFI
depends on KEXEC_CORE
depends on X86_X2APIC
depends on PCI
help


@ -16,7 +16,7 @@
#include <asm/simd.h>
asmlinkage void poly1305_init_x86_64(void *ctx,
const u8 key[POLY1305_KEY_SIZE]);
const u8 key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
const size_t len, const u32 padbit);
asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
state->is_base2_26 = 0;
}
static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
{
poly1305_init_x86_64(ctx, key);
}
@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
poly1305_emit_avx(ctx, mac, nonce);
}
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_simd_init(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(&key[16]);


@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
};
struct amd_iommu_event_desc {
struct kobj_attribute attr;
struct device_attribute attr;
const char *event;
};
static ssize_t _iommu_event_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
static ssize_t _iommu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct amd_iommu_event_desc *event =
container_of(attr, struct amd_iommu_event_desc, attr);


@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
};
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
static ssize_t __uncore_##_var##_show(struct device *dev, \
struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
static struct kobj_attribute format_attr_##_var = \
static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35");
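
Switching these generated attributes from kobj_attribute to device_attribute matches how sysfs actually invokes them: the files live under the PMU's struct device, so the show callback receives a device pointer. For context, here is a hedged sketch of how such a macro-generated attribute is typically wired into a sysfs group; the array and group names are shortened for illustration and are not the driver's exact tables.

    /* Hedged sketch: export the device_attribute generated above
     * (format_attr_event12) by placing its embedded struct attribute
     * into an attribute_group. */
    static struct attribute *example_format_attrs[] = {
            &format_attr_event12.attr,
            NULL,
    };

    static const struct attribute_group example_format_group = {
            .name  = "format",
            .attrs = example_format_attrs,
    };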


@ -1652,6 +1652,9 @@ static __init int uv_system_init_hubless(void)
if (rc < 0)
return rc;
/* Set section block size for current node memory */
set_block_size();
/* Create user access node */
if (rc >= 0)
uv_setup_proc_files(1);


@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
if (val != 1)
return size;
tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
if (tmp_ret != UCODE_NEW)
return size;
get_online_cpus();
ret = check_online_cpus();
if (ret)
goto put;
tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
if (tmp_ret != UCODE_NEW)
goto put;
mutex_lock(&microcode_mutex);
ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);


@ -31,8 +31,8 @@
* - inform the user about the firmware's notion of memory layout
* via /sys/firmware/memmap
*
* - the hibernation code uses it to generate a kernel-independent MD5
* fingerprint of the physical memory layout of a system.
* - the hibernation code uses it to generate a kernel-independent CRC32
* checksum of the physical memory layout of a system.
*
* - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
* passed to us by the bootloader - the major difference between


@ -159,6 +159,8 @@ NOKPROBE_SYMBOL(skip_prefixes);
int can_boost(struct insn *insn, void *addr)
{
kprobe_opcode_t opcode;
insn_byte_t prefix;
int i;
if (search_exception_tables((unsigned long)addr))
return 0; /* Page fault may occur on this address. */
@ -171,9 +173,14 @@ int can_boost(struct insn *insn, void *addr)
if (insn->opcode.nbytes != 1)
return 0;
/* Can't boost Address-size override prefix */
if (unlikely(inat_is_address_size_prefix(insn->attr)))
return 0;
for_each_insn_prefix(insn, i, prefix) {
insn_attr_t attr;
attr = inat_get_opcode_attribute(prefix);
/* Can't boost Address-size override prefix and CS override prefix */
if (prefix == 0x2e || inat_is_address_size_prefix(attr))
return 0;
}
opcode = insn->opcode.bytes[0];
@ -198,8 +205,8 @@ int can_boost(struct insn *insn, void *addr)
/* clear and set flags are boostable */
return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
default:
/* CS override prefix and call are not boostable */
return (opcode != 0x2e && opcode != 0x9a);
/* call is not boostable */
return opcode != 0x9a;
}
}


@ -452,47 +452,12 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
/*
* Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
*
* These are Intel CPUs that enumerate an LLC that is shared by
* multiple NUMA nodes. The LLC on these systems is shared for
* off-package data access but private to the NUMA node (half
* of the package) for on-package access.
*
* CPUID (the source of the information about the LLC) can only
* enumerate the cache as being shared *or* unshared, but not
* this particular configuration. The CPU in this case enumerates
* the cache to be shared across the entire package (spanning both
* NUMA nodes).
*/
static const struct x86_cpu_id snc_cpu[] = {
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{}
};
static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
/* Do not match if we do not have a valid APICID for cpu: */
if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
return false;
/* Do not match if LLC id does not match: */
if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
return false;
/*
* Allow the SNC topology without warning. Return of false
* means 'c' does not share the LLC of 'o'. This will be
* reflected to userspace.
*/
if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
return false;
return topology_sane(c, o, "llc");
if (c->phys_proc_id == o->phys_proc_id &&
c->cpu_die_id == o->cpu_die_id)
return true;
return false;
}
/*
@ -507,12 +472,50 @@ static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
/*
* Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
*
* Any Intel CPU that has multiple nodes per package and does not
* match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
*
* When in SNC mode, these CPUs enumerate an LLC that is shared
* by multiple NUMA nodes. The LLC is shared for off-package data
* access but private to the NUMA node (half of the package) for
* on-package access. CPUID (the source of the information about
* the LLC) can only enumerate the cache as shared or unshared,
* but not this particular configuration.
*/
static const struct x86_cpu_id intel_cod_cpu[] = {
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */
X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */
{}
};
static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
if ((c->phys_proc_id == o->phys_proc_id) &&
(c->cpu_die_id == o->cpu_die_id))
return true;
return false;
const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
bool intel_snc = id && id->driver_data;
/* Do not match if we do not have a valid APICID for cpu: */
if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
return false;
/* Do not match if LLC id does not match: */
if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
return false;
/*
* Allow the SNC topology without warning. Return of false
* means 'c' does not share the LLC of 'o'. This will be
* reflected to userspace.
*/
if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
return false;
return topology_sane(c, o, "llc");
}
@ -586,14 +589,23 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
if (match_pkg(c, o) && !topology_same_node(c, o))
x86_has_numa_in_package = true;
if ((i == cpu) || (has_smt && match_smt(c, o)))
link_mask(topology_sibling_cpumask, cpu, i);
if ((i == cpu) || (has_mp && match_llc(c, o)))
link_mask(cpu_llc_shared_mask, cpu, i);
if ((i == cpu) || (has_mp && match_die(c, o)))
link_mask(topology_die_cpumask, cpu, i);
}
threads = cpumask_weight(topology_sibling_cpumask(cpu));
if (threads > __max_smt_threads)
__max_smt_threads = threads;
/*
* This needs a separate iteration over the cpus because we rely on all
* topology_sibling_cpumask links to be set-up.
@ -607,8 +619,7 @@ void set_cpu_sibling_map(int cpu)
/*
* Does this new cpu bringup a new core?
*/
if (cpumask_weight(
topology_sibling_cpumask(cpu)) == 1) {
if (threads == 1) {
/*
* for each core in package, increment
* the booted_cores for this new cpu
@ -625,16 +636,7 @@ void set_cpu_sibling_map(int cpu)
} else if (i != cpu && !c->booted_cores)
c->booted_cores = cpu_data(i).booted_cores;
}
if (match_pkg(c, o) && !topology_same_node(c, o))
x86_has_numa_in_package = true;
if ((i == cpu) || (has_mp && match_die(c, o)))
link_mask(topology_die_cpumask, cpu, i);
}
threads = cpumask_weight(topology_sibling_cpumask(cpu));
if (threads > __max_smt_threads)
__max_smt_threads = threads;
}
/* maps the cpu to the sched domain representing multi-core */


@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
}
}
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
0xffffffff00000000ULL,
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
0, 0, 0,
CR8_RESERVED_BITS,
};
if (!valid_cr(cr))
return emulate_ud(ctxt);
if (new_val & cr_reserved_bits[cr])
return emulate_gp(ctxt, 0);
switch (cr) {
case 0: {
u64 cr4;
if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
return emulate_gp(ctxt, 0);
cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
!(cr4 & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
case 3: {
u64 rsvd = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA) {
u64 maxphyaddr;
u32 eax, ebx, ecx, edx;
eax = 0x80000008;
ecx = 0;
if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
&edx, true))
maxphyaddr = eax & 0xff;
else
maxphyaddr = 36;
rsvd = rsvd_bits(maxphyaddr, 63);
if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
rsvd &= ~X86_CR3_PCID_NOFLUSH;
}
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
break;
}
case 4: {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
}
return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
check_cr_write),
check_cr_access),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,


@ -3195,14 +3195,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
(mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
} else {
} else if (mmu->pae_root) {
for (i = 0; i < 4; ++i)
if (mmu->pae_root[i] != 0)
mmu_free_root_page(kvm,
&mmu->pae_root[i],
&invalid_list);
mmu->root_hpa = INVALID_PAGE;
}
mmu->root_hpa = INVALID_PAGE;
mmu->root_pgd = 0;
}
@ -3314,9 +3314,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* the shadow page table may be a PAE or a long mode page table.
*/
pm_mask = PT_PRESENT_MASK;
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
/*
* Allocate the page for the PDPTEs when shadowing 32-bit NPT
* with 64-bit only when needed. Unlike 32-bit NPT, it doesn't
* need to be in low mem. See also lm_root below.
*/
if (!vcpu->arch.mmu->pae_root) {
WARN_ON_ONCE(!tdp_enabled);
vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.mmu->pae_root)
return -ENOMEM;
}
}
for (i = 0; i < 4; ++i) {
MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
@ -3339,21 +3353,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
/*
* If we shadow a 32 bit page table with a long mode page
* table we enter this path.
* When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
* tables are allocated and initialized at MMU creation as there is no
* equivalent level in the guest's NPT to shadow. Allocate the tables
* on demand, as running a 32-bit L1 VMM is very rare. The PDP is
* handled above (to share logic with PAE), deal with the PML4 here.
*/
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
if (vcpu->arch.mmu->lm_root == NULL) {
/*
* The additional page necessary for this is only
* allocated on demand.
*/
u64 *lm_root;
lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (lm_root == NULL)
return 1;
if (!lm_root)
return -ENOMEM;
lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
@ -3651,6 +3663,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
bool async;
/*
* Retry the page fault if the gfn hit a memslot that is being deleted
* or moved. This ensures any existing SPTEs for the old memslot will
* be zapped before KVM inserts a new MMIO SPTE for the gfn.
*/
if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
return true;
/* Don't expose private memslots to L2. */
if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
*pfn = KVM_PFN_NOSLOT;
@ -4605,12 +4625,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
context->shadow_root_level = new_role.base.level;
__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
if (new_role.as_u64 != context->mmu_role.as_u64)
if (new_role.as_u64 != context->mmu_role.as_u64) {
shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
/*
* Override the level set by the common init helper, nested TDP
* always uses the host's TDP configuration.
*/
context->shadow_root_level = new_role.base.level;
}
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@ -5297,9 +5322,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
* while the PDP table is a per-vCPU construct that's allocated at MMU
* creation. When emulating 32-bit mode, cr3 is only 32 bits even on
* x86_64. Therefore we need to allocate the PDP table in the first
* 4GB of memory, which happens to fit the DMA32 zone. Except for
* SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
* skip allocating the PDP table.
* 4GB of memory, which happens to fit the DMA32 zone. TDP paging
* generally doesn't use PAE paging and can skip allocating the PDP
* table. The main exception, handled here, is SVM's 32-bit NPT. The
* other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
* KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
*/
if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
return 0;


@ -168,6 +168,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
int asid, ret;
if (kvm->created_vcpus)
return -EINVAL;
ret = -EBUSY;
if (unlikely(sev->active))
return ret;


@ -1805,7 +1805,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
static int pf_interception(struct vcpu_svm *svm)
{
u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
u64 fault_address = svm->vmcb->control.exit_info_2;
u64 error_code = svm->vmcb->control.exit_info_1;
return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
@ -2519,6 +2519,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_TSC_AUX:
if (!boot_cpu_has(X86_FEATURE_RDTSCP))
return 1;
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1;
msr_info->data = svm->tsc_aux;
break;
/*
@ -2713,6 +2716,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (!boot_cpu_has(X86_FEATURE_RDTSCP))
return 1;
if (!msr->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
return 1;
/*
* This is rare, so we update the MSR here instead of using
* direct_access_msrs. Doing that would require a rdmsr in


@ -618,6 +618,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
}
/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
#ifdef CONFIG_X86_64
nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
MSR_FS_BASE, MSR_TYPE_RW);
@ -626,6 +627,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
/*
* Checking the L0->L1 bitmap is trying to verify two things:
@ -4613,9 +4615,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
else if (addr_size == 0)
off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
off += kvm_register_readl(vcpu, base_reg);
if (index_is_valid)
off += kvm_register_read(vcpu, index_reg) << scaling;
off += kvm_register_readl(vcpu, index_reg) << scaling;
vmx_get_segment(vcpu, &s, seg_reg);
/*
@ -5491,16 +5493,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
if (!nested_vmx_check_eptp(vcpu, new_eptp))
return 1;
kvm_mmu_unload(vcpu);
mmu->ept_ad = accessed_dirty;
mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = new_eptp;
/*
* TODO: Check what's the correct approach in case
* mmu reload fails. Currently, we just let the next
* reload potentially fail
*/
kvm_mmu_reload(vcpu);
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
return 0;
@ -5729,7 +5726,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
/* Decode instruction info and find the field to access */
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Out-of-range fields always cause a VM exit from L2 to L1 */
if (field >> 15)


@ -156,9 +156,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
MSR_IA32_SPEC_CTRL,
MSR_IA32_PRED_CMD,
MSR_IA32_TSC,
#ifdef CONFIG_X86_64
MSR_FS_BASE,
MSR_GS_BASE,
MSR_KERNEL_GS_BASE,
#endif
MSR_IA32_SYSENTER_CS,
MSR_IA32_SYSENTER_ESP,
MSR_IA32_SYSENTER_EIP,
@ -5779,7 +5781,6 @@ void dump_vmcs(void)
u32 vmentry_ctl, vmexit_ctl;
u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
unsigned long cr4;
u64 efer;
if (!dump_invalid_vmcs) {
pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
@ -5791,7 +5792,6 @@ void dump_vmcs(void)
cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
cr4 = vmcs_readl(GUEST_CR4);
efer = vmcs_read64(GUEST_IA32_EFER);
secondary_exec_control = 0;
if (cpu_has_secondary_exec_ctrls())
secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@ -5803,9 +5803,7 @@ void dump_vmcs(void)
pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
(cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
{
if (cpu_has_vmx_ept()) {
pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
@ -5831,7 +5829,8 @@ void dump_vmcs(void)
if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
(vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
efer, vmcs_read64(GUEST_IA32_PAT));
vmcs_read64(GUEST_IA32_EFER),
vmcs_read64(GUEST_IA32_PAT));
pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
vmcs_read64(GUEST_IA32_DEBUGCTL),
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
@ -6907,9 +6906,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
#ifdef CONFIG_X86_64
vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);


@ -11290,7 +11290,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
kvm_mmu_unload(vcpu);
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
return kvm_skip_emulated_instruction(vcpu);
default:

Some files were not shown because too many files have changed in this diff.