This is the 5.10.50 stable release
-----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmDu+1UACgkQONu9yGCS aT7jQRAAuLDi7ejk3JUameYFMzVXGAUE6yPs392/lWJzey7IBf+2uLqz4FzqqUHp U1GkEKJVaCacEfi0+rpi7BxNFljUdZdg/F/P68ARtAWPvwqAeJ4QIh5u3A682UUO 1M5h6e5/oY9F4kQIb5Kot04avqOeR6lTqrkA8jeP5h43ngyLWuS2d+5oOGmbCukS UgEaCC6CiKjcN51UUTj/fXMQ0X4IDHP5pD8rWwH0IvK0i7gduvk744un8LVB6aW1 rNV88C3BEFFtkPQh2XySnXM5Ok8kYlhFoTDsqlpeAX7pA8hiUPYBoRzTg0MJtPZn N1L/Yqhvxmn5xs9HAw7mDOo8E8NWXzsT5FvZVaBeiCgtdKmcPszylXqmSt1oiOb0 /EmkCWmlbG/3qWql24+LU4XP36iVPx32HQxAgg2XbnlNU5o0E1y2F98p6p/3JSWX NAjHtmg/MxueFQ+w8bDzhO8YzYn1dIU3V3qaXRvtpODrmaSYW+bwCyPtSjXe3/vL 604zb3dOg9+tD/gKqfRb/UPMu24nNll8M/gnSRci05/thmIxwtYudPwoLNSejDqr e+a8vejISfIyp41XrpYQbUeKs1WOA+A7vgx6CZrT791afiT+6UgC/ecQfg1NFxhs 8ayWpocaIszxyXxVGro1rfwZeQmTlbTCZ5wVdpn9sDPZfI7epts= =FCrA -----END PGP SIGNATURE----- Merge 5.10.50 into android12-5.10-lts Changes in 5.10.50 Bluetooth: hci_qca: fix potential GPF Bluetooth: btqca: Don't modify firmware contents in-place Bluetooth: Remove spurious error message ALSA: usb-audio: fix rate on Ozone Z90 USB headset ALSA: usb-audio: Fix OOB access at proc output ALSA: firewire-motu: fix stream format for MOTU 8pre FireWire ALSA: usb-audio: scarlett2: Fix wrong resume call ALSA: intel8x0: Fix breakage at ac97 clock measurement ALSA: hda/realtek: fix mute/micmute LEDs for HP ProBook 450 G8 ALSA: hda/realtek: fix mute/micmute LEDs for HP ProBook 445 G8 ALSA: hda/realtek: fix mute/micmute LEDs for HP ProBook 630 G8 ALSA: hda/realtek: Add another ALC236 variant support ALSA: hda/realtek: fix mute/micmute LEDs for HP EliteBook x360 830 G8 ALSA: hda/realtek: Improve fixup for HP Spectre x360 15-df0xxx ALSA: hda/realtek: Fix bass speaker DAC mapping for Asus UM431D ALSA: hda/realtek: Apply LED fixup for HP Dragonfly G1, too ALSA: hda/realtek: fix mute/micmute LEDs for HP EliteBook 830 G8 Notebook PC media: dvb-usb: fix wrong definition Input: usbtouchscreen - fix control-request directions net: can: ems_usb: fix use-after-free in ems_usb_disconnect() usb: gadget: eem: fix echo command packet response issue usb: renesas-xhci: Fix handling of unknown ROM state USB: cdc-acm: blacklist Heimann USB Appset device usb: dwc3: Fix debugfs creation flow usb: typec: Add the missed altmode_id_remove() in typec_register_altmode() xhci: solve a double free problem while doing s4 gfs2: Fix underflow in gfs2_page_mkwrite gfs2: Fix error handling in init_statfs ntfs: fix validity check for file name attribute selftests/lkdtm: Avoid needing explicit sub-shell copy_page_to_iter(): fix ITER_DISCARD case iov_iter_fault_in_readable() should do nothing in xarray case Input: joydev - prevent use of not validated data in JSIOCSBTNMAP ioctl crypto: nx - Fix memcpy() over-reading in nonce crypto: ccp - Annotate SEV Firmware file names arm_pmu: Fix write counter incorrect in ARMv7 big-endian mode ARM: dts: ux500: Fix LED probing ARM: dts: at91: sama5d4: fix pinctrl muxing btrfs: send: fix invalid path for unlink operations after parent orphanization btrfs: compression: don't try to compress if we don't have enough pages btrfs: clear defrag status of a root if starting transaction fails ext4: cleanup in-core orphan list if ext4_truncate() failed to get a transaction handle ext4: fix kernel infoleak via ext4_extent_header ext4: fix overflow in ext4_iomap_alloc() ext4: return error code when ext4_fill_flex_info() fails ext4: correct the cache_nr in tracepoint ext4_es_shrink_exit ext4: remove check for zero nr_to_scan in ext4_es_scan() ext4: fix avefreec in find_group_orlov ext4: use 
ext4_grp_locked_error in mb_find_extent can: bcm: delay release of struct bcm_op after synchronize_rcu() can: gw: synchronize rcu operations before removing gw job entry can: isotp: isotp_release(): omit unintended hrtimer restart on socket release can: j1939: j1939_sk_init(): set SOCK_RCU_FREE to call sk_destruct() after RCU is done can: peak_pciefd: pucan_handle_status(): fix a potential starvation issue in TX path mac80211: remove iwlwifi specific workaround that broke sta NDP tx SUNRPC: Fix the batch tasks count wraparound. SUNRPC: Should wake up the privileged task firstly. bus: mhi: Wait for M2 state during system resume mm/gup: fix try_grab_compound_head() race with split_huge_page() perf/smmuv3: Don't trample existing events with global filter KVM: nVMX: Handle split-lock #AC exceptions that happen in L2 KVM: PPC: Book3S HV: Workaround high stack usage with clang KVM: x86/mmu: Treat NX as used (not reserved) for all !TDP shadow MMUs KVM: x86/mmu: Use MMU's role to detect CR4.SMEP value in nested NPT walk s390/cio: dont call css_wait_for_slow_path() inside a lock s390: mm: Fix secure storage access exception handling f2fs: Prevent swap file in LFS mode clk: agilex/stratix10/n5x: fix how the bypass_reg is handled clk: agilex/stratix10: remove noc_clk clk: agilex/stratix10: fix bypass representation rtc: stm32: Fix unbalanced clk_disable_unprepare() on probe error path iio: frequency: adf4350: disable reg and clk on error in adf4350_probe() iio: light: tcs3472: do not free unallocated IRQ iio: ltr501: mark register holding upper 8 bits of ALS_DATA{0,1} and PS_DATA as volatile, too iio: ltr501: ltr559: fix initialization of LTR501_ALS_CONTR iio: ltr501: ltr501_read_ps(): add missing endianness conversion iio: accel: bma180: Fix BMA25x bandwidth register values serial: mvebu-uart: fix calculation of clock divisor serial: sh-sci: Stop dmaengine transfer in sci_stop_tx() serial_cs: Add Option International GSM-Ready 56K/ISDN modem serial_cs: remove wrong GLOBETROTTER.cis entry ath9k: Fix kernel NULL pointer dereference during ath_reset_internal() ssb: sdio: Don't overwrite const buffer if block_write fails rsi: Assign beacon rate settings to the correct rate_info descriptor field rsi: fix AP mode with WPA failure due to encrypted EAPOL tracing/histograms: Fix parsing of "sym-offset" modifier tracepoint: Add tracepoint_probe_register_may_exist() for BPF tracing seq_buf: Make trace_seq_putmem_hex() support data longer than 8 powerpc/stacktrace: Fix spurious "stale" traces in raise_backtrace_ipi() loop: Fix missing discard support when using LOOP_CONFIGURE evm: Execute evm_inode_init_security() only when an HMAC key is loaded evm: Refuse EVM_ALLOW_METADATA_WRITES only if an HMAC key is loaded fuse: Fix crash in fuse_dentry_automount() error path fuse: Fix crash if superblock of submount gets killed early fuse: Fix infinite loop in sget_fc() fuse: ignore PG_workingset after stealing fuse: check connected before queueing on fpq->io fuse: reject internal errno thermal/cpufreq_cooling: Update offline CPUs per-cpu thermal_pressure spi: Make of_register_spi_device also set the fwnode Add a reference to ucounts for each cred staging: media: rkvdec: fix pm_runtime_get_sync() usage count media: marvel-ccic: fix some issues when getting pm_runtime media: mdk-mdp: fix pm_runtime_get_sync() usage count media: s5p: fix pm_runtime_get_sync() usage count media: am437x: fix pm_runtime_get_sync() usage count media: sh_vou: fix pm_runtime_get_sync() usage count media: mtk-vcodec: fix PM runtime get logic 
media: s5p-jpeg: fix pm_runtime_get_sync() usage count media: sunxi: fix pm_runtime_get_sync() usage count media: sti/bdisp: fix pm_runtime_get_sync() usage count media: exynos4-is: fix pm_runtime_get_sync() usage count media: exynos-gsc: fix pm_runtime_get_sync() usage count spi: spi-loopback-test: Fix 'tx_buf' might be 'rx_buf' spi: spi-topcliff-pch: Fix potential double free in pch_spi_process_messages() spi: omap-100k: Fix the length judgment problem regulator: uniphier: Add missing MODULE_DEVICE_TABLE sched/core: Initialize the idle task with preemption disabled hwrng: exynos - Fix runtime PM imbalance on error crypto: nx - add missing MODULE_DEVICE_TABLE media: sti: fix obj-$(config) targets media: cpia2: fix memory leak in cpia2_usb_probe media: cobalt: fix race condition in setting HPD media: hevc: Fix dependent slice segment flags media: pvrusb2: fix warning in pvr2_i2c_core_done media: imx: imx7_mipi_csis: Fix logging of only error event counters crypto: qat - check return code of qat_hal_rd_rel_reg() crypto: qat - remove unused macro in FW loader crypto: qce: skcipher: Fix incorrect sg count for dma transfers arm64: perf: Convert snprintf to sysfs_emit sched/fair: Fix ascii art by relpacing tabs media: i2c: ov2659: Use clk_{prepare_enable,disable_unprepare}() to set xvclk on/off media: bt878: do not schedule tasklet when it is not setup media: em28xx: Fix possible memory leak of em28xx struct media: hantro: Fix .buf_prepare media: cedrus: Fix .buf_prepare media: v4l2-core: Avoid the dangling pointer in v4l2_fh_release media: bt8xx: Fix a missing check bug in bt878_probe media: st-hva: Fix potential NULL pointer dereferences crypto: hisilicon/sec - fixup 3des minimum key size declaration Makefile: fix GDB warning with CONFIG_RELR media: dvd_usb: memory leak in cinergyt2_fe_attach memstick: rtsx_usb_ms: fix UAF mmc: sdhci-sprd: use sdhci_sprd_writew mmc: via-sdmmc: add a check against NULL pointer dereference spi: meson-spicc: fix a wrong goto jump for avoiding memory leak. 
spi: meson-spicc: fix memory leak in meson_spicc_probe crypto: shash - avoid comparing pointers to exported functions under CFI media: dvb_net: avoid speculation from net slot media: siano: fix device register error path media: imx-csi: Skip first few frames from a BT.656 source hwmon: (max31790) Report correct current pwm duty cycles hwmon: (max31790) Fix pwmX_enable attributes drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe() KVM: PPC: Book3S HV: Fix TLB management on SMT8 POWER9 and POWER10 processors btrfs: fix error handling in __btrfs_update_delayed_inode btrfs: abort transaction if we fail to update the delayed inode btrfs: sysfs: fix format string for some discard stats btrfs: don't clear page extent mapped if we're not invalidating the full page btrfs: disable build on platforms having page size 256K locking/lockdep: Fix the dep path printing for backwards BFS lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage() KVM: s390: get rid of register asm usage regulator: mt6358: Fix vdram2 .vsel_mask regulator: da9052: Ensure enough delay time for .set_voltage_time_sel media: Fix Media Controller API config checks ACPI: video: use native backlight for GA401/GA502/GA503 HID: do not use down_interruptible() when unbinding devices EDAC/ti: Add missing MODULE_DEVICE_TABLE ACPI: processor idle: Fix up C-state latency if not ordered hv_utils: Fix passing zero to 'PTR_ERR' warning lib: vsprintf: Fix handling of number field widths in vsscanf Input: goodix - platform/x86: touchscreen_dmi - Move upside down quirks to touchscreen_dmi.c platform/x86: touchscreen_dmi: Add an extra entry for the upside down Goodix touchscreen on Teclast X89 tablets platform/x86: touchscreen_dmi: Add info for the Goodix GT912 panel of TM800A550L tablets ACPI: EC: Make more Asus laptops use ECDT _GPE block_dump: remove block_dump feature in mark_inode_dirty() blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter blk-mq: clear stale request in tags->rq[] before freeing one request pool fs: dlm: cancel work sync othercon random32: Fix implicit truncation warning in prandom_seed_state() open: don't silently ignore unknown O-flags in openat2() drivers: hv: Fix missing error code in vmbus_connect() fs: dlm: fix memory leak when fenced ACPICA: Fix memory leak caused by _CID repair function ACPI: bus: Call kobject_put() in acpi_init() error path ACPI: resources: Add checks for ACPI IRQ override block: fix race between adding/removing rq qos and normal IO platform/x86: asus-nb-wmi: Revert "Drop duplicate DMI quirk structures" platform/x86: asus-nb-wmi: Revert "add support for ASUS ROG Zephyrus G14 and G15" platform/x86: toshiba_acpi: Fix missing error code in toshiba_acpi_setup_keyboard() nvme-pci: fix var. 
type for increasing cq_head nvmet-fc: do not check for invalid target port in nvmet_fc_handle_fcp_rqst() EDAC/Intel: Do not load EDAC driver when running as a guest PCI: hv: Add check for hyperv_initialized in init_hv_pci_drv() cifs: improve fallocate emulation ACPI: EC: trust DSDT GPE for certain HP laptop clocksource: Retry clock read if long delays detected clocksource: Check per-CPU clock synchronization when marked unstable tpm_tis_spi: add missing SPI device ID entries ACPI: tables: Add custom DSDT file as makefile prerequisite HID: wacom: Correct base usage for capacitive ExpressKey status bits cifs: fix missing spinlock around update to ses->status mailbox: qcom: Use PLATFORM_DEVID_AUTO to register platform device block: fix discard request merge kthread_worker: fix return value when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync() ia64: mca_drv: fix incorrect array size calculation writeback, cgroup: increment isw_nr_in_flight before grabbing an inode spi: Allow to have all native CSs in use along with GPIOs spi: Avoid undefined behaviour when counting unused native CSs media: venus: Rework error fail recover logic media: s5p_cec: decrement usage count if disabled media: hantro: do a PM resume earlier crypto: ixp4xx - dma_unmap the correct address crypto: ixp4xx - update IV after requests crypto: ux500 - Fix error return code in hash_hw_final() sata_highbank: fix deferred probing pata_rb532_cf: fix deferred probing media: I2C: change 'RST' to "RSET" to fix multiple build errors sched/uclamp: Fix wrong implementation of cpu.uclamp.min sched/uclamp: Fix locking around cpu_util_update_eff() kbuild: Fix objtool dependency for 'OBJECT_FILES_NON_STANDARD_<obj> := n' pata_octeon_cf: avoid WARN_ON() in ata_host_activate() evm: fix writing <securityfs>/evm overflow x86/elf: Use _BITUL() macro in UAPI headers crypto: sa2ul - Fix leaks on failure paths with sa_dma_init() crypto: sa2ul - Fix pm_runtime enable in sa_ul_probe() crypto: ccp - Fix a resource leak in an error handling path media: rc: i2c: Fix an error message pata_ep93xx: fix deferred probing locking/lockdep: Reduce LOCKDEP dependency list media: rkvdec: Fix .buf_prepare media: exynos4-is: Fix a use after free in isp_video_release media: au0828: fix a NULL vs IS_ERR() check media: tc358743: Fix error return code in tc358743_probe_of() media: gspca/gl860: fix zero-length control requests m68k: atari: Fix ATARI_KBD_CORE kconfig unmet dependency warning media: siano: Fix out-of-bounds warnings in smscore_load_firmware_family2() regulator: fan53880: Fix vsel_mask setting for FAN53880_BUCK crypto: nitrox - fix unchecked variable in nitrox_register_interrupts crypto: omap-sham - Fix PM reference leak in omap sham ops crypto: x86/curve25519 - fix cpu feature checking logic in mod_exit crypto: sm2 - remove unnecessary reset operations crypto: sm2 - fix a memory leak in sm2 mmc: usdhi6rol0: fix error return code in usdhi6_probe() arm64: consistently use reserved_pg_dir arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan media: subdev: remove VIDIOC_DQEVENT_TIME32 handling media: s5p-g2d: Fix a memory leak on ctx->fh.m2m_ctx hwmon: (lm70) Use device_get_match_data() hwmon: (lm70) Revert "hwmon: (lm70) Add support for ACPI" hwmon: (max31722) Remove non-standard ACPI device IDs hwmon: (max31790) Fix fan speed reporting for fan7..12 KVM: nVMX: Sync all PGDs on nested transition with shadow paging KVM: nVMX: Ensure 64-bit shift when checking VMFUNC bitmap KVM: nVMX: Don't clobber nested MMU's A/D 
status on EPTP switch KVM: x86/mmu: Fix return value in tdp_mmu_map_handle_target_level() perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number KVM: arm64: Don't zero the cycle count register when PMCR_EL0.P is set regulator: hi655x: Fix pass wrong pointer to config.driver_data btrfs: clear log tree recovering status if starting transaction fails x86/sev: Make sure IRQs are disabled while GHCB is active x86/sev: Split up runtime #VC handler for correct state tracking sched/rt: Fix RT utilization tracking during policy change sched/rt: Fix Deadline utilization tracking during policy change sched/uclamp: Fix uclamp_tg_restrict() lockdep: Fix wait-type for empty stack lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING spi: spi-sun6i: Fix chipselect/clock bug crypto: nx - Fix RCU warning in nx842_OF_upd_status psi: Fix race between psi_trigger_create/destroy media: v4l2-async: Clean v4l2_async_notifier_add_fwnode_remote_subdev media: video-mux: Skip dangling endpoints PM / devfreq: Add missing error code in devfreq_add_device() ACPI: PM / fan: Put fan device IDs into separate header file block: avoid double io accounting for flush request nvme-pci: look for StorageD3Enable on companion ACPI device instead ACPI: sysfs: Fix a buffer overrun problem with description_show() mark pstore-blk as broken clocksource/drivers/timer-ti-dm: Save and restore timer TIOCP_CFG extcon: extcon-max8997: Fix IRQ freeing at error path ACPI: APEI: fix synchronous external aborts in user-mode blk-wbt: introduce a new disable state to prevent false positive by rwb_enabled() blk-wbt: make sure throttle is enabled properly ACPI: Use DEVICE_ATTR_<RW|RO|WO> macros ACPI: bgrt: Fix CFI violation cpufreq: Make cpufreq_online() call driver->offline() on errors blk-mq: update hctx->dispatch_busy in case of real scheduler ocfs2: fix snprintf() checking dax: fix ENOMEM handling in grab_mapping_entry() mm/debug_vm_pgtable/basic: add validation for dirtiness after write protect mm/debug_vm_pgtable/basic: iterate over entire protection_map[] mm/debug_vm_pgtable: ensure THP availability via has_transparent_hugepage() swap: fix do_swap_page() race with swapoff mm/shmem: fix shmem_swapin() race with swapoff mm: memcg/slab: properly set up gfp flags for objcg pointer array mm: page_alloc: refactor setup_per_zone_lowmem_reserve() mm/page_alloc: fix counting of managed_pages xfrm: xfrm_state_mtu should return at least 1280 for ipv6 drm/bridge/sii8620: fix dependency on extcon drm/bridge: Fix the stop condition of drm_bridge_chain_pre_enable() drm/amd/dc: Fix a missing check bug in dm_dp_mst_detect() drm/ast: Fix missing conversions to managed API video: fbdev: imxfb: Fix an error message net: mvpp2: Put fwnode in error case during ->probe() net: pch_gbe: Propagate error from devm_gpio_request_one() pinctrl: renesas: r8a7796: Add missing bias for PRESET# pin pinctrl: renesas: r8a77990: JTAG pins do not have pull-down capabilities drm/vmwgfx: Mark a surface gpu-dirty after the SVGA3dCmdDXGenMips command drm/vmwgfx: Fix cpu updates of coherent multisample surfaces net: qrtr: ns: Fix error return code in qrtr_ns_init() clk: meson: g12a: fix gp0 and hifi ranges net: ftgmac100: add missing error return code in ftgmac100_probe() drm: rockchip: set alpha_en to 0 if it is not used drm/rockchip: cdn-dp-core: add missing clk_disable_unprepare() on error in cdn_dp_grf_write() drm/rockchip: dsi: move all lane config except LCDC mux to bind() drm/rockchip: lvds: Fix an error handling path drm/rockchip: cdn-dp: 
fix sign extension on an int multiply for a u64 result mptcp: fix pr_debug in mptcp_token_new_connect mptcp: generate subflow hmac after mptcp_finish_join() RDMA/srp: Fix a recently introduced memory leak RDMA/rtrs-clt: Check state of the rtrs_clt_sess before reading its stats RDMA/rtrs: Do not reset hb_missed_max after re-connection RDMA/rtrs-srv: Fix memory leak of unfreed rtrs_srv_stats object RDMA/rtrs-srv: Fix memory leak when having multiple sessions RDMA/rtrs-clt: Check if the queue_depth has changed during a reconnection RDMA/rtrs-clt: Fix memory leak of not-freed sess->stats and stats->pcpu_stats ehea: fix error return code in ehea_restart_qps() clk: tegra30: Use 300MHz for video decoder by default xfrm: remove the fragment check for ipv6 beet mode net/sched: act_vlan: Fix modify to allow 0 RDMA/core: Sanitize WQ state received from the userspace drm/pl111: depend on CONFIG_VEXPRESS_CONFIG RDMA/rxe: Fix failure during driver load drm/pl111: Actually fix CONFIG_VEXPRESS_CONFIG depends drm/vc4: hdmi: Fix error path of hpd-gpios clk: vc5: fix output disabling when enabling a FOD drm: qxl: ensure surf.data is ininitialized tools/bpftool: Fix error return code in do_batch() ath10k: go to path err_unsupported when chip id is not supported ath10k: add missing error return code in ath10k_pci_probe() wireless: carl9170: fix LEDS build errors & warnings ieee802154: hwsim: Fix possible memory leak in hwsim_subscribe_all_others clk: imx8mq: remove SYS PLL 1/2 clock gates wcn36xx: Move hal_buf allocation to devm_kmalloc in probe ssb: Fix error return code in ssb_bus_scan() brcmfmac: fix setting of station info chains bitmask brcmfmac: correctly report average RSSI in station info brcmfmac: Fix a double-free in brcmf_sdio_bus_reset brcmsmac: mac80211_if: Fix a resource leak in an error handling path cw1200: Revert unnecessary patches that fix unreal use-after-free bugs ath11k: Fix an error handling path in ath11k_core_fetch_board_data_api_n() ath10k: Fix an error code in ath10k_add_interface() ath11k: send beacon template after vdev_start/restart during csa netlabel: Fix memory leak in netlbl_mgmt_add_common RDMA/mlx5: Don't add slave port to unaffiliated list netfilter: nft_exthdr: check for IPv6 packet before further processing netfilter: nft_osf: check for TCP packet before further processing netfilter: nft_tproxy: restrict support to TCP and UDP transport protocols RDMA/rxe: Fix qp reference counting for atomic ops selftests/bpf: Whitelist test_progs.h from .gitignore xsk: Fix missing validation for skb and unaligned mode xsk: Fix broken Tx ring validation bpf: Fix libelf endian handling in resolv_btfids RDMA/rtrs-srv: Set minimal max_send_wr and max_recv_wr samples/bpf: Fix Segmentation fault for xdp_redirect command samples/bpf: Fix the error return code of xdp_redirect's main() mt76: fix possible NULL pointer dereference in mt76_tx mt76: mt7615: fix NULL pointer dereference in tx_prepare_skb() net: ethernet: aeroflex: fix UAF in greth_of_remove net: ethernet: ezchip: fix UAF in nps_enet_remove net: ethernet: ezchip: fix error handling vrf: do not push non-ND strict packets with a source LLA through packet taps again net: sched: add barrier to ensure correct ordering for lockless qdisc tls: prevent oversized sendfile() hangs by ignoring MSG_MORE netfilter: nf_tables_offload: check FLOW_DISSECTOR_KEY_BASIC in VLAN transfer logic pkt_sched: sch_qfq: fix qfq_change_class() error path xfrm: Fix xfrm offload fallback fail case iwlwifi: increase PNVM load timeout rtw88: 8822c: fix lc 
calibration timing vxlan: add missing rcu_read_lock() in neigh_reduce() ip6_tunnel: fix GRE6 segmentation net/ipv4: swap flow ports when validating source net: ti: am65-cpsw-nuss: Fix crash when changing number of TX queues tc-testing: fix list handling ieee802154: hwsim: Fix memory leak in hwsim_add_one ieee802154: hwsim: avoid possible crash in hwsim_del_edge_nl() bpf: Fix null ptr deref with mixed tail calls and subprogs drm/msm: Fix error return code in msm_drm_init() drm/msm/dpu: Fix error return code in dpu_mdss_init() mac80211: remove iwlwifi specific workaround NDPs of null_response net: bcmgenet: Fix attaching to PYH failed on RPi 4B ipv6: exthdrs: do not blindly use init_net can: j1939: j1939_sk_setsockopt(): prevent allocation of j1939 filter for optlen == 0 bpf: Do not change gso_size during bpf_skb_change_proto() i40e: Fix error handling in i40e_vsi_open i40e: Fix autoneg disabling for non-10GBaseT links i40e: Fix missing rtnl locking when setting up pf switch Revert "ibmvnic: remove duplicate napi_schedule call in open function" ibmvnic: set ltb->buff to NULL after freeing ibmvnic: free tx_pool if tso_pool alloc fails RDMA/cma: Protect RMW with qp_mutex net: macsec: fix the length used to copy the key for offloading net: phy: mscc: fix macsec key length net: atlantic: fix the macsec key length ipv6: fix out-of-bound access in ip6_parse_tlv() e1000e: Check the PCIm state net: dsa: sja1105: fix NULL pointer dereference in sja1105_reload_cbs() bpfilter: Specify the log level for the kmsg message RDMA/cma: Fix incorrect Packet Lifetime calculation gve: Fix swapped vars when fetching max queues Revert "be2net: disable bh with spin_lock in be_process_mcc" Bluetooth: mgmt: Fix slab-out-of-bounds in tlv_data_is_valid Bluetooth: Fix not sending Set Extended Scan Response Bluetooth: Fix Set Extended (Scan Response) Data Bluetooth: Fix handling of HCI_LE_Advertising_Set_Terminated event clk: actions: Fix UART clock dividers on Owl S500 SoC clk: actions: Fix SD clocks factor table on Owl S500 SoC clk: actions: Fix bisp_factor_table based clocks on Owl S500 SoC clk: actions: Fix AHPPREDIV-H-AHB clock chain on Owl S500 SoC clk: qcom: clk-alpha-pll: fix CAL_L write in alpha_pll_fabia_prepare clk: si5341: Wait for DEVICE_READY on startup clk: si5341: Avoid divide errors due to bogus register contents clk: si5341: Check for input clock presence and PLL lock on startup clk: si5341: Update initialization magic writeback: fix obtain a reference to a freeing memcg css net: lwtunnel: handle MTU calculation in forwading net: sched: fix warning in tcindex_alloc_perfect_hash net: tipc: fix FB_MTU eat two pages RDMA/mlx5: Don't access NULL-cleared mpi pointer RDMA/core: Always release restrack object MIPS: Fix PKMAP with 32-bit MIPS huge page support staging: fbtft: Rectify GPIO handling staging: fbtft: Don't spam logs when probe is deferred ASoC: rt5682: Disable irq on shutdown rcu: Invoke rcu_spawn_core_kthreads() from rcu_spawn_gp_kthread() serial: fsl_lpuart: don't modify arbitrary data on lpuart32 serial: fsl_lpuart: remove RTSCTS handling from get_mctrl() serial: 8250_omap: fix a timeout loop condition tty: nozomi: Fix a resource leak in an error handling function mwifiex: re-fix for unaligned accesses iio: adis_buffer: do not return ints in irq handlers iio: adis16400: do not return ints in irq handlers iio: adis16475: do not return ints in irq handlers iio: accel: bma180: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: accel: bma220: Fix buffer alignment in 
iio_push_to_buffers_with_timestamp() iio: accel: hid: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: accel: kxcjk-1013: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: accel: mxc4005: Fix overread of data and alignment issue. iio: accel: stk8312: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: accel: stk8ba50: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: adc: ti-ads1015: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: adc: vf610: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: gyro: bmg160: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: humidity: am2315: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: prox: srf08: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: prox: pulsed-light: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: prox: as3935: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: magn: hmc5843: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: magn: bmc150: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: light: isl29125: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: light: tcs3414: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: light: tcs3472: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: chemical: atlas: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: cros_ec_sensors: Fix alignment of buffer in iio_push_to_buffers_with_timestamp() iio: potentiostat: lmp91000: Fix alignment of buffer in iio_push_to_buffers_with_timestamp() ASoC: rk3328: fix missing clk_disable_unprepare() on error in rk3328_platform_probe() ASoC: hisilicon: fix missing clk_disable_unprepare() on error in hi6210_i2s_startup() backlight: lm3630a_bl: Put fwnode in error case during ->probe() ASoC: rsnd: tidyup loop on rsnd_adg_clk_query() Input: hil_kbd - fix error return code in hil_dev_connect() perf scripting python: Fix tuple_set_u64() mtd: partitions: redboot: seek fis-index-block in the right node mtd: rawnand: arasan: Ensure proper configuration for the asserted target staging: mmal-vchiq: Fix incorrect static vchiq_instance. 
char: pcmcia: error out if 'num_bytes_read' is greater than 4 in set_protocol() firmware: stratix10-svc: Fix a resource leak in an error handling path tty: nozomi: Fix the error handling path of 'nozomi_card_init()' leds: class: The -ENOTSUPP should never be seen by user space leds: lm3532: select regmap I2C API leds: lm36274: Put fwnode in error case during ->probe() leds: lm3692x: Put fwnode in any case during ->probe() leds: lm3697: Don't spam logs when probe is deferred leds: lp50xx: Put fwnode in error case during ->probe() scsi: FlashPoint: Rename si_flags field scsi: iscsi: Flush block work before unblock mfd: mp2629: Select MFD_CORE to fix build error mfd: rn5t618: Fix IRQ trigger by changing it to level mode fsi: core: Fix return of error values on failures fsi: scom: Reset the FSI2PIB engine for any error fsi: occ: Don't accept response from un-initialized OCC fsi/sbefifo: Clean up correct FIFO when receiving reset request from SBE fsi/sbefifo: Fix reset timeout visorbus: fix error return code in visorchipset_init() iommu/amd: Fix extended features logging s390/irq: select HAVE_IRQ_EXIT_ON_IRQ_STACK s390: enable HAVE_IOREMAP_PROT s390: appldata depends on PROC_SYSCTL selftests: splice: Adjust for handler fallback removal iommu/dma: Fix IOVA reserve dma ranges ASoC: max98373-sdw: use first_hw_init flag on resume ASoC: rt1308-sdw: use first_hw_init flag on resume ASoC: rt5682-sdw: use first_hw_init flag on resume ASoC: rt700-sdw: use first_hw_init flag on resume ASoC: rt711-sdw: use first_hw_init flag on resume ASoC: rt715-sdw: use first_hw_init flag on resume ASoC: rt5682: fix getting the wrong device id when the suspend_stress_test ASoC: rt5682-sdw: set regcache_cache_only false before reading RT5682_DEVICE_ID ASoC: mediatek: mtk-btcvsd: Fix an error handling path in 'mtk_btcvsd_snd_probe()' usb: gadget: f_fs: Fix setting of device and driver data cross-references usb: dwc2: Don't reset the core after setting turnaround time eeprom: idt_89hpesx: Put fwnode in matching case during ->probe() eeprom: idt_89hpesx: Restore printing the unsupported fwnode name thunderbolt: Bond lanes only when dual_link_port != NULL in alloc_dev_default() iio: adc: at91-sama5d2: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: adc: hx711: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: adc: mxs-lradc: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: adc: ti-ads8688: Fix alignment of buffer in iio_push_to_buffers_with_timestamp() iio: magn: rm3100: Fix alignment of buffer in iio_push_to_buffers_with_timestamp() iio: light: vcnl4000: Fix buffer alignment in iio_push_to_buffers_with_timestamp() ASoC: fsl_spdif: Fix error handler with pm_runtime_enable staging: gdm724x: check for buffer overflow in gdm_lte_multi_sdu_pkt() staging: gdm724x: check for overflow in gdm_lte_netif_rx() staging: rtl8712: fix error handling in r871xu_drv_init staging: rtl8712: fix memory leak in rtl871x_load_fw_cb coresight: core: Fix use of uninitialized pointer staging: mt7621-dts: fix pci address for PCI memory range serial: 8250: Actually allow UPF_MAGIC_MULTIPLIER baud rates iio: light: vcnl4035: Fix buffer alignment in iio_push_to_buffers_with_timestamp() iio: prox: isl29501: Fix buffer alignment in iio_push_to_buffers_with_timestamp() ASoC: cs42l42: Correct definition of CS42L42_ADC_PDN_MASK of: Fix truncation of memory sizes on 32-bit platforms mtd: rawnand: marvell: add missing clk_disable_unprepare() on error in marvell_nfc_resume() habanalabs: Fix an error handling 
path in 'hl_pci_probe()' scsi: mpt3sas: Fix error return value in _scsih_expander_add() soundwire: stream: Fix test for DP prepare complete phy: uniphier-pcie: Fix updating phy parameters phy: ti: dm816x: Fix the error handling path in 'dm816x_usb_phy_probe() extcon: sm5502: Drop invalid register write in sm5502_reg_data extcon: max8997: Add missing modalias string powerpc/powernv: Fix machine check reporting of async store errors ASoC: atmel-i2s: Fix usage of capture and playback at the same time configfs: fix memleak in configfs_release_bin_file ASoC: Intel: sof_sdw: add SOF_RT715_DAI_ID_FIX for AlderLake ASoC: fsl_spdif: Fix unexpected interrupt after suspend leds: as3645a: Fix error return code in as3645a_parse_node() leds: ktd2692: Fix an error handling path selftests/ftrace: fix event-no-pid on 1-core machine serial: 8250: 8250_omap: Disable RX interrupt after DMA enable serial: 8250: 8250_omap: Fix possible interrupt storm on K3 SoCs powerpc: Offline CPU in stop_this_cpu() powerpc/papr_scm: Properly handle UUID types and API powerpc/64s: Fix copy-paste data exposure into newly created tasks powerpc/papr_scm: Make 'perf_stats' invisible if perf-stats unavailable ALSA: firewire-lib: Fix 'amdtp_domain_start()' when no AMDTP_OUT_STREAM stream is found serial: mvebu-uart: do not allow changing baudrate when uartclk is not available serial: mvebu-uart: correctly calculate minimal possible baudrate arm64: dts: marvell: armada-37xx: Fix reg for standard variant of UART vfio/pci: Handle concurrent vma faults mm/pmem: avoid inserting hugepage PTE entry with fsdax if hugepage support is disabled mm/huge_memory.c: remove dedicated macro HPAGE_CACHE_INDEX_MASK mm/huge_memory.c: add missing read-only THP checking in transparent_hugepage_enabled() mm/huge_memory.c: don't discard hugepage if other processes are mapping it mm/hugetlb: use helper huge_page_order and pages_per_huge_page mm/hugetlb: remove redundant check in preparing and destroying gigantic page hugetlb: remove prep_compound_huge_page cleanup include/linux/huge_mm.h: remove extern keyword mm/z3fold: fix potential memory leak in z3fold_destroy_pool() mm/z3fold: use release_z3fold_page_locked() to release locked z3fold page lib/math/rational.c: fix divide by zero selftests/vm/pkeys: fix alloc_random_pkey() to make it really, really random selftests/vm/pkeys: handle negative sys_pkey_alloc() return code selftests/vm/pkeys: refill shadow register after implicit kernel write perf llvm: Return -ENOMEM when asprintf() fails csky: fix syscache.c fallthrough warning csky: syscache: Fixup duplicate cache flush exfat: handle wrong stream entry size in exfat_readdir() scsi: fc: Correct RHBA attributes length scsi: target: cxgbit: Unmap DMA buffer before calling target_execute_cmd() mailbox: qcom-ipcc: Fix IPCC mbox channel exhaustion fscrypt: don't ignore minor_hash when hash is 0 fscrypt: fix derivation of SipHash keys on big endian CPUs tpm: Replace WARN_ONCE() with dev_err_once() in tpm_tis_status() erofs: fix error return code in erofs_read_superblock() block: return the correct bvec when checking for gaps io_uring: fix blocking inline submission mmc: block: Disable CMDQ on the ioctl path mmc: vub3000: fix control-request direction media: exynos4-is: remove a now unused integer scsi: core: Retry I/O for Notify (Enable Spinup) Required error crypto: qce - fix error return code in qce_skcipher_async_req_handle() s390: preempt: Fix preempt_count initialization cred: add missing return error code when set_cred_ucounts() failed iommu/dma: Fix 
compile warning in 32-bit builds powerpc/preempt: Don't touch the idle task's preempt_count during hotplug Linux 5.10.50 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: Iec4eab24ea8eb5a6d79739a1aec8432d93a8f82c
commit 2df0fb4a4b
@@ -49,8 +49,30 @@ Description:
		modification of EVM-protected metadata and
		disable all further modification of policy

		Note that once a key has been loaded, it will no longer be
		possible to enable metadata modification.
		Echoing a value is additive, the new value is added to the
		existing initialization flags.

		For example, after::

		  echo 2 ><securityfs>/evm

		another echo can be performed::

		  echo 1 ><securityfs>/evm

		and the resulting value will be 3.

		Note that once an HMAC key has been loaded, it will no longer
		be possible to enable metadata modification. Signaling that an
		HMAC key has been loaded will clear the corresponding flag.
		For example, if the current value is 6 (2 and 4 set)::

		  echo 1 ><securityfs>/evm

		will set the new value to 3 (4 cleared).

		Loading an HMAC key is the only way to disable metadata
		modification.

		Until key loading has been signaled EVM can not create
		or validate the 'security.evm' xattr, but returns
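The additive flag behaviour documented above can also be driven from a small program instead of the shell. The sketch below simply mirrors the documented echo example; the securityfs mount point /sys/kernel/security is an assumption, and error handling is minimal::

  /* Illustrative sketch only: mirrors the echo example above.
   * Assumes securityfs is mounted at /sys/kernel/security.
   */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static void evm_write(const char *val)
  {
          int fd = open("/sys/kernel/security/evm", O_WRONLY);

          if (fd < 0)
                  return;
          if (write(fd, val, strlen(val)) < 0)
                  perror("evm write");
          close(fd);
  }

  int main(void)
  {
          char buf[16] = "";
          int fd;

          evm_write("2");         /* "echo 2": allow metadata modification */
          evm_write("1");         /* "echo 1": per the example, value becomes 3 */

          fd = open("/sys/kernel/security/evm", O_RDONLY);
          if (fd >= 0) {
                  ssize_t n = read(fd, buf, sizeof(buf) - 1);

                  if (n > 0)
                          buf[n] = '\0';
                  close(fd);
          }
          printf("EVM initialization flags: %s\n", buf);
          return 0;
  }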
@@ -39,9 +39,11 @@ KernelVersion: v5.9
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
Description:
		(RO) Report various performance stats related to papr-scm NVDIMM
		device. Each stat is reported on a new line with each line
		composed of a stat-identifier followed by it value. Below are
		currently known dimm performance stats which are reported:
		device. This attribute is only available for NVDIMM devices
		that support reporting NVDIMM performance stats. Each stat is
		reported on a new line with each line composed of a
		stat-identifier followed by it value. Below are currently known
		dimm performance stats which are reported:

		* "CtlResCt" : Controller Reset Count
		* "CtlResTm" : Controller Reset Elapsed Time
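Since the attribute is plain text with one stat per line, it can be read like any other sysfs file. A minimal sketch follows; the exact device path (nmem0) is an assumption for illustration::

  /* Dump the perf_stats lines described above (stat-identifier then value).
   * The nmem0 device path is an assumption for illustration.
   */
  #include <stdio.h>

  int main(void)
  {
          const char *path = "/sys/bus/nd/devices/nmem0/papr/perf_stats";
          char line[128];
          FILE *f = fopen(path, "r");

          if (!f) {
                  perror(path);
                  return 1;
          }
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
          return 0;
  }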
@@ -602,6 +602,12 @@
			loops can be debugged more effectively on production
			systems.

	clocksource.max_cswd_read_retries= [KNL]
			Number of clocksource_watchdog() retries due to
			external delays before the clock will be marked
			unstable. Defaults to three retries, that is,
			four attempts to read the clock under test.

	clearcpuid=BITNUM[,BITNUM...] [X86]
			Disable CPUID feature X for the kernel. See
			arch/x86/include/asm/cpufeatures.h for the valid bit
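One way to read the "three retries, that is, four attempts" wording: the watchdog only gives up after max_cswd_read_retries additional attempts beyond the first. The sketch below is a model of that retry semantics, not the kernel implementation::

  /* Model of the retry semantics described above; not kernel code. */
  #include <stdbool.h>
  #include <stdio.h>

  static bool read_clock_once(void)
  {
          return false;   /* stand-in for a read delayed by external factors */
  }

  static bool clock_read_ok(unsigned int max_retries)
  {
          unsigned int attempt;

          for (attempt = 0; attempt <= max_retries; attempt++)
                  if (read_clock_once())
                          return true;    /* clean read, keep the clocksource */
          return false;                   /* would be marked unstable */
  }

  int main(void)
  {
          /* default of 3 retries => up to 4 read attempts in total */
          printf("clock considered stable: %d\n", clock_read_ok(3));
          return 0;
  }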
@@ -38,6 +38,7 @@ Sysfs entries
fan[1-12]_input    RO  fan tachometer speed in RPM
fan[1-12]_fault    RO  fan experienced fault
fan[1-6]_target    RW  desired fan speed in RPM
pwm[1-6]_enable    RW  regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
pwm[1-6]           RW  fan target duty cycle (0-255)
pwm[1-6]_enable    RW  regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
pwm[1-6]           RW  read: current pwm duty cycle,
                       write: target pwm duty cycle (0-255)
================== === =======================================================
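With the corrected semantics above, pwm[1-6] is written as a target duty cycle and read back as the current duty cycle. A small user-space sketch; the hwmon0 index is an assumption for illustration::

  /* Sketch: set manual mode, write a target duty cycle, read it back.
   * The hwmon0 index is an assumption for illustration.
   */
  #include <stdio.h>

  static int sysfs_write(const char *path, const char *val)
  {
          FILE *f = fopen(path, "w");

          if (!f)
                  return -1;
          fputs(val, f);
          return fclose(f);
  }

  int main(void)
  {
          char buf[16];
          FILE *f;

          sysfs_write("/sys/class/hwmon/hwmon0/pwm1_enable", "1"); /* manual mode */
          sysfs_write("/sys/class/hwmon/hwmon0/pwm1", "128");      /* target duty cycle */

          f = fopen("/sys/class/hwmon/hwmon0/pwm1", "r");          /* current duty cycle */
          if (f && fgets(buf, sizeof(buf), f))
                  printf("pwm1 currently reads %s", buf);
          if (f)
                  fclose(f);
          return 0;
  }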
@@ -4161,7 +4161,7 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
    :stub-columns: 0
    :widths: 1 1 2

    * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
    * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED``
      - 0x00000001
      -
    * - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
@@ -4369,6 +4369,9 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
    * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
      - 0x00000100
      -
    * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT``
      - 0x00000200
      -

.. c:type:: v4l2_hevc_dpb_entry
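For code using the stateless HEVC controls, the renamed flag is set in the PPS control's flags field. A one-line sketch, assuming the staging hevc-ctrls.h header and its v4l2_ctrl_hevc_pps layout::

  /* Sketch only: assumes the staging HEVC stateless controls (hevc-ctrls.h). */
  #include <media/hevc-ctrls.h>

  static void pps_mark_dependent_slices_enabled(struct v4l2_ctrl_hevc_pps *pps)
  {
          pps->flags |= V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED;
  }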
@@ -50,7 +50,7 @@ PTE Page Table Helpers
+---------------------------+--------------------------------------------------+
| pte_mkwrite | Creates a writable PTE |
+---------------------------+--------------------------------------------------+
| pte_mkwrprotect | Creates a write protected PTE |
| pte_wrprotect | Creates a write protected PTE |
+---------------------------+--------------------------------------------------+
| pte_mkspecial | Creates a special PTE |
+---------------------------+--------------------------------------------------+
@@ -120,7 +120,7 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_mkwrite | Creates a writable PMD |
+---------------------------+--------------------------------------------------+
| pmd_mkwrprotect | Creates a write protected PMD |
| pmd_wrprotect | Creates a write protected PMD |
+---------------------------+--------------------------------------------------+
| pmd_mkspecial | Creates a special PMD |
+---------------------------+--------------------------------------------------+
@@ -186,7 +186,7 @@ PUD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pud_mkwrite | Creates a writable PUD |
+---------------------------+--------------------------------------------------+
| pud_mkwrprotect | Creates a write protected PUD |
| pud_wrprotect | Creates a write protected PUD |
+---------------------------+--------------------------------------------------+
| pud_mkdevmap | Creates a ZONE_DEVICE mapped PUD |
+---------------------------+--------------------------------------------------+
@@ -224,7 +224,7 @@ HugeTLB Page Table Helpers
+---------------------------+--------------------------------------------------+
| huge_pte_mkwrite | Creates a writable HugeTLB |
+---------------------------+--------------------------------------------------+
| huge_pte_mkwrprotect | Creates a write protected HugeTLB |
| huge_pte_wrprotect | Creates a write protected HugeTLB |
+---------------------------+--------------------------------------------------+
| huge_ptep_get_and_clear | Clears a HugeTLB |
+---------------------------+--------------------------------------------------+
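The wrprotect helpers renamed in these tables are expected to compose with their mkwrite counterparts; mm/debug_vm_pgtable.c (also touched in this release) checks exactly that. A kernel-context sketch of the idea::

  /* Sketch in the spirit of mm/debug_vm_pgtable.c; kernel context assumed. */
  #include <linux/init.h>
  #include <linux/mm.h>

  static void __init pte_wrprotect_sanity(unsigned long pfn, pgprot_t prot)
  {
          pte_t pte = pfn_pte(pfn, prot);

          /* pte_wrprotect() must undo pte_mkwrite() ... */
          WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
          /* ... and pte_mkwrite() must undo pte_wrprotect(). */
          WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
  }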
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 49
SUBLEVEL = 50
EXTRAVERSION =
NAME = Dare mighty things
@@ -166,7 +166,6 @@ smp_callin(void)
	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

@@ -809,7 +809,7 @@ pinctrl: pinctrl@fc06a000 {
	0xffffffff 0x3ffcfe7c 0x1c010101	/* pioA */
	0x7fffffff 0xfffccc3a 0x3f00cc3a	/* pioB */
	0xffffffff 0x3ff83fff 0xff00ffff	/* pioC */
	0x0003ff00 0x8002a800 0x00000000	/* pioD */
	0xb003ff00 0x8002a800 0x00000000	/* pioD */
	0xffffffff 0x7fffffff 0x76fff1bf	/* pioE */
	>;

@@ -4,6 +4,7 @@
 */

#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/leds/common.h>
#include "ste-href-family-pinctrl.dtsi"

/ {
@@ -64,17 +65,20 @@ chan@0 {
	reg = <0>;
	led-cur = /bits/ 8 <0x2f>;
	max-cur = /bits/ 8 <0x5f>;
	color = <LED_COLOR_ID_BLUE>;
	linux,default-trigger = "heartbeat";
};
chan@1 {
	reg = <1>;
	led-cur = /bits/ 8 <0x2f>;
	max-cur = /bits/ 8 <0x5f>;
	color = <LED_COLOR_ID_BLUE>;
};
chan@2 {
	reg = <2>;
	led-cur = /bits/ 8 <0x2f>;
	max-cur = /bits/ 8 <0x5f>;
	color = <LED_COLOR_ID_BLUE>;
};
};
lp5521@34 {
@@ -88,16 +92,19 @@ chan@0 {
	reg = <0>;
	led-cur = /bits/ 8 <0x2f>;
	max-cur = /bits/ 8 <0x5f>;
	color = <LED_COLOR_ID_BLUE>;
};
chan@1 {
	reg = <1>;
	led-cur = /bits/ 8 <0x2f>;
	max-cur = /bits/ 8 <0x5f>;
	color = <LED_COLOR_ID_BLUE>;
};
chan@2 {
	reg = <2>;
	led-cur = /bits/ 8 <0x2f>;
	max-cur = /bits/ 8 <0x5f>;
	color = <LED_COLOR_ID_BLUE>;
};
};
bh1780@29 {
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
	}
}

@@ -436,7 +436,6 @@ asmlinkage void secondary_start_kernel(void)
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*

@@ -134,7 +134,7 @@ avs: avs@11500 {

	uart0: serial@12000 {
		compatible = "marvell,armada-3700-uart";
		reg = <0x12000 0x200>;
		reg = <0x12000 0x18>;
		clocks = <&xtalclk>;
		interrupts =
			<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
@@ -15,10 +15,10 @@
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1			// swapper_pg_dir
	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
	sub	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE	// reserved_ttbr0 just before swapper_pg_dir
	sub	\tmp1, \tmp1, #PAGE_SIZE		// reserved_pg_dir just before swapper_pg_dir
	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
	isb
	add	\tmp1, \tmp1, #RESERVED_TTBR0_SIZE
	add	\tmp1, \tmp1, #PAGE_SIZE
	msr	ttbr1_el1, \tmp1			// set reserved ASID
	isb
	.endm

@@ -89,12 +89,6 @@
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
#define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
#else
#define RESERVED_TTBR0_SIZE	(0)
#endif

/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT

@@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next)
}

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
@@ -192,9 +192,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
		return;

	if (mm == &init_mm)
		ttbr = __pa_symbol(empty_zero_page);
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}

@@ -540,6 +540,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

static inline void set_preempt_need_resched(void)

@@ -116,8 +116,8 @@ static inline void __uaccess_ttbr0_disable(void)
	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);

@@ -811,9 +811,10 @@ SYM_CODE_END(ret_to_user)
 */
	.pushsection ".entry.tramp.text", "ax"

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	add	\tmp, \tmp, #(2 * PAGE_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
@@ -830,9 +831,10 @@ alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
@@ -312,7 +312,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
	return sysfs_emit(page, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

@@ -381,7 +381,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {

@@ -228,7 +228,6 @@ asmlinkage notrace void secondary_start_kernel(void)
	init_gic_priority_masking();

	rcu_cpu_starting(cpu);
	preempt_disable();
	trace_hardirqs_off();

	/*

@@ -190,13 +190,11 @@ SECTIONS
	. += PAGE_SIZE;
#endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	reserved_ttbr0 = .;
	. += RESERVED_TTBR0_SIZE;
#endif
	reserved_pg_dir = .;
	. += PAGE_SIZE;

	swapper_pg_dir = .;
	. += PAGE_SIZE;
	swapper_pg_end = .;

	. = ALIGN(SEGMENT_ALIGN);
	__init_begin = .;
@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}

@@ -179,7 +179,7 @@ SYM_FUNC_END(cpu_do_resume)
	.pushsection ".idmap.text", "awx"

.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, empty_zero_page
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2

@@ -282,7 +282,6 @@ void csky_start_secondary(void)
	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
@@ -12,14 +12,17 @@ SYSCALL_DEFINE3(cacheflush,
		int, cache)
{
	switch (cache) {
	case ICACHE:
	case BCACHE:
		flush_icache_mm_range(current->mm,
				(unsigned long)addr,
				(unsigned long)addr + bytes);
	case DCACHE:
		dcache_wb_range((unsigned long)addr,
				(unsigned long)addr + bytes);
		if (cache != BCACHE)
			break;
		fallthrough;
	case ICACHE:
		flush_icache_mm_range(current->mm,
				(unsigned long)addr,
				(unsigned long)addr + bytes);
		break;
	default:
		return -EINVAL;

@@ -343,7 +343,7 @@ init_record_index_pools(void)

	/* - 2 - */
	sect_min_size = sal_log_sect_min_sizes[0];
	for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
	for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
		if (sect_min_size > sal_log_sect_min_sizes[i])
			sect_min_size = sal_log_sect_min_sizes[i];

@@ -440,7 +440,6 @@ start_secondary (void *unused)
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -23,6 +23,9 @@ config ATARI
	  this kernel on an Atari, say Y here and browse the material
	  available in <file:Documentation/m68k>; otherwise say N.

config ATARI_KBD_CORE
	bool

config MAC
	bool "Macintosh support"
	depends on MMU

@@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024

@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

@@ -134,8 +134,6 @@ asmlinkage __init void secondary_start_kernel(void)
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us
	 */

@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
#endif

	smp_cpu_init(slave_id);
	preempt_disable();

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
	return cpu | (threads_per_core - 1);
}

/*
 * tlb_thread_siblings are siblings which share a TLB. This is not
 * architected, is not something a hypervisor could emulate and a future
 * CPU may change behaviour even in compat mode, so this should only be
 * used on PowerNV, and only with care.
 */
static inline int cpu_first_tlb_thread_sibling(int cpu)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
		return cpu & ~0x6;	/* Big Core */
	else
		return cpu_first_thread_sibling(cpu);
}

static inline int cpu_last_tlb_thread_sibling(int cpu)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
		return cpu | 0x6;	/* Big Core */
	else
		return cpu_last_thread_sibling(cpu);
}

static inline int cpu_tlb_thread_sibling_step(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
		return 2;		/* Big Core */
	else
		return 1;
}

static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
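The helpers added above are meant for walking every hardware thread that shares a TLB with a given CPU, similar to the loops used by the KVM Book3S HV TLB fix in this release. A minimal kernel-context sketch:

  /* Sketch: iterate the TLB-sharing siblings of 'cpu' using the new helpers.
   * Kernel context assumed; the loop body is illustrative only.
   */
  #include <linux/printk.h>
  #include <asm/cputhreads.h>

  static void walk_tlb_siblings(int cpu)
  {
          int sibling;

          for (sibling = cpu_first_tlb_thread_sibling(cpu);
               sibling <= cpu_last_tlb_thread_sibling(cpu);
               sibling += cpu_tlb_thread_sibling_step())
                  pr_debug("CPU %d shares a TLB with CPU %d\n", cpu, sibling);
  }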
@@ -475,12 +475,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
	return -1;
}

static int mce_handle_ierror(struct pt_regs *regs,
static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
		const struct mce_ierror_table table[],
		struct mce_error_info *mce_err, uint64_t *addr,
		uint64_t *phys_addr)
{
	uint64_t srr1 = regs->msr;
	int handled = 0;
	int i;

@@ -683,19 +682,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
}

static long mce_handle_error(struct pt_regs *regs,
		unsigned long srr1,
		const struct mce_derror_table dtable[],
		const struct mce_ierror_table itable[])
{
	struct mce_error_info mce_err = { 0 };
	uint64_t addr, phys_addr = ULONG_MAX;
	uint64_t srr1 = regs->msr;
	long handled;

	if (SRR1_MC_LOADSTORE(srr1))
		handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
				&phys_addr);
	else
		handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
		handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
				&phys_addr);

	if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
@@ -711,16 +710,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
	/* P7 DD1 leaves top bits of DSISR undefined */
	regs->dsisr &= 0x0000ffff;

	return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
	return mce_handle_error(regs, regs->msr,
			mce_p7_derror_table, mce_p7_ierror_table);
}

long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
	return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
	return mce_handle_error(regs, regs->msr,
			mce_p8_derror_table, mce_p8_ierror_table);
}

long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
	unsigned long srr1 = regs->msr;

	/*
	 * On POWER9 DD2.1 and below, it's possible to get a machine check
	 * caused by a paste instruction where only DSISR bit 25 is set. This
@@ -734,10 +737,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
	if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
		return 1;

	return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
	/*
	 * Async machine check due to bad real address from store or foreign
	 * link time out comes with the load/store bit (PPC bit 42) set in
	 * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
	 * directed to the ierror table so it will find the cause (which
	 * describes it correctly as a store error).
	 */
	if (SRR1_MC_LOADSTORE(srr1) &&
			((srr1 & 0x081c0000) == 0x08140000 ||
			 (srr1 & 0x081c0000) == 0x08180000)) {
		srr1 &= ~PPC_BIT(42);
	}

	return mce_handle_error(regs, srr1,
			mce_p9_derror_table, mce_p9_ierror_table);
}

long __machine_check_early_realmode_p10(struct pt_regs *regs)
{
	return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
	unsigned long srr1 = regs->msr;

	/*
	 * Async machine check due to bad real address from store comes with
	 * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
	 * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
	 * so it will find the cause (which describes it correctly as a store
	 * error).
	 */
	if (SRR1_MC_LOADSTORE(srr1) &&
			(srr1 & 0x081c0000) == 0x08140000) {
		srr1 &= ~PPC_BIT(42);
	}

	return mce_handle_error(regs, srr1,
			mce_p10_derror_table, mce_p10_ierror_table);
}
@@ -1227,6 +1227,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
__flush_tlb_pending(batch);
batch->active = 0;
}

/*
* On POWER9 the copy-paste buffer can only paste into
* foreign real addresses, so unprivileged processes can not
* see the data or use it in any way unless they have
* foreign real mappings. If the new process has the foreign
* real address mappings, we must issue a cp_abort to clear
* any state and prevent snooping, corruption or a covert
* channel. ISA v3.1 supports paste into local memory.
*/
if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
atomic_read(&new->mm->context.vas_windows)))
asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS

@@ -1272,30 +1285,33 @@ struct task_struct *__switch_to(struct task_struct *prev,

last = _switch(old_thread, new_thread);

/*
* Nothing after _switch will be run for newly created tasks,
* because they switch directly to ret_from_fork/ret_from_kernel_thread
* etc. Code added here should have a comment explaining why that is
* okay.
*/

#ifdef CONFIG_PPC_BOOK3S_64
/*
* This applies to a process that was context switched while inside
* arch_enter_lazy_mmu_mode(), to re-activate the batch that was
* deactivated above, before _switch(). This will never be the case
* for new tasks.
*/
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}

if (current->thread.regs) {
restore_math(current->thread.regs);

/*
* On POWER9 the copy-paste buffer can only paste into
* foreign real addresses, so unprivileged processes can not
* see the data or use it in any way unless they have
* foreign real mappings. If the new process has the foreign
* real address mappings, we must issue a cp_abort to clear
* any state and prevent snooping, corruption or a covert
* channel. ISA v3.1 supports paste into local memory.
* Math facilities are masked out of the child MSR in copy_thread.
* A new task does not need to restore_math because it will
* demand fault them.
*/
if (current->mm &&
(cpu_has_feature(CPU_FTR_ARCH_31) ||
atomic_read(&current->mm->context.vas_windows)))
asm volatile(PPC_CP_ABORT);
}
if (current->thread.regs)
restore_math(current->thread.regs);
#endif /* CONFIG_PPC_BOOK3S_64 */

return last;

@@ -600,6 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
/*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
set_cpu_online(smp_processor_id(), false);

spin_begin();
while (1)
spin_cpu_relax();

@@ -615,6 +617,15 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();

/*
* Offlining CPUs in stop_this_cpu can result in scheduler warnings,
* (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
* to know other CPUs are offline before it breaks locks to flush
* printk buffers, in case we panic()ed while holding the lock.
*/
set_cpu_online(smp_processor_id(), false);

spin_begin();
while (1)
spin_cpu_relax();

@@ -1426,7 +1437,6 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
rcu_cpu_starting(cpu);
preempt_disable();
cpu_callin_map[cpu] = 1;

if (smp_ops->setup_cpu)

@@ -19,6 +19,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

@@ -230,17 +231,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)

static void raise_backtrace_ipi(cpumask_t *mask)
{
struct paca_struct *p;
unsigned int cpu;
u64 delay_us;

for_each_cpu(cpu, mask) {
if (cpu == smp_processor_id())
if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
else
smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
continue;
}

for_each_cpu(cpu, mask) {
struct paca_struct *p = paca_ptrs[cpu];
delay_us = 5 * USEC_PER_SEC;

if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
// Now wait up to 5s for the other CPU to do its backtrace
while (cpumask_test_cpu(cpu, mask) && delay_us) {
udelay(1);
delay_us--;
}

// Other CPU cleared itself from the mask
if (delay_us)
continue;
}

p = paca_ptrs[cpu];

cpumask_clear_cpu(cpu, mask);

@@ -2578,7 +2578,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpumask_t *cpu_in_guest;
int i;

cpu = cpu_first_thread_sibling(cpu);
cpu = cpu_first_tlb_thread_sibling(cpu);
if (nested) {
cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu_in_guest = &nested->cpu_in_guest;

@@ -2592,9 +2592,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
* the other side is the first smp_mb() in kvmppc_run_core().
*/
smp_mb();
for (i = 0; i < threads_per_core; ++i)
if (cpumask_test_cpu(cpu + i, cpu_in_guest))
smp_call_function_single(cpu + i, do_nothing, NULL, 1);
for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
i += cpu_tlb_thread_sibling_step())
if (cpumask_test_cpu(i, cpu_in_guest))
smp_call_function_single(i, do_nothing, NULL, 1);
}

static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
|
||||
@ -2625,8 +2626,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
|
||||
*/
|
||||
if (prev_cpu != pcpu) {
|
||||
if (prev_cpu >= 0 &&
|
||||
cpu_first_thread_sibling(prev_cpu) !=
|
||||
cpu_first_thread_sibling(pcpu))
|
||||
cpu_first_tlb_thread_sibling(prev_cpu) !=
|
||||
cpu_first_tlb_thread_sibling(pcpu))
|
||||
radix_flush_cpu(kvm, prev_cpu, vcpu);
|
||||
if (nested)
|
||||
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
|
||||
|
@ -893,7 +893,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
|
||||
* Thus we make all 4 threads use the same bit.
|
||||
*/
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300))
|
||||
pcpu = cpu_first_thread_sibling(pcpu);
|
||||
pcpu = cpu_first_tlb_thread_sibling(pcpu);
|
||||
|
||||
if (nested)
|
||||
need_tlb_flush = &nested->need_tlb_flush;
|
||||
|
@ -51,7 +51,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
|
||||
hr->ppr = vcpu->arch.ppr;
|
||||
}
|
||||
|
||||
static void byteswap_pt_regs(struct pt_regs *regs)
|
||||
/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
|
||||
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long *addr = (unsigned long *) regs;
|
||||
|
||||
|
@ -67,7 +67,7 @@ static int global_invalidates(struct kvm *kvm)
|
||||
* so use the bit for the first thread to represent the core.
|
||||
*/
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300))
|
||||
cpu = cpu_first_thread_sibling(cpu);
|
||||
cpu = cpu_first_tlb_thread_sibling(cpu);
|
||||
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
|
||||
}
|
||||
|
||||
|
@ -78,9 +78,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
|
||||
|
||||
pcpu = get_hard_smp_processor_id(lcpu);
|
||||
|
||||
/* Fixup atomic count: it exited inside IRQ handler. */
|
||||
task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
|
||||
|
||||
/*
|
||||
* If the RTAS start-cpu token does not exist then presume the
|
||||
* cpu is already spinning.
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <asm/plpar_wrappers.h>
|
||||
#include <asm/papr_pdsm.h>
|
||||
#include <asm/mce.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#define BIND_ANY_ADDR (~0ul)
|
||||
|
||||
@ -867,6 +868,20 @@ static ssize_t flags_show(struct device *dev,
|
||||
}
|
||||
DEVICE_ATTR_RO(flags);
|
||||
|
||||
static umode_t papr_nd_attribute_visible(struct kobject *kobj,
|
||||
struct attribute *attr, int n)
|
||||
{
|
||||
struct device *dev = kobj_to_dev(kobj);
|
||||
struct nvdimm *nvdimm = to_nvdimm(dev);
|
||||
struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
|
||||
|
||||
/* For if perf-stats not available remove perf_stats sysfs */
|
||||
if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
|
||||
return 0;
|
||||
|
||||
return attr->mode;
|
||||
}
|
||||
|
||||
/* papr_scm specific dimm attributes */
|
||||
static struct attribute *papr_nd_attributes[] = {
|
||||
&dev_attr_flags.attr,
|
||||
@ -876,6 +891,7 @@ static struct attribute *papr_nd_attributes[] = {
|
||||
|
||||
static struct attribute_group papr_nd_attribute_group = {
|
||||
.name = "papr",
|
||||
.is_visible = papr_nd_attribute_visible,
|
||||
.attrs = papr_nd_attributes,
|
||||
};
|
||||
|
||||
@ -891,7 +907,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
|
||||
struct nd_region_desc ndr_desc;
|
||||
unsigned long dimm_flags;
|
||||
int target_nid, online_nid;
|
||||
ssize_t stat_size;
|
||||
|
||||
p->bus_desc.ndctl = papr_scm_ndctl;
|
||||
p->bus_desc.module = THIS_MODULE;
|
||||
@ -962,16 +977,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
|
||||
list_add_tail(&p->region_list, &papr_nd_regions);
|
||||
mutex_unlock(&papr_ndr_lock);
|
||||
|
||||
/* Try retriving the stat buffer and see if its supported */
|
||||
stat_size = drc_pmem_query_stats(p, NULL, 0);
|
||||
if (stat_size > 0) {
|
||||
p->stat_buffer_len = stat_size;
|
||||
dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
|
||||
p->stat_buffer_len);
|
||||
} else {
|
||||
dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err: nvdimm_bus_unregister(p->bus);
|
||||
@ -1047,8 +1052,10 @@ static int papr_scm_probe(struct platform_device *pdev)
|
||||
u32 drc_index, metadata_size;
|
||||
u64 blocks, block_size;
|
||||
struct papr_scm_priv *p;
|
||||
u8 uuid_raw[UUID_SIZE];
|
||||
const char *uuid_str;
|
||||
u64 uuid[2];
|
||||
ssize_t stat_size;
|
||||
uuid_t uuid;
|
||||
int rc;
|
||||
|
||||
/* check we have all the required DT properties */
|
||||
@ -1090,16 +1097,23 @@ static int papr_scm_probe(struct platform_device *pdev)
|
||||
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
|
||||
|
||||
/* We just need to ensure that set cookies are unique across */
|
||||
uuid_parse(uuid_str, (uuid_t *) uuid);
|
||||
uuid_parse(uuid_str, &uuid);
|
||||
|
||||
/*
|
||||
* cookie1 and cookie2 are not really little endian
|
||||
* we store a little endian representation of the
|
||||
* uuid str so that we can compare this with the label
|
||||
* area cookie irrespective of the endian config with which
|
||||
* the kernel is built.
|
||||
* The cookie1 and cookie2 are not really little endian.
|
||||
* We store a raw buffer representation of the
|
||||
* uuid string so that we can compare this with the label
|
||||
* area cookie irrespective of the endian configuration
|
||||
* with which the kernel is built.
|
||||
*
|
||||
* Historically we stored the cookie in the below format.
|
||||
* for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
|
||||
* cookie1 was 0xfd423b0b671b5172
|
||||
* cookie2 was 0xaabce8cae35b1d8d
|
||||
*/
|
||||
p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
|
||||
p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
|
||||
export_uuid(uuid_raw, &uuid);
|
||||
p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
|
||||
p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
|
||||
|
||||
/* might be zero */
|
||||
p->metadata_size = metadata_size;
|
||||
@ -1124,6 +1138,14 @@ static int papr_scm_probe(struct platform_device *pdev)
|
||||
p->res.name = pdev->name;
|
||||
p->res.flags = IORESOURCE_MEM;
|
||||
|
||||
/* Try retrieving the stat buffer and see if its supported */
|
||||
stat_size = drc_pmem_query_stats(p, NULL, 0);
|
||||
if (stat_size > 0) {
|
||||
p->stat_buffer_len = stat_size;
|
||||
dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
|
||||
p->stat_buffer_len);
|
||||
}
|
||||
|
||||
rc = papr_scm_nvdimm_init(p);
|
||||
if (rc)
|
||||
goto err2;
|
||||
|
@ -104,9 +104,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Fixup atomic count: it exited inside IRQ handler. */
|
||||
task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
|
||||
|
||||
/*
|
||||
* If the RTAS start-cpu token does not exist then presume the
|
||||
* cpu is already spinning.
|
||||
|
@ -166,7 +166,6 @@ asmlinkage __visible void smp_callin(void)
|
||||
* Disable preemption before enabling interrupts, so we don't try to
|
||||
* schedule a CPU that hasn't actually started yet.
|
||||
*/
|
||||
preempt_disable();
|
||||
local_irq_enable();
|
||||
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
|
||||
}
|
||||
|
@ -154,6 +154,8 @@ config S390
|
||||
select HAVE_FUTEX_CMPXCHG if FUTEX
|
||||
select HAVE_GCC_PLUGINS
|
||||
select HAVE_GENERIC_VDSO
|
||||
select HAVE_IOREMAP_PROT if PCI
|
||||
select HAVE_IRQ_EXIT_ON_IRQ_STACK
|
||||
select HAVE_KERNEL_BZIP2
|
||||
select HAVE_KERNEL_GZIP
|
||||
select HAVE_KERNEL_LZ4
|
||||
@ -856,7 +858,7 @@ config CMM_IUCV
|
||||
config APPLDATA_BASE
|
||||
def_bool n
|
||||
prompt "Linux - VM Monitor Stream, base infrastructure"
|
||||
depends on PROC_FS
|
||||
depends on PROC_SYSCTL
|
||||
help
|
||||
This provides a kernel interface for creating and updating z/VM APPLDATA
|
||||
monitor records. The monitor records are updated at certain time
|
||||
|
@ -36,6 +36,7 @@ void uv_query_info(void)
|
||||
uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
|
||||
uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
|
||||
uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
|
||||
uv_info.uv_feature_indications = uvcb.uv_feature_indications;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
|
||||
|
@ -864,6 +864,25 @@ static inline int pte_unused(pte_t pte)
|
||||
return pte_val(pte) & _PAGE_UNUSED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Extract the pgprot value from the given pte while at the same time making it
|
||||
* usable for kernel address space mappings where fault driven dirty and
|
||||
* young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
|
||||
* must not be set.
|
||||
*/
|
||||
static inline pgprot_t pte_pgprot(pte_t pte)
|
||||
{
|
||||
unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
|
||||
|
||||
if (pte_write(pte))
|
||||
pte_flags |= pgprot_val(PAGE_KERNEL);
|
||||
else
|
||||
pte_flags |= pgprot_val(PAGE_KERNEL_RO);
|
||||
pte_flags |= pte_val(pte) & mio_wb_bit_mask;
|
||||
|
||||
return __pgprot(pte_flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* pgd/pmd/pte modification functions
|
||||
*/
|
||||
|
@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
|
||||
old, new) != old);
|
||||
}
|
||||
|
||||
#define init_task_preempt_count(p) do { } while (0)
|
||||
|
||||
#define init_idle_preempt_count(p, cpu) do { \
|
||||
S390_lowcore.preempt_count = PREEMPT_ENABLED; \
|
||||
} while (0)
|
||||
|
||||
static inline void set_preempt_need_resched(void)
|
||||
{
|
||||
__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
|
||||
@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
|
||||
S390_lowcore.preempt_count = pc;
|
||||
}
|
||||
|
||||
#define init_task_preempt_count(p) do { } while (0)
|
||||
|
||||
#define init_idle_preempt_count(p, cpu) do { \
|
||||
S390_lowcore.preempt_count = PREEMPT_ENABLED; \
|
||||
} while (0)
|
||||
|
||||
static inline void set_preempt_need_resched(void)
|
||||
{
|
||||
}
|
||||
@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
|
||||
|
||||
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
|
||||
|
||||
#define init_task_preempt_count(p) do { } while (0)
|
||||
/* Deferred to CPU bringup time */
|
||||
#define init_idle_preempt_count(p, cpu) do { } while (0)
|
||||
|
||||
#ifdef CONFIG_PREEMPTION
|
||||
extern asmlinkage void preempt_schedule(void);
|
||||
#define __preempt_schedule() preempt_schedule()
|
||||
|
@ -73,6 +73,10 @@ enum uv_cmds_inst {
|
||||
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
|
||||
};
|
||||
|
||||
enum uv_feat_ind {
|
||||
BIT_UV_FEAT_MISC = 0,
|
||||
};
|
||||
|
||||
struct uv_cb_header {
|
||||
u16 len;
|
||||
u16 cmd; /* Command Code */
|
||||
@ -97,7 +101,8 @@ struct uv_cb_qui {
|
||||
u64 max_guest_stor_addr;
|
||||
u8 reserved88[158 - 136];
|
||||
u16 max_guest_cpu_id;
|
||||
u8 reserveda0[200 - 160];
|
||||
u64 uv_feature_indications;
|
||||
u8 reserveda0[200 - 168];
|
||||
} __packed __aligned(8);
|
||||
|
||||
/* Initialize Ultravisor */
|
||||
@ -274,6 +279,7 @@ struct uv_info {
|
||||
unsigned long max_sec_stor_addr;
|
||||
unsigned int max_num_sec_conf;
|
||||
unsigned short max_guest_cpu_id;
|
||||
unsigned long uv_feature_indications;
|
||||
};
|
||||
|
||||
extern struct uv_info uv_info;
|
||||
|
@ -454,6 +454,7 @@ static void __init setup_lowcore_dat_off(void)
|
||||
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
|
||||
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
|
||||
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
|
||||
lc->preempt_count = PREEMPT_DISABLED;
|
||||
|
||||
set_prefix((u32)(unsigned long) lc);
|
||||
lowcore_ptr[0] = lc;
|
||||
|
@ -215,6 +215,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
|
||||
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
|
||||
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
|
||||
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
|
||||
lc->preempt_count = PREEMPT_DISABLED;
|
||||
if (nmi_alloc_per_cpu(lc))
|
||||
goto out_async;
|
||||
if (vdso_alloc_per_cpu(lc))
|
||||
@ -863,7 +864,6 @@ static void smp_init_secondary(void)
|
||||
set_cpu_flag(CIF_ASCE_SECONDARY);
|
||||
cpu_init();
|
||||
rcu_cpu_starting(cpu);
|
||||
preempt_disable();
|
||||
init_cpu_timer();
|
||||
vtime_init();
|
||||
pfault_init();
|
||||
|
@ -364,6 +364,15 @@ static ssize_t uv_query_facilities(struct kobject *kobj,
|
||||
static struct kobj_attribute uv_query_facilities_attr =
|
||||
__ATTR(facilities, 0444, uv_query_facilities, NULL);
|
||||
|
||||
static ssize_t uv_query_feature_indications(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
|
||||
}
|
||||
|
||||
static struct kobj_attribute uv_query_feature_indications_attr =
|
||||
__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);
|
||||
|
||||
static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *page)
|
||||
{
|
||||
@ -396,6 +405,7 @@ static struct kobj_attribute uv_query_max_guest_addr_attr =
|
||||
|
||||
static struct attribute *uv_query_attrs[] = {
|
||||
&uv_query_facilities_attr.attr,
|
||||
&uv_query_feature_indications_attr.attr,
|
||||
&uv_query_max_guest_cpus_attr.attr,
|
||||
&uv_query_max_guest_vms_attr.attr,
|
||||
&uv_query_max_guest_addr_attr.attr,
|
||||
|
@ -327,31 +327,31 @@ static void allow_cpu_feat(unsigned long nr)
|
||||
|
||||
static inline int plo_test_bit(unsigned char nr)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
|
||||
unsigned long function = (unsigned long)nr | 0x100;
|
||||
int cc;
|
||||
|
||||
asm volatile(
|
||||
" lgr 0,%[function]\n"
|
||||
/* Parameter registers are ignored for "test bit" */
|
||||
" plo 0,0,0,0(0)\n"
|
||||
" ipm %0\n"
|
||||
" srl %0,28\n"
|
||||
: "=d" (cc)
|
||||
: "d" (r0)
|
||||
: "cc");
|
||||
: [function] "d" (function)
|
||||
: "cc", "0");
|
||||
return cc == 0;
|
||||
}
|
||||
|
||||
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
|
||||
{
|
||||
register unsigned long r0 asm("0") = 0; /* query function */
|
||||
register unsigned long r1 asm("1") = (unsigned long) query;
|
||||
|
||||
asm volatile(
|
||||
/* Parameter regs are ignored */
|
||||
" lghi 0,0\n"
|
||||
" lgr 1,%[query]\n"
|
||||
/* Parameter registers are ignored */
|
||||
" .insn rrf,%[opc] << 16,2,4,6,0\n"
|
||||
:
|
||||
: "d" (r0), "a" (r1), [opc] "i" (opcode)
|
||||
: "cc", "memory");
|
||||
: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
#define INSN_SORTL 0xb938
|
||||
|
@ -805,6 +805,32 @@ void do_secure_storage_access(struct pt_regs *regs)
|
||||
struct page *page;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* bit 61 tells us if the address is valid, if it's not we
|
||||
* have a major problem and should stop the kernel or send a
|
||||
* SIGSEGV to the process. Unfortunately bit 61 is not
|
||||
* reliable without the misc UV feature so we need to check
|
||||
* for that as well.
|
||||
*/
|
||||
if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
|
||||
!test_bit_inv(61, &regs->int_parm_long)) {
|
||||
/*
|
||||
* When this happens, userspace did something that it
|
||||
* was not supposed to do, e.g. branching into secure
|
||||
* memory. Trigger a segmentation fault.
|
||||
*/
|
||||
if (user_mode(regs)) {
|
||||
send_sig(SIGSEGV, current, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The kernel should never run into this case and we
|
||||
* have no way out of this situation.
|
||||
*/
|
||||
panic("Unexpected PGM 0x3d with TEID bit 61=0");
|
||||
}
|
||||
|
||||
switch (get_fault_type(regs)) {
|
||||
case USER_FAULT:
|
||||
mm = current->mm;
|
||||
|
@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
|
||||
|
||||
per_cpu_trap_init();
|
||||
|
||||
preempt_disable();
|
||||
|
||||
notify_cpu_starting(cpu);
|
||||
|
||||
local_irq_enable();
|
||||
|
@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
|
||||
*/
|
||||
arch_cpu_pre_starting(arg);
|
||||
|
||||
preempt_disable();
|
||||
cpu = smp_processor_id();
|
||||
|
||||
notify_cpu_starting(cpu);
|
||||
|
@ -138,9 +138,6 @@ void smp_callin(void)
|
||||
|
||||
set_cpu_online(cpuid, true);
|
||||
|
||||
/* idle thread is expected to have preempt disabled */
|
||||
preempt_disable();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
|
||||
|
@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
|
||||
static void __exit curve25519_mod_exit(void)
|
||||
{
|
||||
if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
|
||||
(boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
|
||||
static_branch_likely(&curve25519_use_bmi2_adx))
|
||||
crypto_unregister_kpp(&curve25519_alg);
|
||||
}
|
||||
|
||||
|
@ -508,7 +508,7 @@ SYM_CODE_START(\asmsym)
|
||||
|
||||
movq %rsp, %rdi /* pt_regs pointer */
|
||||
|
||||
call \cfunc
|
||||
call kernel_\cfunc
|
||||
|
||||
/*
|
||||
* No need to switch back to the IST stack. The current stack is either
|
||||
@ -519,7 +519,7 @@ SYM_CODE_START(\asmsym)
|
||||
|
||||
/* Switch to the regular task stack */
|
||||
.Lfrom_usermode_switch_stack_\@:
|
||||
idtentry_body safe_stack_\cfunc, has_error_code=1
|
||||
idtentry_body user_\cfunc, has_error_code=1
|
||||
|
||||
_ASM_NOKPROBE(\asmsym)
|
||||
SYM_CODE_END(\asmsym)
|
||||
|
@ -315,8 +315,8 @@ static __always_inline void __##func(struct pt_regs *regs)
|
||||
*/
|
||||
#define DECLARE_IDTENTRY_VC(vector, func) \
|
||||
DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
|
||||
__visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
|
||||
__visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
|
||||
__visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
|
||||
__visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
|
||||
|
||||
/**
|
||||
* DEFINE_IDTENTRY_IST - Emit code for IST entry points
|
||||
@ -358,33 +358,24 @@ static __always_inline void __##func(struct pt_regs *regs)
|
||||
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
|
||||
|
||||
/**
|
||||
* DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
|
||||
which runs on a safe stack.
|
||||
* DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
|
||||
when raised from kernel mode
|
||||
* @func: Function name of the entry point
|
||||
*
|
||||
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
|
||||
*/
|
||||
#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
|
||||
DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
|
||||
#define DEFINE_IDTENTRY_VC_KERNEL(func) \
|
||||
DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
|
||||
|
||||
/**
|
||||
* DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
|
||||
which runs on the VC fall-back stack
|
||||
* DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
|
||||
when raised from user mode
|
||||
* @func: Function name of the entry point
|
||||
*
|
||||
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
|
||||
*/
|
||||
#define DEFINE_IDTENTRY_VC_IST(func) \
|
||||
DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
|
||||
|
||||
/**
|
||||
* DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
|
||||
* @func: Function name of the entry point
|
||||
*
|
||||
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
|
||||
*/
|
||||
#define DEFINE_IDTENTRY_VC(func) \
|
||||
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
|
||||
#define DEFINE_IDTENTRY_VC_USER(func) \
|
||||
DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
|
||||
|
||||
#else /* CONFIG_X86_64 */
|
||||
|
||||
|
@ -84,7 +84,7 @@
|
||||
#define KVM_REQ_APICV_UPDATE \
|
||||
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
|
||||
#define KVM_REQ_HV_TLB_FLUSH \
|
||||
#define KVM_REQ_TLB_FLUSH_GUEST \
|
||||
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
|
||||
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
|
||||
|
@ -43,7 +43,7 @@ static __always_inline void preempt_count_set(int pc)
|
||||
#define init_task_preempt_count(p) do { } while (0)
|
||||
|
||||
#define init_idle_preempt_count(p, cpu) do { \
|
||||
per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
|
||||
per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
|
@ -2,10 +2,12 @@
|
||||
#ifndef _ASM_X86_HWCAP2_H
|
||||
#define _ASM_X86_HWCAP2_H
|
||||
|
||||
#include <linux/const.h>
|
||||
|
||||
/* MONITOR/MWAIT enabled in Ring 3 */
|
||||
#define HWCAP2_RING3MWAIT (1 << 0)
|
||||
#define HWCAP2_RING3MWAIT _BITUL(0)
|
||||
|
||||
/* Kernel allows FSGSBASE instructions available in Ring 3 */
|
||||
#define HWCAP2_FSGSBASE BIT(1)
|
||||
#define HWCAP2_FSGSBASE _BITUL(1)
|
||||
|
||||
#endif
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/sched/debug.h> /* For show_regs() */
|
||||
#include <linux/percpu-defs.h>
|
||||
#include <linux/mem_encrypt.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/set_memory.h>
|
||||
@ -180,11 +179,19 @@ void noinstr __sev_es_ist_exit(void)
|
||||
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
|
||||
}
|
||||
|
||||
static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
|
||||
/*
|
||||
* Nothing shall interrupt this code path while holding the per-CPU
|
||||
* GHCB. The backup GHCB is only for NMIs interrupting this path.
|
||||
*
|
||||
* Callers must disable local interrupts around it.
|
||||
*/
|
||||
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
|
||||
{
|
||||
struct sev_es_runtime_data *data;
|
||||
struct ghcb *ghcb;
|
||||
|
||||
WARN_ON(!irqs_disabled());
|
||||
|
||||
data = this_cpu_read(runtime_data);
|
||||
ghcb = &data->ghcb_page;
|
||||
|
||||
@ -201,7 +208,9 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
|
||||
data->ghcb_active = false;
|
||||
data->backup_ghcb_active = false;
|
||||
|
||||
instrumentation_begin();
|
||||
panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
|
||||
instrumentation_end();
|
||||
}
|
||||
|
||||
/* Mark backup_ghcb active before writing to it */
|
||||
@ -452,11 +461,13 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
|
||||
/* Include code shared with pre-decompression boot stage */
|
||||
#include "sev-es-shared.c"
|
||||
|
||||
static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
|
||||
static noinstr void __sev_put_ghcb(struct ghcb_state *state)
|
||||
{
|
||||
struct sev_es_runtime_data *data;
|
||||
struct ghcb *ghcb;
|
||||
|
||||
WARN_ON(!irqs_disabled());
|
||||
|
||||
data = this_cpu_read(runtime_data);
|
||||
ghcb = &data->ghcb_page;
|
||||
|
||||
@ -480,7 +491,7 @@ void noinstr __sev_es_nmi_complete(void)
|
||||
struct ghcb_state state;
|
||||
struct ghcb *ghcb;
|
||||
|
||||
ghcb = sev_es_get_ghcb(&state);
|
||||
ghcb = __sev_get_ghcb(&state);
|
||||
|
||||
vc_ghcb_invalidate(ghcb);
|
||||
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
|
||||
@ -490,7 +501,7 @@ void noinstr __sev_es_nmi_complete(void)
|
||||
sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
|
||||
VMGEXIT();
|
||||
|
||||
sev_es_put_ghcb(&state);
|
||||
__sev_put_ghcb(&state);
|
||||
}
|
||||
|
||||
static u64 get_jump_table_addr(void)
|
||||
@ -502,7 +513,7 @@ static u64 get_jump_table_addr(void)
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
ghcb = sev_es_get_ghcb(&state);
|
||||
ghcb = __sev_get_ghcb(&state);
|
||||
|
||||
vc_ghcb_invalidate(ghcb);
|
||||
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
|
||||
@ -516,7 +527,7 @@ static u64 get_jump_table_addr(void)
|
||||
ghcb_sw_exit_info_2_is_valid(ghcb))
|
||||
ret = ghcb->save.sw_exit_info_2;
|
||||
|
||||
sev_es_put_ghcb(&state);
|
||||
__sev_put_ghcb(&state);
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
@ -641,7 +652,7 @@ static void sev_es_ap_hlt_loop(void)
|
||||
struct ghcb_state state;
|
||||
struct ghcb *ghcb;
|
||||
|
||||
ghcb = sev_es_get_ghcb(&state);
|
||||
ghcb = __sev_get_ghcb(&state);
|
||||
|
||||
while (true) {
|
||||
vc_ghcb_invalidate(ghcb);
|
||||
@ -658,7 +669,7 @@ static void sev_es_ap_hlt_loop(void)
|
||||
break;
|
||||
}
|
||||
|
||||
sev_es_put_ghcb(&state);
|
||||
__sev_put_ghcb(&state);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -748,7 +759,7 @@ void __init sev_es_init_vc_handling(void)
|
||||
sev_es_setup_play_dead();
|
||||
|
||||
/* Secondary CPUs use the runtime #VC handler */
|
||||
initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
|
||||
initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
|
||||
}
|
||||
|
||||
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
|
||||
@ -1186,14 +1197,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
|
||||
return ES_EXCEPTION;
|
||||
}
|
||||
|
||||
static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
noist_exc_debug(regs);
|
||||
else
|
||||
exc_debug(regs);
|
||||
}
|
||||
|
||||
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
|
||||
struct ghcb *ghcb,
|
||||
unsigned long exit_code)
|
||||
@ -1289,44 +1292,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
|
||||
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
|
||||
}
|
||||
|
||||
/*
|
||||
* Main #VC exception handler. It is called when the entry code was able to
|
||||
* switch off the IST to a safe kernel stack.
|
||||
*
|
||||
* With the current implementation it is always possible to switch to a safe
|
||||
* stack because #VC exceptions only happen at known places, like intercepted
|
||||
* instructions or accesses to MMIO areas/IO ports. They can also happen with
|
||||
* code instrumentation when the hypervisor intercepts #DB, but the critical
|
||||
* paths are forbidden to be instrumented, so #DB exceptions currently also
|
||||
* only happen in safe places.
|
||||
*/
|
||||
DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
||||
static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
|
||||
{
|
||||
irqentry_state_t irq_state;
|
||||
struct ghcb_state state;
|
||||
struct es_em_ctxt ctxt;
|
||||
enum es_result result;
|
||||
struct ghcb *ghcb;
|
||||
bool ret = true;
|
||||
|
||||
/*
|
||||
* Handle #DB before calling into !noinstr code to avoid recursive #DB.
|
||||
*/
|
||||
if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
|
||||
vc_handle_trap_db(regs);
|
||||
return;
|
||||
}
|
||||
|
||||
irq_state = irqentry_nmi_enter(regs);
|
||||
lockdep_assert_irqs_disabled();
|
||||
instrumentation_begin();
|
||||
|
||||
/*
|
||||
* This is invoked through an interrupt gate, so IRQs are disabled. The
|
||||
* code below might walk page-tables for user or kernel addresses, so
|
||||
* keep the IRQs disabled to protect us against concurrent TLB flushes.
|
||||
*/
|
||||
|
||||
ghcb = sev_es_get_ghcb(&state);
|
||||
ghcb = __sev_get_ghcb(&state);
|
||||
|
||||
vc_ghcb_invalidate(ghcb);
|
||||
result = vc_init_em_ctxt(&ctxt, regs, error_code);
|
||||
@ -1334,7 +1308,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
||||
if (result == ES_OK)
|
||||
result = vc_handle_exitcode(&ctxt, ghcb, error_code);
|
||||
|
||||
sev_es_put_ghcb(&state);
|
||||
__sev_put_ghcb(&state);
|
||||
|
||||
/* Done - now check the result */
|
||||
switch (result) {
|
||||
@ -1344,15 +1318,18 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
||||
case ES_UNSUPPORTED:
|
||||
pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
|
||||
error_code, regs->ip);
|
||||
goto fail;
|
||||
ret = false;
|
||||
break;
|
||||
case ES_VMM_ERROR:
|
||||
pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
|
||||
error_code, regs->ip);
|
||||
goto fail;
|
||||
ret = false;
|
||||
break;
|
||||
case ES_DECODE_FAILED:
|
||||
pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
|
||||
error_code, regs->ip);
|
||||
goto fail;
|
||||
ret = false;
|
||||
break;
|
||||
case ES_EXCEPTION:
|
||||
vc_forward_exception(&ctxt);
|
||||
break;
|
||||
@ -1368,24 +1345,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
||||
BUG();
|
||||
}
|
||||
|
||||
out:
|
||||
instrumentation_end();
|
||||
irqentry_nmi_exit(regs, irq_state);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return;
|
||||
static __always_inline bool vc_is_db(unsigned long error_code)
|
||||
{
|
||||
return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
|
||||
}
|
||||
|
||||
fail:
|
||||
if (user_mode(regs)) {
|
||||
/*
|
||||
* Do not kill the machine if user-space triggered the
|
||||
* exception. Send SIGBUS instead and let user-space deal with
|
||||
* it.
|
||||
/*
|
||||
* Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
|
||||
* and will panic when an error happens.
|
||||
*/
|
||||
force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
|
||||
} else {
|
||||
pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
|
||||
result);
|
||||
DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
|
||||
{
|
||||
irqentry_state_t irq_state;
|
||||
|
||||
/*
|
||||
* With the current implementation it is always possible to switch to a
|
||||
* safe stack because #VC exceptions only happen at known places, like
|
||||
* intercepted instructions or accesses to MMIO areas/IO ports. They can
|
||||
* also happen with code instrumentation when the hypervisor intercepts
|
||||
* #DB, but the critical paths are forbidden to be instrumented, so #DB
|
||||
* exceptions currently also only happen in safe places.
|
||||
*
|
||||
* But keep this here in case the noinstr annotations are violated due
|
||||
* to bug elsewhere.
|
||||
*/
|
||||
if (unlikely(on_vc_fallback_stack(regs))) {
|
||||
instrumentation_begin();
|
||||
panic("Can't handle #VC exception from unsupported context\n");
|
||||
instrumentation_end();
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle #DB before calling into !noinstr code to avoid recursive #DB.
|
||||
*/
|
||||
if (vc_is_db(error_code)) {
|
||||
exc_debug(regs);
|
||||
return;
|
||||
}
|
||||
|
||||
irq_state = irqentry_nmi_enter(regs);
|
||||
|
||||
instrumentation_begin();
|
||||
|
||||
if (!vc_raw_handle_exception(regs, error_code)) {
|
||||
/* Show some debug info */
|
||||
show_regs(regs);
|
||||
|
||||
@ -1396,23 +1401,38 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
|
||||
panic("Returned from Terminate-Request to Hypervisor\n");
|
||||
}
|
||||
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
|
||||
DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
|
||||
{
|
||||
instrumentation_begin();
|
||||
panic("Can't handle #VC exception from unsupported context\n");
|
||||
instrumentation_end();
|
||||
irqentry_nmi_exit(regs, irq_state);
|
||||
}
|
||||
|
||||
DEFINE_IDTENTRY_VC(exc_vmm_communication)
|
||||
/*
|
||||
* Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
|
||||
* and will kill the current task with SIGBUS when an error happens.
|
||||
*/
|
||||
DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
|
||||
{
|
||||
if (likely(!on_vc_fallback_stack(regs)))
|
||||
safe_stack_exc_vmm_communication(regs, error_code);
|
||||
else
|
||||
ist_exc_vmm_communication(regs, error_code);
|
||||
/*
|
||||
* Handle #DB before calling into !noinstr code to avoid recursive #DB.
|
||||
*/
|
||||
if (vc_is_db(error_code)) {
|
||||
noist_exc_debug(regs);
|
||||
return;
|
||||
}
|
||||
|
||||
irqentry_enter_from_user_mode(regs);
|
||||
instrumentation_begin();
|
||||
|
||||
if (!vc_raw_handle_exception(regs, error_code)) {
|
||||
/*
|
||||
* Do not kill the machine if user-space triggered the
|
||||
* exception. Send SIGBUS instead and let user-space deal with
|
||||
* it.
|
||||
*/
|
||||
force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
|
||||
}
|
||||
|
||||
instrumentation_end();
|
||||
irqentry_exit_to_user_mode(regs);
|
||||
}
|
||||
|
||||
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
|
||||
|
@ -230,7 +230,6 @@ static void notrace start_secondary(void *unused)
|
||||
cpu_init_exception_handling();
|
||||
cpu_init();
|
||||
x86_cpuinit.early_percpu_clock_init();
|
||||
preempt_disable();
|
||||
smp_callin();
|
||||
|
||||
enable_start_cpu0 = 0;
|
||||
|
@ -1151,7 +1151,8 @@ static struct clocksource clocksource_tsc = {
|
||||
.mask = CLOCKSOURCE_MASK(64),
|
||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
|
||||
CLOCK_SOURCE_VALID_FOR_HRES |
|
||||
CLOCK_SOURCE_MUST_VERIFY,
|
||||
CLOCK_SOURCE_MUST_VERIFY |
|
||||
CLOCK_SOURCE_VERIFY_PERCPU,
|
||||
.vdso_clock_mode = VDSO_CLOCKMODE_TSC,
|
||||
.enable = tsc_cs_enable,
|
||||
.resume = tsc_resume,
|
||||
|
@ -1564,7 +1564,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
|
||||
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
|
||||
* analyze it here, flush TLB regardless of the specified address space.
|
||||
*/
|
||||
kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
|
||||
kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
|
||||
NULL, vcpu_mask, &hv_vcpu->tlb_flush);
|
||||
|
||||
ret_success:
|
||||
|
@ -4133,7 +4133,15 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
|
||||
void
|
||||
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
|
||||
{
|
||||
bool uses_nx = context->nx ||
|
||||
/*
|
||||
* KVM uses NX when TDP is disabled to handle a variety of scenarios,
|
||||
* notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
|
||||
* to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
|
||||
* The iTLB multi-hit workaround can be toggled at any time, so assume
|
||||
* NX can be used by any non-nested shadow MMU to avoid having to reset
|
||||
* MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
|
||||
*/
|
||||
bool uses_nx = context->nx || !tdp_enabled ||
|
||||
context->mmu_role.base.smep_andnot_wp;
|
||||
struct rsvd_bits_validate *shadow_zero_check;
|
||||
int i;
|
||||
|
@ -471,8 +471,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
|
||||
|
||||
error:
|
||||
errcode |= write_fault | user_fault;
|
||||
if (fetch_fault && (mmu->nx ||
|
||||
kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
|
||||
if (fetch_fault && (mmu->nx || mmu->mmu_role.ext.cr4_smep))
|
||||
errcode |= PFERR_FETCH_MASK;
|
||||
|
||||
walker->fault.vector = PF_VECTOR;
|
||||
|
@ -527,7 +527,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
|
||||
kvm_pfn_t pfn, bool prefault)
|
||||
{
|
||||
u64 new_spte;
|
||||
int ret = 0;
|
||||
int ret = RET_PF_FIXED;
|
||||
int make_spte_ret = 0;
|
||||
|
||||
if (unlikely(is_noslot_pfn(pfn))) {
|
||||
|
@ -1142,12 +1142,19 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
|
||||
|
||||
/*
|
||||
* Unconditionally skip the TLB flush on fast CR3 switch, all TLB
|
||||
* flushes are handled by nested_vmx_transition_tlb_flush(). See
|
||||
* nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
|
||||
* flushes are handled by nested_vmx_transition_tlb_flush().
|
||||
*/
|
||||
if (!nested_ept)
|
||||
kvm_mmu_new_pgd(vcpu, cr3, true,
|
||||
!nested_vmx_transition_mmu_sync(vcpu));
|
||||
if (!nested_ept) {
|
||||
kvm_mmu_new_pgd(vcpu, cr3, true, true);
|
||||
|
||||
/*
|
||||
* A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
|
||||
* across all PCIDs, i.e. all PGDs need to be synchronized.
|
||||
* See nested_vmx_transition_mmu_sync() for more details.
|
||||
*/
|
||||
if (nested_vmx_transition_mmu_sync(vcpu))
|
||||
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
|
||||
}
|
||||
|
||||
vcpu->arch.cr3 = cr3;
|
||||
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
|
||||
@ -5477,8 +5484,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
|
||||
{
|
||||
u32 index = kvm_rcx_read(vcpu);
|
||||
u64 new_eptp;
|
||||
bool accessed_dirty;
|
||||
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
|
||||
|
||||
if (!nested_cpu_has_eptp_switching(vmcs12) ||
|
||||
!nested_cpu_has_ept(vmcs12))
|
||||
@ -5487,13 +5492,10 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
|
||||
if (index >= VMFUNC_EPTP_ENTRIES)
|
||||
return 1;
|
||||
|
||||
|
||||
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
|
||||
&new_eptp, index * 8, 8))
|
||||
return 1;
|
||||
|
||||
accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
|
||||
|
||||
/*
|
||||
* If the (L2) guest does a vmfunc to the currently
|
||||
* active ept pointer, we don't have to do anything else
|
||||
@ -5502,8 +5504,6 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
|
||||
if (!nested_vmx_check_eptp(vcpu, new_eptp))
|
||||
return 1;
|
||||
|
||||
mmu->ept_ad = accessed_dirty;
|
||||
mmu->mmu_role.base.ad_disabled = !accessed_dirty;
|
||||
vmcs12->ept_pointer = new_eptp;
|
||||
|
||||
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
|
||||
@ -5529,7 +5529,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
vmcs12 = get_vmcs12(vcpu);
|
||||
if ((vmcs12->vm_function_control & (1 << function)) == 0)
|
||||
if (!(vmcs12->vm_function_control & BIT_ULL(function)))
|
||||
goto fail;
|
||||
|
||||
switch (function) {
|
||||
@ -5787,6 +5787,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
|
||||
else if (is_breakpoint(intr_info) &&
|
||||
vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
|
||||
return true;
|
||||
else if (is_alignment_check(intr_info) &&
|
||||
!vmx_guest_inject_ac(vcpu))
|
||||
return true;
|
||||
return false;
|
||||
case EXIT_REASON_EXTERNAL_INTERRUPT:
|
||||
return true;
|
||||
|
@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
|
||||
return is_exception_n(intr_info, GP_VECTOR);
|
||||
}
|
||||
|
||||
static inline bool is_alignment_check(u32 intr_info)
|
||||
{
|
||||
return is_exception_n(intr_info, AC_VECTOR);
|
||||
}
|
||||
|
||||
static inline bool is_machine_check(u32 intr_info)
|
||||
{
|
||||
return is_exception_n(intr_info, MC_VECTOR);
|
||||
|
@ -4755,7 +4755,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
|
||||
* - Guest has #AC detection enabled in CR0
|
||||
* - Guest EFLAGS has AC bit set
|
||||
*/
|
||||
static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
|
||||
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
|
||||
return true;
|
||||
@ -4864,7 +4864,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
|
||||
kvm_run->debug.arch.exception = ex_no;
|
||||
break;
|
||||
case AC_VECTOR:
|
||||
if (guest_inject_ac(vcpu)) {
|
||||
if (vmx_guest_inject_ac(vcpu)) {
|
||||
kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
|
||||
return 1;
|
||||
}
|
||||
|
@ -352,6 +352,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
|
||||
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
|
||||
int root_level);
|
||||
|
||||
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
|
||||
void update_exception_bitmap(struct kvm_vcpu *vcpu);
|
||||
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
|
||||
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
|
||||
|
@ -8852,7 +8852,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
|
||||
kvm_vcpu_flush_tlb_current(vcpu);
|
||||
if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
|
||||
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
|
||||
kvm_vcpu_flush_tlb_guest(vcpu);
|
||||
|
||||
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
|
||||
|
@ -145,7 +145,6 @@ void secondary_start_kernel(void)
|
||||
cpumask_set_cpu(cpu, mm_cpumask(mm));
|
||||
enter_lazy_tlb(mm, current);
|
||||
|
||||
preempt_disable();
|
||||
trace_hardirqs_off();
|
||||
|
||||
calibrate_delay();
|
||||
|
@ -552,10 +552,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
|
||||
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
|
||||
unsigned int nr_phys_segs)
|
||||
{
|
||||
if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
|
||||
if (blk_integrity_merge_bio(req->q, req, bio) == false)
|
||||
goto no_merge;
|
||||
|
||||
if (blk_integrity_merge_bio(req->q, req, bio) == false)
|
||||
/* discard request merge won't add new segment */
|
||||
if (req_op(req) == REQ_OP_DISCARD)
|
||||
return 1;
|
||||
|
||||
if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
|
||||
goto no_merge;
|
||||
|
||||
/*
|
||||
|
@ -1243,9 +1243,6 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
|
||||
{
|
||||
unsigned int ewma;
|
||||
|
||||
if (hctx->queue->elevator)
|
||||
return;
|
||||
|
||||
ewma = hctx->dispatch_busy;
|
||||
|
||||
if (!ewma && !busy)
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <linux/blk_types.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/blk-mq.h>
|
||||
|
||||
#include "blk-mq-debugfs.h"
|
||||
|
||||
@ -87,8 +88,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
|
||||
|
||||
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
|
||||
{
|
||||
/*
|
||||
* No IO can be in-flight when adding rqos, so freeze queue, which
|
||||
* is fine since we only support rq_qos for blk-mq queue.
|
||||
*
|
||||
* Reuse ->queue_lock for protecting against other concurrent
|
||||
* rq_qos adding/deleting
|
||||
*/
|
||||
blk_mq_freeze_queue(q);
|
||||
|
||||
spin_lock_irq(&q->queue_lock);
|
||||
rqos->next = q->rq_qos;
|
||||
q->rq_qos = rqos;
|
||||
spin_unlock_irq(&q->queue_lock);
|
||||
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
||||
if (rqos->ops->debugfs_attrs)
|
||||
blk_mq_debugfs_register_rqos(rqos);
|
||||
@ -98,12 +112,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
|
||||
{
|
||||
struct rq_qos **cur;
|
||||
|
||||
/*
|
||||
* See comment in rq_qos_add() about freezing queue & using
|
||||
* ->queue_lock.
|
||||
*/
|
||||
blk_mq_freeze_queue(q);
|
||||
|
||||
spin_lock_irq(&q->queue_lock);
|
||||
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
|
||||
if (*cur == rqos) {
|
||||
*cur = rqos->next;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&q->queue_lock);
|
||||
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
||||
blk_mq_debugfs_unregister_rqos(rqos);
|
||||
}
|
||||
|
@ -77,7 +77,8 @@ enum {
|
||||
|
||||
static inline bool rwb_enabled(struct rq_wb *rwb)
|
||||
{
|
||||
return rwb && rwb->wb_normal != 0;
|
||||
return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
|
||||
rwb->wb_normal != 0;
|
||||
}
|
||||
|
||||
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
|
||||
@ -636,9 +637,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
|
||||
void wbt_enable_default(struct request_queue *q)
|
||||
{
|
||||
struct rq_qos *rqos = wbt_rq_qos(q);
|
||||
|
||||
/* Throttling already enabled? */
|
||||
if (rqos)
|
||||
if (rqos) {
|
||||
if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
|
||||
RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
|
||||
return;
|
||||
}
|
||||
|
||||
/* Queue not registered? Maybe shutting down... */
|
||||
if (!blk_queue_registered(q))
|
||||
@ -702,7 +707,7 @@ void wbt_disable_default(struct request_queue *q)
|
||||
rwb = RQWB(rqos);
|
||||
if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
|
||||
blk_stat_deactivate(rwb->cb);
|
||||
rwb->wb_normal = 0;
|
||||
rwb->enable_state = WBT_STATE_OFF_DEFAULT;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(wbt_disable_default);
|
||||
|
@ -34,6 +34,7 @@ enum {
|
||||
enum {
|
||||
WBT_STATE_ON_DEFAULT = 1,
|
||||
WBT_STATE_ON_MANUAL = 2,
|
||||
WBT_STATE_OFF_DEFAULT
|
||||
};
|
||||
|
||||
struct rq_wb {
|
||||
|
crypto/sm2.c
@@ -79,10 +79,17 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
 		goto free;
 
+	rc = -ENOMEM;
+
+	ec->Q = mpi_point_new(0);
+	if (!ec->Q)
+		goto free;
+
 	/* mpi_ec_setup_elliptic_curve */
 	ec->G = mpi_point_new(0);
-	if (!ec->G)
+	if (!ec->G) {
+		mpi_point_release(ec->Q);
 		goto free;
+	}
 
 	mpi_set(ec->G->x, x);
 	mpi_set(ec->G->y, y);
@@ -91,6 +98,7 @@ static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
 	rc = -EINVAL;
 	ec->n = mpi_scanval(ecp->n);
 	if (!ec->n) {
+		mpi_point_release(ec->Q);
 		mpi_point_release(ec->G);
 		goto free;
 	}
@@ -119,12 +127,6 @@ static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
 	memset(ec, 0, sizeof(*ec));
 }
 
-static int sm2_ec_ctx_reset(struct mpi_ec_ctx *ec)
-{
-	sm2_ec_ctx_deinit(ec);
-	return sm2_ec_ctx_init(ec);
-}
-
 /* RESULT must have been initialized and is set on success to the
  * point given by VALUE.
  */
@@ -132,55 +134,48 @@ static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
 {
 	int rc;
 	size_t n;
-	const unsigned char *buf;
-	unsigned char *buf_memory;
+	unsigned char *buf;
 	MPI x, y;
 
-	n = (mpi_get_nbits(value)+7)/8;
-	buf_memory = kmalloc(n, GFP_KERNEL);
-	rc = mpi_print(GCRYMPI_FMT_USG, buf_memory, n, &n, value);
-	if (rc) {
-		kfree(buf_memory);
-		return rc;
-	}
-	buf = buf_memory;
+	n = MPI_NBYTES(value);
+	buf = kmalloc(n, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	if (n < 1) {
-		kfree(buf_memory);
-		return -EINVAL;
-	}
-	if (*buf != 4) {
-		kfree(buf_memory);
-		return -EINVAL; /* No support for point compression. */
-	}
-	if (((n-1)%2)) {
-		kfree(buf_memory);
-		return -EINVAL;
-	}
-	n = (n-1)/2;
+	rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value);
+	if (rc)
+		goto err_freebuf;
+
+	rc = -EINVAL;
+	if (n < 1 || ((n - 1) % 2))
+		goto err_freebuf;
+	/* No support for point compression */
+	if (*buf != 0x4)
+		goto err_freebuf;
+
+	rc = -ENOMEM;
+	n = (n - 1) / 2;
 	x = mpi_read_raw_data(buf + 1, n);
-	if (!x) {
-		kfree(buf_memory);
-		return -ENOMEM;
-	}
+	if (!x)
+		goto err_freebuf;
 	y = mpi_read_raw_data(buf + 1 + n, n);
-	kfree(buf_memory);
-	if (!y) {
-		mpi_free(x);
-		return -ENOMEM;
-	}
+	if (!y)
+		goto err_freex;
 
 	mpi_normalize(x);
 	mpi_normalize(y);
 
 	mpi_set(result->x, x);
 	mpi_set(result->y, y);
 	mpi_set_ui(result->z, 1);
 
-	mpi_free(x);
-	mpi_free(y);
+	rc = 0;
 
-	return 0;
+	mpi_free(y);
+err_freex:
+	mpi_free(x);
+err_freebuf:
+	kfree(buf);
+	return rc;
 }
 
 struct sm2_signature_ctx {
@@ -399,31 +394,15 @@ static int sm2_set_pub_key(struct crypto_akcipher *tfm,
 	MPI a;
 	int rc;
 
-	rc = sm2_ec_ctx_reset(ec);
-	if (rc)
-		return rc;
-
-	ec->Q = mpi_point_new(0);
-	if (!ec->Q)
-		return -ENOMEM;
-
 	/* include the uncompressed flag '0x04' */
-	rc = -ENOMEM;
 	a = mpi_read_raw_data(key, keylen);
 	if (!a)
-		goto error;
+		return -ENOMEM;
 
 	mpi_normalize(a);
 	rc = sm2_ecc_os2ec(ec->Q, a);
 	mpi_free(a);
-	if (rc)
-		goto error;
-
-	return 0;
-
-error:
-	mpi_point_release(ec->Q);
-	ec->Q = NULL;
 	return rc;
 }
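
The rework of sm2_ecc_os2ec() above is a switch from per-branch kfree()/return pairs to a single allocation up front followed by reverse-order cleanup labels. A minimal sketch of that unwind shape, using hypothetical names and plain userspace C rather than the kernel's MPI API:

#include <errno.h>
#include <stdlib.h>

/* Illustrative only: set rc before each failure point, then jump to a
 * label that frees exactly what was acquired before the failure. */
static int build_pair(size_t n, char **out_a, char **out_b)
{
	char *a, *b;
	int rc = -ENOMEM;

	a = malloc(n);
	if (!a)
		return rc;

	b = malloc(n);
	if (!b)
		goto err_free_a;

	*out_a = a;
	*out_b = b;
	return 0;

err_free_a:
	free(a);	/* only 'a' exists when this label is reached */
	return rc;
}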
@@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
 #
 # ACPI Boot-Time Table Parsing
 #
+ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
+tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
+
+endif
+
 obj-$(CONFIG_ACPI)		+= tables.o
 obj-$(CONFIG_X86)		+= blacklist.o
@@ -261,7 +261,7 @@ static uint32_t acpi_pad_idle_cpus_num(void)
 	return ps_tsk_num;
 }
 
-static ssize_t acpi_pad_rrtime_store(struct device *dev,
+static ssize_t rrtime_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	unsigned long num;
@@ -275,16 +275,14 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
 	return count;
 }
 
-static ssize_t acpi_pad_rrtime_show(struct device *dev,
+static ssize_t rrtime_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
 }
-static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
-	acpi_pad_rrtime_show,
-	acpi_pad_rrtime_store);
+static DEVICE_ATTR_RW(rrtime);
 
-static ssize_t acpi_pad_idlepct_store(struct device *dev,
+static ssize_t idlepct_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	unsigned long num;
@@ -298,16 +296,14 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
 	return count;
 }
 
-static ssize_t acpi_pad_idlepct_show(struct device *dev,
+static ssize_t idlepct_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
 }
-static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
-	acpi_pad_idlepct_show,
-	acpi_pad_idlepct_store);
+static DEVICE_ATTR_RW(idlepct);
 
-static ssize_t acpi_pad_idlecpus_store(struct device *dev,
+static ssize_t idlecpus_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	unsigned long num;
@@ -319,16 +315,14 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
 	return count;
 }
 
-static ssize_t acpi_pad_idlecpus_show(struct device *dev,
+static ssize_t idlecpus_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	return cpumap_print_to_pagebuf(false, buf,
 			to_cpumask(pad_busy_cpus_bits));
 }
 
-static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
-	acpi_pad_idlecpus_show,
-	acpi_pad_idlecpus_store);
+static DEVICE_ATTR_RW(idlecpus);
 
 static int acpi_pad_add_sysfs(struct acpi_device *device)
 {
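
The renames above are not cosmetic: DEVICE_ATTR_RW(name) declares dev_attr_<name> and wires it to <name>_show() and <name>_store(), so the acpi_pad_ prefixes had to go for the macro to resolve. A minimal sketch with a hypothetical attribute (illustrative, not from the patch):

#include <linux/device.h>
#include <linux/kernel.h>

static unsigned int demo_interval;

static ssize_t interval_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", demo_interval);
}

static ssize_t interval_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	demo_interval = num;
	return count;
}

/* Expands to a struct device_attribute named dev_attr_interval with 0644
 * permissions, pointing at interval_show() and interval_store(). */
static DEVICE_ATTR_RW(interval);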
@@ -237,7 +237,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
 			  rt.tz, rt.daylight);
 }
 
-static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store);
+static DEVICE_ATTR_RW(time);
 
 static struct attribute *acpi_tad_time_attrs[] = {
 	&dev_attr_time.attr,
@@ -446,7 +446,7 @@ static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr,
 	return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER);
 }
 
-static DEVICE_ATTR(ac_alarm, S_IRUSR | S_IWUSR, ac_alarm_show, ac_alarm_store);
+static DEVICE_ATTR_RW(ac_alarm);
 
 static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr,
 			       const char *buf, size_t count)
@@ -462,7 +462,7 @@ static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr,
 	return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER);
 }
 
-static DEVICE_ATTR(ac_policy, S_IRUSR | S_IWUSR, ac_policy_show, ac_policy_store);
+static DEVICE_ATTR_RW(ac_policy);
 
 static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr,
 			       const char *buf, size_t count)
@@ -478,7 +478,7 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr,
 	return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER);
 }
 
-static DEVICE_ATTR(ac_status, S_IRUSR | S_IWUSR, ac_status_show, ac_status_store);
+static DEVICE_ATTR_RW(ac_status);
 
 static struct attribute *acpi_tad_attrs[] = {
 	&dev_attr_caps.attr,
@@ -505,7 +505,7 @@ static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr,
 	return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER);
 }
 
-static DEVICE_ATTR(dc_alarm, S_IRUSR | S_IWUSR, dc_alarm_show, dc_alarm_store);
+static DEVICE_ATTR_RW(dc_alarm);
 
 static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr,
 			       const char *buf, size_t count)
@@ -521,7 +521,7 @@ static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr,
 	return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER);
 }
 
-static DEVICE_ATTR(dc_policy, S_IRUSR | S_IWUSR, dc_policy_show, dc_policy_store);
+static DEVICE_ATTR_RW(dc_policy);
 
 static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr,
 			       const char *buf, size_t count)
@@ -537,7 +537,7 @@ static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr,
 	return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER);
 }
 
-static DEVICE_ATTR(dc_status, S_IRUSR | S_IWUSR, dc_status_show, dc_status_store);
+static DEVICE_ATTR_RW(dc_status);
 
 static struct attribute *acpi_tad_dc_attrs[] = {
 	&dev_attr_dc_alarm.attr,
@@ -375,6 +375,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
 
 			(*element_ptr)->common.reference_count =
 			    original_ref_count;
+
+			/*
+			 * The original_element holds a reference from the package object
+			 * that represents _HID. Since a new element was created by _HID,
+			 * remove the reference from the _CID package.
+			 */
+			acpi_ut_remove_reference(original_element);
 		}
 
 		element_ptr++;
@@ -441,28 +441,35 @@ static void ghes_kick_task_work(struct callback_head *head)
 	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
 }
 
-static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
-				       int sev)
+static bool ghes_do_memory_failure(u64 physical_addr, int flags)
 {
 	unsigned long pfn;
-	int flags = -1;
-	int sec_sev = ghes_severity(gdata->error_severity);
-	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
 
 	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
 		return false;
 
-	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
-		return false;
-
-	pfn = mem_err->physical_addr >> PAGE_SHIFT;
+	pfn = PHYS_PFN(physical_addr);
 	if (!pfn_valid(pfn)) {
 		pr_warn_ratelimited(FW_WARN GHES_PFX
 		"Invalid address in generic error data: %#llx\n",
-		mem_err->physical_addr);
+		physical_addr);
 		return false;
 	}
 
+	memory_failure_queue(pfn, flags);
+	return true;
+}
+
+static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
+				       int sev)
+{
+	int flags = -1;
+	int sec_sev = ghes_severity(gdata->error_severity);
+	struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
+
+	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+		return false;
+
 	/* iff following two events can be handled properly by now */
 	if (sec_sev == GHES_SEV_CORRECTED &&
 	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
@@ -470,14 +477,56 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
 	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
 		flags = 0;
 
-	if (flags != -1) {
-		memory_failure_queue(pfn, flags);
-		return true;
-	}
+	if (flags != -1)
+		return ghes_do_memory_failure(mem_err->physical_addr, flags);
 
 	return false;
 }
 
+static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata, int sev)
+{
+	struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
+	bool queued = false;
+	int sec_sev, i;
+	char *p;
+
+	log_arm_hw_error(err);
+
+	sec_sev = ghes_severity(gdata->error_severity);
+	if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
+		return false;
+
+	p = (char *)(err + 1);
+	for (i = 0; i < err->err_info_num; i++) {
+		struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
+		bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
+		bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
+		const char *error_type = "unknown error";
+
+		/*
+		 * The field (err_info->error_info & BIT(26)) is fixed to set to
+		 * 1 in some old firmware of HiSilicon Kunpeng920. We assume that
+		 * firmware won't mix corrected errors in an uncorrected section,
+		 * and don't filter out 'corrected' error here.
+		 */
+		if (is_cache && has_pa) {
+			queued = ghes_do_memory_failure(err_info->physical_fault_addr, 0);
+			p += err_info->length;
+			continue;
+		}
+
+		if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
+			error_type = cper_proc_error_type_strs[err_info->type];
+
+		pr_warn_ratelimited(FW_WARN GHES_PFX
+				    "Unhandled processor error type: %s\n",
+				    error_type);
+		p += err_info->length;
+	}
+
+	return queued;
+}
+
 /*
  * PCIe AER errors need to be sent to the AER driver for reporting and
  * recovery. The GHES severities map to the following AER severities and
@@ -605,9 +654,7 @@ static bool ghes_do_proc(struct ghes *ghes,
 		ghes_handle_aer(gdata);
 	}
 	else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
-		struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
-
-		log_arm_hw_error(err);
+		queued = ghes_handle_arm_hw_error(gdata, sev);
 	} else {
 		void *err = acpi_hest_get_payload(gdata);
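
The net effect of the refactoring above is that the pfn validation and memory_failure_queue() call live in one helper, ghes_do_memory_failure(), shared by the generic memory-error path and the new ARM cache-error path; PHYS_PFN(addr) is simply addr >> PAGE_SHIFT. The shared helper boils down to something like the following sketch (hypothetical name, real pfn_valid()/memory_failure_queue() APIs):

#include <linux/mm.h>
#include <linux/pfn.h>

/* Illustrative only: validate a physical address and queue the page for
 * memory_failure() handling; callers decide the flags. */
static bool queue_poisoned_page(u64 physical_addr, int flags)
{
	unsigned long pfn = PHYS_PFN(physical_addr);	/* == physical_addr >> PAGE_SHIFT */

	if (!pfn_valid(pfn))
		return false;

	memory_failure_queue(pfn, flags);
	return true;
}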
@@ -15,40 +15,19 @@
 static void *bgrt_image;
 static struct kobject *bgrt_kobj;
 
-static ssize_t show_version(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
-}
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+#define BGRT_SHOW(_name, _member) \
+	static ssize_t _name##_show(struct kobject *kobj,			\
+				    struct kobj_attribute *attr, char *buf)	\
+	{									\
+		return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab._member);	\
+	}									\
+	struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
 
-static ssize_t show_status(struct device *dev,
-			   struct device_attribute *attr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
-}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
-
-static ssize_t show_type(struct device *dev,
-			 struct device_attribute *attr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
-}
-static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
-
-static ssize_t show_xoffset(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
-}
-static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
-
-static ssize_t show_yoffset(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
-}
-static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
+BGRT_SHOW(version, version);
+BGRT_SHOW(status, status);
+BGRT_SHOW(type, image_type);
+BGRT_SHOW(xoffset, image_offset_x);
+BGRT_SHOW(yoffset, image_offset_y);
 
 static ssize_t image_read(struct file *file, struct kobject *kobj,
 	       struct bin_attribute *attr, char *buf, loff_t off, size_t count)
@@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
 static BIN_ATTR_RO(image, 0);	/* size gets filled in later */
 
 static struct attribute *bgrt_attributes[] = {
-	&dev_attr_version.attr,
-	&dev_attr_status.attr,
-	&dev_attr_type.attr,
-	&dev_attr_xoffset.attr,
-	&dev_attr_yoffset.attr,
+	&bgrt_attr_version.attr,
+	&bgrt_attr_status.attr,
+	&bgrt_attr_type.attr,
+	&bgrt_attr_xoffset.attr,
+	&bgrt_attr_yoffset.attr,
 	NULL,
 };
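
For reference, each BGRT_SHOW() invocation above expands (per the macro in the same hunk) to a kobject read-only attribute rather than a device attribute; for example, BGRT_SHOW(version, version) becomes roughly:

static ssize_t version_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
}
struct kobj_attribute bgrt_attr_version = __ATTR_RO(version);

__ATTR_RO(version) fills in a 0444 attribute whose .show is version_show(), which matches the callback signature sysfs uses for attributes hung off the bare bgrt_kobj kobject.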
@@ -1245,6 +1245,7 @@ static int __init acpi_init(void)
 
 	result = acpi_bus_init();
 	if (result) {
+		kobject_put(acpi_kobj);
 		disable_acpi();
 		return result;
 	}
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
 
+#include "fan.h"
 #include "internal.h"
 
 #define _COMPONENT		ACPI_POWER_COMPONENT
@@ -1298,10 +1299,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	 * with the generic ACPI PM domain.
 	 */
 	static const struct acpi_device_id special_pm_ids[] = {
-		{"PNP0C0B", }, /* Generic ACPI fan */
-		{"INT3404", }, /* Fan */
-		{"INTC1044", }, /* Fan for Tiger Lake generation */
-		{"INTC1048", }, /* Fan for Alder Lake generation */
+		ACPI_FAN_DEVICE_IDS,
 		{}
 	};
 	struct acpi_device *adev = ACPI_COMPANION(dev);
@@ -325,11 +325,11 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
 EXPORT_SYMBOL_GPL(acpi_device_modalias);
 
 static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
 }
-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+static DEVICE_ATTR_RO(modalias);
 
 static ssize_t real_power_state_show(struct device *dev,
 				     struct device_attribute *attr, char *buf)
@@ -358,7 +358,7 @@ static ssize_t power_state_show(struct device *dev,
 static DEVICE_ATTR_RO(power_state);
 
 static ssize_t
-acpi_eject_store(struct device *d, struct device_attribute *attr,
+eject_store(struct device *d, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct acpi_device *acpi_device = to_acpi_device(d);
@@ -387,27 +387,27 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
 		return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
 }
 
-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+static DEVICE_ATTR_WO(eject);
 
 static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
+hid_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
 	return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
 }
-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+static DEVICE_ATTR_RO(hid);
 
-static ssize_t acpi_device_uid_show(struct device *dev,
+static ssize_t uid_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
 	return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
 }
-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+static DEVICE_ATTR_RO(uid);
 
-static ssize_t acpi_device_adr_show(struct device *dev,
+static ssize_t adr_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -417,16 +417,16 @@ static ssize_t acpi_device_adr_show(struct device *dev,
 	else
 		return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
 }
-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+static DEVICE_ATTR_RO(adr);
 
-static ssize_t acpi_device_path_show(struct device *dev,
+static ssize_t path_show(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
 	return acpi_object_path(acpi_dev->handle, buf);
 }
-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+static DEVICE_ATTR_RO(path);
 
 /* sysfs file that shows description text from the ACPI _STR method */
 static ssize_t description_show(struct device *dev,
@@ -446,7 +446,7 @@ static ssize_t description_show(struct device *dev,
 			(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
 			acpi_dev->pnp.str_obj->buffer.length,
 			UTF16_LITTLE_ENDIAN, buf,
-			PAGE_SIZE);
+			PAGE_SIZE - 1);
 
 	buf[result++] = '\n';
 
@@ -455,7 +455,7 @@ static ssize_t description_show(struct device *dev,
 static DEVICE_ATTR_RO(description);
 
 static ssize_t
-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
+sun_show(struct device *dev, struct device_attribute *attr,
 	     char *buf) {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 	acpi_status status;
@@ -467,10 +467,10 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "%llu\n", sun);
 }
-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static DEVICE_ATTR_RO(sun);
 
 static ssize_t
-acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
+hrv_show(struct device *dev, struct device_attribute *attr,
 	     char *buf) {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 	acpi_status status;
@@ -482,7 +482,7 @@ acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "%llu\n", hrv);
 }
-static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
+static DEVICE_ATTR_RO(hrv);
 
 static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 			   char *buf) {
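
The PAGE_SIZE to PAGE_SIZE - 1 change a few hunks up is an off-by-one fix: description_show() appends a trailing newline after the UTF-16 to UTF-8 conversion, so the conversion must leave one spare byte in the page-sized sysfs buffer. A paraphrased sketch of the relevant shape (wstr/wlen stand in for the _STR buffer; not the literal source):

#include <linux/mm.h>
#include <linux/nls.h>

static ssize_t demo_description_show(const wchar_t *wstr, int wlen, char *buf)
{
	int result;

	/* Convert at most PAGE_SIZE - 1 bytes so buf[result] below stays in bounds. */
	result = utf16s_to_utf8s(wstr, wlen, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1);
	buf[result++] = '\n';

	return result;
}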
@@ -484,7 +484,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
 /*
  * show_docked - read method for "docked" file in sysfs
  */
-static ssize_t show_docked(struct device *dev,
+static ssize_t docked_show(struct device *dev,
 			   struct device_attribute *attr, char *buf)
 {
 	struct dock_station *dock_station = dev->platform_data;
@@ -493,24 +493,24 @@ static ssize_t show_docked(struct device *dev,
 	acpi_bus_get_device(dock_station->handle, &adev);
 	return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
 }
-static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+static DEVICE_ATTR_RO(docked);
 
 /*
  * show_flags - read method for flags file in sysfs
  */
-static ssize_t show_flags(struct device *dev,
+static ssize_t flags_show(struct device *dev,
 			  struct device_attribute *attr, char *buf)
 {
 	struct dock_station *dock_station = dev->platform_data;
 	return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
 
 }
-static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+static DEVICE_ATTR_RO(flags);
 
 /*
  * write_undock - write method for "undock" file in sysfs
  */
-static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
+static ssize_t undock_store(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
 {
 	int ret;
@@ -525,12 +525,12 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
 	acpi_scan_lock_release();
 	return ret ? ret: count;
 }
-static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+static DEVICE_ATTR_WO(undock);
 
 /*
  * show_dock_uid - read method for "uid" file in sysfs
  */
-static ssize_t show_dock_uid(struct device *dev,
+static ssize_t uid_show(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
 	unsigned long long lbuf;
@@ -542,9 +542,9 @@ static ssize_t show_dock_uid(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
 }
-static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+static DEVICE_ATTR_RO(uid);
 
-static ssize_t show_dock_type(struct device *dev,
+static ssize_t type_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct dock_station *dock_station = dev->platform_data;
@@ -561,7 +561,7 @@ static ssize_t show_dock_type(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", type);
 }
-static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
+static DEVICE_ATTR_RO(type);
 
 static struct attribute *dock_attributes[] = {
 	&dev_attr_docked.attr,
@@ -183,6 +183,7 @@ static struct workqueue_struct *ec_query_wq;
 
 static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
 static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
+static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */
 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
@@ -1606,7 +1607,8 @@ static int acpi_ec_add(struct acpi_device *device)
 	}
 
 	if (boot_ec && ec->command_addr == boot_ec->command_addr &&
-	    ec->data_addr == boot_ec->data_addr) {
+	    ec->data_addr == boot_ec->data_addr &&
+	    !EC_FLAGS_TRUST_DSDT_GPE) {
 		/*
 		 * Trust PNP0C09 namespace location rather than
 		 * ECDT ID. But trust ECDT GPE rather than _GPE
@@ -1829,6 +1831,18 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * Some ECDTs contain wrong GPE setting, but they share the same port addresses
+ * with DSDT EC, don't duplicate the DSDT EC with ECDT EC in this case.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=209989
+ */
+static int ec_honor_dsdt_gpe(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing DSDT GPE setting.\n");
+	EC_FLAGS_TRUST_DSDT_GPE = 1;
+	return 0;
+}
+
 /*
  * Some DSDTs contain wrong GPE setting.
  * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
@@ -1859,6 +1873,22 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
 	{
+	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
+	{
+	ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
+	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+	DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
+	{
 	ec_honor_ecdt_gpe, "ASUS X550VXK", {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
@@ -1867,6 +1897,11 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
 	{
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */
+	ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", {
+	DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+	DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL},
+	{
 	ec_clear_on_resume, "Samsung hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
 	{},
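
These table entries only take effect because ec.c feeds ec_dmi_table to dmi_check_system() at init time; that helper invokes the callback of every entry whose DMI_MATCH() fields all match the running firmware, and each callback simply sets the corresponding EC_FLAGS_* variable. Schematically (hypothetical quirk, real dmi_check_system() API):

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/printk.h>

static int demo_flag;

static int demo_quirk(const struct dmi_system_id *id)
{
	pr_debug("Applying quirk for %s\n", id->ident);
	demo_flag = 1;
	return 0;
}

static const struct dmi_system_id demo_dmi_table[] __initconst = {
	{
	demo_quirk, "Example vendor laptop", {
	DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
	DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),}, NULL},
	{},
};

static void __init demo_apply_quirks(void)
{
	/* Runs demo_quirk() for every entry whose matches all succeed. */
	dmi_check_system(demo_dmi_table);
}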
@@ -16,6 +16,8 @@
 #include <linux/platform_device.h>
 #include <linux/sort.h>
 
+#include "fan.h"
+
 MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI Fan Driver");
 MODULE_LICENSE("GPL");
@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
 static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
-	{"PNP0C0B", 0},
-	{"INT3404", 0},
-	{"INTC1044", 0},
-	{"INTC1048", 0},
+	ACPI_FAN_DEVICE_IDS,
 	{"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
drivers/acpi/fan.h (new file, 13 lines)
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * ACPI fan device IDs are shared between the fan driver and the device power
+ * management code.
+ *
+ * Add new device IDs before the generic ACPI fan one.
+ */
+#define ACPI_FAN_DEVICE_IDS	\
+	{"INT3404", }, /* Fan */ \
+	{"INTC1044", }, /* Fan for Tiger Lake generation */ \
+	{"INTC1048", }, /* Fan for Alder Lake generation */ \
+	{"PNP0C0B", } /* Generic ACPI fan */
Some files were not shown because too many files have changed in this diff.