This is the 5.10.215 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmYaZdgACgkQONu9yGCS
aT4oMxAA0pATFAq8RN5f9CmYlMg5HqHgzZ8lJv8P0/reOINhUa+F5sJb1n+x+Ch4
WQbmiFeZRzfsKZ2qKhIdNR0Lg+9JOr/DtYXdSBZ6InfSWrTAIrQ9fjl5Warkmcgg
O4WbgF5BVgU3vGFATgxLvnUZwhR1D7WK93oMDunzrT7+OqyncU3f1Uj53ZAu9030
z18UNqnTxDLYH/CMGwAeRkaZqBev9gZ1HdgQWA27SVLqWQwZq0al81Cmlo+ECVmk
5dF6V2pid4qfKGJjDDfx1NS0PVnoP68iK4By1SXyoFV9VBiSwp77nUUyDr7YsHsT
u8GpZHr9jZvSO5/xtKv20NPLejTPCRKc06CbkwpikDRtGOocBL8em0GuVqlf8hMs
KwDb6ZEzYhXZGPJHbJM+aRD1tq/KHw9X7TrldOszMQPr6lubBtscPbg1FCg3OlcC
HUrtub0i275x7TH0dJeRTD8TRE9jRmF+tl7KQytEJM3JRrquFjLyhDj+/VJnZkiB
lzj3FRf4zshzgz4+CAeqXO/8Lu8b3fGYmcW1acCmk7emjDcXUKojPj/Aig6T4l7P
oCWDY3+w1E6eiyE8BazxY1KUa/41ld0VJnlW5JWGRaDFTJwrk0h6/rvf9qImSckw
IGx24UezRyp6NS1op3Qm2iwHLr41pFRfKxNm9ppgH9iBPzOhe38=
=pkLL
-----END PGP SIGNATURE-----

Merge 5.10.215 into android12-5.10-lts

Changes in 5.10.215
	amdkfd: use calloc instead of kzalloc to avoid integer overflow
	Documentation/hw-vuln: Update spectre doc
	x86/cpu: Support AMD Automatic IBRS
	x86/bugs: Use sysfs_emit()
	timers: Update kernel-doc for various functions
	timers: Use del_timer_sync() even on UP
	timers: Rename del_timer_sync() to timer_delete_sync()
	wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach
	media: staging: ipu3-imgu: Set fields before media_entity_pads_init()
	clk: qcom: gcc-sdm845: Add soft dependency on rpmhpd
	smack: Set SMACK64TRANSMUTE only for dirs in smack_inode_setxattr()
	smack: Handle SMACK64TRANSMUTE in smack_inode_setsecurity()
	arm: dts: marvell: Fix maxium->maxim typo in brownstone dts
	drm/vmwgfx: stop using ttm_bo_create v2
	drm/vmwgfx: switch over to the new pin interface v2
	drm/vmwgfx/vmwgfx_cmdbuf_res: Remove unused variable 'ret'
	drm/vmwgfx: Fix some static checker warnings
	drm/vmwgfx: Fix possible null pointer derefence with invalid contexts
	serial: max310x: fix NULL pointer dereference in I2C instantiation
	media: xc4000: Fix atomicity violation in xc4000_get_frequency
	KVM: Always flush async #PF workqueue when vCPU is being destroyed
	sparc64: NMI watchdog: fix return value of __setup handler
	sparc: vDSO: fix return value of __setup handler
	crypto: qat - fix double free during reset
	crypto: qat - resolve race condition during AER recovery
	selftests/mqueue: Set timeout to 180 seconds
	ext4: correct best extent lstart adjustment logic
	block: introduce zone_write_granularity limit
	block: Clear zone limits for a non-zoned stacked queue
	bounds: support non-power-of-two CONFIG_NR_CPUS
	fat: fix uninitialized field in nostale filehandles
	ubifs: Set page uptodate in the correct place
	ubi: Check for too small LEB size in VTBL code
	ubi: correct the calculation of fastmap size
	mtd: rawnand: meson: fix scrambling mode value in command macro
	parisc: Avoid clobbering the C/B bits in the PSW with tophys and tovirt macros
	parisc: Fix ip_fast_csum
	parisc: Fix csum_ipv6_magic on 32-bit systems
	parisc: Fix csum_ipv6_magic on 64-bit systems
	parisc: Strip upper 32 bit of sum in csum_ipv6_magic for 64-bit builds
	PM: suspend: Set mem_sleep_current during kernel command line setup
	clk: qcom: gcc-ipq6018: fix terminating of frequency table arrays
	clk: qcom: gcc-ipq8074: fix terminating of frequency table arrays
	clk: qcom: mmcc-apq8084: fix terminating of frequency table arrays
	clk: qcom: mmcc-msm8974: fix terminating of frequency table arrays
	powerpc/fsl: Fix mfpmr build errors with newer binutils
	USB: serial: ftdi_sio: add support for GMC Z216C Adapter IR-USB
	USB: serial: add device ID for VeriFone adapter
	USB: serial: cp210x: add ID for MGP Instruments PDS100
	USB: serial: option: add MeiG Smart SLM320 product
	USB: serial: cp210x: add pid/vid for TDK NC0110013M and MM0110113M
	PM: sleep: wakeirq: fix wake irq warning in system suspend
	mmc: tmio: avoid concurrent runs of mmc_request_done()
	fuse: fix root lookup with nonzero generation
	fuse: don't unhash root
	usb: typec: ucsi: Clean up UCSI_CABLE_PROP macros
	printk/console: Split out code that enables default console
	serial: Lock console when calling into driver before registration
	btrfs: fix off-by-one chunk length calculation at contains_pending_extent()
	PCI: Drop pci_device_remove() test of pci_dev->driver
	PCI/PM: Drain runtime-idle callbacks before driver removal
	PCI/ERR: Cache RCEC EA Capability offset in pci_init_capabilities()
	PCI: Cache PCIe Device Capabilities register
	PCI: Work around Intel I210 ROM BAR overlap defect
	PCI/ASPM: Make Intel DG2 L1 acceptable latency unlimited
	PCI/DPC: Quirk PIO log size for certain Intel Root Ports
	PCI/DPC: Quirk PIO log size for Intel Raptor Lake Root Ports
	Revert "Revert "md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d""
	dm-raid: fix lockdep waring in "pers->hot_add_disk"
	mac802154: fix llsec key resources release in mac802154_llsec_key_del
	mm: swap: fix race between free_swap_and_cache() and swapoff()
	mmc: core: Fix switch on gp3 partition
	drm/etnaviv: Restore some id values
	hwmon: (amc6821) add of_match table
	ext4: fix corruption during on-line resize
	nvmem: meson-efuse: fix function pointer type mismatch
	slimbus: core: Remove usage of the deprecated ida_simple_xx() API
	phy: tegra: xusb: Add API to retrieve the port number of phy
	usb: gadget: tegra-xudc: Use dev_err_probe()
	usb: gadget: tegra-xudc: Fix USB3 PHY retrieval logic
	speakup: Fix 8bit characters from direct synth
	PCI/ERR: Clear AER status only when we control AER
	PCI/AER: Block runtime suspend when handling errors
	nfs: fix UAF in direct writes
	kbuild: Move -Wenum-{compare-conditional,enum-conversion} into W=1
	PCI: dwc: endpoint: Fix advertised resizable BAR size
	vfio/platform: Disable virqfds on cleanup
	ring-buffer: Fix waking up ring buffer readers
	ring-buffer: Do not set shortest_full when full target is hit
	ring-buffer: Fix resetting of shortest_full
	ring-buffer: Fix full_waiters_pending in poll
	soc: fsl: qbman: Always disable interrupts when taking cgr_lock
	soc: fsl: qbman: Add helper for sanity checking cgr ops
	soc: fsl: qbman: Add CGR update function
	soc: fsl: qbman: Use raw spinlock for cgr_lock
	s390/zcrypt: fix reference counting on zcrypt card objects
	drm/panel: do not return negative error codes from drm_panel_get_modes()
	drm/exynos: do not return negative values from .get_modes()
	drm/imx/ipuv3: do not return negative values from .get_modes()
	drm/vc4: hdmi: do not return negative values from .get_modes()
	memtest: use {READ,WRITE}_ONCE in memory scanning
	nilfs2: fix failure to detect DAT corruption in btree and direct mappings
	nilfs2: prevent kernel bug at submit_bh_wbc()
	cpufreq: dt: always allocate zeroed cpumask
	x86/CPU/AMD: Update the Zenbleed microcode revisions
	net: hns3: tracing: fix hclgevf trace event strings
	wireguard: netlink: check for dangling peer via is_dead instead of empty list
	wireguard: netlink: access device through ctx instead of peer
	ahci: asm1064: correct count of reported ports
	ahci: asm1064: asm1166: don't limit reported ports
	drm/amd/display: Return the correct HDCP error code
	drm/amd/display: Fix noise issue on HDMI AV mute
	dm snapshot: fix lockup in dm_exception_table_exit
	vxge: remove unnecessary cast in kfree()
	x86/stackprotector/32: Make the canary into a regular percpu variable
	x86/pm: Work around false positive kmemleak report in msr_build_context()
	scripts: kernel-doc: Fix syntax error due to undeclared args variable
	comedi: comedi_test: Prevent timers rescheduling during deletion
	cpufreq: brcmstb-avs-cpufreq: fix up "add check for cpufreq_cpu_get's return value"
	netfilter: nf_tables: mark set as dead when unbinding anonymous set with timeout
	netfilter: nf_tables: disallow anonymous set with timeout flag
	netfilter: nf_tables: reject constant set with timeout
	Drivers: hv: vmbus: Calculate ring buffer size for more efficient use of memory
	xfrm: Avoid clang fortify warning in copy_to_user_tmpl()
	KVM: SVM: Flush pages under kvm->lock to fix UAF in svm_register_enc_region()
	ALSA: hda/realtek - Fix headset Mic no show at resume back for Lenovo ALC897 platform
	USB: usb-storage: Prevent divide-by-0 error in isd200_ata_command
	usb: gadget: ncm: Fix handling of zero block length packets
	usb: port: Don't try to peer unused USB ports based on location
	tty: serial: fsl_lpuart: avoid idle preamble pending if CTS is enabled
	mei: me: add arrow lake point S DID
	mei: me: add arrow lake point H DID
	vt: fix unicode buffer corruption when deleting characters
	fs/aio: Check IOCB_AIO_RW before the struct aio_kiocb conversion
	tee: optee: Fix kernel panic caused by incorrect error handling
	xen/events: close evtchn after mapping cleanup
	printk: Update @console_may_schedule in console_trylock_spinning()
	btrfs: allocate btrfs_ioctl_defrag_range_args on stack
	x86/asm: Add _ASM_RIP() macro for x86-64 (%rip) suffix
	x86/bugs: Add asm helpers for executing VERW
	x86/entry_64: Add VERW just before userspace transition
	x86/entry_32: Add VERW just before userspace transition
	x86/bugs: Use ALTERNATIVE() instead of mds_user_clear static key
	KVM/VMX: Use BT+JNC, i.e. EFLAGS.CF to select VMRESUME vs. VMLAUNCH
	KVM/VMX: Move VERW closer to VMentry for MDS mitigation
	x86/mmio: Disable KVM mitigation when X86_FEATURE_CLEAR_CPU_BUF is set
	Documentation/hw-vuln: Add documentation for RFDS
	x86/rfds: Mitigate Register File Data Sampling (RFDS)
	KVM/x86: Export RFDS_NO and RFDS_CLEAR to guests
	perf/core: Fix reentry problem in perf_output_read_group()
	efivarfs: Request at most 512 bytes for variable names
	powerpc: xor_vmx: Add '-mhard-float' to CFLAGS
	serial: sc16is7xx: convert from _raw_ to _noinc_ regmap functions for FIFO
	mm/memory-failure: fix an incorrect use of tail pages
	mm/migrate: set swap entry values of THP tail pages properly.
	init: open /initrd.image with O_LARGEFILE
	wifi: mac80211: check/clear fast rx for non-4addr sta VLAN changes
	exec: Fix NOMMU linux_binprm::exec in transfer_args_to_stack()
	hexagon: vmlinux.lds.S: handle attributes section
	mmc: core: Initialize mmc_blk_ioc_data
	mmc: core: Avoid negative index with array access
	net: ll_temac: platform_get_resource replaced by wrong function
	usb: cdc-wdm: close race between read and workqueue
	ALSA: sh: aica: reorder cleanup operations to avoid UAF bugs
	scsi: core: Fix unremoved procfs host directory regression
	staging: vc04_services: changen strncpy() to strscpy_pad()
	staging: vc04_services: fix information leak in create_component()
	USB: core: Add hub_get() and hub_put() routines
	usb: dwc2: host: Fix remote wakeup from hibernation
	usb: dwc2: host: Fix hibernation flow
	usb: dwc2: host: Fix ISOC flow in DDMA mode
	usb: dwc2: gadget: LPM flow fix
	usb: udc: remove warning when queue disabled ep
	usb: typec: ucsi: Ack unsupported commands
	usb: typec: ucsi: Clear UCSI_CCI_RESET_COMPLETE before reset
	scsi: qla2xxx: Split FCE|EFT trace control
	scsi: qla2xxx: Fix command flush on cable pull
	scsi: qla2xxx: Delay I/O Abort on PCI error
	x86/cpu: Enable STIBP on AMD if Automatic IBRS is enabled
	PCI/DPC: Quirk PIO log size for Intel Ice Lake Root Ports
	scsi: lpfc: Correct size for wqe for memset()
	USB: core: Fix deadlock in usb_deauthorize_interface()
	nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet
	ixgbe: avoid sleeping allocation in ixgbe_ipsec_vf_add_sa()
	tcp: properly terminate timers for kernel sockets
	ACPICA: debugger: check status of acpi_evaluate_object() in acpi_db_walk_for_fields()
	bpf: Protect against int overflow for stack access size
	Octeontx2-af: fix pause frame configuration in GMP mode
	dm integrity: fix out-of-range warning
	r8169: fix issue caused by buggy BIOS on certain boards with RTL8168d
	x86/cpufeatures: Add new word for scattered features
	Bluetooth: hci_event: set the conn encrypted before conn establishes
	Bluetooth: Fix TOCTOU in HCI debugfs implementation
	netfilter: nf_tables: disallow timeout for anonymous sets
	net/rds: fix possible cp null dereference
	vfio/pci: Disable auto-enable of exclusive INTx IRQ
	vfio/pci: Lock external INTx masking ops
	vfio: Introduce interface to flush virqfd inject workqueue
	vfio/pci: Create persistent INTx handler
	vfio/platform: Create persistent IRQ handlers
	vfio/fsl-mc: Block calling interrupt handler without trigger
	io_uring: ensure '0' is returned on file registration success
	Revert "x86/mm/ident_map: Use gbpages only where full GB page should be mapped."
	mm, vmscan: prevent infinite loop for costly GFP_NOIO | __GFP_RETRY_MAYFAIL allocations
	x86/srso: Add SRSO mitigation for Hygon processors
	block: add check that partition length needs to be aligned with block size
	netfilter: nf_tables: reject new basechain after table flag update
	netfilter: nf_tables: flush pending destroy work before exit_net release
	netfilter: nf_tables: Fix potential data-race in __nft_flowtable_type_get()
	netfilter: validate user input for expected length
	vboxsf: Avoid an spurious warning if load_nls_xxx() fails
	bpf, sockmap: Prevent lock inversion deadlock in map delete elem
	net/sched: act_skbmod: prevent kernel-infoleak
	net: stmmac: fix rx queue priority assignment
	erspan: make sure erspan_base_hdr is present in skb->head
	selftests: reuseaddr_conflict: add missing new line at the end of the output
	ipv6: Fix infinite recursion in fib6_dump_done().
	udp: do not transition UDP GRO fraglist partial checksums to unnecessary
	octeontx2-pf: check negative error code in otx2_open()
	i40e: fix i40e_count_filters() to count only active/new filters
	i40e: fix vf may be used uninitialized in this function warning
	scsi: qla2xxx: Update manufacturer details
	scsi: qla2xxx: Update manufacturer detail
	Revert "usb: phy: generic: Get the vbus supply"
	udp: do not accept non-tunnel GSO skbs landing in a tunnel
	net: ravb: Always process TX descriptor ring
	arm64: dts: qcom: sc7180: Remove clock for bluetooth on Trogdor
	arm64: dts: qcom: sc7180-trogdor: mark bluetooth address as broken
	ASoC: ops: Fix wraparound for mask in snd_soc_get_volsw
	ata: sata_sx4: fix pdc20621_get_from_dimm() on 64-bit
	scsi: mylex: Fix sysfs buffer lengths
	ata: sata_mv: Fix PCI device ID table declaration compilation warning
	ALSA: hda/realtek: Update Panasonic CF-SZ6 quirk to support headset with microphone
	driver core: Introduce device_link_wait_removal()
	of: dynamic: Synchronize of_changeset_destroy() with the devlink removals
	x86/mce: Make sure to grab mce_sysfs_mutex in set_bank()
	s390/entry: align system call table on 8 bytes
	riscv: Fix spurious errors from __get/put_kernel_nofault
	x86/bugs: Fix the SRSO mitigation on Zen3/4
	x86/retpoline: Do the necessary fixup to the Zen3/4 srso return thunk for !SRSO
	mptcp: don't account accept() of non-MPC client as fallback to TCP
	x86/cpufeatures: Add CPUID_LNX_5 to track recently added Linux-defined word
	objtool: Add asm version of STACK_FRAME_NON_STANDARD
	wifi: ath9k: fix LNA selection in ath_ant_try_scan()
	VMCI: Fix memcpy() run-time warning in dg_dispatch_as_host()
	panic: Flush kernel log buffer at the end
	arm64: dts: rockchip: fix rk3328 hdmi ports node
	arm64: dts: rockchip: fix rk3399 hdmi ports node
	ionic: set adminq irq affinity
	pstore/zone: Add a null pointer check to the psz_kmsg_read
	tools/power x86_energy_perf_policy: Fix file leak in get_pkg_num()
	btrfs: handle chunk tree lookup error in btrfs_relocate_sys_chunks()
	btrfs: export: handle invalid inode or root reference in btrfs_get_parent()
	btrfs: send: handle path ref underflow in header iterate_inode_ref()
	net/smc: reduce rtnl pressure in smc_pnet_create_pnetids_list()
	Bluetooth: btintel: Fix null ptr deref in btintel_read_version
	Input: synaptics-rmi4 - fail probing if memory allocation for "phys" fails
	pinctrl: renesas: checker: Limit cfg reg enum checks to provided IDs
	sysv: don't call sb_bread() with pointers_lock held
	scsi: lpfc: Fix possible memory leak in lpfc_rcv_padisc()
	isofs: handle CDs with bad root inode but good Joliet root directory
	media: sta2x11: fix irq handler cast
	ext4: add a hint for block bitmap corrupt state in mb_groups
	ext4: forbid commit inconsistent quota data when errors=remount-ro
	drm/amd/display: Fix nanosec stat overflow
	SUNRPC: increase size of rpc_wait_queue.qlen from unsigned short to unsigned int
	Revert "ACPI: PM: Block ASUS B1400CEAE from suspend to idle by default"
	libperf evlist: Avoid out-of-bounds access
	block: prevent division by zero in blk_rq_stat_sum()
	RDMA/cm: add timeout to cm_destroy_id wait
	Input: allocate keycode for Display refresh rate toggle
	platform/x86: touchscreen_dmi: Add an extra entry for a variant of the Chuwi Vi8 tablet
	ktest: force $buildonly = 1 for 'make_warnings_file' test type
	ring-buffer: use READ_ONCE() to read cpu_buffer->commit_page in concurrent environment
	tools: iio: replace seekdir() in iio_generic_buffer
	usb: typec: tcpci: add generic tcpci fallback compatible
	usb: sl811-hcd: only defined function checkdone if QUIRK2 is defined
	fbdev: viafb: fix typo in hw_bitblt_1 and hw_bitblt_2
	drivers/nvme: Add quirks for device 126f:2262
	fbmon: prevent division by zero in fb_videomode_from_videomode()
	netfilter: nf_tables: release batch on table validation from abort path
	netfilter: nf_tables: release mutex after nft_gc_seq_end from abort path
	netfilter: nf_tables: discard table flag update with pending basechain deletion
	tty: n_gsm: require CAP_NET_ADMIN to attach N_GSM0710 ldisc
	virtio: reenable config if freezing device failed
	x86/mm/pat: fix VM_PAT handling in COW mappings
	drm/i915/gt: Reset queue_priority_hint on parking
	Bluetooth: btintel: Fixe build regression
	VMCI: Fix possible memcpy() run-time warning in vmci_datagram_invoke_guest_handler()
	kbuild: dummy-tools: adjust to stricter stackprotector check
	scsi: sd: Fix wrong zone_write_granularity value during revalidate
	x86/retpoline: Add NOENDBR annotation to the SRSO dummy return thunk
	x86/head/64: Re-enable stack protection
	Linux 5.10.215

Change-Id: I45a0a9c4a0683ff5ef97315690f1f884f666e1b5
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -516,6 +516,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/mds
 		/sys/devices/system/cpu/vulnerabilities/meltdown
 		/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+		/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
 		/sys/devices/system/cpu/vulnerabilities/retbleed
 		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
@@ -18,3 +18,4 @@ are configurable at compile, boot or run time.
    processor_mmio_stale_data.rst
    gather_data_sampling.rst
    srso
+   reg-file-data-sampling
Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst (new file, 104 lines)
@@ -0,0 +1,104 @@
+==================================
+Register File Data Sampling (RFDS)
+==================================
+
+Register File Data Sampling (RFDS) is a microarchitectural vulnerability that
+only affects Intel Atom parts(also branded as E-cores). RFDS may allow
+a malicious actor to infer data values previously used in floating point
+registers, vector registers, or integer registers. RFDS does not provide the
+ability to choose which data is inferred. CVE-2023-28746 is assigned to RFDS.
+
+Affected Processors
+===================
+Below is the list of affected Intel processors [#f1]_:
+
+   ===================  ============
+   Common name          Family_Model
+   ===================  ============
+   ATOM_GOLDMONT        06_5CH
+   ATOM_GOLDMONT_D      06_5FH
+   ATOM_GOLDMONT_PLUS   06_7AH
+   ATOM_TREMONT_D       06_86H
+   ATOM_TREMONT         06_96H
+   ALDERLAKE            06_97H
+   ALDERLAKE_L          06_9AH
+   ATOM_TREMONT_L       06_9CH
+   RAPTORLAKE           06_B7H
+   RAPTORLAKE_P         06_BAH
+   ALDERLAKE_N          06_BEH
+   RAPTORLAKE_S         06_BFH
+   ===================  ============
+
+As an exception to this table, Intel Xeon E family parts ALDERLAKE(06_97H) and
+RAPTORLAKE(06_B7H) codenamed Catlow are not affected. They are reported as
+vulnerable in Linux because they share the same family/model with an affected
+part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
+CPUID.HYBRID. This information could be used to distinguish between the
+affected and unaffected parts, but it is deemed not worth adding complexity as
+the reporting is fixed automatically when these parts enumerate RFDS_NO.
+
+Mitigation
+==========
+Intel released a microcode update that enables software to clear sensitive
+information using the VERW instruction. Like MDS, RFDS deploys the same
+mitigation strategy to force the CPU to clear the affected buffers before an
+attacker can extract the secrets. This is achieved by using the otherwise
+unused and obsolete VERW instruction in combination with a microcode update.
+The microcode clears the affected CPU buffers when the VERW instruction is
+executed.
+
+Mitigation points
+-----------------
+VERW is executed by the kernel before returning to user space, and by KVM
+before VMentry. None of the affected cores support SMT, so VERW is not required
+at C-state transitions.
+
+New bits in IA32_ARCH_CAPABILITIES
+----------------------------------
+Newer processors and microcode update on existing affected processors added new
+bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate
+vulnerability and mitigation capability:
+
+- Bit 27 - RFDS_NO - When set, processor is not affected by RFDS.
+- Bit 28 - RFDS_CLEAR - When set, processor is affected by RFDS, and has the
+  microcode that clears the affected buffers on VERW execution.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The kernel command line allows to control RFDS mitigation at boot time with the
+parameter "reg_file_data_sampling=". The valid arguments are:
+
+  ==========  =================================================================
+  on          If the CPU is vulnerable, enable mitigation; CPU buffer clearing
+              on exit to userspace and before entering a VM.
+  off         Disables mitigation.
+  ==========  =================================================================
+
+Mitigation default is selected by CONFIG_MITIGATION_RFDS.
+
+Mitigation status information
+-----------------------------
+The Linux kernel provides a sysfs interface to enumerate the current
+vulnerability status of the system: whether the system is vulnerable, and
+which mitigations are active. The relevant sysfs file is:
+
+	/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
+
+The possible values in this file are:
+
+  .. list-table::
+
+     * - 'Not affected'
+       - The processor is not vulnerable
+     * - 'Vulnerable'
+       - The processor is vulnerable, but no mitigation enabled
+     * - 'Vulnerable: No microcode'
+       - The processor is vulnerable but microcode is not updated.
+     * - 'Mitigation: Clear Register File'
+       - The processor is vulnerable and the CPU buffer clearing mitigation is
+         enabled.
+
+References
+----------
+.. [#f1] Affected Processors
+   https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
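[Editor's illustration: the sysfs file documented above is the intended programmatic interface. A minimal user-space C sketch, assuming only that the file exists, i.e. an x86 kernel carrying this backport:]

	#include <stdio.h>

	/* Print the RFDS status line reported by the kernel; the file is
	 * absent on kernels without the RFDS backport.
	 */
	int main(void)
	{
		char buf[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling", "r");

		if (!f) {
			perror("reg_file_data_sampling");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout); /* e.g. "Mitigation: Clear Register File" */
		fclose(f);
		return 0;
	}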
@@ -484,11 +484,14 @@ Spectre variant 2
 
 Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
 boot, by setting the IBRS bit, and they're automatically protected against
-Spectre v2 variant attacks, including cross-thread branch target injections
-on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
+Spectre v2 variant attacks.
 
-Legacy IBRS systems clear the IBRS bit on exit to userspace and
-therefore explicitly enable STIBP for that
+On Intel's enhanced IBRS systems, this includes cross-thread branch target
+injections on SMT systems (STIBP). In other words, Intel eIBRS enables
+STIBP, too.
+
+AMD Automatic IBRS does not protect userspace, and Legacy IBRS systems clear
+the IBRS bit on exit to userspace, therefore both explicitly enable STIBP.
 
 The retpoline mitigation is turned on by default on vulnerable
 CPUs. It can be forced on or off by the administrator
@@ -622,9 +625,10 @@ kernel command line.
         retpoline,generic       Retpolines
         retpoline,lfence        LFENCE; indirect branch
         retpoline,amd           alias for retpoline,lfence
-        eibrs                   enhanced IBRS
-        eibrs,retpoline         enhanced IBRS + Retpolines
-        eibrs,lfence            enhanced IBRS + LFENCE
+        eibrs                   Enhanced/Auto IBRS
+        eibrs,retpoline         Enhanced/Auto IBRS + Retpolines
+        eibrs,lfence            Enhanced/Auto IBRS + LFENCE
+        ibrs                    use IBRS to protect kernel
 
         Not specifying this option is equivalent to
         spectre_v2=auto.
@@ -1025,6 +1025,26 @@
 			The filter can be disabled or changed to another
 			driver later using sysfs.
 
+	reg_file_data_sampling=
+			[X86] Controls mitigation for Register File Data
+			Sampling (RFDS) vulnerability. RFDS is a CPU
+			vulnerability which may allow userspace to infer
+			kernel data values previously stored in floating point
+			registers, vector registers, or integer registers.
+			RFDS only affects Intel Atom processors.
+
+			on:	Turns ON the mitigation.
+			off:	Turns OFF the mitigation.
+
+			This parameter overrides the compile time default set
+			by CONFIG_MITIGATION_RFDS. Mitigation cannot be
+			disabled when other VERW based mitigations (like MDS)
+			are enabled. In order to disable RFDS mitigation all
+			VERW based mitigations need to be disabled.
+
+			For details see:
+			Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+
 	driver_async_probe=	[KNL]
 			List of driver names to be probed asynchronously.
 			Format: <driver_name1>,<driver_name2>...
@@ -2981,6 +3001,7 @@
 				nopti [X86,PPC]
 				nospectre_v1 [X86,PPC]
 				nospectre_v2 [X86,PPC,S390,ARM64]
+				reg_file_data_sampling=off [X86]
 				retbleed=off [X86]
 				spec_store_bypass_disable=off [X86,PPC]
 				spectre_v2_user=off [X86]
@@ -5193,9 +5214,9 @@
 			retpoline,generic - Retpolines
 			retpoline,lfence  - LFENCE; indirect branch
 			retpoline,amd     - alias for retpoline,lfence
-			eibrs             - enhanced IBRS
-			eibrs,retpoline   - enhanced IBRS + Retpolines
-			eibrs,lfence      - enhanced IBRS + LFENCE
+			eibrs             - Enhanced/Auto IBRS
+			eibrs,retpoline   - Enhanced/Auto IBRS + Retpolines
+			eibrs,lfence      - Enhanced/Auto IBRS + LFENCE
 			ibrs              - use IBRS to protect kernel
 
 			Not specifying this option is equivalent to
@@ -273,4 +273,11 @@ devices are described in the ZBC (Zoned Block Commands) and ZAC
 do not support zone commands, they will be treated as regular block devices
 and zoned will report "none".
 
+zone_write_granularity (RO)
+---------------------------
+This indicates the alignment constraint, in bytes, for write operations in
+sequential zones of zoned block devices (devices with a zoned attributed
+that reports "host-managed" or "host-aware"). This value is always 0 for
+regular block devices.
+
 Jens Axboe <jens.axboe@oracle.com>, February 2009
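[Editor's illustration: a consumer of the new limit must start writes to sequential zones at a multiple of it. A hedged C sketch of that check, with the helper name purely illustrative:]

	#include <stdbool.h>
	#include <stdint.h>

	/* A write at 'offset' honours zone_write_granularity 'gran';
	 * gran == 0 means a regular block device with no extra constraint
	 * beyond the logical block size.
	 */
	static bool zone_write_aligned(uint64_t offset, uint32_t gran)
	{
		return gran == 0 || (offset % gran) == 0;
	}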
@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
 
     mds_clear_cpu_buffers()
 
+Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
+Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+
 The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
 (idle) transitions.
 
@@ -138,17 +141,30 @@ Mitigation points
 
    When transitioning from kernel to user space the CPU buffers are flushed
    on affected CPUs when the mitigation is not disabled on the kernel
-   command line. The migitation is enabled through the static key
-   mds_user_clear.
+   command line. The mitigation is enabled through the feature flag
+   X86_FEATURE_CLEAR_CPU_BUF.
 
-   The mitigation is invoked in prepare_exit_to_usermode() which covers
-   all but one of the kernel to user space transitions. The exception
-   is when we return from a Non Maskable Interrupt (NMI), which is
-   handled directly in do_nmi().
+   The mitigation is invoked just before transitioning to userspace after
+   user registers are restored. This is done to minimize the window in
+   which kernel data could be accessed after VERW e.g. via an NMI after
+   VERW.
 
-   (The reason that NMI is special is that prepare_exit_to_usermode() can
-   enable IRQs. In NMI context, NMIs are blocked, and we don't want to
-   enable IRQs with NMIs blocked.)
+   **Corner case not handled**
+   Interrupts returning to kernel don't clear CPUs buffers since the
+   exit-to-user path is expected to do that anyways. But, there could be
+   a case when an NMI is generated in kernel after the exit-to-user path
+   has cleared the buffers. This case is not handled and NMI returning to
+   kernel don't clear CPU buffers because:
+
+   1. It is rare to get an NMI after VERW, but before returning to userspace.
+   2. For an unprivileged user, there is no known way to make that NMI
+      less rare or target it.
+   3. It would take a large number of these precisely-timed NMIs to mount
+      an actual attack. There's presumably not enough bandwidth.
+   4. The NMI in question occurs after a VERW, i.e. when user state is
+      restored and most interesting data is already scrubbed. Whats left
+      is only the data that NMI touches, and that may or may not be of
+      any interest.
 
 
 2. C-State transition
Makefile (2 lines changed)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 214
+SUBLEVEL = 215
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -28,7 +28,7 @@ &uart3 {
 &twsi1 {
 	status = "okay";
 	pmic: max8925@3c {
-		compatible = "maxium,max8925";
+		compatible = "maxim,max8925";
 		reg = <0x3c>;
 		interrupts = <1>;
 		interrupt-parent = <&intcmux4>;
@@ -810,7 +810,8 @@ bluetooth: bluetooth {
 		vddrf-supply = <&pp1300_l2c>;
 		vddch0-supply = <&pp3300_l10c>;
 		max-speed = <3200000>;
-		clocks = <&rpmhcc RPMH_RF_CLK2>;
+
+		qcom,local-bd-address-broken;
 	};
 };
 
@@ -732,11 +732,20 @@ hdmi: hdmi@ff3c0000 {
 		status = "disabled";
 
 		ports {
-			hdmi_in: port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			hdmi_in: port@0 {
+				reg = <0>;
+
 				hdmi_in_vop: endpoint {
 					remote-endpoint = <&vop_out_hdmi>;
 				};
 			};
+
+			hdmi_out: port@1 {
+				reg = <1>;
+			};
 		};
 	};
 
@@ -1769,6 +1769,7 @@ simple-audio-card,codec {
 	hdmi: hdmi@ff940000 {
 		compatible = "rockchip,rk3399-dw-hdmi";
 		reg = <0x0 0xff940000 0x0 0x20000>;
+		reg-io-width = <4>;
 		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
 		clocks = <&cru PCLK_HDMI_CTRL>,
 			 <&cru SCLK_HDMI_SFR>,
@@ -1777,13 +1778,16 @@ hdmi: hdmi@ff940000 {
 			 <&cru PLL_VPLL>;
 		clock-names = "iahb", "isfr", "cec", "grf", "vpll";
 		power-domains = <&power RK3399_PD_HDCP>;
-		reg-io-width = <4>;
 		rockchip,grf = <&grf>;
 		#sound-dai-cells = <0>;
 		status = "disabled";
 
 		ports {
-			hdmi_in: port {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			hdmi_in: port@0 {
+				reg = <0>;
 				#address-cells = <1>;
 				#size-cells = <0>;
 
@@ -1796,6 +1800,10 @@ hdmi_in_vopl: endpoint@1 {
 					remote-endpoint = <&vopl_out_hdmi>;
 				};
 			};
+
+			hdmi_out: port@1 {
+				reg = <1>;
+			};
 		};
 	};
 
@@ -64,6 +64,7 @@ SECTIONS
 	STABS_DEBUG
 	DWARF_DEBUG
 	ELF_DETAILS
+	.hexagon.attributes 0 : { *(.hexagon.attributes) }
 
 	DISCARDS
 }
@@ -83,26 +83,28 @@
  * version takes two arguments: a src and destination register.
  * However, the source and destination registers can not be
  * the same register.
+ *
+ * We use add,l to avoid clobbering the C/B bits in the PSW.
  */
 
 	.macro	tophys	grvirt, grphys
-	ldil	L%(__PAGE_OFFSET), \grphys
-	sub	\grvirt, \grphys, \grphys
+	ldil	L%(-__PAGE_OFFSET), \grphys
+	addl	\grvirt, \grphys, \grphys
 	.endm
 
 	.macro	tovirt	grphys, grvirt
 	ldil	L%(__PAGE_OFFSET), \grvirt
-	add	\grphys, \grvirt, \grvirt
+	addl	\grphys, \grvirt, \grvirt
 	.endm
 
 	.macro	tophys_r1	gr
-	ldil	L%(__PAGE_OFFSET), %r1
-	sub	\gr, %r1, \gr
+	ldil	L%(-__PAGE_OFFSET), %r1
+	addl	\gr, %r1, \gr
 	.endm
 
 	.macro	tovirt_r1	gr
 	ldil	L%(__PAGE_OFFSET), %r1
-	add	\gr, %r1, \gr
+	addl	\gr, %r1, \gr
 	.endm
 
 	.macro delay value
@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 "	addc	%0, %5, %0\n"
 "	addc	%0, %3, %0\n"
 "1:	ldws,ma	4(%1), %3\n"
-"	addib,<	0, %2, 1b\n"
+"	addib,>	-1, %2, 1b\n"
 "	addc	%0, %3, %0\n"
 "\n"
 "	extru	%0, 31, 16, %4\n"
@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 	** Try to keep 4 registers with "live" values ahead of the ALU.
 	*/
 
+"	depdi		0, 31, 32, %0\n"/* clear upper half of incoming checksum */
 "	ldd,ma		8(%1), %4\n"	/* get 1st saddr word */
 "	ldd,ma		8(%2), %5\n"	/* get 1st daddr word */
 "	add		%4, %0, %0\n"
@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 "	add,dc		%3, %0, %0\n"  /* fold in proto+len | carry bit */
 "	extrd,u		%0, 31, 32, %4\n"/* copy upper half down */
 "	depdi		0, 31, 32, %0\n"/* clear upper half */
-"	add		%4, %0, %0\n"	/* fold into 32-bits */
-"	addc		0, %0, %0\n"	/* add carry */
+"	add,dc		%4, %0, %0\n"	/* fold into 32-bits, plus carry */
+"	addc		0, %0, %0\n"	/* add final carry */
 
 #else
 
@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 "	ldw,ma	4(%2), %7\n"	/* 4th daddr */
 "	addc	%6, %0, %0\n"
 "	addc	%7, %0, %0\n"
-"	addc	%3, %0, %0\n"	/* fold in proto+len, catch carry */
+"	addc	%3, %0, %0\n"	/* fold in proto+len */
+"	addc	0, %0, %0\n"	/* add carry */
 
 #endif
 	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
@@ -12,9 +12,16 @@
 #ifndef __ASSEMBLY__
 /* Performance Monitor Registers */
 #define mfpmr(rn)	({unsigned int rval; \
-			asm volatile("mfpmr %0," __stringify(rn) \
+			asm volatile(".machine push; " \
+				     ".machine e300; " \
+				     "mfpmr %0," __stringify(rn) ";" \
+				     ".machine pop; " \
 				     : "=r" (rval)); rval;})
-#define mtpmr(rn, v)	asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
+#define mtpmr(rn, v)	asm volatile(".machine push; " \
+				     ".machine e300; " \
+				     "mtpmr " __stringify(rn) ",%0; " \
+				     ".machine pop; " \
+				     : : "r" (v))
 #endif /* __ASSEMBLY__ */
 
 /* Freescale Book E Performance Monitor APU Registers */
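[Editor's illustration: the .machine push/e300/pop bracketing above generalizes to any inline asm whose mnemonics newer binutils only accepts for a specific CPU. A hedged C sketch under that assumption; the function name and PMR number 16 are hypothetical, and it needs a powerpc toolchain targeting an e300 core:]

	static inline unsigned int read_pmr16(void)
	{
		unsigned int rval;

		/* Temporarily allow e300-only mnemonics, then restore
		 * the assembler's previous target.
		 */
		asm volatile(".machine push; "
			     ".machine e300; "
			     "mfpmr %0, 16; "
			     ".machine pop; "
			     : "=r" (rval));
		return rval;
	}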
@@ -67,6 +67,6 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
 
 obj-$(CONFIG_ALTIVEC)	+= xor_vmx.o xor_vmx_glue.o
-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
+CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
 
 obj-$(CONFIG_PPC64)	+= $(obj64-y)
@@ -468,7 +468,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 
 #define __get_kernel_nofault(dst, src, type, err_label)			\
 do {									\
-	long __kr_err;							\
+	long __kr_err = 0;						\
 									\
 	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
 	if (unlikely(__kr_err))						\
@@ -477,7 +477,7 @@ do { \
 
 #define __put_kernel_nofault(dst, src, type, err_label)			\
 do {									\
-	long __kr_err;							\
+	long __kr_err = 0;						\
 									\
 	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
 	if (unlikely(__kr_err))						\
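[Editor's illustration: the initializer matters because __kr_err is only written on the fault path, so on a successful access the following test would otherwise read an indeterminate value. A standalone C sketch of the same hazard and fix; the names are illustrative, not kernel code:]

	#include <stdio.h>

	/* Like the extable fixup: writes *err only on failure. */
	static int try_read(int fail, long *err)
	{
		if (fail) {
			*err = -14; /* -EFAULT */
			return 0;
		}
		return 42; /* success path never touches *err */
	}

	int main(void)
	{
		long err = 0; /* the fix: without '= 0' the test below reads garbage */
		int v = try_read(0, &err);

		if (err)
			printf("spurious error %ld\n", err);
		else
			printf("value %d\n", v);
		return 0;
	}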
@@ -1298,6 +1298,7 @@ ENDPROC(stack_overflow)
 
 #endif
 	.section .rodata, "a"
+	.balign 8
 #define SYSCALL(esame,emu)	.quad __s390x_ ## esame
 	.globl	sys_call_table
 sys_call_table:
@@ -274,7 +274,7 @@ static int __init setup_nmi_watchdog(char *str)
 	if (!strncmp(str, "panic", 5))
 		panic_on_timeout = 1;
 
-	return 0;
+	return 1;
 }
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
 	unsigned long val;
 
 	err = kstrtoul(s, 10, &val);
-	if (err)
-		return err;
-	vdso_enabled = val;
-	return 0;
+	if (!err)
+		vdso_enabled = val;
+	return 1;
 }
 __setup("vdso=", vdso_setup);
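[Editor's illustration: both sparc fixes apply the same __setup() convention: return 1 to mark the option as consumed, 0 to make the kernel treat it as unrecognized and pass it on to init with an "unknown parameter" complaint. A minimal sketch of a conforming handler; names are illustrative and it builds only in kernel context:]

	static unsigned long example_value __initdata;

	static int __init example_setup(char *str)
	{
		unsigned long val;

		if (kstrtoul(str, 10, &val) == 0)
			example_value = val; /* apply only on successful parse */

		return 1; /* 1 = consumed; 0 would mean "not my option" */
	}
	__setup("example=", example_setup);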
@@ -359,10 +359,6 @@ config X86_64_SMP
 	def_bool y
 	depends on X86_64 && SMP
 
-config X86_32_LAZY_GS
-	def_bool y
-	depends on X86_32 && !STACKPROTECTOR
-
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
 
@@ -385,7 +381,8 @@ config CC_HAS_SANE_STACKPROTECTOR
 	default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC))
 	help
 	  We have to make sure stack protector is unconditionally disabled if
-	  the compiler produces broken code.
+	  the compiler produces broken code or if it does not let us control
+	  the segment on 32-bit kernels.
 
 menu "Processor type and features"
 
@@ -2518,6 +2515,17 @@ config GDS_FORCE_MITIGATION
 
 	  If in doubt, say N.
 
+config MITIGATION_RFDS
+	bool "RFDS Mitigation"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Enable mitigation for Register File Data Sampling (RFDS) by default.
+	  RFDS is a hardware vulnerability which affects Intel Atom CPUs. It
+	  allows unprivileged speculative access to stale data previously
+	  stored in floating point, vector and integer registers.
+	  See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+
 endif
 
 config ARCH_HAS_ADD_PAGES
@@ -87,6 +87,14 @@ ifeq ($(CONFIG_X86_32),y)
 
         # temporary until string.h is fixed
         KBUILD_CFLAGS += -ffreestanding
+
+	ifeq ($(CONFIG_STACKPROTECTOR),y)
+		ifeq ($(CONFIG_SMP),y)
+			KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
+		else
+			KBUILD_CFLAGS += -mstack-protector-guard=global
+		endif
+	endif
 else
         BITS := 64
         UTS_MACHINE := x86_64
@@ -6,6 +6,9 @@
 #include <linux/linkage.h>
 #include <asm/export.h>
 #include <asm/msr-index.h>
+#include <asm/unwind_hints.h>
+#include <asm/segment.h>
+#include <asm/cache.h>
 
 .pushsection .noinstr.text, "ax"
 
@@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
 EXPORT_SYMBOL_GPL(entry_ibpb);
 
 .popsection
+
+/*
+ * Define the VERW operand that is disguised as entry code so that
+ * it can be referenced with KPTI enabled. This ensure VERW can be
+ * used late in exit-to-user path after page tables are switched.
+ */
+.pushsection .entry.text, "ax"
+
+.align L1_CACHE_BYTES, 0xcc
+SYM_CODE_START_NOALIGN(mds_verw_sel)
+	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
+	.word __KERNEL_DS
+.align L1_CACHE_BYTES, 0xcc
+SYM_CODE_END(mds_verw_sel);
+/* For KVM */
+EXPORT_SYMBOL_GPL(mds_verw_sel);
+
+.popsection
@@ -20,7 +20,7 @@
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
-*	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
+*	28(%esp) - unused -- was %gs on old stackprotector kernels
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
@@ -56,14 +56,9 @@
 /*
  * User gs save/restore
  *
- * %gs is used for userland TLS and kernel only uses it for stack
- * canary which is required to be at %gs:20 by gcc. Read the comment
- * at the top of stackprotector.h for more info.
- *
- * Local labels 98 and 99 are used.
+ * This is leftover junk from CONFIG_X86_32_LAZY_GS. A subsequent patch
+ * will remove it entirely.
  */
-#ifdef CONFIG_X86_32_LAZY_GS
 
 /* unfortunately push/pop can't be no-op */
 .macro PUSH_GS
	pushl	$0
@@ -86,49 +81,6 @@
 .macro SET_KERNEL_GS reg
 .endm
 
-#else	/* CONFIG_X86_32_LAZY_GS */
-
-.macro PUSH_GS
-	pushl	%gs
-.endm
-
-.macro POP_GS pop=0
-98:	popl	%gs
-  .if \pop <> 0
-	add	$\pop, %esp
-  .endif
-.endm
-.macro POP_GS_EX
-.pushsection .fixup, "ax"
-99:	movl	$0, (%esp)
-	jmp	98b
-.popsection
-	_ASM_EXTABLE(98b, 99b)
-.endm
-
-.macro PTGS_TO_GS
-98:	mov	PT_GS(%esp), %gs
-.endm
-.macro PTGS_TO_GS_EX
-.pushsection .fixup, "ax"
-99:	movl	$0, PT_GS(%esp)
-	jmp	98b
-.popsection
-	_ASM_EXTABLE(98b, 99b)
-.endm
-
-.macro GS_TO_REG reg
-	movl	%gs, \reg
-.endm
-.macro REG_TO_PTGS reg
-	movl	\reg, PT_GS(%esp)
-.endm
-.macro SET_KERNEL_GS reg
-	movl	$(__KERNEL_STACK_CANARY), \reg
-	movl	\reg, %gs
-.endm
-
-#endif /* CONFIG_X86_32_LAZY_GS */
 
 /* Unconditionally switch to user cr3 */
 .macro SWITCH_TO_USER_CR3 scratch_reg:req
@@ -779,7 +731,7 @@ SYM_CODE_START(__switch_to_asm)
 
 #ifdef CONFIG_STACKPROTECTOR
 	movl	TASK_stack_canary(%edx), %ebx
-	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+	movl	%ebx, PER_CPU_VAR(__stack_chk_guard)
 #endif
 
 /*
@@ -997,6 +949,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
 	BUG_IF_WRONG_CR3 no_user_check=1
 	popfl
 	popl	%eax
+	CLEAR_CPU_BUFFERS
 
 	/*
 	 * Return back to the vDSO, which will pop ecx and edx.
@@ -1069,6 +1022,7 @@ restore_all_switch_stack:
 
 	/* Restore user state */
 	RESTORE_REGS pop=4			# skip orig_eax/error_code
+	CLEAR_CPU_BUFFERS
 .Lirq_return:
 	/*
 	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -1267,6 +1221,7 @@ SYM_CODE_START(asm_exc_nmi)
 
 	/* Not on SYSENTER stack. */
 	call	exc_nmi
+	CLEAR_CPU_BUFFERS
 	jmp	.Lnmi_return
 
 .Lnmi_from_sysenter_stack:
@@ -615,6 +615,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 	/* Restore RDI. */
 	popq	%rdi
 	SWAPGS
+	CLEAR_CPU_BUFFERS
 	INTERRUPT_RETURN
 
 
@@ -721,6 +722,8 @@ native_irq_return_ldt:
 	 */
 	popq	%rax				/* Restore user RAX */
 
+	CLEAR_CPU_BUFFERS
+
 	/*
 	 * RSP now points to an ordinary IRET frame, except that the page
 	 * is read-only and RSP[31:16] are preloaded with the userspace
@@ -1487,6 +1490,12 @@ nmi_restore:
 	std
 	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
 
+	/*
+	 * Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
+	 * NMI in kernel after user state is restored. For an unprivileged user
+	 * these conditions are hard to meet.
+	 */
+
 	/*
 	 * iretq reads the "iret" frame and exits the NMI stack in a
 	 * single instruction. We are returning to kernel mode, so this
|
||||
SYM_CODE_START(ignore_sysret)
|
||||
UNWIND_HINT_EMPTY
|
||||
mov $-ENOSYS, %eax
|
||||
CLEAR_CPU_BUFFERS
|
||||
sysretl
|
||||
SYM_CODE_END(ignore_sysret)
|
||||
#endif
|
||||
|
@@ -319,6 +319,7 @@ sysret32_from_system_call:
 	xorl	%r9d, %r9d
 	xorl	%r10d, %r10d
 	swapgs
+	CLEAR_CPU_BUFFERS
 	sysretl
 SYM_CODE_END(entry_SYSCALL_compat)
 
@@ -12,6 +12,7 @@
 #include <asm/special_insns.h>
 #include <asm/preempt.h>
 #include <asm/asm.h>
+#include <asm/nospec-branch.h>
 
 #ifndef CONFIG_X86_CMPXCHG64
 extern void cmpxchg8b_emu(void);
@@ -6,12 +6,14 @@
 # define __ASM_FORM(x)	x
 # define __ASM_FORM_RAW(x)     x
 # define __ASM_FORM_COMMA(x) x,
+# define __ASM_REGPFX			%
 #else
 #include <linux/stringify.h>
 
 # define __ASM_FORM(x)	" " __stringify(x) " "
 # define __ASM_FORM_RAW(x)     __stringify(x)
 # define __ASM_FORM_COMMA(x) " " __stringify(x) ","
+# define __ASM_REGPFX			%%
 #endif
 
 #ifndef __x86_64__
@@ -48,6 +50,9 @@
 #define _ASM_SI		__ASM_REG(si)
 #define _ASM_DI		__ASM_REG(di)
 
+/* Adds a (%rip) suffix on 64 bits only; for immediate memory references */
+#define _ASM_RIP(x)	__ASM_SEL_RAW(x, x (__ASM_REGPFX rip))
+
 #ifndef __x86_64__
 /* 32 bit */
 
@@ -33,6 +33,8 @@ enum cpuid_leafs
 	CPUID_7_EDX,
 	CPUID_8000_001F_EAX,
 	CPUID_8000_0021_EAX,
+	CPUID_LNX_5,
+	NR_CPUID_WORDS,
 };
 
 #ifdef CONFIG_X86_FEATURE_NAMES
@@ -93,8 +95,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) ||	\
 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) ||	\
 	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||	\
+	   CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) ||	\
 	   REQUIRED_MASK_CHECK					  ||	\
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 21))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 22))
 
 #define DISABLED_MASK_BIT_SET(feature_bit)				\
 	( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||	\
#define DISABLED_MASK_BIT_SET(feature_bit) \
|
||||
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
|
||||
@ -118,8 +121,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
|
||||
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
|
||||
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
|
||||
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
|
||||
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
|
||||
DISABLED_MASK_CHECK || \
|
||||
BUILD_BUG_ON_ZERO(NCAPINTS != 21))
|
||||
BUILD_BUG_ON_ZERO(NCAPINTS != 22))
|
||||
|
||||
#define cpu_has(c, bit) \
|
||||
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
|
||||
|
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS			21	   /* N 32-bit words worth of info */
+#define NCAPINTS			22	   /* N 32-bit words worth of info */
 #define NBUGINTS			2	   /* N 32-bit bug flags */
 
 /*
@@ -300,6 +300,7 @@
 #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
 #define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 #define X86_FEATURE_MSR_TSX_CTRL	(11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+#define X86_FEATURE_CLEAR_CPU_BUF	(11*32+19) /* "" Clear CPU buffers using VERW */
 
 #define X86_FEATURE_SRSO		(11*32+24) /* "" AMD BTB untrain RETs */
 #define X86_FEATURE_SRSO_ALIAS		(11*32+25) /* "" AMD BTB untrain RETs through aliasing */
@@ -403,6 +404,7 @@
 #define X86_FEATURE_SEV_ES		(19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
 #define X86_FEATURE_SME_COHERENT	(19*32+10) /* "" AMD hardware-enforced cache coherency */
 
+#define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
 #define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
 #define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
@@ -452,4 +454,5 @@
 /* BUG word 2 */
 #define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
 #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+#define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
 #endif /* _ASM_X86_CPUFEATURES_H */
@@ -103,6 +103,7 @@
 #define DISABLED_MASK18	0
 #define DISABLED_MASK19	0
 #define DISABLED_MASK20	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
@@ -77,7 +77,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 static __always_inline void arch_exit_to_user_mode(void)
 {
-	mds_user_clear_cpu_buffers();
 	amd_clear_divider();
 }
 #define arch_exit_to_user_mode arch_exit_to_user_mode
@@ -134,6 +134,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
 #define INTERRUPT_RETURN	jmp native_iret
 #define USERGS_SYSRET64				\
 	swapgs;					\
+	CLEAR_CPU_BUFFERS;			\
 	sysretq;
 #define USERGS_SYSRET32				\
 	swapgs;					\
@@ -30,6 +30,7 @@
 #define _EFER_SVME		12 /* Enable virtualization */
 #define _EFER_LMSLE		13 /* Long Mode Segment Limit Enable */
 #define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS		21 /* Enable Automatic IBRS */
 
 #define EFER_SCE		(1<<_EFER_SCE)
 #define EFER_LME		(1<<_EFER_LME)
@@ -38,6 +39,7 @@
 #define EFER_SVME		(1<<_EFER_SVME)
 #define EFER_LMSLE		(1<<_EFER_LMSLE)
 #define EFER_FFXSR		(1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS		(1<<_EFER_AUTOIBRS)
 
 /* Intel MSRs. Some also available on other CPUs */
 
@@ -166,6 +168,14 @@
 						 * CPU is not vulnerable to Gather
 						 * Data Sampling (GDS).
 						 */
+#define ARCH_CAP_RFDS_NO	BIT(27)		/*
+						 * Not susceptible to Register
+						 * File Data Sampling.
+						 */
+#define ARCH_CAP_RFDS_CLEAR	BIT(28)		/*
+						 * VERW clears CPU Register
+						 * File.
+						 */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH		BIT(0)	/*
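[Editor's illustration: enumeration reduces to two bit tests on the IA32_ARCH_CAPABILITIES value. A hedged C sketch of the decision; the MSR value is passed in rather than read via rdmsr, the helper name is illustrative, and the real kernel logic also weighs the command line and the config default:]

	#include <stdbool.h>
	#include <stdint.h>

	#define ARCH_CAP_RFDS_NO    (1ULL << 27)
	#define ARCH_CAP_RFDS_CLEAR (1ULL << 28)

	static bool rfds_mitigation_possible(uint64_t arch_cap)
	{
		if (arch_cap & ARCH_CAP_RFDS_NO)
			return false; /* hardware not affected */
		/* affected: VERW clearing works only with updated microcode */
		return (arch_cap & ARCH_CAP_RFDS_CLEAR) != 0;
	}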
@@ -155,11 +155,20 @@
 .Lskip_rsb_\@:
 .endm
 
+/*
+ * The CALL to srso_alias_untrain_ret() must be patched in directly at
+ * the spot where untraining must be done, ie., srso_alias_untrain_ret()
+ * must be the target of a CALL instruction instead of indirectly
+ * jumping to a wrapper which then calls it. Therefore, this macro is
+ * called outside of __UNTRAIN_RET below, for the time being, before the
+ * kernel can support nested alternatives with arbitrary nesting.
+ */
+.macro CALL_UNTRAIN_RET
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
-#else
-#define CALL_UNTRAIN_RET	""
+	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
+		          "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
 #endif
+.endm
 
 /*
  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
@@ -176,12 +185,24 @@
 #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
 	defined(CONFIG_CPU_SRSO)
 	ANNOTATE_UNRET_END
-	ALTERNATIVE_2 "",						\
-	              CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
-		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+	CALL_UNTRAIN_RET
+	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
 .endm
 
+/*
+ * Macro to execute VERW instruction that mitigate transient data sampling
+ * attacks such as MDS. On affected systems a microcode update overloaded VERW
+ * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
+ *
+ * Note: Only the memory operand variant of VERW clears the CPU buffers.
+ */
+.macro CLEAR_CPU_BUFFERS
+	ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
+	verw _ASM_RIP(mds_verw_sel)
+.Lskip_verw_\@:
+.endm
+
 #else /* __ASSEMBLY__ */
 
 #define ANNOTATE_RETPOLINE_SAFE					\
@@ -357,11 +378,12 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
 
-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 
 DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
 
+extern u16 mds_verw_sel;
+
 #include <asm/segment.h>
 
 /**
@@ -387,17 +409,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
 	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
 }
 
-/**
- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
- *
- * Clear CPU buffers if the corresponding static key is enabled
- */
-static __always_inline void mds_user_clear_cpu_buffers(void)
-{
-	if (static_branch_likely(&mds_user_clear))
-		mds_clear_cpu_buffers();
-}
-
 /**
  * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
  *
@@ -441,6 +441,9 @@ struct fixed_percpu_data {
 	 * GCC hardcodes the stack canary as %gs:40. Since the
 	 * irq_stack is the object at %gs:0, we reserve the bottom
 	 * 48 bytes of the irq stack for the canary.
+	 *
+	 * Once we are willing to require -mstack-protector-guard-symbol=
+	 * support for x86_64 stackprotector, we can get rid of this.
 	 */
 	char		gs_base[40];
 	unsigned long	stack_canary;
@@ -461,17 +464,7 @@ extern asmlinkage void ignore_sysret(void);
 void current_save_fsgs(void);
 #else	/* X86_64 */
 #ifdef CONFIG_STACKPROTECTOR
-/*
- * Make sure stack canary segment base is cached-aligned:
- *   "For Intel Atom processors, avoid non zero segment base address
- *    that is not aligned to cache line boundary at all cost."
- * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
- */
-struct stack_canary {
-	char __pad[20];		/* canary at %gs:20 */
-	unsigned long canary;
-};
-DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
 #endif
 /* Per CPU softirq stack pointer */
 DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
@@ -37,7 +37,10 @@ struct pt_regs {
 	unsigned short __esh;
 	unsigned short fs;
 	unsigned short __fsh;
-	/* On interrupt, gs and __gsh store the vector number. */
+	/*
+	 * On interrupt, gs and __gsh store the vector number. They never
+	 * store gs any more.
+	 */
 	unsigned short gs;
 	unsigned short __gsh;
 	/* On interrupt, this is the error code. */
@@ -103,6 +103,7 @@
 #define REQUIRED_MASK18	0
 #define REQUIRED_MASK19	0
 #define REQUIRED_MASK20	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
 
 #endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -95,7 +95,7 @@
 *
 *  26 - ESPFIX small SS
 *  27 - per-cpu			[ offset to per-cpu data area ]
-*  28 - stack_canary-20		[ for stack protector ]		<=== cacheline #8
+*  28 - unused
 *  29 - unused
 *  30 - unused
 *  31 - TSS for double fault handler
@@ -118,7 +118,6 @@
 
 #define GDT_ENTRY_ESPFIX_SS		26
 #define GDT_ENTRY_PERCPU		27
-#define GDT_ENTRY_STACK_CANARY		28
 
 #define GDT_ENTRY_DOUBLEFAULT_TSS	31
 
@@ -158,12 +157,6 @@
 # define __KERNEL_PERCPU		0
 #endif
 
-#ifdef CONFIG_STACKPROTECTOR
-# define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
-#else
-# define __KERNEL_STACK_CANARY		0
-#endif
-
 #else /* 64-bit: */
 
 #include <asm/cache.h>
@ -364,22 +357,15 @@ static inline void __loadsegment_fs(unsigned short value)
|
||||
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
|
||||
|
||||
/*
|
||||
* x86-32 user GS accessors:
|
||||
* x86-32 user GS accessors. This is ugly and could do with some cleaning up.
|
||||
*/
|
||||
#ifdef CONFIG_X86_32
|
||||
# ifdef CONFIG_X86_32_LAZY_GS
|
||||
# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; })
|
||||
# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
|
||||
# define task_user_gs(tsk) ((tsk)->thread.gs)
|
||||
# define lazy_save_gs(v) savesegment(gs, (v))
|
||||
# define lazy_load_gs(v) loadsegment(gs, (v))
|
||||
# else /* X86_32_LAZY_GS */
|
||||
# define get_user_gs(regs) (u16)((regs)->gs)
|
||||
# define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
|
||||
# define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
|
||||
# define lazy_save_gs(v) do { } while (0)
|
||||
# define lazy_load_gs(v) do { } while (0)
|
||||
# endif /* X86_32_LAZY_GS */
|
||||
# define load_gs_index(v) loadsegment(gs, (v))
|
||||
#endif /* X86_32 */
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
@@ -49,7 +49,6 @@ extern unsigned long saved_video_mode;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
extern unsigned long __startup_secondary_64(void);
extern void startup_64_setup_env(unsigned long physbase);
extern void early_setup_idt(void);
extern void __init do_early_exception(struct pt_regs *regs, int trapnr);

@@ -5,30 +5,23 @@
 * Stack protector works by putting predefined pattern at the start of
 * the stack frame and verifying that it hasn't been overwritten when
 * returning from the function. The pattern is called stack canary
 * and unfortunately gcc requires it to be at a fixed offset from %gs.
 * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64
 * and x86_32 use segment registers differently and thus handles this
 * requirement differently.
 * and unfortunately gcc historically required it to be at a fixed offset
 * from the percpu segment base. On x86_64, the offset is 40 bytes.
 *
 * On x86_64, %gs is shared by percpu area and stack canary. All
 * percpu symbols are zero based and %gs points to the base of percpu
 * area. The first occupant of the percpu area is always
 * fixed_percpu_data which contains stack_canary at offset 40. Userland
 * %gs is always saved and restored on kernel entry and exit using
 * swapgs, so stack protector doesn't add any complexity there.
 * The same segment is shared by percpu area and stack canary. On
 * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
 * base of percpu area. The first occupant of the percpu area is always
 * fixed_percpu_data which contains stack_canary at the appropriate
 * offset. On x86_32, the stack canary is just a regular percpu
 * variable.
 *
 * On x86_32, it's slightly more complicated. As in x86_64, %gs is
 * used for userland TLS. Unfortunately, some processors are much
 * slower at loading segment registers with different value when
 * entering and leaving the kernel, so the kernel uses %fs for percpu
 * area and manages %gs lazily so that %gs is switched only when
 * necessary, usually during task switch.
 * Putting percpu data in %fs on 32-bit is a minor optimization compared to
 * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
 * to load 0 into %fs on exit to usermode, whereas with percpu data in
 * %gs, we are likely to load a non-null %gs on return to user mode.
 *
 * As gcc requires the stack canary at %gs:20, %gs can't be managed
 * lazily if stack protector is enabled, so the kernel saves and
 * restores userland %gs on kernel entry and exit. This behavior is
 * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
 * system.h to hide the details.
 * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
 * support, we can remove some of this complexity.
 */

#ifndef _ASM_STACKPROTECTOR_H
@@ -44,14 +37,6 @@
#include <linux/random.h>
#include <linux/sched.h>

/*
 * 24 byte read-only segment initializer for stack canary. Linker
 * can't handle the address bit shifting. Address will be set in
 * head_32 for boot CPU and setup_per_cpu_areas() for others.
 */
#define GDT_STACK_CANARY_INIT \
        [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),

/*
 * Initialize the stackprotector canary value.
 *
@@ -86,7 +71,7 @@ static __always_inline void boot_init_stack_canary(void)
#ifdef CONFIG_X86_64
        this_cpu_write(fixed_percpu_data.stack_canary, canary);
#else
        this_cpu_write(stack_canary.canary, canary);
        this_cpu_write(__stack_chk_guard, canary);
#endif
}

@@ -95,48 +80,16 @@ static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
#ifdef CONFIG_X86_64
        per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
#else
        per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
#endif
}

static inline void setup_stack_canary_segment(int cpu)
{
#ifdef CONFIG_X86_32
        unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
        struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu);
        struct desc_struct desc;

        desc = gdt_table[GDT_ENTRY_STACK_CANARY];
        set_desc_base(&desc, canary);
        write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
#endif
}

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
        asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
        per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
#endif
}

#else /* STACKPROTECTOR */

#define GDT_STACK_CANARY_INIT

/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */

static inline void setup_stack_canary_segment(int cpu)
{ }

static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{ }

static inline void load_stack_canary_segment(void)
{
#ifdef CONFIG_X86_32
        asm volatile ("mov %0, %%gs" : : "r" (0));
#endif
}

#endif /* STACKPROTECTOR */
#endif /* _ASM_STACKPROTECTOR_H */

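For context, a minimal sketch of how the __stack_chk_guard symbol introduced above is consumed. This is illustrative only: the function below is hypothetical, while __stack_chk_guard and __stack_chk_fail are the GCC stack-protector ABI the patch relies on.

        extern unsigned long __stack_chk_guard;
        extern void __stack_chk_fail(void);

        void example(void)
        {
                unsigned long canary = __stack_chk_guard;  /* compiler-emitted prologue load */
                /* ... function body ... */
                if (canary != __stack_chk_guard)           /* compiler-emitted epilogue check */
                        __stack_chk_fail();
        }
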
@@ -12,13 +12,6 @@

/* image of the saved processor state */
struct saved_context {
        /*
         * On x86_32, all segment registers, with the possible exception of
         * gs, are saved at kernel entry in pt_regs.
         */
#ifdef CONFIG_X86_32_LAZY_GS
        u16 gs;
#endif
        unsigned long cr0, cr2, cr3, cr4;
        u64 misc_enable;
        struct saved_msrs saved_msrs;
@@ -29,6 +22,11 @@ struct saved_context {
        unsigned long tr;
        unsigned long safety;
        unsigned long return_address;
        /*
         * On x86_32, all segment registers except gs are saved at kernel
         * entry in pt_regs.
         */
        u16 gs;
        bool misc_enable_saved;
} __attribute__((packed));

@@ -49,7 +49,6 @@ endif
# non-deterministic coverage.
KCOV_INSTRUMENT := n

CFLAGS_head$(BITS).o += -fno-stack-protector
CFLAGS_cc_platform.o += -fno-stack-protector

CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace

@@ -53,11 +53,6 @@ void foo(void)
               offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
               offsetofend(struct cpu_entry_area, entry_stack_page.stack));

#ifdef CONFIG_STACKPROTECTOR
        BLANK();
        OFFSET(stack_canary_offset, stack_canary, canary);
#endif

        BLANK();
        DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map));
}

@@ -1049,11 +1049,11 @@ static bool cpu_has_zenbleed_microcode(void)
        u32 good_rev = 0;

        switch (boot_cpu_data.x86_model) {
        case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
        case 0x60 ... 0x67: good_rev = 0x0860010b; break;
        case 0x68 ... 0x6f: good_rev = 0x08608105; break;
        case 0x70 ... 0x7f: good_rev = 0x08701032; break;
        case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
        case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
        case 0x60 ... 0x67: good_rev = 0x0860010c; break;
        case 0x68 ... 0x6f: good_rev = 0x08608107; break;
        case 0x70 ... 0x7f: good_rev = 0x08701033; break;
        case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

        default:
                return false;

@@ -109,9 +109,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -249,7 +246,7 @@ static void __init mds_select_mitigation(void)
        if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
                mds_mitigation = MDS_MITIGATION_VMWERV;

        static_branch_enable(&mds_user_clear);
        setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

        if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
            (mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -353,7 +350,7 @@ static void __init taa_select_mitigation(void)
         * For guests that can't determine whether the correct microcode is
         * present on host, enable the mitigation for UCODE_NEEDED as well.
         */
        static_branch_enable(&mds_user_clear);
        setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

        if (taa_nosmt || cpu_mitigations_auto_nosmt())
                cpu_smt_disable(false);
@@ -421,7 +418,14 @@ static void __init mmio_select_mitigation(void)
         */
        if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
                                              boot_cpu_has(X86_FEATURE_RTM)))
                static_branch_enable(&mds_user_clear);
                setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

        /*
         * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
         * mitigations, disable KVM-only mitigation in that case.
         */
        if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
                static_branch_disable(&mmio_stale_data_clear);
        else
                static_branch_enable(&mmio_stale_data_clear);

@@ -473,6 +477,57 @@ static int __init mmio_stale_data_parse_cmdline(char *str)
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "Register File Data Sampling: " fmt

enum rfds_mitigations {
        RFDS_MITIGATION_OFF,
        RFDS_MITIGATION_VERW,
        RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
        IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;

static const char * const rfds_strings[] = {
        [RFDS_MITIGATION_OFF] = "Vulnerable",
        [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
        [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
};

static void __init rfds_select_mitigation(void)
{
        if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
                rfds_mitigation = RFDS_MITIGATION_OFF;
                return;
        }
        if (rfds_mitigation == RFDS_MITIGATION_OFF)
                return;

        if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
                setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
        else
                rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
}

static __init int rfds_parse_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (!boot_cpu_has_bug(X86_BUG_RFDS))
                return 0;

        if (!strcmp(str, "off"))
                rfds_mitigation = RFDS_MITIGATION_OFF;
        else if (!strcmp(str, "on"))
                rfds_mitigation = RFDS_MITIGATION_VERW;

        return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

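For reference, a minimal sketch of the early_param() pattern used by the handler above (names below are hypothetical; the real handler is rfds_parse_cmdline, wired to "reg_file_data_sampling=" on the kernel command line):

        static bool example_off __initdata;

        static __init int example_parse(char *str)
        {
                if (str && !strcmp(str, "off"))
                        example_off = true;  /* "example_feature=off" on the command line */
                return 0;
        }
        early_param("example_feature", example_parse);
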
@@ -481,12 +536,12 @@ static void __init md_clear_update_mitigation(void)
        if (cpu_mitigations_off())
                return;

        if (!static_key_enabled(&mds_user_clear))
        if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
                goto out;

        /*
         * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
         * mitigation, if necessary.
         * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
         * Stale Data mitigation, if necessary.
         */
        if (mds_mitigation == MDS_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_MDS)) {
@@ -498,11 +553,19 @@ static void __init md_clear_update_mitigation(void)
                taa_mitigation = TAA_MITIGATION_VERW;
                taa_select_mitigation();
        }
        if (mmio_mitigation == MMIO_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
        /*
         * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
         * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
         */
        if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
                mmio_mitigation = MMIO_MITIGATION_VERW;
                mmio_select_mitigation();
        }
        if (rfds_mitigation == RFDS_MITIGATION_OFF &&
            boot_cpu_has_bug(X86_BUG_RFDS)) {
                rfds_mitigation = RFDS_MITIGATION_VERW;
                rfds_select_mitigation();
        }
out:
        if (boot_cpu_has_bug(X86_BUG_MDS))
                pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
@@ -512,6 +575,8 @@ static void __init md_clear_update_mitigation(void)
                pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
        else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
                pr_info("MMIO Stale Data: Unknown: No mitigations\n");
        if (boot_cpu_has_bug(X86_BUG_RFDS))
                pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
}

static void __init md_clear_select_mitigation(void)
@@ -519,11 +584,12 @@ static void __init md_clear_select_mitigation(void)
        mds_select_mitigation();
        taa_select_mitigation();
        mmio_select_mitigation();
        rfds_select_mitigation();

        /*
         * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
         * and print their mitigation after MDS, TAA and MMIO Stale Data
         * mitigation selection is done.
         * As these mitigations are inter-related and rely on VERW instruction
         * to clear the microarchitectural buffers, update and print their status
         * after mitigation selection is done for each of these vulnerabilities.
         */
        md_clear_update_mitigation();
}
@@ -1251,19 +1317,21 @@ spectre_v2_user_select_mitigation(void)
}

        /*
         * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
         * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
         * is not required.
         *
         * Enhanced IBRS also protects against cross-thread branch target
         * Intel's Enhanced IBRS also protects against cross-thread branch target
         * injection in user-mode as the IBRS bit remains always set which
         * implicitly enables cross-thread protections. However, in legacy IBRS
         * mode, the IBRS bit is set only on kernel entry and cleared on return
         * to userspace. This disables the implicit cross-thread protection,
         * so allow for STIBP to be selected in that case.
         * to userspace. AMD Automatic IBRS also does not protect userspace.
         * These modes therefore disable the implicit cross-thread protection,
         * so allow for STIBP to be selected in those cases.
         */
        if (!boot_cpu_has(X86_FEATURE_STIBP) ||
            !smt_possible ||
            spectre_v2_in_eibrs_mode(spectre_v2_enabled))
            (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
             !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
                return;

        /*
@@ -1293,9 +1361,9 @@ static const char * const spectre_v2_strings[] = {
        [SPECTRE_V2_NONE] = "Vulnerable",
        [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
        [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
        [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
        [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
        [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
        [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
        [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
        [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
        [SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};

@@ -1364,7 +1432,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
             cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
             cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
            !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
                pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
                       mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }
@@ -1549,9 +1617,13 @@ static void __init spectre_v2_select_mitigation(void)
                pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

        if (spectre_v2_in_ibrs_mode(mode)) {
                if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
                        msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        update_spec_ctrl(x86_spec_ctrl_base);
                }
        }

        switch (mode) {
        case SPECTRE_V2_NONE:
@@ -1634,8 +1706,8 @@ static void __init spectre_v2_select_mitigation(void)
        /*
         * Retpoline protects the kernel, but doesn't protect firmware. IBRS
         * and Enhanced IBRS protect firmware too, so enable IBRS around
         * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
         * enabled.
         * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
         * otherwise enabled.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
@@ -2432,16 +2504,16 @@ static const char * const l1tf_vmx_states[] = {
static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
                return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                                 l1tf_vmx_states[l1tf_vmx_mitigation]);
        }

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
        return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                          l1tf_vmx_states[l1tf_vmx_mitigation],
                          sched_smt_active() ? "vulnerable" : "disabled");
}
@@ -2450,40 +2522,40 @@ static ssize_t itlb_multihit_show_state(char *buf)
{
        if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
            !boot_cpu_has(X86_FEATURE_VMX))
                return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
                return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
        else if (!(cr4_read_shadow() & X86_CR4_VMXE))
                return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
                return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
        else if (itlb_multihit_kvm_mitigation)
                return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
                return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
        else
                return sprintf(buf, "KVM: Vulnerable\n");
                return sysfs_emit(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
        return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
        return sprintf(buf, "Processor vulnerable\n");
        return sysfs_emit(buf, "Processor vulnerable\n");
}
#endif

static ssize_t mds_show_state(char *buf)
{
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                return sysfs_emit(buf, "%s; SMT Host state unknown\n",
                                 mds_strings[mds_mitigation]);
        }

        if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
                return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                                 (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
                                  sched_smt_active() ? "mitigated" : "disabled"));
        }

        return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
        return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
                          sched_smt_active() ? "vulnerable" : "disabled");
}

@@ -2491,14 +2563,14 @@ static ssize_t tsx_async_abort_show_state(char *buf)
{
        if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
            (taa_mitigation == TAA_MITIGATION_OFF))
                return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
                return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
                return sprintf(buf, "%s; SMT Host state unknown\n",
                return sysfs_emit(buf, "%s; SMT Host state unknown\n",
                                 taa_strings[taa_mitigation]);
        }

        return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
        return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
                          sched_smt_active() ? "vulnerable" : "disabled");
}

@@ -2519,9 +2591,15 @@ static ssize_t mmio_stale_data_show_state(char *buf)
                          sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t rfds_show_state(char *buf)
{
        return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
}

static char *stibp_state(void)
{
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
            !boot_cpu_has(X86_FEATURE_AUTOIBRS))
                return "";

        switch (spectre_v2_user_stibp) {
@@ -2567,16 +2645,16 @@ static char *pbrsb_eibrs_state(void)
static ssize_t spectre_v2_show_state(char *buf)
{
        if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
                return sprintf(buf, "Vulnerable: LFENCE\n");
                return sysfs_emit(buf, "Vulnerable: LFENCE\n");

        if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
                return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
                return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

        if (sched_smt_active() && unprivileged_ebpf_enabled() &&
            spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
                return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
                return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

        return sprintf(buf, "%s%s%s%s%s%s%s\n",
        return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
                          spectre_v2_strings[spectre_v2_enabled],
                          ibpb_state(),
                          boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
@@ -2588,7 +2666,7 @@ static ssize_t spectre_v2_show_state(char *buf)

static ssize_t srbds_show_state(char *buf)
{
        return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
        return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t retbleed_show_state(char *buf)
@@ -2597,17 +2675,16 @@ static ssize_t retbleed_show_state(char *buf)
            retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
                if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
                    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                        return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
                        return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

                return sprintf(buf, "%s; SMT %s\n",
                               retbleed_strings[retbleed_mitigation],
                return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
                                  !sched_smt_active() ? "disabled" :
                                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
                                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
                                  "enabled with STIBP protection" : "vulnerable");
        }

        return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
        return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t gds_show_state(char *buf)
@@ -2629,26 +2706,26 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");
                return sysfs_emit(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
                        return sysfs_emit(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
                        return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
                return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

        case X86_BUG_SPECTRE_V2:
                return spectre_v2_show_state(buf);

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
                return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
@@ -2680,11 +2757,14 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_SRSO:
                return srso_show_state(buf);

        case X86_BUG_RFDS:
                return rfds_show_state(buf);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
        return sysfs_emit(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
@@ -2754,4 +2834,9 @@ ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribut
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}
#endif

@@ -166,7 +166,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {

        [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
@@ -600,7 +599,6 @@ void load_percpu_segment(int cpu)
        __loadsegment_simple(gs, 0);
        wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
        load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
@@ -1098,8 +1096,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),

        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
        VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
        VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
        VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
        VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),

        /* Zhaoxin Family 7 */
        VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
@@ -1134,6 +1132,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define SRSO BIT(5)
/* CPU is affected by GDS */
#define GDS BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS BIT(7)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1161,14 +1161,23 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
        VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
        VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(ALDERLAKE_N, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
        VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS),
        VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS),

        VULNBL_AMD(0x15, RETBLEED),
        VULNBL_AMD(0x16, RETBLEED),
        VULNBL_AMD(0x17, RETBLEED | SRSO),
        VULNBL_HYGON(0x18, RETBLEED),
        VULNBL_HYGON(0x18, RETBLEED | SRSO),
        VULNBL_AMD(0x19, SRSO),
        {}
};
@@ -1197,6 +1206,24 @@ static bool arch_cap_mmio_immune(u64 ia32_cap)
                ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
}

static bool __init vulnerable_to_rfds(u64 ia32_cap)
{
        /* The "immunity" bit trumps everything else: */
        if (ia32_cap & ARCH_CAP_RFDS_NO)
                return false;

        /*
         * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
         * indicate that mitigation is needed because guest is running on a
         * vulnerable hardware or may migrate to such hardware:
         */
        if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
                return true;

        /* Only consult the blacklist when there is no enumeration: */
        return cpu_matches(cpu_vuln_blacklist, RFDS);
}

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
        u64 ia32_cap = x86_read_arch_cap_msr();
@@ -1219,8 +1246,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
            !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

        if (ia32_cap & ARCH_CAP_IBRS_ALL)
        /*
         * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
         * flag and protect from vendor-specific bugs via the whitelist.
         */
        if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
                if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
                    !(ia32_cap & ARCH_CAP_PBRSB_NO))
                        setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
        }

        if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
            !(ia32_cap & ARCH_CAP_MDS_NO)) {
@@ -1282,11 +1317,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                setup_force_cpu_bug(X86_BUG_RETBLEED);
        }

        if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
            !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
            !(ia32_cap & ARCH_CAP_PBRSB_NO))
                setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);

        /*
         * Check if CPU is vulnerable to GDS. If running in a virtual machine on
         * an affected processor, the VMM may have disabled the use of GATHER by
@@ -1302,6 +1332,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                setup_force_cpu_bug(X86_BUG_SRSO);
        }

        if (vulnerable_to_rfds(ia32_cap))
                setup_force_cpu_bug(X86_BUG_RFDS);

        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;

@@ -1937,7 +1970,8 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif

#endif /* CONFIG_X86_64 */

@@ -2389,12 +2389,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
                return -EINVAL;

        b = &per_cpu(mce_banks_array, s->id)[bank];

        if (!b->init)
                return -ENODEV;

        b->ctl = new;

        mutex_lock(&mce_sysfs_mutex);
        mce_restart();
        mutex_unlock(&mce_sysfs_mutex);

        return size;
}

@@ -100,9 +100,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
                .ss = __KERNEL_DS,
                .ds = __USER_DS,
                .fs = __KERNEL_PERCPU,
#ifndef CONFIG_X86_32_LAZY_GS
                .gs = __KERNEL_STACK_CANARY,
#endif
                .gs = 0,

                .__cr3 = __pa_nodebug(swapper_pg_dir),
        },

@@ -302,15 +302,6 @@ unsigned long __head __startup_64(unsigned long physaddr,
        return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{

@@ -319,8 +319,8 @@ SYM_FUNC_START(startup_32_smp)
        movl $(__KERNEL_PERCPU), %eax
        movl %eax,%fs # set this cpu's percpu

        movl $(__KERNEL_STACK_CANARY),%eax
        movl %eax,%gs
        xorl %eax,%eax
        movl %eax,%gs # clear possible garbage in %gs

        xorl %eax,%eax # Clear LDT
        lldt %ax
@@ -340,20 +340,6 @@ SYM_FUNC_END(startup_32_smp)
 */
__INIT
setup_once:
#ifdef CONFIG_STACKPROTECTOR
        /*
         * Configure the stack canary. The linker can't handle this by
         * relocation. Manually set base address in stack canary
         * segment descriptor.
         */
        movl $gdt_page,%eax
        movl $stack_canary,%ecx
        movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
        shrl $16, %ecx
        movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
        movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
#endif

        andl $0,setup_once_ref /* Once is enough, thanks */
        RET

@@ -74,6 +74,22 @@ SYM_CODE_START_NOALIGN(startup_64)
        leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

        leaq _text(%rip), %rdi

        /*
         * initial_gs points to initial fixed_percpu_data struct with storage for
         * the stack protector canary. Global pointer fixups are needed at this
         * stage, so apply them as is done in fixup_pointer(), and initialize %gs
         * such that the canary can be accessed at %gs:40 for subsequent C calls.
         */
        movl $MSR_GS_BASE, %ecx
        movq initial_gs(%rip), %rax
        movq $_text, %rdx
        subq %rdx, %rax
        addq %rdi, %rax
        movq %rax, %rdx
        shrq $32, %rdx
        wrmsr

        pushq %rsi
        call startup_64_setup_env
        popq %rsi
@@ -141,9 +157,11 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
         * Retrieve the modifier (SME encryption mask if SME is active) to be
         * added to the initial pgdir entry that will be programmed into CR3.
         */
        pushq %rsi
        call __startup_secondary_64
        popq %rsi
#ifdef CONFIG_AMD_MEM_ENCRYPT
        movq sme_me_mask, %rax
#else
        xorq %rax, %rax
#endif

        /* Form the CR3 value being sure to include the CR3 modifier */
        addq $(init_top_pgt - __START_KERNEL_map), %rax

@@ -519,9 +519,6 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
                write_cr2(this_cpu_read(nmi_cr2));
        if (this_cpu_dec_return(nmi_state))
                goto nmi_restart;

        if (user_mode(regs))
                mds_user_clear_cpu_buffers();
}

#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)

@@ -224,7 +224,6 @@ void __init setup_per_cpu_areas(void)
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas. These

@@ -164,17 +164,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
                savesegment(fs, sel);
                if (sel == modified_sel)
                        loadsegment(fs, sel);
#endif

                savesegment(gs, sel);
                if (sel == modified_sel)
                        load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
                savesegment(gs, sel);
                if (sel == modified_sel)
                        loadsegment(gs, sel);
#endif
        } else {
#ifdef CONFIG_X86_64
                if (p->thread.fsindex == modified_sel)

@@ -76,10 +76,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
        BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

@@ -1024,20 +1024,22 @@ int svm_register_enc_region(struct kvm *kvm,
                goto e_free;
        }

        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Lets make sure caches are
         * flushed to ensure that guest data gets written into memory with
         * correct C-bit. Note, this must be done before dropping kvm->lock,
         * as region and its array of pages can be freed by a different task
         * once kvm->lock is released.
         */
        sev_clflush_pages(region->pages, region->npages);

        region->uaddr = range->addr;
        region->size = range->size;

        list_add_tail(&region->list, &sev->regions_list);
        mutex_unlock(&kvm->lock);

        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Lets make sure caches are
         * flushed to ensure that guest data gets written into memory with
         * correct C-bit.
         */
        sev_clflush_pages(region->pages, region->npages);

        return ret;

e_free:

@@ -2,7 +2,10 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H

#define VMX_RUN_VMRESUME (1 << 0)
#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
#define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1

#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)

#endif /* __KVM_X86_VMX_RUN_FLAGS_H */

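A quick sketch of why the flags above are now defined as a shift/BIT() pair (names below are illustrative, not from the patch): the mask form suits C-side tests, while the raw bit number suits assembler bit-test instructions such as the bt used in the next hunk.

        #include <linux/bits.h>

        #define EXAMPLE_FLAG_SHIFT 0
        #define EXAMPLE_FLAG       BIT(EXAMPLE_FLAG_SHIFT)  /* == 1UL << 0 */

        /* C side:   if (flags & EXAMPLE_FLAG) ...                   */
        /* asm side: bt $EXAMPLE_FLAG_SHIFT, %bx  -- sets CF, not ZF */
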
@@ -77,7 +77,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
        mov (%_ASM_SP), %_ASM_AX

        /* Check if vmlaunch or vmresume is needed */
        testb $VMX_RUN_VMRESUME, %bl
        bt $VMX_RUN_VMRESUME_SHIFT, %bx

        /* Load guest registers. Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -99,8 +99,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
        /* Load guest RAX. This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX

        /* Check EFLAGS.ZF from 'testb' above */
        jz .Lvmlaunch
        /* Clobbers EFLAGS.ZF */
        CLEAR_CPU_BUFFERS

        /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
        jnc .Lvmlaunch

        /*
         * After a successful VMRESUME/VMLAUNCH, control flow "magically"

@@ -397,7 +397,8 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)

static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
        vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
        vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
                                vmx_fb_clear_ctrl_available;

        /*
         * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
@@ -6792,11 +6793,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
        guest_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);

        /* L1D Flush includes CPU buffer clear to mitigate MDS */
        /*
         * L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
         * mitigation for MDS is done late in VMentry and is still
         * executed in spite of L1D Flush. This is because an extra VERW
         * should not matter much after the big hammer L1D Flush.
         */
        if (static_branch_unlikely(&vmx_l1d_should_flush))
                vmx_l1d_flush(vcpu);
        else if (static_branch_unlikely(&mds_user_clear))
                mds_clear_cpu_buffers();
        else if (static_branch_unlikely(&mmio_stale_data_clear) &&
                 kvm_arch_has_assigned_device(vcpu->kvm))
                mds_clear_cpu_buffers();

@@ -1389,7 +1389,8 @@ static unsigned int num_msr_based_features;
         ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
         ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
         ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
         ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
         ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
         ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)

static u64 kvm_get_arch_capabilities(void)
{
@@ -1426,6 +1427,8 @@ static u64 kvm_get_arch_capabilities(void)
                data |= ARCH_CAP_SSB_NO;
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                data |= ARCH_CAP_MDS_NO;
        if (!boot_cpu_has_bug(X86_BUG_RFDS))
                data |= ARCH_CAP_RFDS_NO;

        if (!boot_cpu_has(X86_FEATURE_RTM)) {
                /*

@@ -404,10 +404,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
        case INAT_SEG_REG_FS:
                return (unsigned short)(regs->fs & 0xffff);
        case INAT_SEG_REG_GS:
                /*
                 * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS.
                 * The macro below takes care of both cases.
                 */
                return get_user_gs(regs);
        case INAT_SEG_REG_IGNORE:
        default:

@@ -108,6 +108,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
        ret
        int3
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
#endif

SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
@@ -249,9 +250,7 @@ SYM_CODE_START(srso_return_thunk)
SYM_CODE_END(srso_return_thunk)

SYM_FUNC_START(entry_untrain_ret)
        ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
                      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
                      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
        ALTERNATIVE "jmp retbleed_untrain_ret", "jmp srso_untrain_ret", X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)

@@ -259,6 +258,7 @@ SYM_CODE_START(__x86_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        ANNOTATE_UNRET_SAFE
        ANNOTATE_NOENDBR
        ret
        int3
SYM_CODE_END(__x86_return_thunk)

@@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
        for (; addr < end; addr = next) {
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                bool use_gbpage;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (next > end)
                        next = end;

                /* if this is already a gbpage, this portion is already mapped */
                if (pud_large(*pud))
                        continue;

                /* Is using a gbpage allowed? */
                use_gbpage = info->direct_gbpages;

                /* Don't use gbpage if it maps more than the requested region. */
                /* at the beginning: */
                use_gbpage &= ((addr & ~PUD_MASK) == 0);
                /* ... or at the end: */
                use_gbpage &= ((next & ~PUD_MASK) == 0);

                /* Never overwrite existing mappings */
                use_gbpage &= !pud_present(*pud);

                if (use_gbpage) {
                if (info->direct_gbpages) {
                        pud_t pudval;

                        if (pud_present(*pud))
                                continue;

                        addr &= PUD_MASK;
                        pudval = __pud((addr - info->offset) | info->page_flag);
                        set_pud(pud, pudval);
                        continue;

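For context, the alignment tests removed above check whether an address sits on a 1 GiB (PUD) boundary; a minimal sketch of the idiom (helper name is hypothetical):

        /* PUD_MASK clears the offset bits within a 1 GiB region on x86_64,
         * so a value is PUD-aligned exactly when no offset bits are set: */
        static inline bool example_pud_aligned(unsigned long x)
        {
                return (x & ~PUD_MASK) == 0;
        }
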
@@ -56,6 +56,7 @@

#include "memtype.h"
#include "../mm_internal.h"
#include "../../../mm/internal.h" /* is_cow_mapping() */

#undef pr_fmt
#define pr_fmt(fmt) "" fmt
@@ -987,6 +988,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
        memtype_free(paddr, paddr + size);
}

static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
                pgprot_t *pgprot)
{
        unsigned long prot;

        VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));

        /*
         * We need the starting PFN and cachemode used for track_pfn_remap()
         * that covered the whole VMA. For most mappings, we can obtain that
         * information from the page tables. For COW mappings, we might now
         * suddenly have anon folios mapped and follow_phys() will fail.
         *
         * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
         * detect the PFN. If we need the cachemode as well, we're out of luck
         * for now and have to fail fork().
         */
        if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
                if (pgprot)
                        *pgprot = __pgprot(prot);
                return 0;
        }
        if (is_cow_mapping(vma->vm_flags)) {
                if (pgprot)
                        return -EINVAL;
                *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
                return 0;
        }
        WARN_ON_ONCE(1);
        return -EINVAL;
}

/*
 * track_pfn_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
@@ -997,20 +1030,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
int track_pfn_copy(struct vm_area_struct *vma)
{
        resource_size_t paddr;
        unsigned long prot;
        unsigned long vma_size = vma->vm_end - vma->vm_start;
        pgprot_t pgprot;

        if (vma->vm_flags & VM_PAT) {
                /*
                 * reserve the whole chunk covered by vma. We need the
                 * starting address and protection from pte.
                 */
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                if (get_pat_info(vma, &paddr, &pgprot))
                        return -EINVAL;
                }
                pgprot = __pgprot(prot);
                /* reserve the whole chunk covered by vma. */
                return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
        }

@@ -1085,7 +1111,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                 unsigned long size)
{
        resource_size_t paddr;
        unsigned long prot;

        if (vma && !(vma->vm_flags & VM_PAT))
                return;
@@ -1093,11 +1118,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
        /* free the chunk starting from pfn or the whole chunk */
        paddr = (resource_size_t)pfn << PAGE_SHIFT;
        if (!paddr && !size) {
                if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
                        WARN_ON_ONCE(1);
                if (get_pat_info(vma, &paddr, NULL))
                        return;
                }

                size = vma->vm_end - vma->vm_start;
        }
        free_pfn_range(paddr, size);

@@ -45,10 +45,8 @@

#define PVH_GDT_ENTRY_CS 1
#define PVH_GDT_ENTRY_DS 2
#define PVH_GDT_ENTRY_CANARY 3
#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)

SYM_CODE_START_LOCAL(pvh_start_xen)
        cld
@@ -109,17 +107,6 @@ SYM_CODE_START_LOCAL(pvh_start_xen)

#else /* CONFIG_X86_64 */

        /* Set base address in stack canary descriptor. */
        movl $_pa(gdt_start),%eax
        movl $_pa(canary),%ecx
        movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
        shrl $16, %ecx
        movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
        movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)

        mov $PVH_CANARY_SEL,%eax
        mov %eax,%gs

        call mk_early_pgtbl_32

        mov $_pa(initial_page_table), %eax
@@ -163,7 +150,6 @@ SYM_DATA_START_LOCAL(gdt_start)
        .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
#endif
        .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
        .quad GDT_ENTRY(0x4090, 0, 0x18)    /* PVH_CANARY_SEL */
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)

        .balign 16

@@ -101,11 +101,8 @@ static void __save_processor_state(struct saved_context *ctxt)
        /*
         * segment registers
         */
#ifdef CONFIG_X86_32_LAZY_GS
        savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
        savesegment(gs, ctxt->gs);
        savesegment(fs, ctxt->fs);
        savesegment(ds, ctxt->ds);
        savesegment(es, ctxt->es);
@@ -234,7 +231,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
        loadsegment(fs, __KERNEL_PERCPU);
        loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

        /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
@@ -257,7 +253,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
         */
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
#else
        loadsegment(gs, ctxt->gs);
#endif

@@ -1193,7 +1193,6 @@ static void __init xen_setup_gdt(int cpu)
        pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
        pv_ops.cpu.load_gdt = xen_load_gdt_boot;

        setup_stack_canary_segment(cpu);
        switch_to_new_gdt(cpu);

        pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;

@@ -60,6 +60,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->zoned = BLK_ZONED_NONE;
        lim->zone_write_granularity = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

@@ -353,6 +354,28 @@ void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q: the request queue for the zoned device
 * @size: the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size)
{
        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return;

        q->limits.zone_write_granularity = size;

        if (q->limits.zone_write_granularity < q->limits.logical_block_size)
                q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
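As a usage sketch (illustrative, not from this merge): a zoned driver would typically call the new helper while configuring its queue, for example using the device's physical block size as the granularity when larger aligned writes are required in sequential zones:

        static void example_setup_zoned_queue(struct request_queue *q)
        {
                /* the default set by blk_queue_set_zoned() is the logical
                 * block size; raise it if the device needs larger writes */
                blk_queue_zone_write_granularity(q, queue_physical_block_size(q));
        }
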
@@ -630,7 +653,13 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                        t->discard_granularity;
        }

        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
        if (!t->zoned) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
@@ -846,6 +875,8 @@ EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
        struct request_queue *q = disk->queue;

        switch (model) {
        case BLK_ZONED_HM:
                /*
@@ -874,7 +905,15 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
                break;
        }

        disk->queue->limits.zoned = model;
        q->limits.zoned = model;
        if (model != BLK_ZONED_NONE) {
                /*
                 * Set the zone write granularity to the device logical block
                 * size by default. The driver can change this value if needed.
                 */
                blk_queue_zone_write_granularity(q,
                                                 queue_logical_block_size(q));
        }
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);

@@ -28,7 +28,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat)
/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
        if (!src->nr_samples)
        if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
                return;

        dst->min = min(dst->min, src->min);

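A short note on the replacement test above: for unsigned counters, "a + b <= a" is true exactly when b is zero or the addition wraps, so one check covers both the old empty-source case and overflow. A minimal sketch (helper name is hypothetical):

        static bool example_add_is_noop_or_wrap(u32 a, u32 b)
        {
                return a + b <= a;  /* true if b == 0, or if a + b overflowed */
        }
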
@@ -219,6 +219,12 @@ static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
                                                 char *page)
{
        return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
        unsigned long long max_sectors = q->limits.max_zone_append_sectors;
@@ -585,6 +591,7 @@ QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
@@ -639,6 +646,7 @@ static struct attribute *queue_attrs[] = {
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_zone_append_max_entry.attr,
        &queue_zone_write_granularity_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,

--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -17,7 +17,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
 			  struct blkpg_partition __user *upart, int op)
 {
 	struct blkpg_partition p;
-	long long start, length;
+	sector_t start, length;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
@@ -32,6 +32,12 @@ static int blkpg_do_ioctl(struct block_device *bdev,
 	if (op == BLKPG_DEL_PARTITION)
 		return bdev_del_partition(bdev, p.pno);
 
+	if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
+		return -EINVAL;
+	/* Check that the partition is aligned to the block size */
+	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
+		return -EINVAL;
+
 	start = p.start >> SECTOR_SHIFT;
 	length = p.length >> SECTOR_SHIFT;
 
@@ -46,9 +52,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
 
 	switch (op) {
 	case BLKPG_ADD_PARTITION:
-		/* check if partition is aligned to blocksize */
-		if (p.start & (bdev_logical_block_size(bdev) - 1))
-			return -EINVAL;
 		return bdev_add_partition(bdev, p.pno, start, length);
 	case BLKPG_RESIZE_PARTITION:
 		return bdev_resize_partition(bdev, p.pno, start, length);
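The replacement check validates both fields before the shift to sectors and relies on a common trick: for a power-of-two block size, IS_ALIGNED(a | b, bs) holds exactly when a and b are each aligned, because OR preserves every low-order bit set in either value. A userspace demonstration with a simplified IS_ALIGNED (the kernel macro in linux/kernel.h is equivalent for these types):

    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            unsigned long long start = 1024, length = 4096, bs = 512;

            printf("%d\n", IS_ALIGNED(start | length, bs)); /* 1: both aligned */
            length = 4100; /* not a multiple of 512 */
            printf("%d\n", IS_ALIGNED(start | length, bs)); /* 0 */
            return 0;
    }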
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -208,8 +208,10 @@ void spk_do_flush(void)
 	wake_up_process(speakup_task);
 }
 
-void synth_write(const char *buf, size_t count)
+void synth_write(const char *_buf, size_t count)
 {
+	const unsigned char *buf = (const unsigned char *) _buf;
+
 	while (count--)
 		synth_buffer_add(*buf++);
 	synth_start();
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -550,8 +550,12 @@ acpi_db_walk_for_fields(acpi_handle obj_handle,
 	ACPI_FREE(buffer.pointer);
 
 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
-
+	status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could Not evaluate object %p\n",
+			       obj_handle);
+		return (AE_OK);
+	}
 	/*
 	 * Since this is a field unit, surround the output in braces
 	 */
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
 		},
 	},
-	/*
-	 * ASUS B1400CEAE hangs on resume from suspend (see
-	 * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
-	 */
-	{
-	.callback = init_default_s3,
-	.ident = "ASUS B1400CEAE",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-		DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
-		},
-	},
 	{},
 };
 
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -662,11 +662,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 					 struct ahci_host_priv *hpriv)
 {
-	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
-		dev_info(&pdev->dev, "ASM1166 has only six ports\n");
-		hpriv->saved_port_map = 0x3f;
-	}
-
 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
 		dev_info(&pdev->dev, "JMB361 has only one port\n");
 		hpriv->force_port_map = 1;
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -783,37 +783,6 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 };
 
-static const struct pci_device_id mv_pci_tbl[] = {
-	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
-	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
-	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
-	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
-	/* RocketRAID 1720/174x have different identifiers */
-	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
-	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
-	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
-
-	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
-	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
-	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
-	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
-	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
-
-	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
-
-	/* Adaptec 1430SA */
-	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
-
-	/* Marvell 7042 support */
-	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
-
-	/* Highpoint RocketRAID PCIe series */
-	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
-	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
-
-	{ }			/* terminate list */
-};
-
 static const struct mv_hw_ops mv5xxx_ops = {
 	.phy_errata		= mv5_phy_errata,
 	.enable_leds		= mv5_enable_leds,
@@ -4307,6 +4276,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
 			   const struct pci_device_id *ent);
 static int mv_pci_device_resume(struct pci_dev *pdev);
 #endif
 
+static const struct pci_device_id mv_pci_tbl[] = {
+	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
+	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
+	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
+	/* RocketRAID 1720/174x have different identifiers */
+	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
+	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
+
+	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
+	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
+	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
+	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
+
+	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
+
+	/* Adaptec 1430SA */
+	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
+
+	/* Marvell 7042 support */
+	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
+
+	/* Highpoint RocketRAID PCIe series */
+	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
+	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
+
+	{ }			/* terminate list */
+};
 
 static struct pci_driver mv_pci_driver = {
 	.name			= DRV_NAME,
@@ -4319,6 +4318,7 @@ static struct pci_driver mv_pci_driver = {
 #endif
 
 };
+MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 
 /**
  * mv_print_info - Dump key info to kernel log for perusal.
@@ -4491,7 +4491,6 @@ static void __exit mv_exit(void)
 MODULE_AUTHOR("Brett Russ");
 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:" DRV_NAME);
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1004,8 +1004,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
 
 		offset -= (idx * window_size);
 		idx++;
-		dist = ((long) (window_size - (offset + size))) >= 0 ? size :
-			(long) (window_size - offset);
+		dist = min(size, window_size - offset);
 		memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
 
 		psource += dist;
@@ -1053,8 +1052,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
 		readl(mmio + PDC_DIMM_WINDOW_CTLR);
 		offset -= (idx * window_size);
 		idx++;
-		dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
-			(long) (window_size - offset);
+		dist = min(size, window_size - offset);
 		memcpy_toio(dimm_mmio + offset / 4, psource, dist);
 		writel(0x01, mmio + PDC_GENERAL_CTLR);
 		readl(mmio + PDC_GENERAL_CTLR);
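Both hunks replace a sign-trick ternary with min(): the old expression yielded size when the remaining DIMM window could hold the whole request and window_size - offset otherwise, which is exactly the smaller of the two. A standalone check (userspace C with a simplified min(); the kernel macro additionally type-checks its arguments):

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned long window_size = 0x800, offset = 0x600, size = 0x400;
            unsigned long old, new;

            old = ((long)(window_size - (offset + size))) >= 0 ?
                    size : (long)(window_size - offset);
            new = min(size, window_size - offset);
            printf("old=%#lx new=%#lx\n", old, new); /* both print 0x200 */
            return 0;
    }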
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -49,6 +49,7 @@ early_param("sysfs.deprecated", sysfs_deprecated_setup);
 static LIST_HEAD(deferred_sync);
 static unsigned int defer_sync_state_count = 1;
 static DEFINE_MUTEX(fwnode_link_lock);
+static struct workqueue_struct *device_link_wq;
 static bool fw_devlink_is_permissive(void);
 
 /**
@@ -486,12 +487,26 @@ static void devlink_dev_release(struct device *dev)
 	/*
 	 * It may take a while to complete this work because of the SRCU
 	 * synchronization in device_link_release_fn() and if the consumer or
-	 * supplier devices get deleted when it runs, so put it into the "long"
-	 * workqueue.
+	 * supplier devices get deleted when it runs, so put it into the
+	 * dedicated workqueue.
 	 */
-	queue_work(system_long_wq, &link->rm_work);
+	queue_work(device_link_wq, &link->rm_work);
 }
 
+/**
+ * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
+ */
+void device_link_wait_removal(void)
+{
+	/*
+	 * devlink removal jobs are queued in the dedicated work queue.
+	 * To be sure that all removal jobs are terminated, ensure that any
+	 * scheduled work has run to completion.
+	 */
+	flush_workqueue(device_link_wq);
+}
+EXPORT_SYMBOL_GPL(device_link_wait_removal);
+
 static struct class devlink_class = {
 	.name = "devlink",
 	.owner = THIS_MODULE,
@@ -3670,9 +3685,14 @@ int __init devices_init(void)
 	sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
 	if (!sysfs_dev_char_kobj)
 		goto char_kobj_err;
+	device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+	if (!device_link_wq)
+		goto wq_err;
 
 	return 0;
 
+ wq_err:
+	kobject_put(sysfs_dev_char_kobj);
  char_kobj_err:
 	kobject_put(sysfs_dev_block_kobj);
 block_kobj_err:
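The motivation for the dedicated queue: removal jobs used to share system_long_wq, so there was no way to wait for "all devlink removals" without also waiting on unrelated work. A private workqueue makes the wait exact. A condensed sketch of the pattern (names shortened, assuming the usual workqueue API; not the full driver-core code):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *removal_wq;  /* stand-in for device_link_wq */

    static int removal_wq_init(void)
    {
            removal_wq = alloc_workqueue("removal_wq", 0, 0);
            return removal_wq ? 0 : -ENOMEM;
    }

    static void removal_defer(struct work_struct *w)
    {
            queue_work(removal_wq, w);     /* was: queue_work(system_long_wq, w) */
    }

    static void removal_wait_all(void)
    {
            flush_workqueue(removal_wq);   /* returns once every queued job ran */
    }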
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -591,6 +591,12 @@ ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
 	return sysfs_emit(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_reg_file_data_sampling(struct device *dev,
+					       struct device_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -604,6 +610,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
 static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
 static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -619,6 +626,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_retbleed.attr,
 	&dev_attr_gather_data_sampling.attr,
 	&dev_attr_spec_rstack_overflow.attr,
+	&dev_attr_reg_file_data_sampling.attr,
 	NULL
 };
 
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -365,8 +365,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
 		return;
 
 	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
-	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+	    wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
 		enable_irq(wirq->irq);
+		wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+	}
 }
 
 /**
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -350,7 +350,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
 		return PTR_ERR(skb);
 	}
 
-	if (skb->len != sizeof(*ver)) {
+	if (!skb || skb->len != sizeof(*ver)) {
 		bt_dev_err(hdev, "Intel version event size mismatch");
 		kfree_skb(skb);
 		return -EILSEQ;
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -1557,6 +1557,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {
 
 static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
 	F(24000000, P_XO, 1, 0, 0),
+	{ }
 };
 
 static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
@@ -1737,6 +1738,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
 	F(160000000, P_GPLL0, 5, 0, 0),
 	F(216000000, P_GPLL6, 5, 0, 0),
 	F(308570000, P_GPLL6, 3.5, 0, 0),
+	{ }
 };
 
 static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
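These freq_tbl arrays (here and in the three qcom clk files that follow) are walked until an entry with a zero frequency is reached, so a missing { } terminator sends the lookup past the end of the array. A simplified userspace model of that walk, showing why the sentinel is load-bearing:

    #include <stdio.h>

    struct freq_tbl { unsigned long freq; };

    static const struct freq_tbl ftbl[] = {
            { 19200000 },
            { 160000000 },
            { }     /* the sentinel these fixes add: stops the walk */
    };

    int main(void)
    {
            for (const struct freq_tbl *f = ftbl; f->freq; f++)
                    printf("%lu Hz\n", f->freq);
            return 0;
    }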
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -972,6 +972,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
 
 static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
 	F(19200000, P_XO, 1, 0, 0),
+	{ }
 };
 
 static struct clk_rcg2 pcie0_aux_clk_src = {
@@ -1077,6 +1078,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
 	F(19200000, P_XO, 1, 0, 0),
 	F(160000000, P_GPLL0, 5, 0, 0),
 	F(308570000, P_GPLL6, 3.5, 0, 0),
+	{ }
 };
 
 static struct clk_rcg2 sdcc1_ice_core_clk_src = {
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3647,3 +3647,4 @@ module_exit(gcc_sdm845_exit);
 MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:gcc-sdm845");
+MODULE_SOFTDEP("pre: rpmhpd");
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -333,6 +333,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
 	F(333430000, P_MMPLL1, 3.5, 0, 0),
 	F(400000000, P_MMPLL0, 2, 0, 0),
 	F(466800000, P_MMPLL1, 2.5, 0, 0),
+	{ }
 };
 
 static struct clk_rcg2 mmss_axi_clk_src = {
@@ -357,6 +358,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
 	F(150000000, P_GPLL0, 4, 0, 0),
 	F(228570000, P_MMPLL0, 3.5, 0, 0),
 	F(320000000, P_MMPLL0, 2.5, 0, 0),
+	{ }
 };
 
 static struct clk_rcg2 ocmemnoc_clk_src = {
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -283,6 +283,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
 	F(291750000, P_MMPLL1, 4, 0, 0),
 	F(400000000, P_MMPLL0, 2, 0, 0),
 	F(466800000, P_MMPLL1, 2.5, 0, 0),
+	{ }
 };
 
 static struct clk_rcg2 mmss_axi_clk_src = {
@@ -307,6 +308,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
 	F(150000000, P_GPLL0, 4, 0, 0),
 	F(291750000, P_MMPLL1, 4, 0, 0),
 	F(400000000, P_MMPLL0, 2, 0, 0),
+	{ }
 };
 
 static struct clk_rcg2 ocmemnoc_clk_src = {
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -486,7 +486,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
 	if (!policy)
 		return 0;
 	priv = policy->driver_data;
-
 	cpufreq_cpu_put(policy);
 
 	return brcm_avs_get_frequency(priv->base);
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -251,7 +251,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
 	if (!priv)
 		return -ENOMEM;
 
-	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	priv->cpu_dev = cpu_dev;
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -95,6 +95,8 @@ static void adf_device_reset_worker(struct work_struct *work)
 	if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
 		/* The device hanged and we can't restart it so stop here */
 		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
-		kfree(reset_data);
+		if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+		    completion_done(&reset_data->compl))
+			kfree(reset_data);
 		WARN(1, "QAT: device restart failed. Device is unusable\n");
 		return;
@@ -102,11 +104,19 @@ static void adf_device_reset_worker(struct work_struct *work)
 	adf_dev_restarted_notify(accel_dev);
 	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 
-	/* The dev is back alive. Notify the caller if in sync mode */
-	if (reset_data->mode == ADF_DEV_RESET_SYNC)
-		complete(&reset_data->compl);
-	else
+	/*
+	 * The dev is back alive. Notify the caller if in sync mode
+	 *
+	 * If device restart will take a more time than expected,
+	 * the schedule_reset() function can timeout and exit. This can be
+	 * detected by calling the completion_done() function. In this case
+	 * the reset_data structure needs to be freed here.
+	 */
+	if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+	    completion_done(&reset_data->compl))
 		kfree(reset_data);
+	else
+		complete(&reset_data->compl);
 }
 
 static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
@@ -139,8 +149,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
 			dev_err(&GET_DEV(accel_dev),
 				"Reset device timeout expired\n");
 			ret = -EFAULT;
+		} else {
+			kfree(reset_data);
 		}
-		kfree(reset_data);
 		return ret;
 	}
 	return 0;
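The rule the fix establishes is that exactly one side frees reset_data: the worker frees it when nobody can be waiting (an async reset, or completion_done() shows the sync waiter already timed out and left), and otherwise signals the waiter, who frees it after a successful wait. A condensed view of the sync caller's side of that handshake in adf_dev_aer_schedule_reset() (fragment, not the full function):

    if (!wait_for_completion_timeout(&reset_data->compl, wait_timeout)) {
            dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n");
            ret = -EFAULT;        /* timed out: the worker now owns and
                                   * will free reset_data */
    } else {
            kfree(reset_data);    /* worker called complete(): caller frees */
    }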
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -415,7 +415,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 		void *data, bool duplicates, struct list_head *head)
 {
 	const struct efivar_operations *ops;
-	unsigned long variable_name_size = 1024;
+	unsigned long variable_name_size = 512;
 	efi_char16_t *variable_name;
 	efi_status_t status;
 	efi_guid_t vendor_guid;
@@ -438,12 +438,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 	}
 
 	/*
-	 * Per EFI spec, the maximum storage allocated for both
-	 * the variable name and variable data is 1024 bytes.
+	 * A small set of old UEFI implementations reject sizes
+	 * above a certain threshold, the lowest seen in the wild
+	 * is 512.
 	 */
 
 	do {
-		variable_name_size = 1024;
+		variable_name_size = 512;
 
 		status = ops->get_next_variable(&variable_name_size,
 						variable_name,
@@ -491,9 +492,13 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 			break;
 		case EFI_NOT_FOUND:
 			break;
+		case EFI_BUFFER_TOO_SMALL:
+			pr_warn("efivars: Variable name size exceeds maximum (%lu > 512)\n",
+				variable_name_size);
+			status = EFI_NOT_FOUND;
+			break;
 		default:
-			printk(KERN_WARNING "efivars: get_next_variable: status=%lx\n",
-			       status);
+			pr_warn("efivars: get_next_variable: status=%lx\n", status);
 			status = EFI_NOT_FOUND;
 			break;
 		}
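GetNextVariableName() takes the buffer size as an in/out parameter, which is why the loop resets it before every firmware call; with the 512-byte request that old firmware tolerates, a genuinely longer name now ends enumeration with a warning instead of an error loop. The shape of the loop, heavily simplified (fragment; the real declarations are in efivar_init() above):

    for (;;) {
            variable_name_size = 512;       /* full buffer, every iteration */
            status = ops->get_next_variable(&variable_name_size,
                                            variable_name, &vendor_guid);
            if (status == EFI_NOT_FOUND)
                    break;                  /* enumeration complete */
            if (status == EFI_BUFFER_TOO_SMALL)
                    break;                  /* warn and stop: oversized name */
            /* ... process one variable ... */
    }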
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -959,8 +959,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 	 * nodes, but not more than args->num_of_nodes as that is
 	 * the amount of memory allocated by user
 	 */
-	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
-			args->num_of_nodes), GFP_KERNEL);
+	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+		     GFP_KERNEL);
 	if (!pa)
 		return -ENOMEM;
 
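kcalloc() checks the n * size multiplication before allocating, whereas the open-coded kzalloc(n * size) silently wraps for hostile n (num_of_nodes comes from userspace here), yielding an undersized buffer. A userspace model of the difference (simplified version of the check kcalloc performs):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *calloc_checked(size_t n, size_t size)
    {
            if (size != 0 && n > SIZE_MAX / size)
                    return NULL;            /* n * size would overflow */
            return calloc(n, size);
    }

    int main(void)
    {
            size_t n = SIZE_MAX / 4 + 1, size = 4;

            printf("wrapped product: %zu bytes\n", n * size); /* prints 0 */
            printf("checked alloc:   %p\n", calloc_checked(n, size)); /* NULL */
            return 0;
    }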
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -641,10 +641,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
 	if (pipe_ctx == NULL)
 		return;
 
-	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
 		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
 				pipe_ctx->stream_res.stream_enc,
 				enable);
+
+		/* Wait for two frame to make sure AV mute is sent out */
+		if (enable) {
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+		}
+	}
 }
 
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -405,6 +405,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 	hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
 	memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
 
+	if (!display)
+		return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
 	hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
 
 	if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
 		unsigned int length);
 
 void mod_stats_update_flip(struct mod_stats *mod_stats,
-		unsigned long timestamp_in_ns);
+		unsigned long long timestamp_in_ns);
 
 void mod_stats_update_vupdate(struct mod_stats *mod_stats,
-		unsigned long timestamp_in_ns);
+		unsigned long long timestamp_in_ns);
 
 void mod_stats_update_freesync(struct mod_stats *mod_stats,
 		unsigned int v_total_min,
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -207,19 +207,24 @@ EXPORT_SYMBOL(drm_panel_disable);
  * The modes probed from the panel are automatically added to the connector
  * that the panel is attached to.
  *
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
 */
 int drm_panel_get_modes(struct drm_panel *panel,
			struct drm_connector *connector)
 {
 	if (!panel)
-		return -EINVAL;
+		return 0;
 
-	if (panel->funcs && panel->funcs->get_modes)
-		return panel->funcs->get_modes(panel, connector);
+	if (panel->funcs && panel->funcs->get_modes) {
+		int num;
 
-	return -EOPNOTSUPP;
+		num = panel->funcs->get_modes(panel, connector);
+		if (num > 0)
+			return num;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_panel_get_modes);
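After this change the function's contract is simply "a positive count or 0": a missing panel, a missing get_modes hook, or a hook that reports zero or negative modes all normalize to 0, so callers need no errno handling. Hypothetical caller fragment (fall_back_to_fixed_mode() is an invented name for whatever fallback a caller implements):

    int num = drm_panel_get_modes(panel, connector);

    if (num == 0)   /* no panel, no hook, or nothing probed */
            num = fall_back_to_fixed_mode(connector);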
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -511,7 +511,7 @@ static struct drm_driver etnaviv_drm_driver = {
 	.desc               = "etnaviv DRM",
 	.date               = "20151214",
 	.major              = 1,
-	.minor              = 3,
+	.minor              = 4,
 };
 
 /*
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -73,6 +73,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
 bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_chip_identity *ident = &gpu->identity;
+	const u32 product_id = ident->product_id;
+	const u32 customer_id = ident->customer_id;
+	const u32 eco_id = ident->eco_id;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
@@ -86,6 +89,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
 		     etnaviv_chip_identities[i].eco_id == ~0U)) {
 			memcpy(ident, &etnaviv_chip_identities[i],
 			       sizeof(*ident));
+
+			/* Restore some id values as ~0U aka 'don't care' might been used. */
+			ident->product_id = product_id;
+			ident->customer_id = customer_id;
+			ident->eco_id = eco_id;
+
 			return true;
 		}
 	}
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -315,14 +315,14 @@ static int vidi_get_modes(struct drm_connector *connector)
 	 */
 	if (!ctx->raw_edid) {
 		DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
-		return -EFAULT;
+		return 0;
 	}
 
 	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
 	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
 	if (!edid) {
 		DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
-		return -ENOMEM;
+		return 0;
 	}
 
 	drm_connector_update_edid_property(connector, edid);