This is the 5.4.19 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl5Cn6wACgkQONu9yGCS
aT789BAAkpzYCCHEL2aqDpnZQdu1kVua2nywEJCY0WqSM1lWLeU1Lk9EvS6uu99B
nHnIgoAGXR1zQy9rlhpKKt62LvCCM94QWlQRDYYeJxbFPn1ogT2/0vmwN7rqNz4M
Jdszd6gfNKtB3zpZZLJ0KXG8q6YRp5kXOHEzOXNjcVsfKRuNTWWIBV0dMmkCzduQ
Y5e62+d1FnnRFj28R7wjJfXiZSRnIGcMHohcQGXsWZsh2rktYOYsL6G37I9lCBwx
RO7/+qVOT+BImqB5fIxB98JOzOlo6uEVqPgXjMHAAZUzzA4KpOkDBn55m5hA9axf
EG67Ft4vZJc6Q3FTtHdSZZ/x6TBAJ2DUzatpKhCTDB3vlWJ6a+CsTFq3dXj4+bFr
hFuyi0u91VeudmWR8IH5Er8QaNaOq8m72XAwK22fZptZz0ZHl+Bf1QZyEY0L0P2Q
DpT/kmZVgSSDusvMtJOwI8Vr4Ibb8o46kFTQN+PCSs0pbPchEJmInHz0mIypK89N
4YIjcDZZu3WUS13pEsgNAi2FEpwZdn32LYxZg8xTYBtovzuvT1pJUEppiVSMXgKS
8vF6oCAd7pX9Fal5fYklA7gyQENnHBFI+LE+bHwMR/qwreH/3wBTLnhRPsGOxyZI
oj57YDdxZCAwEfXGoWA3Le+60lj6bGuRfmCc4VkodaOxMLb1WrE=
=rUtE
-----END PGP SIGNATURE-----
Merge 5.4.19 into android-5.4
Changes in 5.4.19
sparc32: fix struct ipc64_perm type definition
bnxt_en: Move devlink_register before registering netdev
cls_rsvp: fix rsvp_policy
gtp: use __GFP_NOWARN to avoid memalloc warning
l2tp: Allow duplicate session creation with UDP
net: hsr: fix possible NULL deref in hsr_handle_frame()
net_sched: fix an OOB access in cls_tcindex
net: stmmac: Delete txtimer in suspend()
bnxt_en: Fix TC queue mapping.
rxrpc: Fix use-after-free in rxrpc_put_local()
rxrpc: Fix insufficient receive notification generation
rxrpc: Fix missing active use pinning of rxrpc_local object
rxrpc: Fix NULL pointer deref due to call->conn being cleared on disconnect
tcp: clear tp->total_retrans in tcp_disconnect()
tcp: clear tp->delivered in tcp_disconnect()
tcp: clear tp->data_segs{in|out} in tcp_disconnect()
tcp: clear tp->segs_{in|out} in tcp_disconnect()
ionic: fix rxq comp packet type mask
MAINTAINERS: correct entries for ISDN/mISDN section
netdevsim: fix stack-out-of-bounds in nsim_dev_debugfs_init()
bnxt_en: Fix logic that disables Bus Master during firmware reset.
media: uvcvideo: Avoid cyclic entity chains due to malformed USB descriptors
mfd: dln2: More sanity checking for endpoints
netfilter: ipset: fix suspicious RCU usage in find_set_and_id
ipc/msg.c: consolidate all xxxctl_down() functions
tracing/kprobes: Have uname use __get_str() in print_fmt
tracing: Fix sched switch start/stop refcount racy updates
rcu: Use *_ONCE() to protect lockless ->expmask accesses
rcu: Avoid data-race in rcu_gp_fqs_check_wake()
srcu: Apply *_ONCE() to ->srcu_last_gp_end
rcu: Use READ_ONCE() for ->expmask in rcu_read_unlock_special()
nvmet: Fix error print message at nvmet_install_queue function
nvmet: Fix controller use after free
Bluetooth: btusb: fix memory leak on fw
Bluetooth: btusb: Disable runtime suspend on Realtek devices
brcmfmac: Fix memory leak in brcmf_usbdev_qinit
usb: dwc3: gadget: Check END_TRANSFER completion
usb: dwc3: gadget: Delay starting transfer
usb: typec: tcpci: mask event interrupts when remove driver
objtool: Silence build output
usb: gadget: f_fs: set req->num_sgs as 0 for non-sg transfer
usb: gadget: legacy: set max_speed to super-speed
usb: gadget: f_ncm: Use atomic_t to track in-flight request
usb: gadget: f_ecm: Use atomic_t to track in-flight request
ALSA: usb-audio: Fix endianess in descriptor validation
ALSA: usb-audio: Annotate endianess in Scarlett gen2 quirk
ALSA: dummy: Fix PCM format loop in proc output
memcg: fix a crash in wb_workfn when a device disappears
mm/sparse.c: reset section's mem_map when fully deactivated
mmc: sdhci-pci: Make function amd_sdhci_reset static
utimes: Clamp the timestamps in notify_change()
mm/memory_hotplug: fix remove_memory() lockdep splat
mm: thp: don't need care deferred split queue in memcg charge move path
mm: move_pages: report the number of non-attempted pages
media/v4l2-core: set pages dirty upon releasing DMA buffers
media: v4l2-core: compat: ignore native command codes
media: v4l2-rect.h: fix v4l2_rect_map_inside() top/left adjustments
lib/test_kasan.c: fix memory leak in kmalloc_oob_krealloc_more()
irqdomain: Fix a memory leak in irq_domain_push_irq()
x86/cpu: Update cached HLE state on write to TSX_CTRL_CPUID_CLEAR
platform/x86: intel_scu_ipc: Fix interrupt support
ALSA: hda: Apply aligned MMIO access only conditionally
ALSA: hda: Add Clevo W65_67SB the power_save blacklist
ALSA: hda: Add JasperLake PCI ID and codec vid
arm64: acpi: fix DAIF manipulation with pNMI
KVM: arm64: Correct PSTATE on exception entry
KVM: arm/arm64: Correct CPSR on exception entry
KVM: arm/arm64: Correct AArch32 SPSR on exception entry
KVM: arm64: Only sign-extend MMIO up to register width
MIPS: syscalls: fix indentation of the 'SYSNR' message
MIPS: fix indentation of the 'RELOCS' message
MIPS: boot: fix typo in 'vmlinux.lzma.its' target
s390/mm: fix dynamic pagetable upgrade for hugetlbfs
powerpc/mmu_gather: enable RCU_TABLE_FREE even for !SMP case
powerpc/ptdump: Fix W+X verification
powerpc/xmon: don't access ASDR in VMs
powerpc/pseries: Advance pfn if section is not present in lmb_is_removable()
powerpc/32s: Fix bad_kuap_fault()
powerpc/32s: Fix CPU wake-up from sleep mode
tracing: Fix now invalid var_ref_vals assumption in trace action
PCI: tegra: Fix return value check of pm_runtime_get_sync()
PCI: keystone: Fix outbound region mapping
PCI: keystone: Fix link training retries initiation
PCI: keystone: Fix error handling when "num-viewport" DT property is not populated
mmc: spi: Toggle SPI polarity, do not hardcode it
ACPI: video: Do not export a non working backlight interface on MSI MS-7721 boards
ACPI / battery: Deal with design or full capacity being reported as -1
ACPI / battery: Use design-cap for capacity calculations if full-cap is not available
ACPI / battery: Deal better with neither design nor full capacity not being reported
alarmtimer: Unregister wakeup source when module get fails
fscrypt: don't print name of busy file when removing key
ubifs: don't trigger assertion on invalid no-key filename
ubifs: Fix wrong memory allocation
ubifs: Fix FS_IOC_SETFLAGS unexpectedly clearing encrypt flag
ubifs: Fix deadlock in concurrent bulk-read and writepage
mmc: sdhci-of-at91: fix memleak on clk_get failure
ASoC: SOF: core: free trace on errors
hv_balloon: Balloon up according to request page number
mfd: axp20x: Mark AXP20X_VBUS_IPSOUT_MGMT as volatile
nvmem: core: fix memory abort in cleanup path
crypto: api - Check spawn->alg under lock in crypto_drop_spawn
crypto: ccree - fix backlog memory leak
crypto: ccree - fix AEAD decrypt auth fail
crypto: ccree - fix pm wrongful error reporting
crypto: ccree - fix FDE descriptor sequence
crypto: ccree - fix PM race condition
padata: Remove broken queue flushing
fs: allow deduplication of eof block into the end of the destination file
scripts/find-unused-docs: Fix massive false positives
erofs: fix out-of-bound read for shifted uncompressed block
scsi: megaraid_sas: Do not initiate OCR if controller is not in ready state
scsi: qla2xxx: Fix mtcp dump collection failure
cpupower: Revert library ABI changes from commit ae2917093f
power: supply: axp20x_ac_power: Fix reporting online status
power: supply: ltc2941-battery-gauge: fix use-after-free
ovl: fix wrong WARN_ON() in ovl_cache_update_ino()
ovl: fix lseek overflow on 32bit
f2fs: choose hardlimit when softlimit is larger than hardlimit in f2fs_statfs_project()
f2fs: fix miscounted block limit in f2fs_statfs_project()
f2fs: code cleanup for f2fs_statfs_project()
f2fs: fix dcache lookup of !casefolded directories
f2fs: fix race conditions in ->d_compare() and ->d_hash()
PM: core: Fix handling of devices deleted during system-wide resume
cpufreq: Avoid creating excessively large stack frames
of: Add OF_DMA_DEFAULT_COHERENT & select it on powerpc
ARM: dma-api: fix max_pfn off-by-one error in __dma_supported()
dm zoned: support zone sizes smaller than 128MiB
dm space map common: fix to ensure new block isn't already in use
dm writecache: fix incorrect flush sequence when doing SSD mode commit
dm crypt: fix GFP flags passed to skcipher_request_alloc()
dm crypt: fix benbi IV constructor crash if used in authenticated mode
dm thin metadata: use pool locking at end of dm_pool_metadata_close
dm: fix potential for q->make_request_fn NULL pointer
scsi: qla2xxx: Fix stuck login session using prli_pend_timer
ASoC: SOF: Introduce state machine for FW boot
ASoC: SOF: core: release resources on errors in probe_continue
tracing: Annotate ftrace_graph_hash pointer with __rcu
tracing: Annotate ftrace_graph_notrace_hash pointer with __rcu
ftrace: Add comment to why rcu_dereference_sched() is open coded
ftrace: Protect ftrace_graph_hash with ftrace_sync
crypto: pcrypt - Avoid deadlock by using per-instance padata queues
btrfs: fix improper setting of scanned for range cyclic write cache pages
btrfs: Handle another split brain scenario with metadata uuid feature
riscv, bpf: Fix broken BPF tail calls
selftests/bpf: Fix perf_buffer test on systems w/ offline CPUs
bpf, devmap: Pass lockdep expression to RCU lists
libbpf: Fix realloc usage in bpf_core_find_cands
tc-testing: fix eBPF tests failure on linux fresh clones
samples/bpf: Don't try to remove user's homedir on clean
samples/bpf: Xdp_redirect_cpu fix missing tracepoint attach
selftests/bpf: Fix test_attach_probe
selftests/bpf: Skip perf hw events test if the setup disabled it
selftests: bpf: Use a temporary file in test_sockmap
selftests: bpf: Ignore FIN packets for reuseport tests
crypto: api - fix unexpectedly getting generic implementation
crypto: hisilicon - Use the offset fields in sqe to avoid need to split scatterlists
crypto: ccp - set max RSA modulus size for v3 platform devices as well
crypto: arm64/ghash-neon - bump priority to 150
crypto: pcrypt - Do not clear MAY_SLEEP flag in original request
crypto: atmel-aes - Fix counter overflow in CTR mode
crypto: api - Fix race condition in crypto_spawn_alg
crypto: picoxcell - adjust the position of tasklet_init and fix missed tasklet_kill
powerpc/futex: Fix incorrect user access blocking
scsi: qla2xxx: Fix unbound NVME response length
NFS: Fix memory leaks and corruption in readdir
NFS: Directory page cache pages need to be locked when read
nfsd: fix filecache lookup
jbd2_seq_info_next should increase position index
ext4: fix deadlock allocating crypto bounce page from mempool
ext4: fix race conditions in ->d_compare() and ->d_hash()
Btrfs: fix missing hole after hole punching and fsync when using NO_HOLES
Btrfs: make deduplication with range including the last block work
Btrfs: fix infinite loop during fsync after rename operations
btrfs: set trans->drity in btrfs_commit_transaction
btrfs: drop log root for dropped roots
Btrfs: fix race between adding and putting tree mod seq elements and nodes
btrfs: flush write bio if we loop in extent_write_cache_pages
btrfs: Correctly handle empty trees in find_first_clear_extent_bit
ARM: tegra: Enable PLLP bypass during Tegra124 LP1
iwlwifi: don't throw error when trying to remove IGTK
mwifiex: fix unbalanced locking in mwifiex_process_country_ie()
sunrpc: expiry_time should be seconds not timeval
gfs2: fix gfs2_find_jhead that returns uninitialized jhead with seq 0
gfs2: move setting current->backing_dev_info
gfs2: fix O_SYNC write handling
drm: atmel-hlcdc: use double rate for pixel clock only if supported
drm: atmel-hlcdc: enable clock before configuring timing engine
drm: atmel-hlcdc: prefer a lower pixel-clock than requested
drm/rect: Avoid division by zero
media: iguanair: fix endpoint sanity check
media: rc: ensure lirc is initialized before registering input device
tools/kvm_stat: Fix kvm_exit filter name
xen/balloon: Support xend-based toolstack take two
watchdog: fix UAF in reboot notifier handling in watchdog core code
bcache: add readahead cache policy options via sysfs interface
eventfd: track eventfd_signal() recursion depth
aio: prevent potential eventfd recursion on poll
KVM: x86: Refactor picdev_write() to prevent Spectre-v1/L1TF attacks
KVM: x86: Refactor prefix decoding to prevent Spectre-v1/L1TF attacks
KVM: x86: Protect pmu_intel.c from Spectre-v1/L1TF attacks
KVM: x86: Protect DR-based index computations from Spectre-v1/L1TF attacks
KVM: x86: Protect kvm_lapic_reg_write() from Spectre-v1/L1TF attacks
KVM: x86: Protect kvm_hv_msr_[get|set]_crash_data() from Spectre-v1/L1TF attacks
KVM: x86: Protect ioapic_write_indirect() from Spectre-v1/L1TF attacks
KVM: x86: Protect MSR-based index computations in pmu.h from Spectre-v1/L1TF attacks
KVM: x86: Protect ioapic_read_indirect() from Spectre-v1/L1TF attacks
KVM: x86: Protect MSR-based index computations from Spectre-v1/L1TF attacks in x86.c
KVM: x86: Protect x86_decode_insn from Spectre-v1/L1TF attacks
KVM: x86: Protect MSR-based index computations in fixed_msr_to_seg_unit() from Spectre-v1/L1TF attacks
KVM: x86: Fix potential put_fpu() w/o load_fpu() on MPX platform
KVM: PPC: Book3S HV: Uninit vCPU if vcore creation fails
KVM: PPC: Book3S PR: Free shared page if mmu initialization fails
kvm/svm: PKU not currently supported
x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
x86/kvm: Introduce kvm_(un)map_gfn()
x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed
x86/kvm: Cache gfn to pfn translation
x86/KVM: Clean up host's steal time structure
KVM: VMX: Add non-canonical check on writes to RTIT address MSRs
KVM: x86: Don't let userspace set host-reserved cr4 bits
KVM: x86: Free wbinvd_dirty_mask if vCPU creation fails
KVM: x86: Handle TIF_NEED_FPU_LOAD in kvm_{load,put}_guest_fpu()
KVM: x86: Ensure guest's FPU state is loaded when accessing for emulation
KVM: x86: Revert "KVM: X86: Fix fpu state crash in kvm guest"
KVM: s390: do not clobber registers during guest reset/store status
ocfs2: fix oops when writing cloned file
mm/page_alloc.c: fix uninitialized memmaps on a partially populated last section
arm64: dts: qcom: qcs404-evb: Set vdd_apc regulator in high power mode
mm/mmu_gather: invalidate TLB correctly on batch allocation failure and flush
clk: tegra: Mark fuse clock as critical
drm/amd/dm/mst: Ignore payload update failures
virtio-balloon: initialize all vq callbacks
virtio-pci: check name when counting MSI-X vectors
fix up iter on short count in fuse_direct_io()
broken ping to ipv6 linklocal addresses on debian buster
percpu: Separate decrypted varaibles anytime encryption can be enabled
ASoC: meson: axg-fifo: fix fifo threshold setup
scsi: qla2xxx: Fix the endianness of the qla82xx_get_fw_size() return type
scsi: csiostor: Adjust indentation in csio_device_reset
scsi: qla4xxx: Adjust indentation in qla4xxx_mem_free
scsi: ufs: Recheck bkops level if bkops is disabled
mtd: spi-nor: Split mt25qu512a (n25q512a) entry into two
phy: qualcomm: Adjust indentation in read_poll_timeout
ext2: Adjust indentation in ext2_fill_super
powerpc/44x: Adjust indentation in ibm4xx_denali_fixup_memsize
drm: msm: mdp4: Adjust indentation in mdp4_dsi_encoder_enable
NFC: pn544: Adjust indentation in pn544_hci_check_presence
ppp: Adjust indentation into ppp_async_input
net: smc911x: Adjust indentation in smc911x_phy_configure
net: tulip: Adjust indentation in {dmfe, uli526x}_init_module
IB/mlx5: Fix outstanding_pi index for GSI qps
IB/core: Fix ODP get user pages flow
nfsd: fix delay timer on 32-bit architectures
nfsd: fix jiffies/time_t mixup in LRU list
nfsd: Return the correct number of bytes written to the file
virtio-balloon: Fix memory leak when unloading while hinting is in progress
virtio_balloon: Fix memory leaks on errors in virtballoon_probe()
ubi: fastmap: Fix inverted logic in seen selfcheck
ubi: Fix an error pointer dereference in error handling code
ubifs: Fix memory leak from c->sup_node
regulator: core: Add regulator_is_equal() helper
ASoC: sgtl5000: Fix VDDA and VDDIO comparison
bonding/alb: properly access headers in bond_alb_xmit()
devlink: report 0 after hitting end in region read
dpaa_eth: support all modes with rate adapting PHYs
net: dsa: b53: Always use dev->vlan_enabled in b53_configure_vlan()
net: dsa: bcm_sf2: Only 7278 supports 2Gb/sec IMP port
net: dsa: microchip: enable module autoprobe
net: mvneta: move rx_dropped and rx_errors in per-cpu stats
net_sched: fix a resource leak in tcindex_set_parms()
net: stmmac: fix a possible endless loop
net: systemport: Avoid RBUF stuck in Wake-on-LAN mode
net/mlx5: IPsec, Fix esp modify function attribute
net/mlx5: IPsec, fix memory leak at mlx5_fpga_ipsec_delete_sa_ctx
net: macb: Remove unnecessary alignment check for TSO
net: macb: Limit maximum GEM TX length in TSO
taprio: Fix enabling offload with wrong number of traffic classes
taprio: Fix still allowing changing the flags during runtime
taprio: Add missing policy validation for flags
taprio: Use taprio_reset_tc() to reset Traffic Classes configuration
taprio: Fix dropping packets when using taprio + ETF offloading
ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
qed: Fix timestamping issue for L2 unicast ptp packets.
drop_monitor: Do not cancel uninitialized work item
net/mlx5: Fix deadlock in fs_core
net/mlx5: Deprecate usage of generic TLS HW capability bit
ASoC: Intel: skl_hda_dsp_common: Fix global-out-of-bounds bug
mfd: da9062: Fix watchdog compatible string
mfd: rn5t618: Mark ADC control register volatile
mfd: bd70528: Fix hour register mask
x86/timer: Don't skip PIT setup when APIC is disabled or in legacy mode
btrfs: use bool argument in free_root_pointers()
btrfs: free block groups after free'ing fs trees
drm/dp_mst: Remove VCPI while disabling topology mgr
KVM: x86/mmu: Apply max PA check for MMIO sptes to 32-bit KVM
KVM: x86: use CPUID to locate host page table reserved bits
KVM: x86: Use gpa_t for cr2/gpa to fix TDP support on 32-bit KVM
KVM: x86: fix overlap between SPTE_MMIO_MASK and generation
KVM: nVMX: vmread should not set rflags to specify success in case of #PF
KVM: Use vcpu-specific gva->hva translation when querying host page size
KVM: Play nice with read-only memslots when querying host page size
cifs: fail i/o on soft mounts if sessionsetup errors out
x86/apic/msi: Plug non-maskable MSI affinity race
clocksource: Prevent double add_timer_on() for watchdog_timer
perf/core: Fix mlock accounting in perf_mmap()
rxrpc: Fix service call disconnection
regulator fix for "regulator: core: Add regulator_is_equal() helper"
powerpc/kuap: Fix set direction in allow/prevent_user_access()
Linux 5.4.19
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ief6bae336b8e6931810e5b357c0d5e16fbf1c13e
This commit is contained in: commit 87acfa0267
@@ -8704,8 +8704,10 @@ L:	isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L:	netdev@vger.kernel.org
 W:	http://www.isdn4linux.de
 S:	Maintained
-F:	drivers/isdn/mISDN
-F:	drivers/isdn/hardware
+F:	drivers/isdn/mISDN/
+F:	drivers/isdn/hardware/
+F:	drivers/isdn/Kconfig
+F:	drivers/isdn/Makefile
 
 ISDN/CAPI SUBSYSTEM
 M:	Karsten Keil <isdn@linux-pingi.de>
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 18
+SUBLEVEL = 19
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -396,9 +396,6 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
 config HAVE_RCU_TABLE_FREE
 	bool
 
-config HAVE_RCU_TABLE_NO_INVALIDATE
-	bool
-
 config HAVE_MMU_GATHER_PAGE_SIZE
 	bool
 
@@ -14,13 +14,25 @@
 #include <asm/cputype.h>
 
 /* arm64 compatibility macros */
+#define PSR_AA32_MODE_FIQ	FIQ_MODE
+#define PSR_AA32_MODE_SVC	SVC_MODE
 #define PSR_AA32_MODE_ABT	ABT_MODE
 #define PSR_AA32_MODE_UND	UND_MODE
 #define PSR_AA32_T_BIT		PSR_T_BIT
+#define PSR_AA32_F_BIT		PSR_F_BIT
 #define PSR_AA32_I_BIT		PSR_I_BIT
 #define PSR_AA32_A_BIT		PSR_A_BIT
 #define PSR_AA32_E_BIT		PSR_E_BIT
 #define PSR_AA32_IT_MASK	PSR_IT_MASK
+#define PSR_AA32_GE_MASK	0x000f0000
+#define PSR_AA32_DIT_BIT	0x00200000
+#define PSR_AA32_PAN_BIT	0x00400000
+#define PSR_AA32_SSBS_BIT	0x00800000
+#define PSR_AA32_Q_BIT		PSR_Q_BIT
+#define PSR_AA32_V_BIT		PSR_V_BIT
+#define PSR_AA32_C_BIT		PSR_C_BIT
+#define PSR_AA32_Z_BIT		PSR_Z_BIT
+#define PSR_AA32_N_BIT		PSR_N_BIT
 
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 
@@ -41,6 +53,11 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	*__vcpu_spsr(vcpu) = v;
 }
 
+static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
+{
+	return spsr;
+}
+
 static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
@@ -177,6 +194,11 @@ static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
 }
 
+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
@@ -14,6 +14,8 @@
 struct kvm_decode {
 	unsigned long rt;
 	bool sign_extend;
+	/* Not used on 32-bit arm */
+	bool sixty_four;
 };
 
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
@@ -370,6 +370,14 @@ _pll_m_c_x_done:
 	pll_locked r1, r0, CLK_RESET_PLLC_BASE
 	pll_locked r1, r0, CLK_RESET_PLLX_BASE
 
+	tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
+	cmp	r1, #TEGRA30
+	beq	1f
+	ldr	r1, [r0, #CLK_RESET_PLLP_BASE]
+	bic	r1, r1, #(1<<31)	@ disable PllP bypass
+	str	r1, [r0, #CLK_RESET_PLLP_BASE]
+1:
+
 	mov32	r7, TEGRA_TMRUS_BASE
 	ldr	r1, [r7]
 	add	r1, r1, #LOCK_DELAY
@@ -630,7 +638,10 @@ tegra30_switch_cpu_to_clk32k:
 	str	r0, [r4, #PMC_PLLP_WB0_OVERRIDE]
 
 	/* disable PLLP, PLLA, PLLC and PLLX */
+	tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
+	cmp	r1, #TEGRA30
 	ldr	r0, [r5, #CLK_RESET_PLLP_BASE]
+	orrne	r0, r0, #(1 << 31)	@ enable PllP bypass on fast cluster
 	bic	r0, r0, #(1 << 30)
 	str	r0, [r5, #CLK_RESET_PLLP_BASE]
 	ldr	r0, [r5, #CLK_RESET_PLLA_BASE]
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static int __dma_supported(struct device *dev, u64 mask, bool warn)
 {
-	unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
 
 	/*
 	 * Translate the device's DMA mask to a PFN limit.  This
@@ -73,6 +73,7 @@
 		regulator-always-on;
 		regulator-boot-on;
 		regulator-name = "vdd_apc";
+		regulator-initial-mode = <1>;
 		regulator-min-microvolt = <1048000>;
 		regulator-max-microvolt = <1384000>;
 	};
@@ -261,7 +261,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
 static struct shash_alg ghash_alg[] = {{
 	.base.cra_name		= "ghash",
 	.base.cra_driver_name	= "ghash-neon",
-	.base.cra_priority	= 100,
+	.base.cra_priority	= 150,
 	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
 	.base.cra_ctxsize	= sizeof(struct ghash_key),
 	.base.cra_module	= THIS_MODULE,
@@ -36,7 +36,7 @@ static inline void local_daif_mask(void)
 	trace_hardirqs_off();
 }
 
-static inline unsigned long local_daif_save(void)
+static inline unsigned long local_daif_save_flags(void)
 {
 	unsigned long flags;
 
@@ -48,6 +48,15 @@ static inline unsigned long local_daif_save(void)
 		flags |= PSR_I_BIT;
 	}
 
+	return flags;
+}
+
+static inline unsigned long local_daif_save(void)
+{
+	unsigned long flags;
+
+	flags = local_daif_save_flags();
+
 	local_daif_mask();
 
 	return flags;
@@ -204,6 +204,38 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
 }
 
+/*
+ * The layout of SPSR for an AArch32 state is different when observed from an
+ * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
+ * view given an AArch64 view.
+ *
+ * In ARM DDI 0487E.a see:
+ *
+ * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
+ * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
+ * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
+ *
+ * Which show the following differences:
+ *
+ * | Bit | AA64 | AA32 | Notes                       |
+ * +-----+------+------+-----------------------------|
+ * | 24  | DIT  | J    | J is RES0 in ARMv8          |
+ * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
+ *
+ * ... and all other bits are (currently) common.
+ */
+static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
+{
+	const unsigned long overlap = BIT(24) | BIT(21);
+	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);
+
+	spsr &= ~overlap;
+
+	spsr |= dit << 21;
+
+	return spsr;
+}
+
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
 	u32 mode;
@@ -263,6 +295,11 @@ static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
 }
 
+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+}
+
 static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
@@ -10,13 +10,11 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_arm.h>
 
-/*
- * This is annoying. The mmio code requires this, even if we don't
- * need any decoding. To be fixed.
- */
 struct kvm_decode {
 	unsigned long rt;
 	bool sign_extend;
+	/* Witdth of the register accessed by the faulting instruction is 64-bits */
+	bool sixty_four;
 };
 
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
@@ -62,6 +62,7 @@
 #define PSR_AA32_I_BIT		0x00000080
 #define PSR_AA32_A_BIT		0x00000100
 #define PSR_AA32_E_BIT		0x00000200
 #define PSR_AA32_PAN_BIT	0x00400000
 #define PSR_AA32_SSBS_BIT	0x00800000
+#define PSR_AA32_DIT_BIT	0x01000000
 #define PSR_AA32_Q_BIT		0x08000000
@@ -49,6 +49,7 @@
 #define PSR_SSBS_BIT	0x00001000
 #define PSR_PAN_BIT	0x00400000
 #define PSR_UAO_BIT	0x00800000
+#define PSR_DIT_BIT	0x01000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
 #define PSR_Z_BIT	0x40000000
@@ -274,7 +274,7 @@ int apei_claim_sea(struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
 		return err;
 
-	current_flags = arch_local_save_flags();
+	current_flags = local_daif_save_flags();
 
 	/*
 	 * SEA can interrupt SError, mask it and describe this as an NMI so
@@ -14,9 +14,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/esr.h>
 
-#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
-				 PSR_I_BIT | PSR_D_BIT)
-
 #define CURRENT_EL_SP_EL0_VECTOR	0x0
 #define CURRENT_EL_SP_ELx_VECTOR	0x200
 #define LOWER_EL_AArch64_VECTOR		0x400
@@ -50,6 +47,69 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
 	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
 }
 
+/*
+ * When an exception is taken, most PSTATE fields are left unchanged in the
+ * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
+ * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
+ * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
+ *
+ * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
+ * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
+ *
+ * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
+ * MSB to LSB.
+ */
+static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+{
+	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+	unsigned long old, new;
+
+	old = *vcpu_cpsr(vcpu);
+	new = 0;
+
+	new |= (old & PSR_N_BIT);
+	new |= (old & PSR_Z_BIT);
+	new |= (old & PSR_C_BIT);
+	new |= (old & PSR_V_BIT);
+
+	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
+
+	new |= (old & PSR_DIT_BIT);
+
+	// PSTATE.UAO is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D5-2579.
+
+	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
+	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
+	// See ARM DDI 0487E.a, page D5-2578.
+	new |= (old & PSR_PAN_BIT);
+	if (!(sctlr & SCTLR_EL1_SPAN))
+		new |= PSR_PAN_BIT;
+
+	// PSTATE.SS is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D2-2452.
+
+	// PSTATE.IL is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D1-2306.
+
+	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
+	// See ARM DDI 0487E.a, page D13-3258
+	if (sctlr & SCTLR_ELx_DSSBS)
+		new |= PSR_SSBS_BIT;
+
+	// PSTATE.BTYPE is set to zero upon any exception to AArch64
+	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.
+
+	new |= PSR_D_BIT;
+	new |= PSR_A_BIT;
+	new |= PSR_I_BIT;
+	new |= PSR_F_BIT;
+
+	new |= PSR_MODE_EL1h;
+
+	return new;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -59,7 +119,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 
-	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
 	vcpu_write_spsr(vcpu, cpsr);
 
 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
@@ -94,7 +154,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 
-	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
+	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
 	vcpu_write_spsr(vcpu, cpsr);
 
 	/*
@@ -123,7 +123,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS
 targets += vmlinux.its
 targets += vmlinux.gz.its
 targets += vmlinux.bz2.its
-targets += vmlinux.lzmo.its
+targets += vmlinux.lzma.its
 targets += vmlinux.lzo.its
 
 quiet_cmd_cpp_its_S = ITS	$@
@@ -221,8 +221,7 @@ config PPC
 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HAVE_RCU_TABLE_FREE		if SMP
-	select HAVE_RCU_TABLE_NO_INVALIDATE	if HAVE_RCU_TABLE_FREE
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
@@ -237,6 +236,7 @@ config PPC
 	select NEED_DMA_MAP_STATE		if PPC64 || NOT_COHERENT_CACHE
 	select NEED_SG_DMA_LENGTH
 	select OF
+	select OF_DMA_DEFAULT_COHERENT		if !NOT_COHERENT_CACHE
 	select OF_EARLY_FLATTREE
 	select OLD_SIGACTION			if PPC32
 	select OLD_SIGSUSPEND
@@ -102,11 +102,13 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 	isync();	/* Context sync required after mtsrin() */
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+					      u32 size, unsigned long dir)
 {
 	u32 addr, end;
 
-	if (__builtin_constant_p(to) && to == NULL)
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (!(dir & KUAP_WRITE))
 		return;
 
 	addr = (__force u32)to;
@@ -119,11 +121,16 @@ static inline void allow_user_access(void __user *to, const void __user *from, u
 	kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end);	/* Clear Ks */
 }
 
-static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void prevent_user_access(void __user *to, const void __user *from,
+						u32 size, unsigned long dir)
 {
 	u32 addr = (__force u32)to;
 	u32 end = min(addr + size, TASK_SIZE);
 
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (!(dir & KUAP_WRITE))
+		return;
+
 	if (!addr || addr >= TASK_SIZE || !size)
 		return;
 
@@ -131,12 +138,17 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
 	kuap_update_sr(mfsrin(addr) | SR_KS, addr, end);	/* set Ks */
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
+	unsigned long begin = regs->kuap & 0xf0000000;
+	unsigned long end = regs->kuap << 28;
+
 	if (!is_write)
 		return false;
 
-	return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+	return WARN(address < begin || address >= end,
+		    "Bug: write fault blocked by segment registers !");
 }
 
 #endif /* CONFIG_PPC_KUAP */
@@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
 
 #define get_hugepd_cache_index(x)	(x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 				    void *table, int shift)
 {
@@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table)
 
 	pgtable_free(table, shift);
 }
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
-				    void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
@@ -77,25 +77,27 @@ static inline void set_kuap(unsigned long value)
 	isync();
 }
 
-static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+					      unsigned long size, unsigned long dir)
 {
 	// This is written so we can resolve to a single case at build time
-	if (__builtin_constant_p(to) && to == NULL)
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (dir == KUAP_READ)
 		set_kuap(AMR_KUAP_BLOCK_WRITE);
-	else if (__builtin_constant_p(from) && from == NULL)
+	else if (dir == KUAP_WRITE)
 		set_kuap(AMR_KUAP_BLOCK_READ);
 	else
 		set_kuap(0);
 }
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size)
+				       unsigned long size, unsigned long dir)
 {
 	set_kuap(AMR_KUAP_BLOCKED);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
 		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
@@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list;
 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
 extern void pmd_fragment_free(unsigned long *);
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
 extern void __tlb_remove_table(void *_table);
-#endif
 void pte_frag_destroy(void *pte_frag);
 
 static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
@@ -35,7 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 {
 	int oldval = 0, ret;
 
-	allow_write_to_user(uaddr, sizeof(*uaddr));
+	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 	pagefault_disable();
 
 	switch (op) {
@@ -62,7 +62,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 
 	*oval = oldval;
 
-	prevent_write_to_user(uaddr, sizeof(*uaddr));
+	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
 	return ret;
 }
 
@@ -76,7 +76,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	allow_write_to_user(uaddr, sizeof(*uaddr));
+	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
+
 	__asm__ __volatile__ (
 	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%1,0,%3		# futex_atomic_cmpxchg_inatomic\n\
@@ -97,7 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "cc", "memory");
 
 	*uval = prev;
-	prevent_write_to_user(uaddr, sizeof(*uaddr));
+	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
+
 	return ret;
 }
 
@@ -2,6 +2,10 @@
 #ifndef _ASM_POWERPC_KUP_H_
 #define _ASM_POWERPC_KUP_H_
 
+#define KUAP_READ	1
+#define KUAP_WRITE	2
+#define KUAP_READ_WRITE	(KUAP_READ | KUAP_WRITE)
+
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/kup-radix.h>
 #endif
@@ -42,32 +46,48 @@ void setup_kuap(bool disabled);
 #else
 static inline void setup_kuap(bool disabled) { }
 static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size) { }
+				     unsigned long size, unsigned long dir) { }
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size) { }
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
+				       unsigned long size, unsigned long dir) { }
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	return false;
+}
 #endif /* CONFIG_PPC_KUAP */
 
 static inline void allow_read_from_user(const void __user *from, unsigned long size)
 {
-	allow_user_access(NULL, from, size);
+	allow_user_access(NULL, from, size, KUAP_READ);
 }
 
 static inline void allow_write_to_user(void __user *to, unsigned long size)
 {
-	allow_user_access(to, NULL, size);
+	allow_user_access(to, NULL, size, KUAP_WRITE);
 }
 
+static inline void allow_read_write_user(void __user *to, const void __user *from,
+					 unsigned long size)
+{
+	allow_user_access(to, from, size, KUAP_READ_WRITE);
+}
+
 static inline void prevent_read_from_user(const void __user *from, unsigned long size)
 {
-	prevent_user_access(NULL, from, size);
+	prevent_user_access(NULL, from, size, KUAP_READ);
}
@@ -34,18 +34,19 @@
 #include <asm/reg.h>
 
 static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size)
+				     unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_INIT);
 }
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size)
+				       unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
 		    "Bug: fault blocked by AP register !");
@@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift)
 
 #define get_hugepd_cache_index(x)	(x)
 
-#ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
 	unsigned long pgf = (unsigned long)table;
@@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table)
 	pgtable_free(table, shift);
 }
 
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-	pgtable_free(table, shift);
-}
-#endif
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 				  unsigned long address)
 {
@@ -26,6 +26,17 @@
 
 #define tlb_flush tlb_flush
 extern void tlb_flush(struct mmu_gather *tlb);
+/*
+ * book3s:
+ * Hash does not use the linux page-tables, so we can avoid
+ * the TLB invalidate for page-table freeing, Radix otoh does use the
+ * page-tables and needs the TLBI.
+ *
+ * nohash:
+ * We still do TLB invalidate in the __pte_free_tlb routine before we
+ * add the page table pages to mmu gather table batch.
+ */
+#define tlb_needs_table_invalidate()	radix_enabled()
 
 /* Get the generic bits... */
 #include <asm-generic/tlb.h>
@@ -313,9 +313,9 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 	unsigned long ret;
 
 	barrier_nospec();
-	allow_user_access(to, from, n);
+	allow_read_write_user(to, from, n);
 	ret = __copy_tofrom_user(to, from, n);
-	prevent_user_access(to, from, n);
+	prevent_read_write_user(to, from, n);
 	return ret;
 }
 #endif /* __powerpc64__ */
@@ -179,7 +179,7 @@ transfer_to_handler:
 2:	/* if from kernel, check interrupted DOZE/NAP mode and
 	 * check for stack overflow
 	 */
-	kuap_save_and_lock r11, r12, r9, r2, r0
+	kuap_save_and_lock r11, r12, r9, r2, r6
 	addi	r2, r12, -THREAD
 	lwz	r9,KSP_LIMIT(r12)
 	cmplw	r1,r9			/* if r1 <= ksp_limit */
@@ -284,6 +284,7 @@ reenable_mmu:
 	rlwinm	r9,r9,0,~MSR_EE
 	lwz	r12,_LINK(r11)		/* and return to address in LR */
 	kuap_restore r11, r2, r3, r4, r5
+	lwz	r2, GPR2(r11)
 	b	fast_exception_return
 #endif
 
@@ -2354,7 +2354,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 
 	if (!vcore)
-		goto free_vcpu;
+		goto uninit_vcpu;
 
 	spin_lock(&vcore->lock);
 	++vcore->num_threads;
@@ -2371,6 +2371,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 
 	return vcpu;
 
+uninit_vcpu:
+	kvm_vcpu_uninit(vcpu);
 free_vcpu:
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 out:
@@ -1769,10 +1769,12 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 
 	err = kvmppc_mmu_init(vcpu);
 	if (err < 0)
-		goto uninit_vcpu;
+		goto free_shared_page;
 
 	return vcpu;
 
+free_shared_page:
+	free_page((unsigned long)vcpu->arch.shared);
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
@@ -638,7 +638,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	gfn = gpa_to_gfn(kvm_eq.qaddr);
 
-	page_size = kvm_host_page_size(kvm, gfn);
+	page_size = kvm_host_page_size(vcpu, gfn);
 	if (1ull << kvm_eq.qshift > page_size) {
 		srcu_read_unlock(&kvm->srcu, srcu_idx);
 		pr_warn("Incompatible host page size %lx!\n", page_size);
@@ -378,7 +378,6 @@ static inline void pgtable_free(void *table, int index)
 	}
 }
 
-#ifdef CONFIG_SMP
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
 	unsigned long pgf = (unsigned long)table;
@@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table)
 
 	return pgtable_free(table, index);
 }
-#else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
-{
-	return pgtable_free(table, index);
-}
-#endif
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
@@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 
 	// Read/write fault in a valid region (the exception table search passed
 	// above), but blocked by KUAP is bad, it can never succeed.
-	if (bad_kuap_fault(regs, is_write))
+	if (bad_kuap_fault(regs, address, is_write))
 		return true;
 
 	// What's left? Kernel fault on user in well defined regions (extable
@@ -173,10 +173,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
 
 static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
+	pte_t pte = __pte(st->current_flags);
+
 	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
 		return;
 
-	if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
+	if (!pte_write(pte) || !pte_exec(pte))
 		return;
 
 	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
@@ -360,8 +360,10 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
 
 	for (i = 0; i < scns_per_block; i++) {
 		pfn = PFN_DOWN(phys_addr);
-		if (!pfn_present(pfn))
+		if (!pfn_present(pfn)) {
+			phys_addr += MIN_MEMORY_BLOCK_SIZE;
 			continue;
+		}
 
 		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 		phys_addr += MIN_MEMORY_BLOCK_SIZE;
@@ -1894,15 +1894,14 @@ static void dump_300_sprs(void)
 
 	printf("pidr   = %.16lx  tidr  = %.16lx\n",
 		mfspr(SPRN_PID), mfspr(SPRN_TIDR));
-	printf("asdr   = %.16lx  psscr = %.16lx\n",
-		mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
-					: mfspr(SPRN_PSSCR_PR));
+	printf("psscr  = %.16lx\n",
+		hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
 
 	if (!hv)
 		return;
 
-	printf("ptcr   = %.16lx\n",
-		mfspr(SPRN_PTCR));
+	printf("ptcr   = %.16lx  asdr  = %.16lx\n",
+		mfspr(SPRN_PTCR), mfspr(SPRN_ASDR));
 #endif
 }
 
@@ -120,6 +120,11 @@ static bool seen_reg(int reg, struct rv_jit_context *ctx)
 	return false;
 }
 
+static void mark_fp(struct rv_jit_context *ctx)
+{
+	__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
+}
+
 static void mark_call(struct rv_jit_context *ctx)
 {
 	__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
@@ -596,6 +601,7 @@ static void __build_epilogue(u8 reg, struct rv_jit_context *ctx)
 
 	emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
 	/* Set return value. */
+	if (reg == RV_REG_RA)
 		emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx);
 	emit(rv_jalr(RV_REG_ZERO, reg, 0), ctx);
 }
@@ -1426,6 +1432,10 @@ static void build_prologue(struct rv_jit_context *ctx)
 {
 	int stack_adjust = 0, store_offset, bpf_stack_adjust;
 
+	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+	if (bpf_stack_adjust)
+		mark_fp(ctx);
+
 	if (seen_reg(RV_REG_RA, ctx))
 		stack_adjust += 8;
 	stack_adjust += 8; /* RV_REG_FP */
@@ -1443,7 +1453,6 @@ static void build_prologue(struct rv_jit_context *ctx)
 		stack_adjust += 8;
 
 	stack_adjust = round_up(stack_adjust, 16);
-	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
 	stack_adjust += bpf_stack_adjust;
 
 	store_offset = stack_adjust - 8;
@@ -33,6 +33,8 @@
 #define ARCH_HAS_PREPARE_HUGEPAGE
 #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
 
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
@@ -2863,9 +2863,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
 					CR14_UNUSED_33 |
 					CR14_EXTERNAL_DAMAGE_SUBMASK;
-	/* make sure the new fpc will be lazily loaded */
-	save_fpu_regs();
-	current->thread.fpu.fpc = 0;
+	vcpu->run->s.regs.fpc = 0;
 	vcpu->arch.sie_block->gbea = 1;
 	vcpu->arch.sie_block->pp = 0;
 	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
@@ -4354,7 +4352,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_S390_STORE_STATUS:
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = kvm_s390_vcpu_store_status(vcpu, arg);
+		r = kvm_s390_store_status_unloaded(vcpu, arg);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	case KVM_S390_SET_INITIAL_PSW: {
@@ -2,7 +2,7 @@
 /*
  * IBM System z Huge TLB Page Support for Kernel.
  *
- * Copyright IBM Corp. 2007,2016
+ * Copyright IBM Corp. 2007,2020
  * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
@@ -11,6 +11,9 @@
 
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/mman.h>
 #include <linux/sched/mm.h>
+#include <linux/security.h>
 
 /*
  * If the bit selected by single-bit bitmask "a" is set within "x", move
@@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt)
 	return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+		unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = current->mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
+}
+
+static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+		unsigned long addr0, unsigned long len,
+		unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
+	unsigned long addr;
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = current->mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int rc;
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+		goto check_asce_limit;
+	}
+
+	if (addr) {
+		addr = ALIGN(addr, huge_page_size(h));
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)))
+			goto check_asce_limit;
+	}
+
+	if (mm->get_unmapped_area == arch_get_unmapped_area)
+		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
+				pgoff, flags);
+	else
+		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+	if (addr & ~PAGE_MASK)
+		return addr;
+
+check_asce_limit:
+	if (addr + len > current->mm->context.asce_limit &&
+	    addr + len <= TASK_SIZE) {
+		rc = crst_table_upgrade(mm, addr + len);
+		if (rc)
+			return (unsigned long) rc;
+	}
+	return addr;
+}
@@ -65,7 +65,6 @@ config SPARC64
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
-	select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	select HAVE_DYNAMIC_FTRACE
@@ -28,6 +28,15 @@ void flush_tlb_pending(void);
 #define __tlb_remove_tlb_entry(tlb, ptep, address)	do { } while (0)
 #define tlb_flush(tlb)	flush_tlb_pending()
 
+/*
+ * SPARC64's hardware TLB fill does not use the Linux page-tables
+ * and therefore we don't need a TLBI when freeing page-table pages.
+ */
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define tlb_needs_table_invalidate()	(false)
+#endif
+
 #include <asm-generic/tlb.h>
 
 #endif /* _SPARC64_TLB_H */
@@ -16,10 +16,10 @@
 struct ipc64_perm
 {
 	__kernel_key_t	key;
-	__kernel_uid_t	uid;
-	__kernel_gid_t	gid;
-	__kernel_uid_t	cuid;
-	__kernel_gid_t	cgid;
+	__kernel_uid32_t	uid;
+	__kernel_gid32_t	gid;
+	__kernel_uid32_t	cuid;
+	__kernel_gid32_t	cgid;
 #ifndef __arch64__
 	unsigned short	__pad0;
 #endif
@@ -140,6 +140,7 @@ extern void apic_soft_disable(void);
 extern void lapic_shutdown(void);
 extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
+extern void apic_intr_mode_select(void);
 extern void apic_intr_mode_init(void);
 extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
@@ -188,6 +189,7 @@ static inline void disable_local_APIC(void) { }
 # define setup_secondary_APIC_clock x86_init_noop
 static inline void lapic_update_tsc_freq(void) { }
 static inline void init_bsp_APIC(void) { }
+static inline void apic_intr_mode_select(void) { }
 static inline void apic_intr_mode_init(void) { }
 static inline void lapic_assign_system_vectors(void) { }
 static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { }
@@ -452,6 +454,14 @@ static inline void ack_APIC_irq(void)
 	apic_eoi();
 }
 
+
+static inline bool lapic_vector_set_in_irr(unsigned int vector)
+{
+	u32 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+
+	return !!(irr & (1U << (vector % 32)));
+}
+
 static inline unsigned default_get_apic_id(unsigned long x)
 {
 	unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
@@ -380,12 +380,12 @@ struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
 			  bool prefault);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    struct x86_exception *exception);
+	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
+			    u32 access, struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
 			       struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -667,10 +667,10 @@ struct kvm_vcpu_arch {
 	bool pvclock_set_guest_stopped_request;
 
 	struct {
+		u8 preempted;
 		u64 msr_val;
 		u64 last_steal;
-		struct gfn_to_hva_cache stime;
-		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
 	u64 tsc_offset;
@@ -1128,6 +1128,7 @@ struct kvm_x86_ops {
 	bool (*xsaves_supported)(void);
 	bool (*umip_emulated)(void);
 	bool (*pt_supported)(void);
+	bool (*pku_supported)(void);
 
 	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
@@ -1450,7 +1451,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
@@ -51,12 +51,14 @@ struct x86_init_resources {
 * are set up.
 * @intr_init: interrupt init code
 * @trap_init: platform specific trap setup
+ * @intr_mode_select: interrupt delivery mode selection
 * @intr_mode_init: interrupt delivery mode setup
 */
struct x86_init_irqs {
	void (*pre_vector_init)(void);
	void (*intr_init)(void);
	void (*trap_init)(void);
+	void (*intr_mode_select)(void);
	void (*intr_mode_init)(void);
};

@@ -830,8 +830,17 @@ bool __init apic_needs_pit(void)
	if (!tsc_khz || !cpu_khz)
		return true;

-	/* Is there an APIC at all? */
-	if (!boot_cpu_has(X86_FEATURE_APIC))
+	/* Is there an APIC at all or is it disabled? */
+	if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
		return true;

+	/*
+	 * If interrupt delivery mode is legacy PIC or virtual wire without
+	 * configuration, the local APIC timer wont be set up. Make sure
+	 * that the PIT is initialized.
+	 */
+	if (apic_intr_mode == APIC_PIC ||
+	    apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
+		return true;
+
	/* Virt guests may lack ARAT, but still have DEADLINE */
@@ -1322,7 +1331,7 @@ void __init sync_Arb_IDs(void)

enum apic_intr_mode_id apic_intr_mode __ro_after_init;

-static int __init apic_intr_mode_select(void)
+static int __init __apic_intr_mode_select(void)
{
	/* Check kernel option */
	if (disable_apic) {
@@ -1384,6 +1393,12 @@ static int __init apic_intr_mode_select(void)
	return APIC_SYMMETRIC_IO;
}

+/* Select the interrupt delivery mode for the BSP */
+void __init apic_intr_mode_select(void)
+{
+	apic_intr_mode = __apic_intr_mode_select();
+}
+
/*
 * An initial setup of the virtual wire mode.
 */
@@ -1440,8 +1455,6 @@ void __init apic_intr_mode_init(void)
{
	bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);

-	apic_intr_mode = apic_intr_mode_select();
-
	switch (apic_intr_mode) {
	case APIC_PIC:
		pr_info("APIC: Keep in PIC mode(8259)\n");
@@ -23,10 +23,8 @@

static struct irq_domain *msi_default_domain;

-static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+static void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg)
{
-	struct irq_cfg *cfg = irqd_cfg(data);
-
	msg->address_hi = MSI_ADDR_BASE_HI;

	if (x2apic_enabled())
@@ -47,6 +45,127 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
		MSI_DATA_VECTOR(cfg->vector);
}

+static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	__irq_msi_compose_msg(irqd_cfg(data), msg);
+}
+
+static void irq_msi_update_msg(struct irq_data *irqd, struct irq_cfg *cfg)
+{
+	struct msi_msg msg[2] = { [1] = { }, };
+
+	__irq_msi_compose_msg(cfg, msg);
+	irq_data_get_irq_chip(irqd)->irq_write_msi_msg(irqd, msg);
+}
+
+static int
+msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
+{
+	struct irq_cfg old_cfg, *cfg = irqd_cfg(irqd);
+	struct irq_data *parent = irqd->parent_data;
+	unsigned int cpu;
+	int ret;
+
+	/* Save the current configuration */
+	cpu = cpumask_first(irq_data_get_effective_affinity_mask(irqd));
+	old_cfg = *cfg;
+
+	/* Allocate a new target vector */
+	ret = parent->chip->irq_set_affinity(parent, mask, force);
+	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+		return ret;
+
+	/*
+	 * For non-maskable and non-remapped MSI interrupts the migration
+	 * to a different destination CPU and a different vector has to be
+	 * done careful to handle the possible stray interrupt which can be
+	 * caused by the non-atomic update of the address/data pair.
+	 *
+	 * Direct update is possible when:
+	 * - The MSI is maskable (remapped MSI does not use this code path)).
+	 *   The quirk bit is not set in this case.
+	 * - The new vector is the same as the old vector
+	 * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
+	 * - The new destination CPU is the same as the old destination CPU
+	 */
+	if (!irqd_msi_nomask_quirk(irqd) ||
+	    cfg->vector == old_cfg.vector ||
+	    old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
+	    cfg->dest_apicid == old_cfg.dest_apicid) {
+		irq_msi_update_msg(irqd, cfg);
+		return ret;
+	}
+
+	/*
+	 * Paranoia: Validate that the interrupt target is the local
+	 * CPU.
+	 */
+	if (WARN_ON_ONCE(cpu != smp_processor_id())) {
+		irq_msi_update_msg(irqd, cfg);
+		return ret;
+	}
+
+	/*
+	 * Redirect the interrupt to the new vector on the current CPU
+	 * first. This might cause a spurious interrupt on this vector if
+	 * the device raises an interrupt right between this update and the
+	 * update to the final destination CPU.
+	 *
+	 * If the vector is in use then the installed device handler will
+	 * denote it as spurious which is no harm as this is a rare event
+	 * and interrupt handlers have to cope with spurious interrupts
+	 * anyway. If the vector is unused, then it is marked so it won't
+	 * trigger the 'No irq handler for vector' warning in do_IRQ().
+	 *
+	 * This requires to hold vector lock to prevent concurrent updates to
+	 * the affected vector.
+	 */
+	lock_vector_lock();
+
+	/*
+	 * Mark the new target vector on the local CPU if it is currently
+	 * unused. Reuse the VECTOR_RETRIGGERED state which is also used in
+	 * the CPU hotplug path for a similar purpose. This cannot be
+	 * undone here as the current CPU has interrupts disabled and
+	 * cannot handle the interrupt before the whole set_affinity()
+	 * section is done. In the CPU unplug case, the current CPU is
+	 * about to vanish and will not handle any interrupts anymore. The
+	 * vector is cleaned up when the CPU comes online again.
+	 */
+	if (IS_ERR_OR_NULL(this_cpu_read(vector_irq[cfg->vector])))
+		this_cpu_write(vector_irq[cfg->vector], VECTOR_RETRIGGERED);
+
+	/* Redirect it to the new vector on the local CPU temporarily */
+	old_cfg.vector = cfg->vector;
+	irq_msi_update_msg(irqd, &old_cfg);
+
+	/* Now transition it to the target CPU */
+	irq_msi_update_msg(irqd, cfg);
+
+	/*
+	 * All interrupts after this point are now targeted at the new
+	 * vector/CPU.
+	 *
+	 * Drop vector lock before testing whether the temporary assignment
+	 * to the local CPU was hit by an interrupt raised in the device,
+	 * because the retrigger function acquires vector lock again.
+	 */
+	unlock_vector_lock();
+
+	/*
+	 * Check whether the transition raced with a device interrupt and
+	 * is pending in the local APICs IRR. It is safe to do this outside
+	 * of vector lock as the irq_desc::lock of this interrupt is still
+	 * held and interrupts are disabled: The check is not accessing the
+	 * underlying vector store. It's just checking the local APIC's
+	 * IRR.
+	 */
+	if (lapic_vector_set_in_irr(cfg->vector))
+		irq_data_get_irq_chip(irqd)->irq_retrigger(irqd);
+
+	return ret;
+}
+
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
@@ -58,6 +177,7 @@ static struct irq_chip pci_msi_controller = {
	.irq_ack = irq_chip_ack_parent,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_compose_msi_msg = irq_msi_compose_msg,
+	.irq_set_affinity = msi_set_affinity,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -146,6 +266,8 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
	}
	if (!msi_default_domain)
		pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
+	else
+		msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
}

#ifdef CONFIG_IRQ_REMAP
@@ -115,11 +115,12 @@ void __init tsx_init(void)
		tsx_disable();

		/*
-		 * tsx_disable() will change the state of the
-		 * RTM CPUID bit. Clear it here since it is now
-		 * expected to be not set.
+		 * tsx_disable() will change the state of the RTM and HLE CPUID
+		 * bits. Clear them here since they are now expected to be not
+		 * set.
		 */
		setup_clear_cpu_cap(X86_FEATURE_RTM);
+		setup_clear_cpu_cap(X86_FEATURE_HLE);
	} else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {

		/*
@@ -131,10 +132,10 @@ void __init tsx_init(void)
		tsx_enable();

		/*
-		 * tsx_enable() will change the state of the
-		 * RTM CPUID bit. Force it here since it is now
-		 * expected to be set.
+		 * tsx_enable() will change the state of the RTM and HLE CPUID
+		 * bits. Force them here since they are now expected to be set.
		 */
		setup_force_cpu_cap(X86_FEATURE_RTM);
+		setup_force_cpu_cap(X86_FEATURE_HLE);
	}
}
@@ -91,10 +91,18 @@ void __init hpet_time_init(void)

static __init void x86_late_time_init(void)
{
-	x86_init.timers.timer_init();
	/*
-	 * After PIT/HPET timers init, select and setup
-	 * the final interrupt mode for delivering IRQs.
+	 * Before PIT/HPET init, select the interrupt mode. This is required
+	 * to make the decision whether PIT should be initialized correct.
+	 */
+	x86_init.irqs.intr_mode_select();
+
+	/* Setup the legacy timers */
+	x86_init.timers.timer_init();
+
+	/*
+	 * After PIT/HPET timers init, set up the final interrupt mode for
+	 * delivering IRQs.
	 */
	x86_init.irqs.intr_mode_init();
	tsc_init();
@@ -58,6 +58,7 @@ struct x86_init_ops x86_init __initdata = {
		.pre_vector_init = init_ISA_irqs,
		.intr_init = native_init_IRQ,
		.trap_init = x86_init_noop,
+		.intr_mode_select = apic_intr_mode_select,
		.intr_mode_init = apic_intr_mode_init
	},

@@ -352,6 +352,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	unsigned f_la57;
+	unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
@@ -363,7 +364,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)

	/* cpuid 7.0.ecx*/
	const u32 kvm_cpuid_7_0_ecx_x86_features =
-		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
+		F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
@@ -392,6 +393,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
		/* Set LA57 based on hardware capability. */
		entry->ecx |= f_la57;
		entry->ecx |= f_umip;
+		entry->ecx |= f_pku;
		/* PKU is not yet implemented for shadow paging. */
		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);
@@ -22,6 +22,7 @@
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
+#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

@@ -1075,8 +1076,23 @@ static void fetch_register_operand(struct operand *op)
	}
}

+static void emulator_get_fpu(void)
+{
+	fpregs_lock();
+
+	fpregs_assert_state_consistent();
+	if (test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+}
+
+static void emulator_put_fpu(void)
+{
+	fpregs_unlock();
+}
+
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
+	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1098,11 +1114,13 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
#endif
	default: BUG();
	}
+	emulator_put_fpu();
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
+	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1124,10 +1142,12 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
#endif
	default: BUG();
	}
+	emulator_put_fpu();
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
+	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1139,10 +1159,12 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
+	emulator_put_fpu();
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
+	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1154,6 +1176,7 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
+	emulator_put_fpu();
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1161,7 +1184,9 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

+	emulator_get_fpu();
	asm volatile("fninit");
+	emulator_put_fpu();
	return X86EMUL_CONTINUE;
}

@@ -1172,7 +1197,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

+	emulator_get_fpu();
	asm volatile("fnstcw %0": "+m"(fcw));
+	emulator_put_fpu();

	ctxt->dst.val = fcw;

@@ -1186,7 +1213,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

+	emulator_get_fpu();
	asm volatile("fnstsw %0": "+m"(fsw));
+	emulator_put_fpu();

	ctxt->dst.val = fsw;

@@ -4094,8 +4123,12 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
	if (rc != X86EMUL_CONTINUE)
		return rc;

+	emulator_get_fpu();
+
	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

+	emulator_put_fpu();
+
	if (rc != X86EMUL_CONTINUE)
		return rc;

@@ -4138,6 +4171,8 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
	if (rc != X86EMUL_CONTINUE)
		return rc;

+	emulator_get_fpu();
+
	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
@@ -4153,6 +4188,8 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
+	emulator_put_fpu();
+
	return rc;
}

@@ -5212,16 +5249,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
			ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_ES;
+			break;
		case 0x2e:	/* CS override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_CS;
+			break;
		case 0x36:	/* SS override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_SS;
+			break;
		case 0x3e:	/* DS override */
			has_seg_override = true;
-			ctxt->seg_override = (ctxt->b >> 3) & 3;
+			ctxt->seg_override = VCPU_SREG_DS;
			break;
		case 0x64:	/* FS override */
+			has_seg_override = true;
+			ctxt->seg_override = VCPU_SREG_FS;
+			break;
		case 0x65:	/* GS override */
			has_seg_override = true;
-			ctxt->seg_override = ctxt->b & 7;
+			ctxt->seg_override = VCPU_SREG_GS;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
@@ -5305,10 +5354,15 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
		}
		break;
	case Escape:
-		if (ctxt->modrm > 0xbf)
-			opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
-		else
+		if (ctxt->modrm > 0xbf) {
+			size_t size = ARRAY_SIZE(opcode.u.esc->high);
+			u32 index = array_index_nospec(
+				ctxt->modrm - 0xc0, size);
+
+			opcode = opcode.u.esc->high[index];
+		} else {
			opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
+		}
		break;
	case InstrDual:
		if ((ctxt->modrm >> 6) == 3)
@@ -5450,7 +5504,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

+	emulator_get_fpu();
	rc = asm_safe("fwait");
+	emulator_put_fpu();

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);
@@ -809,11 +809,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	size_t size = ARRAY_SIZE(hv->hv_crash_param);

-	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

-	*pdata = hv->hv_crash_param[index];
+	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

@@ -852,11 +853,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+	size_t size = ARRAY_SIZE(hv->hv_crash_param);

-	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

-	hv->hv_crash_param[index] = data;
+	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

@@ -460,10 +460,14 @@ static int picdev_write(struct kvm_pic *s,
	switch (addr) {
	case 0x20:
	case 0x21:
+		pic_lock(s);
+		pic_ioport_write(&s->pics[0], addr, data);
+		pic_unlock(s);
+		break;
	case 0xa0:
	case 0xa1:
		pic_lock(s);
-		pic_ioport_write(&s->pics[addr >> 7], addr, data);
+		pic_ioport_write(&s->pics[1], addr, data);
		pic_unlock(s);
		break;
	case 0x4d0:
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -68,13 +69,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
	default:
		{
		u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
-		u64 redir_content;
+		u64 redir_content = ~0ULL;

-		if (redir_index < IOAPIC_NUM_PINS)
-			redir_content =
-				ioapic->redirtbl[redir_index].bits;
-		else
-			redir_content = ~0ULL;
+		if (redir_index < IOAPIC_NUM_PINS) {
+			u32 index = array_index_nospec(
+				redir_index, IOAPIC_NUM_PINS);
+
+			redir_content = ioapic->redirtbl[index].bits;
+		}

		result = (ioapic->ioregsel & 0x1) ?
			(redir_content >> 32) & 0xffffffff :
@@ -291,6 +293,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)

	if (index >= IOAPIC_NUM_PINS)
		return;
+	index = array_index_nospec(index, IOAPIC_NUM_PINS);
	e = &ioapic->redirtbl[index];
	mask_before = e->fields.mask;
	/* Preserve read-only fields */
@@ -1926,15 +1926,20 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
-	case APIC_LVTERR:
+	case APIC_LVTERR: {
		/* TODO: Check vector */
+		size_t size;
+		u32 index;
+
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;

-		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
+		size = ARRAY_SIZE(apic_lvt_mask);
+		index = array_index_nospec(
+				(reg - APIC_LVTT) >> 4, size);
+		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);

		break;
+	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
@@ -418,22 +418,24 @@ static inline bool is_access_track_spte(u64 spte)
 * requires a full MMU zap). The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */
-#define MMIO_SPTE_GEN_MASK GENMASK_ULL(18, 0)
+#define MMIO_SPTE_GEN_MASK GENMASK_ULL(17, 0)

#define MMIO_SPTE_GEN_LOW_START 3
#define MMIO_SPTE_GEN_LOW_END 11
#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
					   MMIO_SPTE_GEN_LOW_START)

-#define MMIO_SPTE_GEN_HIGH_START 52
-#define MMIO_SPTE_GEN_HIGH_END 61
+#define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT
+#define MMIO_SPTE_GEN_HIGH_END 62
#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
					    MMIO_SPTE_GEN_HIGH_START)

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
+	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
@@ -444,8 +446,6 @@ static u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

-	spte &= ~shadow_mmio_mask;
-
	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
	return gen;
@@ -538,16 +538,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
static u8 kvm_get_shadow_phys_bits(void)
{
	/*
-	 * boot_cpu_data.x86_phys_bits is reduced when MKTME is detected
-	 * in CPU detection code, but MKTME treats those reduced bits as
-	 * 'keyID' thus they are not reserved bits. Therefore for MKTME
-	 * we should still return physical address bits reported by CPUID.
+	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
+	 * in CPU detection code, but the processor treats those reduced bits as
+	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
+	 * the physical address bits reported by CPUID.
	 */
-	if (!boot_cpu_has(X86_FEATURE_TME) ||
-	    WARN_ON_ONCE(boot_cpu_data.extended_cpuid_level < 0x80000008))
-		return boot_cpu_data.x86_phys_bits;
+	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
+		return cpuid_eax(0x80000008) & 0xff;

+	/*
+	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
+	 * custom CPUID. Proceed with whatever the kernel found since these features
+	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
+	 */
	return boot_cpu_data.x86_phys_bits;
}

static void kvm_mmu_reset_all_pte_masks(void)
@@ -1282,12 +1286,12 @@ static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
	return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
}

-static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
+static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long page_size;
	int i, ret = 0;

-	page_size = kvm_host_page_size(kvm, gfn);
+	page_size = kvm_host_page_size(vcpu, gfn);

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
@@ -1337,7 +1341,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
	if (unlikely(*force_pt_level))
		return PT_PAGE_TABLE_LEVEL;

-	host_level = host_mapping_level(vcpu->kvm, large_gfn);
+	host_level = host_mapping_level(vcpu, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;
@@ -3528,7 +3532,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
 * - true: let the vcpu to access on the same address again.
 * - false: let the real page fault path to fix it.
 */
-static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
+static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
@@ -3548,7 +3552,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
	do {
		u64 new_spte;

-		for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
+		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
			if (!is_shadow_present_pte(spte) ||
			    iterator.level < level)
				break;
@@ -3626,7 +3630,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,

	} while (true);

-	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
+	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
			      spte, fault_handled);
	walk_shadow_page_lockless_end(vcpu);

@@ -3634,10 +3638,11 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
+			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
+			 bool *writable);
static int make_mmu_pages_available(struct kvm_vcpu *vcpu);

-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
+static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			 gfn_t gfn, bool prefault)
{
	int r;
@@ -3663,16 +3668,16 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

-	if (fast_page_fault(vcpu, v, level, error_code))
+	if (fast_page_fault(vcpu, gpa, level, error_code))
		return RET_PF_RETRY;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

-	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
		return RET_PF_RETRY;

-	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+	if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
		return r;

	r = RET_PF_RETRY;
@@ -3683,7 +3688,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
		goto out_unlock;
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
-	r = __direct_map(vcpu, v, write, map_writable, level, pfn,
+	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
			 prefault, false);
out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3981,7 +3986,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);

-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
				  u32 access, struct x86_exception *exception)
{
	if (exception)
@@ -3989,7 +3994,7 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
	return vaddr;
}

-static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
+static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
					 u32 access,
					 struct x86_exception *exception)
{
@@ -4149,13 +4154,14 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
	walk_shadow_page_lockless_end(vcpu);
}

-static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				u32 error_code, bool prefault)
{
-	gfn_t gfn = gva >> PAGE_SHIFT;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
	int r;

-	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
+	/* Note, paging is disabled, ergo gva == gpa. */
+	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);

	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;
@@ -4167,11 +4173,12 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));


-	return nonpaging_map(vcpu, gva & PAGE_MASK,
+	return nonpaging_map(vcpu, gpa & PAGE_MASK,
			     error_code, gfn, prefault);
}

-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+				   gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

@@ -4180,11 +4187,13 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);

-	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
+				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-			 gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
+			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
+			 bool *writable)
{
	struct kvm_memory_slot *slot;
	bool async;
@@ -4204,12 +4213,12 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
		return false; /* *pfn has correct page already */

	if (!prefault && kvm_can_do_async_pf(vcpu)) {
-		trace_kvm_try_async_get_page(gva, gfn);
+		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
-			trace_kvm_async_pf_doublefault(gva, gfn);
+			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
-		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
			return true;
	}

@@ -4222,6 +4231,12 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
{
	int r = 1;

+#ifndef CONFIG_X86_64
+	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
+	if (WARN_ON_ONCE(fault_address >> 32))
+		return -EFAULT;
+#endif
+
	vcpu->arch.l1tf_flush_l1d = true;
	switch (vcpu->arch.apf.host_apf_reason) {
	default:
@@ -4259,7 +4274,7 @@ check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
}

-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			  bool prefault)
{
	kvm_pfn_t pfn;
@@ -5516,7 +5531,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
	return 0;
}

-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = 0;
@@ -5525,18 +5540,18 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
	/* With shadow page tables, fault_address contains a GVA or nGPA. */
	if (vcpu->arch.mmu->direct_map) {
		vcpu->arch.gpa_available = true;
-		vcpu->arch.gpa_val = cr2;
+		vcpu->arch.gpa_val = cr2_or_gpa;
	}

	r = RET_PF_INVALID;
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
-		r = handle_mmio_page_fault(vcpu, cr2, direct);
+		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
		if (r == RET_PF_EMULATE)
			goto emulate;
	}

	if (r == RET_PF_INVALID) {
-		r = vcpu->arch.mmu->page_fault(vcpu, cr2,
+		r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
					       lower_32_bits(error_code),
					       false);
		WARN_ON(r == RET_PF_INVALID);
@@ -5556,7 +5571,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
	 */
	if (vcpu->arch.mmu->direct_map &&
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
-		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
		return 1;
	}

@@ -5571,7 +5586,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
	 */
-	if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
		emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
	/*
@@ -5586,7 +5601,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
		return 1;
	}

-	return x86_emulate_instruction(vcpu, cr2, emulation_type, insn,
+	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
				       insn_len);
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
@@ -6249,7 +6264,7 @@ static void kvm_set_mmio_spte_mask(void)
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
-	if (IS_ENABLED(CONFIG_X86_64) && shadow_phys_bits == 52)
+	if (shadow_phys_bits == 52)
		mask &= ~1ull;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
@@ -249,13 +249,13 @@ TRACE_EVENT(

TRACE_EVENT(
	fast_page_fault,
-	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
-	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
+	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
-		__field(gva_t, gva)
+		__field(gpa_t, cr2_or_gpa)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
@@ -265,7 +265,7 @@ TRACE_EVENT(

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
-		__entry->gva = gva;
+		__entry->cr2_or_gpa = cr2_or_gpa;
		__entry->error_code = error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
@@ -273,9 +273,9 @@ TRACE_EVENT(
		__entry->retry = retry;
	),

-	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
+	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
-		  __entry->gva, __print_flags(__entry->error_code, "|",
+		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
@@ -192,11 +192,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
-		*unit = msr - MSR_MTRRfix16K_80000;
+		*unit = array_index_nospec(
+			msr - MSR_MTRRfix16K_80000,
+			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
-		*unit = msr - MSR_MTRRfix4K_C0000;
+		*unit = array_index_nospec(
+			msr - MSR_MTRRfix4K_C0000,
+			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
@@ -291,11 +291,11 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
}

/*
- * Fetch a guest pte for a guest virtual address
+ * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				    gva_t addr, u32 access)
+				    gpa_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
@@ -496,7 +496,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
}

static int FNAME(walk_addr)(struct guest_walker *walker,
-			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
+			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
@@ -611,7 +611,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
-static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
			struct guest_walker *gw,
			int write_fault, int hlevel,
			kvm_pfn_t pfn, bool map_writable, bool prefault,
@@ -765,7 +765,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 * a negative value on error.
 */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
@@ -945,18 +945,19 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
	spin_unlock(&vcpu->kvm->mmu_lock);
}

-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

-	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
+	r = FNAME(walk_addr)(&walker, vcpu, addr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
-		gpa |= vaddr & ~PAGE_MASK;
+		gpa |= addr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

@@ -964,7 +965,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
}

#if PTTYPE != PTTYPE_EPT
-static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
+/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
+static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
@@ -972,6 +974,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
	gpa_t gpa = UNMAPPED_GVA;
	int r;

+#ifndef CONFIG_X86_64
+	/* A 64-bit GVA should be impossible on 32-bit KVM. */
+	WARN_ON_ONCE(vaddr >> 32);
+#endif
+
	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
@@ -2,6 +2,8 @@
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

+#include <linux/nospec.h>
+
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
@@ -86,8 +88,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
-	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
-		return &pmu->gp_counters[msr - base];
+	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
+		u32 index = array_index_nospec(msr - base,
+					       pmu->nr_arch_gp_counters);
+
+		return &pmu->gp_counters[index];
+	}

	return NULL;
}
@@ -97,8 +103,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

-	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
-		return &pmu->fixed_counters[msr - base];
+	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
+		u32 index = array_index_nospec(msr - base,
+					       pmu->nr_arch_fixed_counters);
+
+		return &pmu->fixed_counters[index];
+	}

	return NULL;
}
@@ -5986,6 +5986,11 @@ static bool svm_has_wbinvd_exit(void)
	return true;
}

+static bool svm_pku_supported(void)
+{
+	return false;
+}
+
#define PRE_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
@@ -7278,6 +7283,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.xsaves_supported = svm_xsaves_supported,
	.umip_emulated = svm_umip_emulated,
	.pt_supported = svm_pt_supported,
+	.pku_supported = svm_pku_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

@@ -145,6 +145,11 @@ static inline bool vmx_umip_emulated(void)
		SECONDARY_EXEC_DESC;
}

+static inline bool vmx_pku_supported(void)
+{
+	return boot_cpu_has(X86_FEATURE_PKU);
+}
+
static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -4663,8 +4663,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
					vmx_instruction_info, true, len, &gva))
			return 1;
		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) {
+			kvm_inject_page_fault(vcpu, &e);
+			return 1;
+		}
	}

	return nested_vmx_succeed(vcpu);
@@ -84,10 +84,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,

static unsigned intel_find_fixed_event(int idx)
{
-	if (idx >= ARRAY_SIZE(fixed_pmc_events))
+	u32 event;
+	size_t size = ARRAY_SIZE(fixed_pmc_events);
+
+	if (idx >= size)
		return PERF_COUNT_HW_MAX;

-	return intel_arch_events[fixed_pmc_events[idx]].event_type;
+	event = fixed_pmc_events[array_index_nospec(idx, size)];
+	return intel_arch_events[event].event_type;
}

/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
@@ -128,16 +132,20 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
+	unsigned int num_counters;

	idx &= ~(3u << 30);
-	if (!fixed && idx >= pmu->nr_arch_gp_counters)
+	if (fixed) {
+		counters = pmu->fixed_counters;
+		num_counters = pmu->nr_arch_fixed_counters;
+	} else {
+		counters = pmu->gp_counters;
+		num_counters = pmu->nr_arch_gp_counters;
+	}
+	if (idx >= num_counters)
		return NULL;
-	if (fixed && idx >= pmu->nr_arch_fixed_counters)
-		return NULL;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

-	return &counters[idx];
+	return &counters[array_index_nospec(idx, num_counters)];
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
@@ -2140,6 +2140,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		    (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
					PT_CAP_num_address_ranges)))
			return 1;
+		if (is_noncanonical_address(data, vcpu))
+			return 1;
		if (index % 2)
			vmx->pt_desc.guest.addr_b[index / 2] = data;
		else
@@ -7865,6 +7867,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.xsaves_supported = vmx_xsaves_supported,
	.umip_emulated = vmx_umip_emulated,
	.pt_supported = vmx_pt_supported,
+	.pku_supported = vmx_pku_supported,

	.request_immediate_exit = vmx_request_immediate_exit,

@@ -92,6 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

+static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+
#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__

@@ -886,9 +888,38 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

+static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
+{
+	u64 reserved_bits = CR4_RESERVED_BITS;
+
+	if (!cpu_has(c, X86_FEATURE_XSAVE))
+		reserved_bits |= X86_CR4_OSXSAVE;
+
+	if (!cpu_has(c, X86_FEATURE_SMEP))
+		reserved_bits |= X86_CR4_SMEP;
+
+	if (!cpu_has(c, X86_FEATURE_SMAP))
+		reserved_bits |= X86_CR4_SMAP;
+
+	if (!cpu_has(c, X86_FEATURE_FSGSBASE))
+		reserved_bits |= X86_CR4_FSGSBASE;
+
+	if (!cpu_has(c, X86_FEATURE_PKU))
+		reserved_bits |= X86_CR4_PKE;
+
+	if (!cpu_has(c, X86_FEATURE_LA57) &&
+	    !(cpuid_ecx(0x7) & bit(X86_FEATURE_LA57)))
+		reserved_bits |= X86_CR4_LA57;
+
+	if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated())
+		reserved_bits |= X86_CR4_UMIP;
+
+	return reserved_bits;
+}
+
static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
-	if (cr4 & CR4_RESERVED_BITS)
+	if (cr4 & cr4_reserved_bits)
		return -EINVAL;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
@@ -1054,9 +1085,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
+	size_t size = ARRAY_SIZE(vcpu->arch.db);
+
	switch (dr) {
	case 0 ... 3:
-		vcpu->arch.db[dr] = val;
+		vcpu->arch.db[array_index_nospec(dr, size)] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
@@ -1093,9 +1126,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr);

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
+	size_t size = ARRAY_SIZE(vcpu->arch.db);
+
	switch (dr) {
	case 0 ... 3:
-		*val = vcpu->arch.db[dr];
+		*val = vcpu->arch.db[array_index_nospec(dr, size)];
		break;
	case 4:
		/* fall through */
@@ -2490,7 +2525,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
-			u32 offset = msr - MSR_IA32_MC0_CTL;
+			u32 offset = array_index_nospec(
+				msr - MSR_IA32_MC0_CTL,
+				MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
+
			/* only 0 or all 1s can be written to IA32_MCi_CTL
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
@@ -2586,45 +2624,47 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)

static void record_steal_time(struct kvm_vcpu *vcpu)
{
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

-	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+	/* -EAGAIN is returned in atomic context so we can just return. */
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+			&map, &vcpu->arch.st.cache, false))
		return;

+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
	/*
	 * Doing a TLB flush here, on the guest's behalf, can avoid
	 * expensive IPIs.
	 */
	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-		vcpu->arch.st.steal.preempted & KVM_VCPU_FLUSH_TLB);
-	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+		st->preempted & KVM_VCPU_FLUSH_TLB);
+	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
		kvm_vcpu_flush_tlb(vcpu, false);

-	if (vcpu->arch.st.steal.version & 1)
-		vcpu->arch.st.steal.version += 1; /* first time write, random junk */
+	vcpu->arch.st.preempted = 0;

-	vcpu->arch.st.steal.version += 1;
+	if (st->version & 1)
+		st->version += 1; /* first time write, random junk */

-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	st->version += 1;

	smp_wmb();

-	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+	st->steal += current->sched_info.run_delay -
		vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;

-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
	smp_wmb();

-	vcpu->arch.st.steal.version += 1;
+	st->version += 1;

-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -2777,11 +2817,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-						data & KVM_STEAL_VALID_BITS,
-						sizeof(struct kvm_steal_time)))
-			return 1;
-
		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
@@ -2917,7 +2952,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
-			u32 offset = msr - MSR_IA32_MC0_CTL;
+			u32 offset = array_index_nospec(
+				msr - MSR_IA32_MC0_CTL,
+				MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);
+
			data = vcpu->arch.mce_banks[offset];
			break;
		}
@@ -3443,10 +3481,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

	kvm_x86_ops->vcpu_load(vcpu, cpu);

-	fpregs_assert_state_consistent();
-	if (test_thread_flag(TIF_NEED_FPU_LOAD))
-		switch_fpu_return();
-
	/* Apply any externally detected TSC adjustments (due to suspend) */
	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -3486,15 +3520,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

-	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	if (vcpu->arch.st.preempted)
+		return;

-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
-			&vcpu->arch.st.steal.preempted,
-			offsetof(struct kvm_steal_time, preempted),
-			sizeof(vcpu->arch.st.steal.preempted));
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+			&vcpu->arch.st.cache, true))
+		return;
+
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -6365,11 +6409,11 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
	return 1;
}

-static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
+static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
{
-	gpa_t gpa = cr2;
+	gpa_t gpa = cr2_or_gpa;
	kvm_pfn_t pfn;

	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
@@ -6383,7 +6427,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
		 * Write permission should be allowed since only
		 * write access need to be emulated.
		 */
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

		/*
		 * If the mapping is invalid in guest, let cpu retry
@@ -6440,10 +6484,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
-			      unsigned long cr2, int emulation_type)
+			      gpa_t cr2_or_gpa, int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+	unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;
@@ -6472,14 +6516,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
	if (x86_page_table_writing_insn(ctxt))
		return false;

-	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
-	vcpu->arch.last_retry_addr = cr2;
+	vcpu->arch.last_retry_addr = cr2_or_gpa;

	if (!vcpu->arch.mmu->direct_map)
-		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);

	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

@@ -6625,11 +6669,8 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
	return false;
}

-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
-			    unsigned long cr2,
-			    int emulation_type,
-			    void *insn,
-			    int insn_len)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+			    int emulation_type, void *insn, int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
@@ -6675,7 +6716,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}
-		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+		if (reexecute_instruction(vcpu, cr2_or_gpa,
+					  write_fault_to_spt,
					  emulation_type))
			return 1;
		if (ctxt->have_exception) {
@@ -6710,7 +6752,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
		return 1;
	}

-	if (retry_instruction(ctxt, cr2, emulation_type))
+	if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
		return 1;

	/* this is needed for vmware backdoor interface to work since it
@@ -6722,7 +6764,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,

restart:
	/* Save the faulting GPA (cr2) in the address field */
-	ctxt->exception.address = cr2;
+	ctxt->exception.address = cr2_or_gpa;

	r = x86_emulate_insn(ctxt);
|
||||
|
||||
@ -6730,7 +6772,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
|
||||
return 1;
|
||||
|
||||
if (r == EMULATION_FAILED) {
|
||||
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
|
||||
if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
|
||||
emulation_type))
|
||||
return 1;
|
||||
|
||||
@ -8172,8 +8214,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
|
||||
trace_kvm_entry(vcpu->vcpu_id);
|
||||
guest_enter_irqoff();
|
||||
|
||||
/* The preempt notifier should have taken care of the FPU already. */
|
||||
WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
|
||||
fpregs_assert_state_consistent();
|
||||
if (test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
switch_fpu_return();
|
||||
|
||||
if (unlikely(vcpu->arch.switch_db_regs)) {
|
||||
set_debugreg(0, 7);
|
||||
@ -8445,12 +8488,26 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void kvm_save_current_fpu(struct fpu *fpu)
|
||||
{
|
||||
/*
|
||||
* If the target FPU state is not resident in the CPU registers, just
|
||||
* memcpy() from current, else save CPU state directly to the target.
|
||||
*/
|
||||
if (test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
memcpy(&fpu->state, ¤t->thread.fpu.state,
|
||||
fpu_kernel_xstate_size);
|
||||
else
|
||||
copy_fpregs_to_fpstate(fpu);
|
||||
}
|
||||
|
||||
/* Swap (qemu) user FPU context for the guest FPU context. */
|
||||
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
fpregs_lock();
|
||||
|
||||
copy_fpregs_to_fpstate(vcpu->arch.user_fpu);
|
||||
kvm_save_current_fpu(vcpu->arch.user_fpu);
|
||||
|
||||
/* PKRU is separately restored in kvm_x86_ops->run. */
|
||||
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
|
||||
~XFEATURE_MASK_PKRU);
|
||||
@ -8466,7 +8523,8 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
fpregs_lock();
|
||||
|
||||
copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
|
||||
kvm_save_current_fpu(vcpu->arch.guest_fpu);
|
||||
|
||||
copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
|
||||
|
||||
fpregs_mark_activate();
|
||||
@ -8688,6 +8746,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mp_state *mp_state)
|
||||
{
|
||||
vcpu_load(vcpu);
|
||||
if (kvm_mpx_supported())
|
||||
kvm_load_guest_fpu(vcpu);
|
||||
|
||||
kvm_apic_accept_events(vcpu);
|
||||
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
|
||||
@ -8696,6 +8756,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
|
||||
else
|
||||
mp_state->mp_state = vcpu->arch.mp_state;
|
||||
|
||||
if (kvm_mpx_supported())
|
||||
kvm_put_guest_fpu(vcpu);
|
||||
vcpu_put(vcpu);
|
||||
return 0;
|
||||
}
|
||||
@ -9055,6 +9117,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
|
||||
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
|
||||
struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
|
||||
|
||||
kvm_release_pfn(cache->pfn, cache->dirty, cache);
|
||||
|
||||
kvmclock_reset(vcpu);
|
||||
|
||||
@ -9125,7 +9190,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
|
||||
kvm_mmu_unload(vcpu);
|
||||
vcpu_put(vcpu);
|
||||
|
||||
kvm_x86_ops->vcpu_free(vcpu);
|
||||
kvm_arch_vcpu_free(vcpu);
|
||||
}
|
||||
|
||||
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
||||
@ -9317,6 +9382,8 @@ int kvm_arch_hardware_setup(void)
|
||||
if (r != 0)
|
||||
return r;
|
||||
|
||||
cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
|
||||
|
||||
if (kvm_has_tsc_control) {
|
||||
/*
|
||||
* Make sure the user can only configure tsc_khz values that
|
||||
@ -9719,11 +9786,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||
|
||||
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* memslots->generation has been incremented.
|
||||
* mmio generation may have reached its maximum value.
|
||||
*/
|
||||
kvm_mmu_invalidate_mmio_sptes(kvm, gen);
|
||||
|
||||
/* Force re-initialization of steal_time cache */
|
||||
kvm_for_each_vcpu(i, vcpu, kvm)
|
||||
kvm_vcpu_kick(vcpu);
|
||||
}
|
||||
|
||||
int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
||||
@ -9975,7 +10049,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
|
||||
work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
|
||||
return;
|
||||
|
||||
vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
|
||||
vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
|
||||
}
|
||||
|
||||
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
|
||||
@ -10088,7 +10162,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
|
||||
{
|
||||
struct x86_exception fault;
|
||||
|
||||
trace_kvm_async_pf_not_present(work->arch.token, work->gva);
|
||||
trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
|
||||
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
|
||||
|
||||
if (kvm_can_deliver_async_pf(vcpu) &&
|
||||
@ -10123,7 +10197,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
|
||||
work->arch.token = ~0; /* broadcast wakeup */
|
||||
else
|
||||
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
|
||||
trace_kvm_async_pf_ready(work->arch.token, work->gva);
|
||||
trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
|
||||
|
||||
if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
|
||||
!apf_get_user(vcpu, &val)) {
|
||||
|
@ -286,7 +286,7 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
|
||||
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
|
||||
int page_num);
|
||||
bool kvm_vector_hashing_enabled(void);
|
||||
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
|
||||
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
|
||||
int emulation_type, void *insn, int insn_len);
|
||||
|
||||
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
|
||||
|
@ -1215,6 +1215,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
x86_platform.get_nmi_reason = xen_get_nmi_reason;
|
||||
|
||||
x86_init.resources.memory_setup = xen_memory_setup;
|
||||
x86_init.irqs.intr_mode_select = x86_init_noop;
|
||||
x86_init.irqs.intr_mode_init = x86_init_noop;
|
||||
x86_init.oem.arch_setup = xen_arch_setup;
|
||||
x86_init.oem.banner = xen_banner;
|
||||
|
@ -257,6 +257,7 @@ void crypto_alg_tested(const char *name, int err)
|
||||
struct crypto_alg *alg;
|
||||
struct crypto_alg *q;
|
||||
LIST_HEAD(list);
|
||||
bool best;
|
||||
|
||||
down_write(&crypto_alg_sem);
|
||||
list_for_each_entry(q, &crypto_alg_list, cra_list) {
|
||||
@ -280,6 +281,21 @@ void crypto_alg_tested(const char *name, int err)
|
||||
|
||||
alg->cra_flags |= CRYPTO_ALG_TESTED;
|
||||
|
||||
/* Only satisfy larval waiters if we are the best. */
|
||||
best = true;
|
||||
list_for_each_entry(q, &crypto_alg_list, cra_list) {
|
||||
if (crypto_is_moribund(q) || !crypto_is_larval(q))
|
||||
continue;
|
||||
|
||||
if (strcmp(alg->cra_name, q->cra_name))
|
||||
continue;
|
||||
|
||||
if (q->cra_priority > alg->cra_priority) {
|
||||
best = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry(q, &crypto_alg_list, cra_list) {
|
||||
if (q == alg)
|
||||
continue;
|
||||
@ -303,10 +319,12 @@ void crypto_alg_tested(const char *name, int err)
|
||||
continue;
|
||||
if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
|
||||
continue;
|
||||
if (!crypto_mod_get(alg))
|
||||
continue;
|
||||
|
||||
if (best && crypto_mod_get(alg))
|
||||
larval->adult = alg;
|
||||
else
|
||||
larval->adult = ERR_PTR(-EAGAIN);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -669,10 +687,8 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn);
|
||||
|
||||
void crypto_drop_spawn(struct crypto_spawn *spawn)
|
||||
{
|
||||
if (!spawn->alg)
|
||||
return;
|
||||
|
||||
down_write(&crypto_alg_sem);
|
||||
if (spawn->alg)
|
||||
list_del(&spawn->list);
|
||||
up_write(&crypto_alg_sem);
|
||||
}
|
||||
@ -681,22 +697,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn);
|
||||
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
struct crypto_alg *alg2;
|
||||
|
||||
down_read(&crypto_alg_sem);
|
||||
alg = spawn->alg;
|
||||
alg2 = alg;
|
||||
if (alg2)
|
||||
alg2 = crypto_mod_get(alg2);
|
||||
if (alg && !crypto_mod_get(alg)) {
|
||||
alg->cra_flags |= CRYPTO_ALG_DYING;
|
||||
alg = NULL;
|
||||
}
|
||||
up_read(&crypto_alg_sem);
|
||||
|
||||
if (!alg2) {
|
||||
if (alg)
|
||||
crypto_shoot_alg(alg);
|
||||
return ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
||||
return alg;
|
||||
return alg ?: ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
||||
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
|
||||
|
@ -97,7 +97,7 @@ static void crypto_larval_destroy(struct crypto_alg *alg)
|
||||
struct crypto_larval *larval = (void *)alg;
|
||||
|
||||
BUG_ON(!crypto_is_larval(alg));
|
||||
if (larval->adult)
|
||||
if (!IS_ERR_OR_NULL(larval->adult))
|
||||
crypto_mod_put(larval->adult);
|
||||
kfree(larval);
|
||||
}
|
||||
@ -178,6 +178,8 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
|
||||
alg = ERR_PTR(-ETIMEDOUT);
|
||||
else if (!alg)
|
||||
alg = ERR_PTR(-ENOENT);
|
||||
else if (IS_ERR(alg))
|
||||
;
|
||||
else if (crypto_is_test_larval(larval) &&
|
||||
!(alg->cra_flags & CRYPTO_ALG_TESTED))
|
||||
alg = ERR_PTR(-EAGAIN);
|
||||
@ -344,13 +346,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
|
||||
return len;
|
||||
}
|
||||
|
||||
void crypto_shoot_alg(struct crypto_alg *alg)
|
||||
static void crypto_shoot_alg(struct crypto_alg *alg)
|
||||
{
|
||||
down_write(&crypto_alg_sem);
|
||||
alg->cra_flags |= CRYPTO_ALG_DYING;
|
||||
up_write(&crypto_alg_sem);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
|
||||
|
||||
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
|
||||
u32 mask)
|
||||
|
@ -68,7 +68,6 @@ void crypto_alg_tested(const char *name, int err);
|
||||
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
|
||||
struct crypto_alg *nalg);
|
||||
void crypto_remove_final(struct list_head *list);
|
||||
void crypto_shoot_alg(struct crypto_alg *alg);
|
||||
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
|
||||
u32 mask);
|
||||
void *crypto_create_tfm(struct crypto_alg *alg,
|
||||
|
@ -24,6 +24,8 @@ static struct kset *pcrypt_kset;
|
||||
|
||||
struct pcrypt_instance_ctx {
|
||||
struct crypto_aead_spawn spawn;
|
||||
struct padata_shell *psenc;
|
||||
struct padata_shell *psdec;
|
||||
atomic_t tfm_count;
|
||||
};
|
||||
|
||||
@ -32,6 +34,12 @@ struct pcrypt_aead_ctx {
|
||||
unsigned int cb_cpu;
|
||||
};
|
||||
|
||||
static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
|
||||
struct crypto_aead *tfm)
|
||||
{
|
||||
return aead_instance_ctx(aead_alg_instance(tfm));
|
||||
}
|
||||
|
||||
static int pcrypt_aead_setkey(struct crypto_aead *parent,
|
||||
const u8 *key, unsigned int keylen)
|
||||
{
|
||||
@ -63,7 +71,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
|
||||
struct padata_priv *padata = pcrypt_request_padata(preq);
|
||||
|
||||
padata->info = err;
|
||||
req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
|
||||
|
||||
padata_do_serial(padata);
|
||||
}
|
||||
@ -90,6 +97,9 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
|
||||
u32 flags = aead_request_flags(req);
|
||||
struct pcrypt_instance_ctx *ictx;
|
||||
|
||||
ictx = pcrypt_tfm_ictx(aead);
|
||||
|
||||
memset(padata, 0, sizeof(struct padata_priv));
|
||||
|
||||
@ -103,7 +113,7 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
|
||||
req->cryptlen, req->iv);
|
||||
aead_request_set_ad(creq, req->assoclen);
|
||||
|
||||
err = padata_do_parallel(pencrypt, padata, &ctx->cb_cpu);
|
||||
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
|
||||
if (!err)
|
||||
return -EINPROGRESS;
|
||||
|
||||
@ -132,6 +142,9 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
|
||||
struct crypto_aead *aead = crypto_aead_reqtfm(req);
|
||||
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
|
||||
u32 flags = aead_request_flags(req);
|
||||
struct pcrypt_instance_ctx *ictx;
|
||||
|
||||
ictx = pcrypt_tfm_ictx(aead);
|
||||
|
||||
memset(padata, 0, sizeof(struct padata_priv));
|
||||
|
||||
@ -145,7 +158,7 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
|
||||
req->cryptlen, req->iv);
|
||||
aead_request_set_ad(creq, req->assoclen);
|
||||
|
||||
err = padata_do_parallel(pdecrypt, padata, &ctx->cb_cpu);
|
||||
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
|
||||
if (!err)
|
||||
return -EINPROGRESS;
|
||||
|
||||
@ -192,6 +205,8 @@ static void pcrypt_free(struct aead_instance *inst)
|
||||
struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
|
||||
|
||||
crypto_drop_aead(&ctx->spawn);
|
||||
padata_free_shell(ctx->psdec);
|
||||
padata_free_shell(ctx->psenc);
|
||||
kfree(inst);
|
||||
}
|
||||
|
||||
@ -233,12 +248,22 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
if (!inst)
|
||||
return -ENOMEM;
|
||||
|
||||
err = -ENOMEM;
|
||||
|
||||
ctx = aead_instance_ctx(inst);
|
||||
ctx->psenc = padata_alloc_shell(pencrypt);
|
||||
if (!ctx->psenc)
|
||||
goto out_free_inst;
|
||||
|
||||
ctx->psdec = padata_alloc_shell(pdecrypt);
|
||||
if (!ctx->psdec)
|
||||
goto out_free_psenc;
|
||||
|
||||
crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));
|
||||
|
||||
err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
|
||||
if (err)
|
||||
goto out_free_inst;
|
||||
goto out_free_psdec;
|
||||
|
||||
alg = crypto_spawn_aead_alg(&ctx->spawn);
|
||||
err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
|
||||
@ -271,6 +296,10 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
|
||||
|
||||
out_drop_aead:
|
||||
crypto_drop_aead(&ctx->spawn);
|
||||
out_free_psdec:
|
||||
padata_free_shell(ctx->psdec);
|
||||
out_free_psenc:
|
||||
padata_free_shell(ctx->psenc);
|
||||
out_free_inst:
|
||||
kfree(inst);
|
||||
goto out;
|
||||
|
@@ -38,6 +38,8 @@
#define PREFIX "ACPI: "

#define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
#define ACPI_BATTERY_CAPACITY_VALID(capacity) \
((capacity) != 0 && (capacity) != ACPI_BATTERY_VALUE_UNKNOWN)

#define ACPI_BATTERY_DEVICE_NAME "Battery"

@@ -192,7 +194,8 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)

static bool acpi_battery_is_degraded(struct acpi_battery *battery)
{
return battery->full_charge_capacity && battery->design_capacity &&
return ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity) &&
battery->full_charge_capacity < battery->design_capacity;
}

@@ -214,7 +217,7 @@ static int acpi_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int ret = 0;
int full_capacity = ACPI_BATTERY_VALUE_UNKNOWN, ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);

if (acpi_battery_present(battery)) {
@@ -263,14 +266,14 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
if (!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
ret = -ENODEV;
else
val->intval = battery->design_capacity * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL:
if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
ret = -ENODEV;
else
val->intval = battery->full_charge_capacity * 1000;
@@ -283,11 +286,17 @@ static int acpi_battery_get_property(struct power_supply *psy,
val->intval = battery->capacity_now * 1000;
break;
case POWER_SUPPLY_PROP_CAPACITY:
if (battery->capacity_now && battery->full_charge_capacity)
val->intval = battery->capacity_now * 100/
battery->full_charge_capacity;
if (ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity))
full_capacity = battery->full_charge_capacity;
else if (ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
full_capacity = battery->design_capacity;

if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN ||
full_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
else
val->intval = 0;
val->intval = battery->capacity_now * 100/
full_capacity;
break;
case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
if (battery->state & ACPI_BATTERY_STATE_CRITICAL)
@@ -333,6 +342,20 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};

static enum power_supply_property charge_battery_full_cap_broken_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};

static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -794,12 +817,25 @@ static void __exit battery_hook_exit(void)
static int sysfs_add_battery(struct acpi_battery *battery)
{
struct power_supply_config psy_cfg = { .drv_data = battery, };
bool full_cap_broken = false;

if (!ACPI_BATTERY_CAPACITY_VALID(battery->full_charge_capacity) &&
!ACPI_BATTERY_CAPACITY_VALID(battery->design_capacity))
full_cap_broken = true;

if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
if (full_cap_broken) {
battery->bat_desc.properties =
charge_battery_full_cap_broken_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_full_cap_broken_props);
} else {
battery->bat_desc.properties = charge_battery_props;
battery->bat_desc.num_properties =
ARRAY_SIZE(charge_battery_props);
} else if (battery->full_charge_capacity == 0) {
}
} else {
if (full_cap_broken) {
battery->bat_desc.properties =
energy_battery_full_cap_broken_props;
battery->bat_desc.num_properties =
@@ -809,6 +845,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
battery->bat_desc.num_properties =
ARRAY_SIZE(energy_battery_props);
}
}

battery->bat_desc.name = acpi_device_bid(battery->device);
battery->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -336,6 +336,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},

/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
*/
{
.callback = video_detect_force_none,
.ident = "Dell OptiPlex 9020M",
@@ -344,6 +349,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
},
},
{
.callback = video_detect_force_none,
.ident = "MSI MS-7721",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-7721"),
},
},
{ },
};

@@ -274,10 +274,38 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
dpm_wait(dev->parent, async);
struct device *parent;

/*
* If the device is resumed asynchronously and the parent's callback
* deletes both the device and the parent itself, the parent object may
* be freed while this function is running, so avoid that by reference
* counting the parent once more unless the device has been deleted
* already (in which case return right away).
*/
mutex_lock(&dpm_list_mtx);

if (!device_pm_initialized(dev)) {
mutex_unlock(&dpm_list_mtx);
return false;
}

parent = get_device(dev->parent);

mutex_unlock(&dpm_list_mtx);

dpm_wait(parent, async);
put_device(parent);

dpm_wait_for_suppliers(dev, async);

/*
* If the parent's callback has deleted the device, attempting to resume
* it would be invalid, so avoid doing that then.
*/
return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
@@ -622,7 +650,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_noirq_suspended)
goto Out;

dpm_wait_for_superior(dev, async);
if (!dpm_wait_for_superior(dev, async))
goto Out;

skip_resume = dev_pm_may_skip_resume(dev);

@@ -830,7 +859,8 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_late_suspended)
goto Out;

dpm_wait_for_superior(dev, async);
if (!dpm_wait_for_superior(dev, async))
goto Out;

callback = dpm_subsys_resume_early_cb(dev, state, &info);

@@ -945,7 +975,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}

dpm_wait_for_superior(dev, async);
if (!dpm_wait_for_superior(dev, async))
goto Complete;

dpm_watchdog_set(&wd, dev);
device_lock(dev);

@@ -2850,7 +2850,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
return err;
goto err_release_fw;
}

/* Wait a few moments for firmware activation done */
@@ -3819,6 +3819,10 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);

err = usb_autopm_get_interface(intf);
if (err < 0)
goto out_free_dev;
}

if (id->driver_info & BTUSB_AMP) {

@@ -785,7 +785,11 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
/*
* Critical for RAM re-repair operation, which must occur on resume
* from LP1 system suspend and as part of CCPLEX cluster switching.
*/
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),

@@ -217,7 +217,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
return ret;
}

static int cppc_verify_policy(struct cpufreq_policy *policy)
static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;

@@ -291,7 +291,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
* nforce2_verify - verifies a new CPUFreq policy
* @policy: new policy
*/
static int nforce2_verify(struct cpufreq_policy *policy)
static int nforce2_verify(struct cpufreq_policy_data *policy)
{
unsigned int fsb_pol_max;

@@ -75,6 +75,9 @@ static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);

/**
* Two notifier lists: the "policy" list is involved in the
@@ -621,25 +624,22 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}

static int cpufreq_parse_policy(char *str_governor,
struct cpufreq_policy *policy)
static unsigned int cpufreq_parse_policy(char *str_governor)
{
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
return 0;
}
if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
policy->policy = CPUFREQ_POLICY_POWERSAVE;
return 0;
}
return -EINVAL;
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
return CPUFREQ_POLICY_PERFORMANCE;

if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
return CPUFREQ_POLICY_POWERSAVE;

return CPUFREQ_POLICY_UNKNOWN;
}

/**
* cpufreq_parse_governor - parse a governor string only for has_target()
* @str_governor: Governor name.
*/
static int cpufreq_parse_governor(char *str_governor,
struct cpufreq_policy *policy)
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
struct cpufreq_governor *t;

@@ -653,7 +653,7 @@ static int cpufreq_parse_governor(char *str_governor,

ret = request_module("cpufreq_%s", str_governor);
if (ret)
return -EINVAL;
return NULL;

mutex_lock(&cpufreq_governor_mutex);

@@ -664,12 +664,7 @@ static int cpufreq_parse_governor(char *str_governor,

mutex_unlock(&cpufreq_governor_mutex);

if (t) {
policy->governor = t;
return 0;
}

return -EINVAL;
return t;
}

/**
@@ -770,29 +765,34 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
int ret;
char str_governor[16];
struct cpufreq_policy new_policy;

memcpy(&new_policy, policy, sizeof(*policy));
int ret;

ret = sscanf(buf, "%15s", str_governor);
if (ret != 1)
return -EINVAL;

if (cpufreq_driver->setpolicy) {
if (cpufreq_parse_policy(str_governor, &new_policy))
unsigned int new_pol;

new_pol = cpufreq_parse_policy(str_governor);
if (!new_pol)
return -EINVAL;

ret = cpufreq_set_policy(policy, NULL, new_pol);
} else {
if (cpufreq_parse_governor(str_governor, &new_policy))
struct cpufreq_governor *new_gov;

new_gov = cpufreq_parse_governor(str_governor);
if (!new_gov)
return -EINVAL;

ret = cpufreq_set_policy(policy, new_gov,
CPUFREQ_POLICY_UNKNOWN);

module_put(new_gov->owner);
}

ret = cpufreq_set_policy(policy, &new_policy);

if (new_policy.governor)
module_put(new_policy.governor->owner);

return ret ? ret : count;
}

@@ -1058,40 +1058,33 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
struct cpufreq_governor *gov = NULL, *def_gov = NULL;
struct cpufreq_policy new_policy;

memcpy(&new_policy, policy, sizeof(*policy));

def_gov = cpufreq_default_governor();
struct cpufreq_governor *def_gov = cpufreq_default_governor();
struct cpufreq_governor *gov = NULL;
unsigned int pol = CPUFREQ_POLICY_UNKNOWN;

if (has_target()) {
/*
* Update governor of new_policy to the governor used before
* hotplug
*/
/* Update policy governor to the one used before hotplug. */
gov = find_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
} else {
if (!def_gov)
return -ENODATA;
} else if (def_gov) {
gov = def_gov;
} else {
return -ENODATA;
}
new_policy.governor = gov;
} else {
/* Use the default policy if there is no last_policy. */
if (policy->last_policy) {
new_policy.policy = policy->last_policy;
pol = policy->last_policy;
} else if (def_gov) {
pol = cpufreq_parse_policy(def_gov->name);
} else {
if (!def_gov)
return -ENODATA;
cpufreq_parse_policy(def_gov->name, &new_policy);
}
}

return cpufreq_set_policy(policy, &new_policy);
return cpufreq_set_policy(policy, gov, pol);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
@@ -1119,13 +1112,10 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
struct cpufreq_policy new_policy;

if (!policy_is_inactive(policy)) {
new_policy = *policy;
pr_debug("updating policy for CPU %u\n", policy->cpu);

cpufreq_set_policy(policy, &new_policy);
cpufreq_set_policy(policy, policy->governor, policy->policy);
}
}
EXPORT_SYMBOL(refresh_frequency_limits);
@@ -2376,43 +2366,46 @@ EXPORT_SYMBOL(cpufreq_get_policy);
/**
* cpufreq_set_policy - Modify cpufreq policy parameters.
* @policy: Policy object to modify.
* @new_policy: New policy data.
* @new_gov: Policy governor pointer.
* @new_pol: Policy value (for drivers with built-in governors).
*
* Pass @new_policy to the cpufreq driver's ->verify() callback. Next, copy the
* min and max parameters of @new_policy to @policy and either invoke the
* driver's ->setpolicy() callback (if present) or carry out a governor update
* for @policy. That is, run the current governor's ->limits() callback (if the
* governor field in @new_policy points to the same object as the one in
* @policy) or replace the governor for @policy with the new one stored in
* @new_policy.
* Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
* limits to be set for the policy, update @policy with the verified limits
* values and either invoke the driver's ->setpolicy() callback (if present) or
* carry out a governor update for @policy. That is, run the current governor's
* ->limits() callback (if @new_gov points to the same object as the one in
* @policy) or replace the governor for @policy with @new_gov.
*
* The cpuinfo part of @policy is not updated by this function.
*/
int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol)
{
struct cpufreq_policy_data new_data;
struct cpufreq_governor *old_gov;
int ret;

pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
new_policy->cpu, new_policy->min, new_policy->max);

memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
new_data.freq_table = policy->freq_table;
new_data.cpu = policy->cpu;
/*
* PM QoS framework collects all the requests from users and provide us
* the final aggregated value here.
*/
new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
new_data.cpu, new_data.min, new_data.max);

/* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(new_policy);
ret = cpufreq_driver->verify(&new_data);
if (ret)
return ret;

policy->min = new_policy->min;
policy->max = new_policy->max;
policy->min = new_data.min;
policy->max = new_data.max;
trace_cpu_frequency_limits(policy);

arch_set_max_freq_scale(policy->cpus, policy->max);
@@ -2423,12 +2416,12 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min, policy->max);

if (cpufreq_driver->setpolicy) {
policy->policy = new_policy->policy;
policy->policy = new_pol;
pr_debug("setting range\n");
return cpufreq_driver->setpolicy(policy);
}

if (new_policy->governor == policy->governor) {
if (new_gov == policy->governor) {
pr_debug("governor limits update\n");
cpufreq_governor_limits(policy);
return 0;
@@ -2445,7 +2438,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
}

/* start new governor */
policy->governor = new_policy->governor;
policy->governor = new_gov;
ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);

@@ -60,7 +60,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}

int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table)
{
struct cpufreq_frequency_table *pos;
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
* Generic routine to verify policy & frequency table, requires driver to set
* policy->freq_table prior to it.
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
{
if (!policy->freq_table)
return -ENODEV;

@@ -328,7 +328,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
* for the hardware supported by the driver.
*/

static int cpufreq_gx_verify(struct cpufreq_policy *policy)
static int cpufreq_gx_verify(struct cpufreq_policy_data *policy)
{
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;

@@ -2036,8 +2036,9 @@ static int intel_pstate_get_max_freq(struct cpudata *cpu)
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}

static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
struct cpudata *cpu)
static void intel_pstate_update_perf_limits(struct cpudata *cpu,
unsigned int policy_min,
unsigned int policy_max)
{
int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
@@ -2056,18 +2057,17 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
turbo_max = cpu->pstate.turbo_pstate;
}

max_policy_perf = max_state * policy->max / max_freq;
if (policy->max == policy->min) {
max_policy_perf = max_state * policy_max / max_freq;
if (policy_max == policy_min) {
min_policy_perf = max_policy_perf;
} else {
min_policy_perf = max_state * policy->min / max_freq;
min_policy_perf = max_state * policy_min / max_freq;
min_policy_perf = clamp_t(int32_t, min_policy_perf,
0, max_policy_perf);
}

pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
policy->cpu, max_state,
min_policy_perf, max_policy_perf);
cpu->cpu, max_state, min_policy_perf, max_policy_perf);

/* Normalize user input to [min_perf, max_perf] */
if (per_cpu_limits) {
@@ -2081,7 +2081,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
global_min = clamp_t(int32_t, global_min, 0, global_max);

pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
global_min, global_max);

cpu->min_perf_ratio = max(min_policy_perf, global_min);
@@ -2094,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
cpu->max_perf_ratio);

}
pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
cpu->max_perf_ratio,
cpu->min_perf_ratio);
}
@@ -2114,7 +2114,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)

mutex_lock(&intel_pstate_limits_lock);

intel_pstate_update_perf_limits(policy, cpu);
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
@@ -2143,8 +2143,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
return 0;
}

static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
struct cpudata *cpu)
static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
struct cpufreq_policy_data *policy)
{
if (!hwp_active &&
cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
@@ -2155,7 +2155,7 @@ static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
}
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];

@@ -2163,11 +2163,7 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));

if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;

intel_pstate_adjust_policy_max(policy, cpu);
intel_pstate_adjust_policy_max(cpu, policy);

return 0;
}
@@ -2268,7 +2264,7 @@ static struct cpufreq_driver intel_pstate = {
.name = "intel_pstate",
};

static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];

@@ -2276,9 +2272,9 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
intel_pstate_get_max_freq(cpu));

intel_pstate_adjust_policy_max(policy, cpu);
intel_pstate_adjust_policy_max(cpu, policy);

intel_pstate_update_perf_limits(policy, cpu);
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

return 0;
}

@@ -122,7 +122,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
static int longrun_verify_policy(struct cpufreq_policy *policy)
static int longrun_verify_policy(struct cpufreq_policy_data *policy)
{
if (!policy)
return -EINVAL;
@@ -130,10 +130,6 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
policy->cpu = 0;
cpufreq_verify_within_cpu_limits(policy);

if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
return -EINVAL;

return 0;
}

@@ -109,7 +109,7 @@ struct pcc_cpu {

static struct pcc_cpu __percpu *pcc_cpu_info;

static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
static int pcc_cpufreq_verify(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_cpu_limits(policy);
return 0;

@@ -87,7 +87,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
}

static int sh_cpufreq_verify(struct cpufreq_policy *policy)
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
struct cpufreq_frequency_table *freq_table;

@@ -22,7 +22,7 @@ static struct cpufreq_driver ucv2_driver;
/* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway.
*/
static int ucv2_verify_speed(struct cpufreq_policy *policy)
static int ucv2_verify_speed(struct cpufreq_policy_data *policy)
{
if (policy->cpu)
return -EINVAL;

@@ -88,7 +88,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -1013,8 +1012,9 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
u32 ctr, blocks;
size_t datalen;
u32 ctr;
u16 blocks, start, end;
bool use_dma, fragmented = false;

/* Check for transfer completion. */
@@ -1026,27 +1026,17 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
datalen = req->nbytes - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
if (dd->caps.has_ctr32) {
/* Check 32bit counter overflow. */
u32 start = ctr;
u32 end = start + blocks - 1;

if (end < start) {
ctr |= 0xffffffff;
datalen = AES_BLOCK_SIZE * -start;
fragmented = true;
}
} else {
/* Check 16bit counter overflow. */
u16 start = ctr & 0xffff;
u16 end = start + (u16)blocks - 1;
start = ctr & 0xffff;
end = start + blocks - 1;

if (blocks >> 16 || end < start) {
ctr |= 0xffff;
datalen = AES_BLOCK_SIZE * (0x10000 - start);
fragmented = true;
}
}

use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

/* Jump to offset. */
@@ -2550,7 +2540,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2561,7 +2550,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2570,7 +2558,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;

@@ -586,6 +586,7 @@ const struct ccp_vdata ccpv3_platform = {
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
.rsamax = CCP_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv3 = {

@@ -237,7 +237,7 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
* revealed the decrypted message --> zero its memory.
*/
sg_zero_buffer(areq->dst, sg_nents(areq->dst),
areq->cryptlen, 0);
areq->cryptlen, areq->assoclen);
err = -EBADMSG;
}
/*ENCRYPT*/

@@ -523,6 +523,7 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
}
}


static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
@@ -534,8 +535,6 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;

@@ -570,6 +569,47 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
break;
default:
dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
}
}


static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
unsigned int key_len = ctx_p->keylen;
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;

struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
skcipher_alg.base);

if (cc_alg->data_unit)
du_size = cc_alg->data_unit;

switch (cipher_mode) {
case DRV_CIPHER_ECB:
break;
case DRV_CIPHER_CBC:
case DRV_CIPHER_CBC_CTS:
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
@@ -881,12 +921,14 @@ static int cc_cipher_process(struct skcipher_request *req,

/* STAT_PHASE_2: Create sequence */

/* Setup IV and XEX key used */
/* Setup state (IV) */
cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Setup MLLI line, if needed */
cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
/* Setup key */
cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Setup state (IV and XEX key) */
cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
/* Data processing */
cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
/* Read next IV */

@@ -161,6 +161,7 @@ struct cc_drvdata {
int std_bodies;
bool sec_disabled;
u32 comp_mask;
bool pm_on;
};

struct cc_crypto_alg {

@@ -22,14 +22,8 @@ const struct dev_pm_ops ccree_pm = {
int cc_pm_suspend(struct device *dev)
{
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
int rc;

dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
@@ -63,13 +57,6 @@ int cc_pm_resume(struct device *dev)
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);

rc = cc_resume_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
return rc;
}

/* must be after the queue resuming as it uses the HW queue*/
cc_init_hash_sram(drvdata);

return 0;
@@ -80,12 +67,10 @@ int cc_pm_get(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);

if (cc_req_queue_suspended(drvdata))
if (drvdata->pm_on)
rc = pm_runtime_get_sync(dev);
else
pm_runtime_get_noresume(dev);

return rc;
return (rc == 1 ? 0 : rc);
}

int cc_pm_put_suspend(struct device *dev)
@@ -93,14 +78,11 @@ int cc_pm_put_suspend(struct device *dev)
int rc = 0;
struct cc_drvdata *drvdata = dev_get_drvdata(dev);

if (!cc_req_queue_suspended(drvdata)) {
if (drvdata->pm_on) {
pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
} else {
/* Something wrong happens*/
dev_err(dev, "request to suspend already suspended queue");
rc = -EBUSY;
}

return rc;
}

@@ -117,7 +99,7 @@ int cc_pm_init(struct cc_drvdata *drvdata)
/* must be before the enabling to avoid resdundent suspending */
pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(dev);
/* activate the PM module */
/* set us as active - note we won't do PM ops until cc_pm_go()! */
return pm_runtime_set_active(dev);
}

@@ -125,9 +107,11 @@ int cc_pm_init(struct cc_drvdata *drvdata)
void cc_pm_go(struct cc_drvdata *drvdata)
{
pm_runtime_enable(drvdata_to_dev(drvdata));
drvdata->pm_on = true;
}

void cc_pm_fini(struct cc_drvdata *drvdata)
{
pm_runtime_disable(drvdata_to_dev(drvdata));
drvdata->pm_on = false;
}

@@ -41,7 +41,6 @@ struct cc_req_mgr_handle {
#else
struct tasklet_struct comptask;
#endif
bool is_runtime_suspended;
};

struct cc_bl_item {
@@ -404,6 +403,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
spin_lock(&mgr->bl_lock);
list_del(&bli->list);
--mgr->bl_len;
kfree(bli);
}

spin_unlock(&mgr->bl_lock);
@@ -677,52 +677,3 @@ static void comp_handler(unsigned long devarg)
cc_proc_backlog(drvdata);
dev_dbg(dev, "Comp. handler done.\n");
}

/*
* resume the queue configuration - no need to take the lock as this happens
* inside the spin lock protection
*/
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;

spin_lock_bh(&request_mgr_handle->hw_lock);
request_mgr_handle->is_runtime_suspended = false;
spin_unlock_bh(&request_mgr_handle->hw_lock);

return 0;
}

/*
* suspend the queue configuration. Since it is used for the runtime suspend
* only verify that the queue can be suspended.
*/
int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;

/* lock the send_request */
spin_lock_bh(&request_mgr_handle->hw_lock);
if (request_mgr_handle->req_queue_head !=
request_mgr_handle->req_queue_tail) {
spin_unlock_bh(&request_mgr_handle->hw_lock);
return -EBUSY;
}
request_mgr_handle->is_runtime_suspended = true;
spin_unlock_bh(&request_mgr_handle->hw_lock);

return 0;
}

bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;

return request_mgr_handle->is_runtime_suspended;
}

#endif

@@ -40,12 +40,4 @@ void complete_request(struct cc_drvdata *drvdata);

void cc_req_mgr_fini(struct cc_drvdata *drvdata);

#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata);

int cc_suspend_req_queue(struct cc_drvdata *drvdata);

bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
#endif

#endif /*__REQUEST_MGR_H__*/

@@ -35,6 +35,5 @@ config CRYPTO_DEV_HISI_ZIP
depends on ARM64 && PCI && PCI_MSI
select CRYPTO_DEV_HISI_QM
select CRYPTO_HISI_SGL
select SG_SPLIT
help
Support for HiSilicon ZIP Driver

@@ -12,6 +12,10 @@

/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB 0x02