Merge keystone/android14-6.1-keystone-qcom-release.6.1.23 (4ab5627) into msm-pineapple

* refs/heads/tmp-4ab5627:
  ANDROID: Update symbol list for mtk
  ANDROID: module: Add vendor hooks
  ANDROID: kernel: Add restricted vendor hook in creds
  ANDROID: enable CONFIG_USB_XHCI_PCI_RENESAS in gki_defconfig
  ANDROID: Add utf8_data_table for case-folding support
  UPSTREAM: usb: typec: altmodes/displayport: Add hpd sysfs attribute
  ANDROID: vendor_hooks: Add vendor hook for tcpm logs
  ANDROID: usb: typec: tcpm: Add vendor hook to modify port src caps
  ANDROID: usb: typec: tcpm: Add vendor hook to store partner source capabilities
  ANDROID: usb: typec: tcpm: vendor hook for timer adjustments
  ANDROID: usb: typec: tcpci: Add vendor hook to mask vbus present
  ANDROID: usb: typec: tcpci: Add vendor hooks for tcpci interface
  UPSTREAM: scsi: ufs: mcq: Use active_reqs to check busy in clock scaling
  FROMLIST: xfrm: Skip checking of already-verified secpath entries
  Revert "Fix XFRM-I support for nested ESP tunnels"
  FROMLIST: xfrm: Ensure policy checked for nested ESP tunnels
  ANDROID: Update the ABI symbol list for typec mux
  ANDROID: Update the ABI symbol list for typec port management
  ANDROID: ABI: Add __irq_set_handler and irq_set_handler_data in QCOM symbol list
  FROMGIT: soc: qcom: geni-se: Update Tx and Rx fifo depth based on QUP HW version
  FROMGIT: soc: qcom: geni-se: Move qcom-geni-se.h to linux/soc/qcom/geni-se.h
  ANDROID: CONFIG_PKVM_MODULE_PATH to /lib/modules/
  ANDROID: KVM: arm64: Add a fallback for pKVM module loading
  ANDROID: KVM: arm64: Add a custom module path for pKVM module loading
  ANDROID: update the pixel symbol list
  ANDROID: Add ufs symbol for mtk
  ANDROID: scsi: ufs: Add hook to influence the UFS clock scaling policy
  UPSTREAM: mm: multi-gen LRU: simplify lru_gen_look_around()
  UPSTREAM: mm: multi-gen LRU: improve walk_pmd_range()
  UPSTREAM: mm: multi-gen LRU: improve lru_gen_exit_memcg()
  UPSTREAM: mm: multi-gen LRU: section for memcg LRU
  UPSTREAM: mm: multi-gen LRU: section for Bloom filters
  UPSTREAM: mm: multi-gen LRU: section for rmap/PT walk feedback
  UPSTREAM: mm: multi-gen LRU: section for working set protection
  UPSTREAM: mm: support POSIX_FADV_NOREUSE
  UPSTREAM: mm: add vma_has_recency()
  ANDROID: 4/12/2023 KMI update
  ANDROID: ABI: remove stale symbol
  ANDROID: fuse: Support errors from fuse daemon in canonical path
  ANDROID: abi_gki_aarch64_qcom: Add memremap_pages and memunmap_pages
  ANDROID: Enable CONFIG_ZONE_DEVICE
  Revert "Revert "block/io_uring: pass in issue_flags for uring_cmd task_work handling""
  Revert "Revert "net: mdio: fix owner field for mdio buses registered using device-tree""
  FROMGIT: wifi: cfg80211/mac80211: report link ID on control port RX
  UPSTREAM: iommu: Rename iommu-sva-lib.{c,h}
  UPSTREAM: iommu: Per-domain I/O page fault handling
  UPSTREAM: iommu: Prepare IOMMU domain for IOPF
  UPSTREAM: iommu: Remove SVA related callbacks from iommu ops
  UPSTREAM: iommu/sva: Refactoring iommu_sva_bind/unbind_device()
  UPSTREAM: arm-smmu-v3/sva: Add SVA domain support
  UPSTREAM: iommu/vt-d: Add SVA domain support
  UPSTREAM: iommu: Add IOMMU SVA domain support
  UPSTREAM: iommu: Add attach/detach_dev_pasid iommu interfaces
  UPSTREAM: PCI: Enable PASID only when ACS RR & UF enabled on upstream path
  UPSTREAM: iommu: Remove SVM_FLAG_SUPERVISOR_MODE support
  UPSTREAM: iommu: Add max_pasids field in struct dev_iommu
  UPSTREAM: iommu: Add max_pasids field in struct iommu_device
  ANDROID: GKI: fscrypt: add ABI padding to struct fscrypt_operations
  ANDROID: abi_gki_aarch64_qcom: Add sock_gen_put
  ANDROID: arm64: Implement hypervisor workaround for SoCs with DMA beyond the PoC
  ANDROID: GKI: add symbol list file for xiaomi
  ANDROID: Add initial symbols list for imx
  ANDROID: Add initial symbol list for mtk
  ANDROID: virt: gunyah: Move arch_is_gh_guest under RM probe
  ANDROID: GKI: Enable CONFIG_USB_CONFIGFS_F_UAC2
  ANDROID: Update the pixel symbol list
  BACKPORT: FROMLIST: Revert "scsi: ufs: core: Initialize devfreq synchronously"
  ANDROID: abi_gki_aarch64_qcom: update abi
  ANDROID: abi_gki_aarch64_qcom: Further update symbol list
  ANDROID: GKI: Convert 80211 modules as unprotected
  ANDROID: ABI: Update QCOM symbol list
  Revert "FROMGIT: scsi: ufs: ufs-qcom: Add support for reinitializing the UFS device"
  Revert "FROMGIT: scsi: ufs: ufs-qcom: Add support for finding max gear on new platforms"
  Revert "block/io_uring: pass in issue_flags for uring_cmd task_work handling"
  ANDROID: abi_gki_aarch64_qcom: Add of_icc_get_from_provider
  FROMLIST: staging: greybus: drop loopback test files
  ANDROID: KVM: arm64: Prevent pKVM module loading after IOMMU init
  ANDROID: KVM: arm64: Factor out logic for setting SVE vector length at hyp
  ANDROID: KVM: arm64: Fix pKVM module loading close
  ANDROID: KVM: arm64: Handle permission issue while loading pKVM module
  Linux 6.1.23
  Revert "cpuidle, intel_idle: Fix CPUIDLE_FLAG_IRQ_ENABLE *again*"
  x86/PVH: avoid 32-bit build warning when obtaining VGA console info
  hsr: ratelimit only when errors are printed
  drm/amdkfd: Get prange->offset after svm_range_vram_node_new
  usb: ucsi: Fix ucsi->connector race
  libbpf: Fix btf_dump's packed struct determination
  selftests/bpf: Add few corner cases to test padding handling of btf_dump
  libbpf: Fix BTF-to-C converter's padding logic
  selftests/bpf: Test btf dump for struct with padding only fields
  net: dsa: mv88e6xxx: replace VTU violation prints with trace points
  net: dsa: mv88e6xxx: replace ATU violation prints with trace points
  net: dsa: mv88e6xxx: read FID when handling ATU violations
  KVM: arm64: Disable interrupts while walking userspace PTs
  KVM: arm64: PMU: Fix GET_ONE_REG for vPMC regs to return the current value
  drm/i915: Move CSC load back into .color_commit_arm() when PSR is enabled on skl/glk
  drm/i915: Disable DC states for all commits
  drm/i915/dpt: Treat the DPT BO as a framebuffer
  drm/i915/gem: Flush lmem contents after construction
  drm/amd/display: Take FEC Overhead into Timeslot Calculation
  drm/amd/display: Add DSC Support for Synaptics Cascaded MST Hub
  drm/amdgpu: allow more APUs to do mode2 reset when go to S4
  drm/etnaviv: fix reference leak when mmaping imported buffer
  s390: reintroduce expoline dependence to scripts
  s390/uaccess: add missing earlyclobber annotations to __clear_user()
  dt-bindings: mtd: jedec,spi-nor: Document CPOL/CPHA support
  rcu: Fix rcu_torture_read ftrace event
  xtensa: fix KASAN report for show_stack
  ALSA: hda/realtek: Add quirk for Lenovo ZhaoYang CF4620Z
  ALSA: hda/realtek: Add quirks for some Clevo laptops
  ALSA: usb-audio: Fix regression on detection of Roland VS-100
  ALSA: hda/conexant: Partial revert of a quirk for Lenovo
  NFSv4: Fix hangs when recovering open state after a server reboot
  powerpc/64s: Fix __pte_needs_flush() false positive warning
  powerpc/pseries/vas: Ignore VAS update for DLPAR if copy/paste is not enabled
  powerpc: Don't try to copy PPR for task with NULL pt_regs
  platform/x86: ideapad-laptop: Stop sending KEY_TOUCHPAD_TOGGLE
  pinctrl: at91-pio4: fix domain name assignment
  pinctrl: amd: Disable and mask interrupts on resume
  modpost: Fix processing of CRCs on 32-bit build machines
  net: phy: dp83869: fix default value for tx-/rx-internal-delay
  xen/netback: don't do grant copy across page boundary
  can: j1939: prevent deadlock by moving j1939_sk_errqueue()
  dm: fix __send_duplicate_bios() to always allow for splitting IO
  zonefs: Always invalidate last cached page on append write
  vmxnet3: use gro callback when UPT is enabled
  io_uring: fix poll/netmsg alloc caches
  io_uring/rsrc: fix rogue rsrc node grabbing
  io_uring/poll: clear single/double poll flags on poll arming
  block/io_uring: pass in issue_flags for uring_cmd task_work handling
  zonefs: Do not propagate iomap_dio_rw() ENOTBLK error to user space
  btrfs: scan device in non-exclusive mode
  btrfs: fix race between quota disable and quota assign ioctls
  btrfs: fix deadlock when aborting transaction during relocation with scrub
  Input: goodix - add Lenovo Yoga Book X90F to nine_bytes_report DMI table
  Input: i8042 - add quirk for Fujitsu Lifebook A574/H
  cifs: fix DFS traversal oops without CONFIG_CIFS_DFS_UPCALL
  cifs: prevent infinite recursion in CIFSGetDFSRefer()
  Input: focaltech - use explicitly signed char type
  Input: alps - fix compatibility with -funsigned-char
  Input: i8042 - add TUXEDO devices to i8042 quirk tables for partial fix
  iommu/vt-d: Allow zero SAGAW if second-stage not supported
  Input: xpad - fix incorrectly applied patch for MAP_PROFILE_BUTTON
  pinctrl: ocelot: Fix alt mode for ocelot
  net: ethernet: mtk_eth_soc: add missing ppe cache flush when deleting a flow
  net: ethernet: mtk_eth_soc: fix flow block refcounting logic
  net: dsa: mv88e6xxx: Enable IGMP snooping on user ports only
  bnxt_en: Add missing 200G link speed reporting
  bnxt_en: Fix typo in PCI id to device description string mapping
  bnxt_en: Fix reporting of test result in ethtool selftest
  i40e: fix registers dump after run ethtool adapter self test
  net: ipa: compute DMA pool size properly
  ALSA: ymfpci: Fix BUG_ON in probe function
  ALSA: ymfpci: Create card with device-managed snd_devm_card_new()
  ice: fix invalid check for empty list in ice_sched_assoc_vsi_to_agg()
  ice: add profile conflict check for AVF FDIR
  ice: Fix ice_cfg_rdma_fltr() to only update relevant fields
  smsc911x: avoid PHY being resumed when interface is not up
  net: mvpp2: parser fix PPPoE
  net: mvpp2: parser fix QinQ
  net: mvpp2: classifier flow fix fragmentation flags
  loop: LOOP_CONFIGURE: send uevents for partitions
  ACPI: bus: Rework system-level device notification handling
  s390/vfio-ap: fix memory leak in vfio_ap device driver
  can: bcm: bcm_tx_setup(): fix KMSAN uninit-value in vfs_write
  platform/x86/intel/pmc: Alder Lake PCH slp_s0_residency fix
  drm/i915/tc: Fix the ICL PHY ownership check in TC-cold state
  net: stmmac: don't reject VLANs when IFF_PROMISC is set
  net/net_failover: fix txq exceeding warning
  regulator: Handle deferred clk
  r8169: fix RTL8168H and RTL8107E rx crc error
  net: dsa: microchip: ksz8: fix MDB configuration with non-zero VID
  net: dsa: microchip: ksz8863_smi: fix bulk access
  net: dsa: microchip: ksz8: ksz8_fdb_dump: avoid extracting ghost entry from empty dynamic MAC table.
  net: dsa: microchip: ksz8: fix offset for the timestamp filed
  net: dsa: microchip: ksz8: fix ksz8_fdb_dump() to extract all 1024 entries
  net: dsa: microchip: ksz8: fix ksz8_fdb_dump()
  ptp_qoriq: fix memory leak in probe()
  net: dsa: realtek: fix out-of-bounds access
  scsi: mpt3sas: Don't print sense pool info twice
  scsi: megaraid_sas: Fix crash after a double completion
  sfc: ef10: don't overwrite offload features at NIC reset
  SUNRPC: fix shutdown of NFS TCP client socket
  mtd: rawnand: meson: invalidate cache on polling ECC bit
  platform/surface: aggregator: Add missing fwnode_handle_put()
  platform/x86: think-lmi: Add possible_values for ThinkStation
  platform/x86: think-lmi: only display possible_values if available
  platform/x86: think-lmi: use correct possible_values delimiters
  platform/x86: think-lmi: add missing type attribute
  PCI: dwc: Fix PORT_LINK_CONTROL update when CDM check enabled
  ALSA: usb-audio: Fix recursive locking at XRUN during syncing
  mips: bmips: BCM6358: disable RAC flush for TP1
  riscv/kvm: Fix VM hang in case of timer delta being zero.
  ca8210: Fix unsigned mac_len comparison with zero in ca8210_skb_tx()
  mtd: nand: mxic-ecc: Fix mxic_ecc_data_xfer_wait_for_completion() when irq is used
  mtd: rawnand: meson: initialize struct with zeroes
  btrfs: use temporary variable for space_info in btrfs_update_block_group
  btrfs: fix uninitialized variable warning in btrfs_update_block_group
  tracing: Fix wrong return in kprobe_event_gen_test.c
  tools/power turbostat: fix decoding of HWP_STATUS
  tools/power turbostat: Fix /dev/cpu_dma_latency warnings
  fbdev: au1200fb: Fix potential divide by zero
  fbdev: lxfb: Fix potential divide by zero
  fbdev: intelfb: Fix potential divide by zero
  fbdev: nvidia: Fix potential divide by zero
  net/mlx5e: Lower maximum allowed MTU in XSK to match XDP prerequisites
  drm/amdkfd: Fixed kfd_process cleanup on module exit.
  nvme-pci: add NVME_QUIRK_BOGUS_NID for Lexar NM620
  sched_getaffinity: don't assume 'cpumask_size()' is fully initialized
  ACPI: tools: pfrut: Check if the input of level and type is in the right numeric range
  fbdev: tgafb: Fix potential divide by zero
  ALSA: hda/ca0132: fixup buffer overrun at tuning_ctl_set()
  ALSA: asihpi: check pao in control_message()
  net: hsr: Don't log netdev_err message on unknown prp dst node
  drm/amdkfd: fix potential kgd_mem UAFs
  drm/amdkfd: fix a potential double free in pqm_create_queue
  drm/amdkfd: Fix BO offset for multi-VMA page migration
  x86/PVH: obtain VGA console info in Dom0
  md: avoid signed overflow in slot_store()
  ASoC: SOF: IPC4: update gain ipc msg definition to align with fw
  ASoC: SOF: Intel: pci-tng: revert invalid bar size setting
  ASoC: SOF: ipc4-topology: Fix incorrect sample rate print unit
  ASoC: SOF: ipc3: Check for upper size limit for the received message
  ACPI: video: Add backlight=native DMI quirk for Dell Vostro 15 3535
  zstd: Fix definition of assert()
  ASoC: Intel: avs: nau8825: Adjust clock control
  ASoC: Intel: avs: ssm4567: Remove nau8825 bits
  ASoC: Intel: avs: da7219: Explicitly define codec format
  ASoC: Intel: avs: max98357a: Explicitly define codec format
  ASoC: codecs: tx-macro: Fix for KASAN: slab-out-of-bounds
  xfrm: Zero padding when dumping algos and encap
  cifs: fix missing unload_nls() in smb2_reconnect()
  arm64: efi: Set NX compat flag in PE/COFF header
  net: mscc: ocelot: fix stats region batching
  tracing: Do not let histogram values have some modifiers
  tracing: Add .graph suffix option to histogram value
  tracing: Add .percent suffix option to histogram values
  tty: serial: fsl_lpuart: fix race on RX DMA shutdown
  tty: serial: fsl_lpuart: switch to new dmaengine_terminate_* API
  drm/msm/disp/dpu: fix sc7280_pp base offset
  drm/msm/dpu: correct sm8250 and sm8350 scaler
  drm/msm/dpu: Refactor sc7280_pp location
  ARM: dts: aspeed: p10bmc: Update battery node name
  riscv: ftrace: Fixup panic by disabling preemption
  net: ethernet: ti: am65-cpsw/cpts: Fix CPTS release action
  btrfs: zoned: count fresh BG region as zone unusable
  btrfs: rename BTRFS_FS_NO_OVERCOMMIT to BTRFS_FS_ACTIVE_ZONE_TRACKING
  kcsan: avoid passing -g for test
  kernel: kcsan: kcsan_test: build without structleak plugin
  fsverity: don't drop pagecache at end of FS_IOC_ENABLE_VERITY
  zonefs: Fix error message in zonefs_file_dio_append()
  zonefs: Separate zone information from inode information
  zonefs: Reduce struct zonefs_inode_info size
  zonefs: Simplify IO error handling
  zonefs: Reorganize code
  cifs: avoid race conditions with parallel reconnects
  cifs: prevent data race in cifs_reconnect_tcon()
  cifs: update ip_addr for ses only for primary chan setup
  thunderbolt: Limit USB3 bandwidth of certain Intel USB4 host routers
  ANDROID: usb: f_accessory: Check buffer size when initialised via composite
  ANDROID: MGLRU: Avoid reactivation of anon pages on swap full
  FROMGIT: f2fs: fix null pointer panic in tracepoint in __replace_atomic_write_block
  ANDROID: incremental fs: Evict inodes before freeing mount data
  ANDROID: fsnotify: Notify lower fs of open
  ANDROID: fuse-bpf: Run bpf with migration disabled
  ANDROID: fuse-bpf: Do not change bpf program in lookups
  FROMGIT: ASoC: codecs: lpass: fix the order or clks turn off during suspend
  ANDROID: GKI: Add a filegroup instead of _aarch64_additional_kmi symbol list
  UPSTREAM: wifi: nl80211: fix puncturing bitmap policy

 Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
	drivers/ufs/host/ufs-qcom.c

Change-Id: I7004221a9c748e28c3860cb57e3da9049a25481a
Signed-off-by: jianzhou <quic_jianzhou@quicinc.com>
commit 2f6c3deee2 (jianzhou, 2023-04-25 23:35:23 -07:00)
308 changed files with 18422 additions and 9087 deletions

@@ -12,22 +12,29 @@ package(
    ],
)

_aarch64_additional_kmi_symbol_lists = [
    # keep sorted
    "android/abi_gki_aarch64_db845c",
    "android/abi_gki_aarch64_galaxy",
    "android/abi_gki_aarch64_oplus",
    "android/abi_gki_aarch64_pixel",
    "android/abi_gki_aarch64_qcom",
    "android/abi_gki_aarch64_virtual_device",
]

filegroup(
    name = "aarch64_additional_kmi_symbol_lists",
    srcs = [
        # keep sorted
        "android/abi_gki_aarch64_db845c",
        "android/abi_gki_aarch64_galaxy",
        "android/abi_gki_aarch64_imx",
        "android/abi_gki_aarch64_mtk",
        "android/abi_gki_aarch64_oplus",
        "android/abi_gki_aarch64_pixel",
        "android/abi_gki_aarch64_qcom",
        "android/abi_gki_aarch64_virtual_device",
        "android/abi_gki_aarch64_xiaomi",
    ],
    visibility = ["//visibility:public"],
)

define_common_kernels(target_configs = {
    "kernel_aarch64": {
        "kmi_symbol_list_strict_mode": True,
        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
        "kmi_symbol_list": "android/abi_gki_aarch64",
        "additional_kmi_symbol_lists": _aarch64_additional_kmi_symbol_lists,
        "additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
        "protected_exports_list": "android/abi_gki_protected_exports_aarch64",
        "protected_modules_list": "android/gki_aarch64_protected_modules",
    },
@@ -39,7 +46,7 @@ define_common_kernels(target_configs = {
        "kmi_symbol_list_strict_mode": False,
        "module_implicit_outs": COMMON_GKI_MODULES_LIST,
        "kmi_symbol_list": "android/abi_gki_aarch64",
        "additional_kmi_symbol_lists": _aarch64_additional_kmi_symbol_lists,
        "additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
        "protected_exports_list": "android/abi_gki_protected_exports_aarch64",
        "protected_modules_list": "android/gki_aarch64_protected_modules",
    },

@@ -47,3 +47,18 @@ Description:
		USB SuperSpeed protocol. From user perspective pin assignments C
		and E are equal, where all channels on the connector are used
		for carrying DisplayPort protocol (allowing higher resolutions).

What:		/sys/bus/typec/devices/.../displayport/hpd
Date:		Dec 2022
Contact:	Badhri Jagan Sridharan <badhri@google.com>
Description:
		VESA DisplayPort Alt Mode on USB Type-C Standard defines how
		HotPlugDetect (HPD) shall be supported on the USB-C connector
		when operating in DisplayPort Alt Mode. This is a read-only
		node which reflects the current state of HPD.

		Valid values:
		- 1: when HPD's logical state is high (HPD_High), as defined
		     by the VESA DisplayPort Alt Mode on USB Type-C Standard
		- 0: when HPD's logical state is low (HPD_Low), as defined
		     by the VESA DisplayPort Alt Mode on USB Type-C Standard
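
For illustration, the attribute can be read like any other sysfs node; a
minimal user-space sketch follows, assuming a hypothetical device path (the
real ".../" component depends on the port and partner instance):

/* Read the DisplayPort HPD state of a Type-C partner (illustrative). */
#include <stdio.h>

int main(void)
{
	/* placeholder path; the actual device name is platform-specific */
	const char *path =
		"/sys/bus/typec/devices/port0-partner/displayport/hpd";
	FILE *f = fopen(path, "r");
	int c;

	if (!f) {
		perror("fopen");
		return 1;
	}
	c = fgetc(f);
	fclose(f);
	printf("HPD is %s\n", c == '1' ? "high (HPD_High)" : "low (HPD_Low)");
	return 0;
}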

@@ -2544,6 +2544,19 @@
			for all guests.
			Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.

	kvm-arm.force_nc
			[KVM,ARM,ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC]
			Enable hypercalls to remap host pages as normal
			non-cacheable at stage-2 and issue these hypercalls
			when installing non-cacheable ptes at stage-1. This
			is useful to work around coherency issues on systems
			with DMA peripherals integrated beyond the Point of
			Coherency (PoC).
			This option only applies when booting with
			kvm-arm.mode=protected.

	kvm-arm.mode=
			[KVM,ARM] Select one of KVM/arm64's modes of operation.
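
As a usage illustration (not part of this patch, and the exact spelling of
the switch may vary), an affected device would combine the two parameters on
its kernel command line, e.g.:

	kvm-arm.mode=protected kvm-arm.force_nc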

@@ -141,9 +141,85 @@ loop has detected outlying refaults from the tier this page is in. To
this end, the feedback loop uses the first tier as the baseline, for
the reason stated earlier.

Working set protection
----------------------

Each generation is timestamped at birth. If ``lru_gen_min_ttl`` is
set, an ``lruvec`` is protected from eviction when its oldest
generation was born within the last ``lru_gen_min_ttl`` milliseconds.
In other words, it prevents the working set of the last
``lru_gen_min_ttl`` milliseconds from getting evicted. The OOM killer
is triggered if this working set cannot be kept in memory.

This time-based approach has the following advantages:

1. It is easier to configure because it is agnostic to applications
   and memory sizes.
2. It is more reliable because it is directly wired to the OOM killer.
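
The knob is exposed as ``/sys/kernel/mm/lru_gen/min_ttl_ms``. As an
illustration only (this is not the kernel's exact code), the gate amounts to
a timestamp comparison:

/* Sketch of the min_ttl gate: an lruvec whose oldest generation was born
 * less than lru_gen_min_ttl milliseconds ago is skipped by eviction. */
#include <linux/jiffies.h>

static bool lruvec_is_protected(unsigned long oldest_gen_birth_jiffies,
				unsigned long lru_gen_min_ttl_ms)
{
	return lru_gen_min_ttl_ms &&
	       time_before(jiffies, oldest_gen_birth_jiffies +
				    msecs_to_jiffies(lru_gen_min_ttl_ms));
}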
Rmap/PT walk feedback
---------------------

Searching the rmap for PTEs mapping each page on an LRU list (to test
and clear the accessed bit) can be expensive because pages from
different VMAs (PA space) are not cache friendly to the rmap (VA
space). For workloads mostly using mapped pages, searching the rmap
can incur the highest CPU cost in the reclaim path.

``lru_gen_look_around()`` exploits spatial locality to reduce the
trips into the rmap. It scans the adjacent PTEs of a young PTE and
promotes hot pages. If the scan was done cacheline efficiently, it
adds the PMD entry pointing to the PTE table to the Bloom filter. This
forms a feedback loop between the eviction and the aging.
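
A rough sketch of the idea (names and structure are illustrative, not the
actual ``lru_gen_look_around()``):

#define NR_PTES 512	/* PTEs per table with 4K pages on arm64/x86-64 */

/* Having paid for one rmap trip to reach a young PTE, scan its neighbours
 * in the same PTE table so that a single trip can age many pages. */
static int look_around(unsigned char accessed[NR_PTES], int young_idx)
{
	int i, hot = 0;

	for (i = 0; i < NR_PTES; i++) {
		if (i == young_idx || !accessed[i])
			continue;
		accessed[i] = 0;	/* test and clear the accessed bit */
		hot++;			/* promote the page to the youngest generation */
	}

	/* A table with many hot pages makes its PMD a Bloom filter candidate. */
	return hot;
}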
Bloom filters
-------------

Bloom filters are a space and memory efficient data structure for set
membership test, i.e., test if an element is not in the set or may be
in the set.

In the eviction path, specifically, in ``lru_gen_look_around()``, if a
PMD has a sufficient number of hot pages, its address is placed in the
filter. In the aging path, set membership means that the PTE range
will be scanned for young pages.

Note that Bloom filters are probabilistic on set membership. If a test
is false positive, the cost is an additional scan of a range of PTEs,
which may yield hot pages anyway. Parameters of the filter itself can
control the false positive rate in the limit.
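
A toy two-hash Bloom filter showing the contract (a clear bit proves
absence, set bits only suggest membership); this is a sketch, not the
kernel's implementation:

#include <stdbool.h>
#include <stdint.h>

#define BLOOM_BITS 32768u	/* number of bits; a power of two here */

static uint8_t bloom[BLOOM_BITS / 8];

static uint32_t bloom_hash(uint64_t key, uint64_t seed)
{
	key = (key ^ seed) * 0x9e3779b97f4a7c15ull;	/* cheap mixer */
	return (uint32_t)(key >> 32) & (BLOOM_BITS - 1);
}

static void bloom_add(uint64_t pmd_addr)
{
	uint32_t h1 = bloom_hash(pmd_addr, 1), h2 = bloom_hash(pmd_addr, 2);

	bloom[h1 / 8] |= 1u << (h1 % 8);
	bloom[h2 / 8] |= 1u << (h2 % 8);
}

static bool bloom_may_contain(uint64_t pmd_addr)
{
	uint32_t h1 = bloom_hash(pmd_addr, 1), h2 = bloom_hash(pmd_addr, 2);

	return (bloom[h1 / 8] & (1u << (h1 % 8))) &&
	       (bloom[h2 / 8] & (1u << (h2 % 8)));
}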
Memcg LRU
---------

A memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
since each node and memcg combination has an LRU of folios (see
``mem_cgroup_lruvec()``). Its goal is to improve the scalability of
global reclaim, which is critical to system-wide memory overcommit in
data centers. Note that the memcg LRU only applies to global reclaim.

The basic structure of a memcg LRU can be understood by an analogy to
the active/inactive LRU (of folios):

1. It has the young and the old (generations), i.e., the counterparts
   to the active and the inactive;
2. The increment of ``max_seq`` triggers promotion, i.e., the
   counterpart to activation;
3. Other events trigger similar operations, e.g., offlining a memcg
   triggers demotion, i.e., the counterpart to deactivation.

In terms of global reclaim, it has two distinct features:

1. Sharding, which allows each thread to start at a random memcg (in
   the old generation) and improves parallelism;
2. Eventual fairness, which allows direct reclaim to bail out at will
   and reduces latency without affecting fairness over some time.

In terms of traversing memcgs during global reclaim, it improves the
best-case complexity from O(n) to O(1) and does not affect the
worst-case complexity O(n). Therefore, on average, it has a sublinear
complexity.
Summary
-------

The multi-gen LRU can be disassembled into the following parts:
The multi-gen LRU (of folios) can be disassembled into the following
parts:

* Generations
* Rmap walks

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 22
SUBLEVEL = 23
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

@@ -1 +1 @@
2420a46a3268b1c42f462da5f18642e7166dc062
9464c5644c653b3172e359597ee9a9972e7577ee

@@ -2,3 +2,4 @@
# commonly used symbols
module_layout
__put_task_struct
utf8_data_table

File diff suppressed because it is too large.

android/abi_gki_aarch64_imx (new file, 944 lines)

@@ -0,0 +1,944 @@
[abi_symbol_list]
add_timer
add_uevent_var
add_wait_queue
alloc_candev_mqs
alloc_can_err_skb
alloc_canfd_skb
alloc_can_skb
alloc_chrdev_region
alloc_etherdev_mqs
alloc_io_pgtable_ops
alloc_netdev_mqs
backlight_device_register
backlight_device_unregister
bcmp
bus_unregister
bus_unregister_notifier
cache_line_size
can_bus_off
cancel_delayed_work
cancel_delayed_work_sync
cancel_work_sync
can_change_mtu
can_change_state
can_dropped_invalid_skb
can_fd_dlc2len
can_fd_len2dlc
can_put_echo_skb
can_rx_offload_add_fifo
can_rx_offload_add_timestamp
can_rx_offload_del
can_rx_offload_enable
can_rx_offload_get_echo_skb
can_rx_offload_irq_finish
can_rx_offload_irq_offload_fifo
can_rx_offload_irq_offload_timestamp
can_rx_offload_queue_timestamp
capable
cdev_add
cdev_del
cfg80211_ch_switch_notify
cfg80211_classify8021d
cfg80211_connect_done
cfg80211_cqm_rssi_notify
cfg80211_del_sta_sinfo
cfg80211_disconnected
cfg80211_find_elem_match
cfg80211_ft_event
cfg80211_get_bss
cfg80211_inform_bss_data
cfg80211_mgmt_tx_status_ext
cfg80211_michael_mic_failure
cfg80211_new_sta
cfg80211_pmksa_candidate_notify
cfg80211_put_bss
__cfg80211_radar_event
cfg80211_ready_on_channel
cfg80211_register_netdevice
cfg80211_remain_on_channel_expired
cfg80211_report_wowlan_wakeup
cfg80211_roamed
clk_get
__clk_get_hw
__clk_get_name
clk_get_parent
clk_get_rate
clk_hw_get_name
clk_hw_get_parent
clk_hw_get_parent_by_index
clk_hw_get_rate
clk_hw_is_enabled
clk_hw_register
clk_hw_unregister
clk_is_match
clk_notifier_register
_dev_crit
dev_driver_string
_dev_err
dev_err_probe
dev_fwnode
dev_get_regmap
device_add
device_create
device_create_file
device_del
device_destroy
device_for_each_child
device_get_child_node_count
device_get_dma_attr
device_get_match_data
device_get_next_child_node
device_initialize
device_link_add
device_link_del
device_link_remove
device_match_fwnode
device_match_of_node
device_property_match_string
device_property_present
device_property_read_string
device_property_read_u16_array
device_property_read_u32_array
device_register
device_remove_file
device_set_wakeup_capable
device_set_wakeup_enable
device_unregister
device_wakeup_disable
device_wakeup_enable
_dev_info
__dev_kfree_skb_any
devm_add_action
devm_backlight_device_register
devm_blk_crypto_profile_init
devm_clk_bulk_get
devm_clk_bulk_get_all
devm_clk_bulk_get_optional
devm_clk_get
devm_clk_get_optional
devm_clk_put
devm_clk_register
devm_device_add_group
__devm_drm_dev_alloc
devm_drm_of_get_bridge
devm_drm_panel_bridge_add_typed
devm_free_irq
devm_fwnode_pwm_get
devm_get_clk_from_child
devm_gpiochip_add_data_with_key
devm_gpiod_get
devm_gpiod_get_array
devm_gpiod_get_index
devm_gpiod_get_optional
devm_gpio_request
devm_gpio_request_one
devm_i2c_new_dummy_device
devm_iio_device_alloc
__devm_iio_device_register
devm_input_allocate_device
devm_ioremap
devm_ioremap_resource
devm_ioremap_wc
devm_iounmap
__devm_irq_alloc_descs
devm_kasprintf
devm_kfree
devm_kmalloc
devm_kmemdup
devm_krealloc
devm_kstrdup
devm_led_classdev_register_ext
devm_mbox_controller_register
devm_mfd_add_devices
devm_mipi_dsi_attach
drm_bridge_attach
drm_bridge_chain_enable
drm_bridge_connector_enable_hpd
drm_bridge_connector_init
drm_bridge_hpd_notify
drm_bridge_remove
drm_compat_ioctl
drm_crtc_accurate_vblank_count
drm_crtc_add_crc_entry
drm_crtc_arm_vblank_event
drm_crtc_cleanup
__drm_crtc_commit_free
drm_crtc_commit_wait
drm_crtc_handle_vblank
drm_crtc_init_with_planes
drm_crtc_send_vblank_event
drm_crtc_vblank_get
drm_crtc_vblank_helper_get_vblank_timestamp
drm_crtc_vblank_off
drm_crtc_vblank_on
drm_crtc_vblank_put
drm_crtc_wait_one_vblank
___drm_dbg
__drm_debug
drm_detect_hdmi_monitor
drm_detect_monitor_audio
drm_dev_alloc
__drm_dev_dbg
drm_dev_printk
drm_dev_put
drm_dev_register
drm_dev_unregister
drm_do_get_edid
drm_edid_duplicate
drm_edid_get_monitor_name
drm_encoder_cleanup
drm_encoder_init
__drm_err
drm_firmware_drivers_only
drm_format_info_block_height
drm_format_info_block_width
drm_gem_create_mmap_offset
drm_gem_fb_create
drm_gem_fb_get_obj
drm_gem_handle_create
drm_gem_mmap
drm_gem_object_free
drm_gem_object_init
drm_gem_object_lookup
drm_gem_object_release
drm_gem_plane_helper_prepare_fb
drm_gem_prime_fd_to_handle
drm_gem_prime_handle_to_fd
drm_gem_prime_import
drm_gem_prime_mmap
drm_gem_private_object_init
drm_gem_vm_close
drm_gem_vm_open
drm_get_connector_status_name
drm_get_edid
drm_get_format_info
drm_helper_hpd_irq_event
drm_helper_probe_single_connector_modes
drm_ioctl
drm_kms_helper_hotplug_event
drm_kms_helper_poll_fini
drm_kms_helper_poll_init
drmm_mode_config_init
drm_mode_config_cleanup
drm_mode_config_helper_resume
drm_mode_config_helper_suspend
drm_mode_config_reset
drm_mode_copy
drm_mode_create
drm_mode_debug_printmodeline
drm_mode_destroy
drm_mode_duplicate
drm_mode_equal
drm_mode_object_put
drm_mode_probed_add
drm_modeset_acquire_fini
drm_modeset_acquire_init
drm_modeset_backoff
drm_modeset_drop_locks
drm_modeset_lock
drm_modeset_lock_single_interruptible
drm_mode_set_name
drm_modeset_unlock
drm_mode_vrefresh
drm_object_attach_property
drm_object_property_set_value
drm_open
drm_panel_add
drm_panel_disable
drm_panel_enable
drm_panel_get_modes
drm_panel_init
drm_panel_prepare
drm_panel_remove
drm_panel_unprepare
drm_plane_cleanup
drm_plane_create_alpha_property
drm_plane_create_blend_mode_property
drm_plane_create_rotation_property
drm_plane_create_zpos_immutable_property
drm_plane_create_zpos_property
drm_poll
drm_prime_gem_destroy
drm_prime_get_contiguous_size
drm_printf
__drm_printfn_debug
drm_property_create_enum
drm_property_create_range
drm_read
drm_rect_intersect
drm_release
drm_simple_encoder_init
drm_sysfs_connector_status_event
drm_universal_plane_init
drm_vblank_init
dump_stack
enable_irq
enable_percpu_irq
ether_setup
ethnl_cable_test_fault_length
ethnl_cable_test_result
ethtool_op_get_link
ethtool_op_get_ts_info
eth_type_trans
eth_validate_addr
extcon_get_edev_by_phandle
extcon_get_state
extcon_set_state_sync
fd_install
fget
_find_first_bit
_find_next_and_bit
_find_next_bit
_find_next_zero_bit
find_vma
find_vpid
finish_wait
fixed_size_llseek
flush_work
__flush_workqueue
fortify_panic
fput
free_candev
free_io_pgtable_ops
free_irq
free_netdev
__free_pages
free_pages_exact
free_percpu
free_percpu_irq
gcd
generic_device_group
generic_handle_domain_irq
generic_handle_irq
gen_pool_add_owner
gen_pool_alloc_algo_owner
gen_pool_create
gen_pool_destroy
gen_pool_free_owner
gen_pool_virt_to_phys
get_cpu_device
get_device
get_random_bytes
get_random_u32
get_random_u8
get_unused_fd_flags
gic_nonsecure_priorities
gpiochip_disable_irq
gpiochip_enable_irq
gpiochip_generic_config
gpiochip_generic_free
gpiochip_generic_request
gpiochip_get_data
gpiochip_irq_relres
gpiochip_irq_reqres
gpiochip_lock_as_irq
gpiochip_unlock_as_irq
gpiod_cansleep
gpiod_count
gpiod_direction_input
gpiod_direction_output
gpiod_direction_output_raw
gpiod_get_optional
gpiod_get_raw_value
gpiod_get_raw_value_cansleep
gpiod_get_value
gpiod_get_value_cansleep
gpiod_set_consumer_name
gpiod_set_raw_value
gpiod_set_raw_value_cansleep
gpiod_set_value
gpiod_set_value_cansleep
gpiod_to_irq
gpio_free
gpio_request
gpio_to_desc
handle_edge_irq
handle_level_irq
handle_nested_irq
handle_simple_irq
handle_sysrq
hrtimer_cancel
hrtimer_init
hrtimer_start_range_ns
hrtimer_try_to_cancel
i2c_adapter_type
i2c_add_adapter
i2c_add_numbered_adapter
i2c_bus_type
i2c_del_adapter
i2c_del_driver
i2c_for_each_dev
i2c_generic_scl_recovery
i2c_get_adapter
i2c_get_dma_safe_msg_buf
i2c_match_id
i2c_new_ancillary_device
i2c_new_dummy_device
i2c_put_adapter
i2c_put_dma_safe_msg_buf
i2c_recover_bus
i2c_register_driver
i2c_smbus_read_byte
i2c_smbus_read_byte_data
i2c_smbus_read_i2c_block_data
i2c_smbus_write_byte
i2c_smbus_write_byte_data
i2c_smbus_xfer
i2c_transfer
i2c_transfer_buffer_flags
i2c_unregister_device
i2c_verify_client
i3c_device_do_priv_xfers
i3c_driver_register_with_owner
i3c_driver_unregister
ida_alloc_range
ida_destroy
ida_free
idr_alloc
idr_destroy
idr_find
idr_for_each
idr_get_next
idr_preload
idr_remove
ieee80211_bss_get_elem
ieee80211_channel_to_freq_khz
ieee80211_freq_khz_to_channel
ieee80211_get_channel_khz
ieee80211_hdrlen
iio_buffer_enabled
iio_buffer_init
iio_buffer_put
iio_device_attach_buffer
iio_device_claim_direct_mode
iio_device_release_direct_mode
iio_device_unregister
iio_push_event
iio_push_to_buffers
import_iovec
in4_pton
init_dummy_netdev
init_net
__init_swait_queue_head
init_timer_key
init_wait_entry
__init_waitqueue_head
input_alloc_absinfo
input_allocate_device
input_event
input_free_device
input_mt_init_slots
input_mt_report_slot_state
input_mt_sync_frame
input_register_device
input_set_abs_params
input_set_capability
input_unregister_device
int_sqrt
iomem_resource
iommu_alloc_resv_region
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
iommu_device_unregister
iommu_dma_get_resv_regions
iommu_fwspec_add_ids
iommu_fwspec_free
iommu_get_domain_for_dev
iommu_group_ref_get
iommu_iova_to_phys
ioremap_prot
iounmap
__iowrite32_copy
__irq_alloc_descs
irq_chip_ack_parent
irq_create_mapping_affinity
irq_create_of_mapping
irq_dispose_mapping
__irq_domain_add
irq_domain_get_irq_data
irq_domain_remove
irq_domain_set_info
irq_domain_simple_ops
irq_domain_update_bus_token
irq_domain_xlate_onecell
irq_domain_xlate_twocell
irq_get_irq_data
irq_modify_status
irq_of_parse_and_map
__irq_resolve_mapping
irq_set_affinity
irq_set_chained_handler_and_data
irq_set_chip_and_handler_name
irq_set_chip_data
irq_set_irq_wake
irq_to_desc
jiffies
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
kasprintf
__kfifo_alloc
__kfifo_free
__kfifo_from_user
__kfifo_in
__kfifo_init
__kfifo_out
__kfifo_to_user
kfree
kfree_skb_reason
kimage_voffset
__kmalloc
kmalloc_caches
kmalloc_large
kmalloc_trace
kmem_cache_alloc
kmem_cache_create
kmem_cache_destroy
kmem_cache_free
kmemdup
kobject_create_and_add
kobject_put
kobject_uevent_env
krealloc
kstrdup
kstrdup_const
kstrtoint
kstrtoint_from_user
kstrtoll
kstrtou16
kstrtouint
kstrtoull
kthread_bind
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get
ktime_get_mono_fast_ns
ktime_get_raw_ts64
ktime_get_real_ts64
ktime_get_ts64
ktime_get_with_offset
kvfree
kvfree_call_rcu
kvmalloc_node
__list_add_valid
__list_del_entry_valid
__local_bh_enable_ip
log_post_read_mmio
log_post_write_mmio
log_read_mmio
log_write_mmio
match_string
mbox_chan_received_data
mbox_chan_txdone
mbox_free_channel
mbox_send_message
__mdiobus_read
mdiobus_read
mdiobus_unregister
__mdiobus_write
mdiobus_write
memchr_inv
memcmp
memcpy
__memcpy_fromio
__memcpy_toio
memdup_user
memmove
memory_read_from_buffer
memparse
memremap
mem_section
memset
__memset_io
memstart_addr
memunmap
mfd_add_devices
mfd_remove_devices
misc_deregister
misc_register
mmc_cqe_request_done
mmc_gpio_get_cd
mmc_gpio_get_ro
mmc_send_tuning
mod_delayed_work_on
mod_timer
__module_get
module_layout
module_put
__msecs_to_jiffies
msleep
msleep_interruptible
__mutex_init
mutex_is_locked
mutex_lock
mutex_lock_interruptible
mutex_trylock
mutex_unlock
napi_complete_done
napi_disable
napi_enable
napi_gro_receive
__napi_schedule
napi_schedule_prep
__ndelay
__netdev_alloc_skb
netdev_err
netdev_info
netdev_pick_tx
netdev_printk
netdev_warn
netif_carrier_off
netif_carrier_on
netif_device_attach
netif_device_detach
netif_napi_add_weight
__netif_napi_del
netif_receive_skb
netif_rx
netif_tx_lock
netif_tx_stop_all_queues
netif_tx_unlock
netif_tx_wake_queue
netlink_broadcast
__netlink_kernel_create
netlink_kernel_release
net_ratelimit
__nla_parse
nla_put
noop_llseek
nr_cpu_ids
nsecs_to_jiffies
ns_to_timespec64
__num_online_cpus
of_address_to_resource
of_alias_get_id
of_can_transceiver
of_clk_add_hw_provider
page_pool_destroy
page_pool_put_defragged_page
panic
panic_notifier_list
param_array_ops
param_ops_bool
param_ops_byte
param_ops_charp
param_ops_int
param_ops_uint
param_ops_ullong
param_ops_ulong
pci_bus_type
pci_device_group
pci_disable_device
pci_enable_device
pci_enable_msi
pci_enable_wake
pci_iomap
pci_iounmap
pci_read_config_dword
__pci_register_driver
pci_release_region
pci_request_region
pci_restore_state
pci_save_state
pci_set_master
pci_set_power_state
pci_unregister_driver
pci_write_config_dword
__per_cpu_offset
perf_pmu_migrate_context
perf_pmu_register
perf_pmu_unregister
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pfn_is_map_memory
pid_task
pinctrl_dev_get_drvdata
pinctrl_enable
pinctrl_force_default
pinctrl_force_sleep
pinctrl_lookup_state
pinctrl_pm_select_default_state
pinctrl_pm_select_idle_state
pinctrl_pm_select_sleep_state
pinctrl_select_default_state
pinctrl_select_state
pin_get_name
pin_user_pages
platform_device_add
platform_device_add_data
platform_device_add_resources
platform_device_alloc
platform_device_put
platform_device_register
platform_device_register_full
platform_device_unregister
__platform_driver_probe
__platform_driver_register
platform_driver_unregister
platform_get_irq
platform_get_irq_byname
platform_get_irq_byname_optional
platform_get_irq_optional
platform_get_resource
platform_get_resource_byname
platform_irqchip_probe
platform_irq_count
pm_genpd_init
pm_genpd_remove
__pm_relax
pm_relax
pm_runtime_allow
__pm_runtime_disable
pm_runtime_enable
pm_runtime_forbid
pm_runtime_force_resume
pm_runtime_force_suspend
__pm_runtime_idle
pm_runtime_no_callbacks
__pm_runtime_resume
pm_runtime_set_autosuspend_delay
__pm_runtime_set_status
__pm_runtime_suspend
__pm_runtime_use_autosuspend
__pm_stay_awake
pm_system_wakeup
pm_wakeup_dev_event
pm_wakeup_ws_event
policy_has_boost_freq
power_supply_get_property
power_supply_put
power_supply_register
power_supply_reg_notifier
power_supply_set_property
power_supply_unregister
power_supply_unreg_notifier
preempt_schedule
preempt_schedule_notrace
prepare_to_wait_event
print_hex_dump
_printk
proc_create_data
proc_create_seq_private
proc_create_single_data
proc_mkdir
pskb_expand_head
ptp_classify_raw
ptp_clock_event
ptp_clock_index
ptp_clock_register
ptp_clock_unregister
ptp_parse_header
put_device
__put_task_struct
put_unused_fd
pwm_apply_state
queue_delayed_work_on
queue_work_on
radix_tree_insert
___ratelimit
rational_best_approximation
raw_notifier_call_chain
_raw_spin_lock
_raw_spin_lock_bh
_raw_spin_lock_irq
_raw_spin_lock_irqsave
_raw_spin_trylock
_raw_spin_unlock
_raw_spin_unlock_bh
_raw_spin_unlock_irq
_raw_spin_unlock_irqrestore
rb_erase
rb_insert_color
__rcu_read_lock
__rcu_read_unlock
rdev_get_drvdata
rdev_get_id
refcount_warn_saturate
regcache_cache_bypass
regcache_cache_only
regcache_mark_dirty
regcache_sync
regcache_sync_region
register_candev
__register_chrdev
register_chrdev_region
register_inet6addr_notifier
register_inetaddr_notifier
register_netdev
register_netdevice
register_pm_notifier
register_reboot_notifier
__register_rpmsg_driver
register_syscore_ops
register_virtio_device
register_virtio_driver
regmap_bulk_read
regmap_bulk_write
regmap_field_read
regmap_field_update_bits_base
regmap_get_device
regmap_irq_get_virq
regmap_raw_write
regmap_read
regmap_register_patch
regmap_update_bits_base
regmap_write
regulator_bulk_disable
regulator_bulk_enable
regulator_bulk_get
regulator_disable
regulator_disable_regmap
regulator_enable
regulator_enable_regmap
regulator_get_voltage
regulator_get_voltage_sel_regmap
regulator_is_enabled
regulator_is_enabled_regmap
regulator_list_voltage_linear
regulator_list_voltage_linear_range
regulator_set_current_limit
regulator_set_voltage
regulatory_hint
regulatory_set_wiphy_regd_sync
release_firmware
__release_region
remap_pfn_range
remove_cpu
remove_proc_entry
remove_wait_queue
report_iommu_fault
request_firmware
request_firmware_direct
request_firmware_nowait
__request_module
__request_percpu_irq
__request_region
request_threaded_irq
reset_control_assert
reset_control_deassert
reset_control_put
reset_control_reset
rpmsg_create_channel
rpmsg_create_ept
rpmsg_register_device
rpmsg_register_device_override
rpmsg_release_channel
rpmsg_send
rpmsg_unregister_device
rproc_add
rproc_alloc
rproc_coredump_add_segment
rproc_del
rproc_free
rproc_report_crash
rps_needed
rtc_time64_to_tm
rtc_tm_to_time64
rtc_update_irq
rtnl_is_locked
rtnl_lock
rtnl_unlock
sched_clock
schedule
schedule_hrtimeout
schedule_timeout
schedule_timeout_uninterruptible
scmi_driver_register
scmi_driver_unregister
scnprintf
sdhci_add_host
sdhci_cqe_disable
sdhci_cqe_enable
sdhci_cqe_irq
sdhci_pltfm_free
sdhci_pltfm_init
sdhci_remove_host
sdhci_reset
sdio_claim_host
sdio_claim_irq
sdio_disable_func
sdio_enable_func
sdio_readb
sdio_readsb
sdio_register_driver
sdio_release_host
sdio_release_irq
sdio_set_block_size
sdio_unregister_driver
sdio_writeb
sdio_writesb
seq_lseek
seq_open
__seq_open_private
seq_printf
seq_puts
seq_read
seq_release
seq_release_private
seq_vprintf
seq_write
set_freezable
set_user_nice
sg_alloc_table
sg_alloc_table_from_pages_segment
sg_free_table
sg_init_one
sg_init_table
sg_next
__sg_page_iter_next
virtqueue_detach_unused_buf
virtqueue_disable_cb
virtqueue_enable_cb
virtqueue_get_buf
virtqueue_get_vring_size
virtqueue_kick
virtqueue_kick_prepare
virtqueue_notify
vmalloc
vmalloc_to_page
vmap
vm_get_page_prot
vm_iomap_memory
vm_mmap
vm_munmap
vm_zone_stat
vprintk
vring_del_virtqueue
vring_interrupt
vring_new_virtqueue
vsnprintf
vunmap
vzalloc
wait_for_completion
wait_for_completion_interruptible
wait_for_completion_interruptible_timeout
wait_for_completion_timeout
wait_woken
__wake_up
wake_up_process
wakeup_source_add
wakeup_source_register
wakeup_source_remove
wakeup_source_unregister
__warn_printk
watchdog_init_timeout
watchdog_set_restart_priority
wiphy_apply_custom_regulatory
wiphy_free
wiphy_new_nm
wiphy_register
wiphy_unregister
wireless_send_event
woken_wake_function
xdp_convert_zc_to_xdp_frame
xdp_do_flush
xdp_do_redirect
xdp_master_redirect
xdp_rxq_info_is_reg
__xdp_rxq_info_reg
xdp_rxq_info_reg_mem_model
xdp_rxq_info_unreg
xdp_warn

android/abi_gki_aarch64_mtk (new file, 2368 lines)

File diff suppressed because it is too large.

@@ -990,6 +990,7 @@
kthread_bind_mask
kthread_cancel_delayed_work_sync
kthread_complete_and_exit
kthread_create_on_cpu
kthread_create_on_node
kthread_create_worker
kthread_delayed_work_timer_fn
@@ -1304,6 +1305,7 @@
pm_runtime_forbid
pm_runtime_force_resume
pm_runtime_force_suspend
pm_runtime_get_if_active
__pm_runtime_idle
pm_runtime_irq_safe
__pm_runtime_resume
@@ -1682,6 +1684,7 @@
strspn
strstr
subsys_system_register
suspend_set_ops
__sw_hweight32
__sw_hweight64
sync_blockdev
@@ -1727,6 +1730,8 @@
tcpm_pd_hard_reset
tcpm_pd_receive
tcpm_pd_transmit_complete
tcpm_port_clean
tcpm_port_is_toggling
tcpm_sink_frs
tcpm_sourcing_vbus
tcpm_vbus_change
@@ -1801,6 +1806,8 @@
tty_insert_flip_string_fixed_flag
tty_kref_put
tty_port_tty_get
typec_mux_get_drvdata
typec_mux_register
typec_switch_get_drvdata
typec_switch_register
typec_switch_unregister
@@ -1881,6 +1888,7 @@
usb_role_switch_unregister
usb_speed_string
usb_string_id
usb_udc_vbus_handler
usb_unregister_notify
__usecs_to_jiffies
usleep_range_state
@@ -1967,6 +1975,7 @@
vmap
vmf_insert_pfn_prot
vprintk
vprintk_emit
vring_del_virtqueue
vring_interrupt
vring_new_virtqueue

@@ -1843,6 +1843,8 @@
irq_set_chained_handler_and_data
irq_set_chip_and_handler_name
irq_set_chip_data
__irq_set_handler
irq_set_handler_data
irq_set_irqchip_state
irq_set_irq_type
irq_set_irq_wake
@@ -2105,6 +2107,7 @@
mempool_free
mempool_free_slab
memremap
memremap_pages
memscan
mem_section
memset64
@@ -2112,6 +2115,7 @@
__memset_io
memstart_addr
memunmap
memunmap_pages
migrate_pages
migrate_swap
__migrate_task
@@ -2336,6 +2340,7 @@
of_graph_parse_endpoint
of_hwspin_lock_get_id
of_icc_get
of_icc_get_from_provider
of_icc_xlate_onecell
of_iomap
of_irq_find_parent
@@ -2345,7 +2350,6 @@
of_machine_is_compatible
of_match_device
of_match_node
of_mdiobus_register
of_modalias_node
of_n_addr_cells
of_node_name_eq
@@ -3169,6 +3173,8 @@
smp_call_function
smp_call_function_single
smp_call_function_single_async
snd_ctl_remove
snd_hwdep_new
snd_info_create_card_entry
snd_info_create_module_entry
snd_info_free_entry
@@ -3177,6 +3183,9 @@
snd_jack_set_key
snd_pcm_format_width
_snd_pcm_hw_params_any
snd_pcm_set_managed_buffer
snd_pcm_std_chmaps
snd_pcm_stop
snd_soc_add_component_controls
snd_soc_card_get_kcontrol
snd_soc_card_jack_new
@@ -3241,6 +3250,7 @@
sock_edemux
sock_efree
sockfd_lookup
sock_gen_put
sock_gettstamp
sock_i_ino
sock_init_data
@@ -3406,6 +3416,7 @@
tasklist_lock
__task_pid_nr_ns
__task_rq_lock
task_rq_lock
tcp_hashinfo
thermal_cdev_update
thermal_cooling_device_register
@@ -3977,6 +3988,7 @@
v4l2_fh_del
v4l2_fh_exit
v4l2_fh_init
v4l2_fh_is_singular
v4l2_fh_open
v4l2_fh_release
v4l2_m2m_ctx_init
@@ -4079,9 +4091,11 @@
vm_insert_page
vm_iomap_memory
vm_map_pages
vm_map_ram
vm_mmap
vm_munmap
vm_node_stat
vm_unmap_ram
vm_zone_stat
vprintk
vring_create_virtqueue

@@ -0,0 +1,131 @@
[abi_symbol_list]
proc_mkdir_data
proc_create_seq_private
i2c_smbus_read_byte_data
i2c_smbus_write_byte_data
blk_execute_rq
blk_rq_map_kern
scsi_device_lookup
scsi_host_lookup
scsi_host_put
ufshcd_read_desc_param
utf16s_to_utf8s
async_schedule_node
blk_mq_alloc_tag_set
blk_mq_init_queue
blk_mq_tagset_busy_iter
bsg_job_done
bsg_remove_queue
bsg_setup_queue
dev_pm_opp_remove
scsi_add_host_with_dma
scsi_block_requests
scsi_dma_unmap
scsi_is_host_device
scsi_remove_host
scsi_report_bus_reset
scsi_scan_host
scsi_unblock_requests
scsi_change_queue_depth
scsi_print_command
scsi_dma_map
scsi_host_alloc
scsi_normalize_sense
sg_copy_from_buffer
sg_copy_to_buffer
ufshcd_alloc_host
ufshcd_config_pwr_mode
ufshcd_dealloc_host
ufshcd_hba_enable
ufshcd_make_hba_operational
ufshcd_query_attr_retry
ufshcd_query_flag_retry
ufshcd_update_evt_hist
wait_for_completion_io_timeout
__scsi_add_device
blk_mq_free_tag_set
blk_queue_update_dma_alignment
blk_queue_update_dma_pad
mempool_resize
mempool_alloc_pages
mempool_free_pages
regmap_raw_write_async
snd_soc_bytes_tlv_callback
regmap_async_complete
snd_compr_stop_error
snd_soc_component_disable_pin
snd_soc_component_force_enable_pin
snd_pcm_format_physical_width
snd_pcm_hw_constraint_list
regmap_multi_reg_write_bypassed
snd_ctl_boolean_mono_info
snd_soc_put_volsw_range
snd_soc_get_volsw_range
snd_soc_info_volsw_range
regmap_raw_write
regcache_drop_region
regmap_raw_read
regmap_multi_reg_write
regulator_bulk_enable
__blk_mq_end_request
balance_dirty_pages_ratelimited
bdi_alloc
bdi_put
bdi_register
blk_mq_freeze_queue
blk_mq_quiesce_queue
blk_mq_start_request
blk_mq_unfreeze_queue
blk_mq_unquiesce_queue
blk_queue_write_cache
blk_update_request
blkdev_get_by_dev
blkdev_get_by_path
blkdev_put
deactivate_locked_super
fixed_size_llseek
generic_shutdown_super
kmsg_dump_get_buffer
kmsg_dump_register
kmsg_dump_rewind
kmsg_dump_unregister
ktime_get_coarse_real_ts64
lockref_get
logfc
lookup_bdev
name_to_dev_t
nvmem_register
nvmem_unregister
proc_create_single_data
read_cache_page
set_disk_ro
set_page_dirty
sget_fc
simple_strtoul
sync_blockdev
wait_for_device_probe
blk_mq_alloc_sq_tag_set
__traceiter_android_vh_binder_wait_for_work
__tracepoint_android_vh_binder_wait_for_work
__traceiter_android_vh_free_task
__tracepoint_android_vh_free_task
jiffies_64
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_rvh_after_dequeue_task
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_after_dequeue_task
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_dequeue_entity
__traceiter_android_rvh_enqueue_entity
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_dequeue_entity
__tracepoint_android_rvh_enqueue_entity
console_printk
__traceiter_android_vh_binder_transaction_init
__tracepoint_android_vh_binder_transaction_init
drm_get_connector_type_name
gpio_request_array
wakeup_sources_read_lock
wakeup_sources_read_unlock
wakeup_sources_walk_start
wakeup_sources_walk_next

@@ -1,14 +1,9 @@
__cfg80211_alloc_event_skb
__cfg80211_alloc_reply_skb
__cfg80211_radar_event
__cfg80211_send_event_skb
__hci_cmd_send
__hci_cmd_sync
__hci_cmd_sync_ev
__hci_cmd_sync_sk
__hci_cmd_sync_status
__hci_cmd_sync_status_sk
__ieee80211_schedule_txq
__nfc_alloc_vendor_cmd_reply_skb
alloc_can_err_skb
alloc_can_skb
@@ -18,7 +13,6 @@ alloc_canxl_skb
arc4_crypt
arc4_setkey
baswap
bridge_tunnel_header
bt_accept_dequeue
bt_accept_enqueue
bt_accept_unlink
@@ -83,105 +77,8 @@ can_rx_unregister
can_send
can_skb_get_frame_len
can_sock_destruct
cfg80211_any_usable_channels
cfg80211_assoc_comeback
cfg80211_assoc_failure
cfg80211_auth_timeout
cfg80211_background_cac_abort
cfg80211_bss_color_notify
cfg80211_bss_flush
cfg80211_bss_iter
cfg80211_cac_event
cfg80211_calculate_bitrate
cfg80211_ch_switch_notify
cfg80211_ch_switch_started_notify
cfg80211_chandef_compatible
cfg80211_chandef_create
cfg80211_chandef_dfs_required
cfg80211_chandef_usable
cfg80211_chandef_valid
cfg80211_check_combinations
cfg80211_check_station_change
cfg80211_classify8021d
cfg80211_conn_failed
cfg80211_connect_done
cfg80211_control_port_tx_status
cfg80211_cqm_beacon_loss_notify
cfg80211_cqm_pktloss_notify
cfg80211_cqm_rssi_notify
cfg80211_cqm_txe_notify
cfg80211_crit_proto_stopped
cfg80211_del_sta_sinfo
cfg80211_disconnected
cfg80211_external_auth_request
cfg80211_find_elem_match
cfg80211_find_vendor_elem
cfg80211_free_nan_func
cfg80211_ft_event
cfg80211_get_bss
cfg80211_get_drvinfo
cfg80211_get_ies_channel_number
cfg80211_get_iftype_ext_capa
cfg80211_get_p2p_attr
cfg80211_get_station
cfg80211_gtk_rekey_notify
cfg80211_ibss_joined
cfg80211_iftype_allowed
cfg80211_inform_bss_data
cfg80211_inform_bss_frame_data
cfg80211_is_element_inherited
cfg80211_iter_combinations
cfg80211_merge_profile
cfg80211_mgmt_tx_status_ext
cfg80211_michael_mic_failure
cfg80211_nan_func_terminated
cfg80211_nan_match
cfg80211_new_sta
cfg80211_notify_new_peer_candidate
cfg80211_pmksa_candidate_notify
cfg80211_pmsr_complete
cfg80211_pmsr_report
cfg80211_port_authorized
cfg80211_probe_status
cfg80211_put_bss
cfg80211_ready_on_channel
cfg80211_ref_bss
cfg80211_reg_can_beacon
cfg80211_reg_can_beacon_relax
cfg80211_register_netdevice
cfg80211_remain_on_channel_expired
cfg80211_report_obss_beacon_khz
cfg80211_report_wowlan_wakeup
cfg80211_roamed
cfg80211_rx_assoc_resp
cfg80211_rx_control_port
cfg80211_rx_mgmt_ext
cfg80211_rx_mlme_mgmt
cfg80211_rx_spurious_frame
cfg80211_rx_unexpected_4addr_frame
cfg80211_rx_unprot_mlme_mgmt
cfg80211_scan_done
cfg80211_sched_scan_results
cfg80211_sched_scan_stopped
cfg80211_sched_scan_stopped_locked
cfg80211_send_layer2_update
cfg80211_shutdown_all_interfaces
cfg80211_sinfo_alloc_tid_stats
cfg80211_sta_opmode_change_notify
cfg80211_stop_iface
cfg80211_tdls_oper_request
cfg80211_tx_mgmt_expired
cfg80211_tx_mlme_mgmt
cfg80211_unlink_bss
cfg80211_unregister_wdev
cfg80211_update_owe_info_event
cfg80211_valid_disable_subchannel_bitmap
cfg80211_vendor_cmd_get_sender
cfg80211_vendor_cmd_reply
close_candev
free_candev
freq_reg_info
get_wiphy_regdom
h4_recv_buf
hci_alloc_dev_priv
hci_cmd_sync
@@ -210,151 +107,6 @@ hci_uart_unregister_device
hci_unregister_cb
hci_unregister_dev
hidp_hid_driver
ieee80211_alloc_hw_nm
ieee80211_amsdu_to_8023s
ieee80211_ap_probereq_get
ieee80211_ave_rssi
ieee80211_beacon_cntdwn_is_complete
ieee80211_beacon_get_template
ieee80211_beacon_get_tim
ieee80211_beacon_loss
ieee80211_beacon_set_cntdwn
ieee80211_beacon_update_cntdwn
ieee80211_bss_get_elem
ieee80211_calc_rx_airtime
ieee80211_calc_tx_airtime
ieee80211_chandef_to_operating_class
ieee80211_channel_switch_disconnect
ieee80211_channel_to_freq_khz
ieee80211_chswitch_done
ieee80211_color_change_finish
ieee80211_connection_loss
ieee80211_cqm_beacon_loss_notify
ieee80211_cqm_rssi_notify
ieee80211_csa_finish
ieee80211_ctstoself_duration
ieee80211_ctstoself_get
ieee80211_data_to_8023_exthdr
ieee80211_disable_rssi_reports
ieee80211_disconnect
ieee80211_enable_rssi_reports
ieee80211_find_sta
ieee80211_find_sta_by_ifaddr
ieee80211_find_sta_by_link_addrs
ieee80211_free_hw
ieee80211_free_txskb
ieee80211_freq_khz_to_channel
ieee80211_generic_frame_duration
ieee80211_get_bssid
ieee80211_get_buffered_bc
ieee80211_get_channel_khz
ieee80211_get_fils_discovery_tmpl
ieee80211_get_hdrlen_from_skb
ieee80211_get_key_rx_seq
ieee80211_get_mesh_hdrlen
ieee80211_get_num_supported_channels
ieee80211_get_response_rate
ieee80211_get_tkip_p1k_iv
ieee80211_get_tkip_p2k
ieee80211_get_tkip_rx_p1k
ieee80211_get_tx_rates
ieee80211_get_unsol_bcast_probe_resp_tmpl
ieee80211_get_vht_max_nss
ieee80211_gtk_rekey_add
ieee80211_gtk_rekey_notify
ieee80211_hdrlen
ieee80211_hw_restart_disconnect
ieee80211_ie_split_ric
ieee80211_iter_chan_contexts_atomic
ieee80211_iter_keys
ieee80211_iter_keys_rcu
ieee80211_iterate_active_interfaces_atomic
ieee80211_iterate_active_interfaces_mtx
ieee80211_iterate_interfaces
ieee80211_iterate_stations
ieee80211_iterate_stations_atomic
ieee80211_key_mic_failure
ieee80211_key_replay
ieee80211_manage_rx_ba_offl
ieee80211_mandatory_rates
ieee80211_mark_rx_ba_filtered_frames
ieee80211_nan_func_match
ieee80211_nan_func_terminated
ieee80211_next_txq
ieee80211_nullfunc_get
ieee80211_operating_class_to_band
ieee80211_parse_p2p_noa
ieee80211_probereq_get
ieee80211_proberesp_get
ieee80211_pspoll_get
ieee80211_queue_delayed_work
ieee80211_queue_stopped
ieee80211_queue_work
ieee80211_radar_detected
ieee80211_radiotap_iterator_init
ieee80211_radiotap_iterator_next
ieee80211_rate_control_register
ieee80211_rate_control_unregister
ieee80211_ready_on_channel
ieee80211_register_hw
ieee80211_remain_on_channel_expired
ieee80211_remove_key
ieee80211_report_low_ack
ieee80211_report_wowlan_wakeup
ieee80211_request_smps
ieee80211_reserve_tid
ieee80211_restart_hw
ieee80211_resume_disconnect
ieee80211_rts_duration
ieee80211_rts_get
ieee80211_rx_ba_timer_expired
ieee80211_rx_irqsafe
ieee80211_rx_list
ieee80211_rx_napi
ieee80211_s1g_channel_width
ieee80211_scan_completed
ieee80211_sched_scan_results
ieee80211_sched_scan_stopped
ieee80211_send_bar
ieee80211_send_eosp_nullfunc
ieee80211_set_active_links
ieee80211_set_active_links_async
ieee80211_set_key_rx_seq
ieee80211_sta_block_awake
ieee80211_sta_eosp
ieee80211_sta_ps_transition
ieee80211_sta_pspoll
ieee80211_sta_recalc_aggregates
ieee80211_sta_register_airtime
ieee80211_sta_set_buffered
ieee80211_sta_uapsd_trigger
ieee80211_start_tx_ba_cb_irqsafe
ieee80211_start_tx_ba_session
ieee80211_stop_queue
ieee80211_stop_queues
ieee80211_stop_rx_ba_session
ieee80211_stop_tx_ba_cb_irqsafe
ieee80211_stop_tx_ba_session
ieee80211_tdls_oper_request
ieee80211_tkip_add_iv
ieee80211_tx_dequeue
ieee80211_tx_prepare_skb
ieee80211_tx_rate_update
ieee80211_tx_status
ieee80211_tx_status_8023
ieee80211_tx_status_ext
ieee80211_tx_status_irqsafe
ieee80211_txq_airtime_check
ieee80211_txq_get_depth
ieee80211_txq_may_transmit
ieee80211_txq_schedule_start
ieee80211_unregister_hw
ieee80211_unreserve_tid
ieee80211_update_mu_groups
ieee80211_update_p2p_noa
ieee80211_vif_to_wdev
ieee80211_wake_queue
ieee80211_wake_queues
ieee802154_alloc_hw
ieee802154_configure_durations
ieee802154_free_hw
@@ -371,7 +123,6 @@ ieee802154_wake_queue
ieee802154_xmit_complete
ieee802154_xmit_error
ieee802154_xmit_hw_error
ieeee80211_obss_color_collision_notify
l2cap_add_psm
l2cap_chan_close
l2cap_chan_connect
@@ -460,16 +211,8 @@ qca_send_pre_shutdown_cmd
qca_set_bdaddr
qca_set_bdaddr_rome
qca_uart_setup
rate_control_set_rates
reg_initiator_name
reg_query_regdb_wmm
register_candev
register_pppox_proto
regulatory_hint
regulatory_pre_cac_allowed
regulatory_set_wiphy_regd
regulatory_set_wiphy_regd_sync
rfc1042_header
rfkill_alloc
rfkill_blocked
rfkill_destroy
@@ -520,17 +263,6 @@ usb_serial_port_softint
usb_serial_register_drivers
usb_serial_resume
usb_serial_suspend
wdev_chandef
wdev_to_ieee80211_vif
wiphy_apply_custom_regulatory
wiphy_free
wiphy_new_nm
wiphy_read_of_freq_limits
wiphy_register
wiphy_rfkill_set_hw_state_reason
wiphy_rfkill_start_polling
wiphy_to_ieee80211_hw
wiphy_unregister
wpan_phy_find
wpan_phy_for_each
wpan_phy_free

@@ -37,11 +37,9 @@ net/ieee802154/ieee802154.ko
net/ieee802154/ieee802154_socket.ko
net/l2tp/l2tp_core.ko
net/l2tp/l2tp_ppp.ko
net/mac80211/mac80211.ko
net/mac802154/mac802154.ko
net/nfc/nfc.ko
net/rfkill/rfkill.ko
net/tipc/diag.ko
net/tipc/tipc.ko
net/wireless/cfg80211.ko

@@ -244,7 +244,7 @@ led-pcieslot-power {
	};
};

iio-hwmon-battery {
iio-hwmon {
	compatible = "iio-hwmon";
	io-channels = <&adc1 7>;
};

@@ -220,7 +220,7 @@ event-fan5-presence {
	};
};

iio-hwmon-battery {
iio-hwmon {
	compatible = "iio-hwmon";
	io-channels = <&adc1 7>;
};

@@ -1136,6 +1136,23 @@ config SOCIONEXT_SYNQUACER_PREITS
	  If unsure, say Y.

config ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
	bool "Remove cacheable aliases of non-cacheable DMA buffers at stage-2"
	default y
	depends on KVM
	help
	  Some SoCs integrate non-coherent DMA-capable peripherals beyond
	  the Point of Coherency (PoC), resulting in loss of coherency
	  with non-cacheable mappings on the CPU in the presence of a
	  cacheable alias.

	  This workaround provides a mechanism (controlled by the kernel
	  command-line) to remap pages as non-cacheable in pKVM's stage-2
	  mapping for the host, thereby removing any cacheable aliases
	  that may be present in the stage-1 mapping.

	  If unsure, say Y.

endmenu # "ARM errata workarounds via the alternatives framework"

choice

@@ -77,6 +77,7 @@ CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_ARM_SCMI_CPUFREQ=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_PKVM_MODULE_PATH="/lib/modules/"
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_SHADOW_CALL_STACK=y
@ -111,6 +112,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_AREAS=16
# CONFIG_ZONE_DMA is not set
CONFIG_ZONE_DEVICE=y
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
@ -477,6 +479,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PCI_RENESAS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y


@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2023 - Google LLC
* Author: Will Deacon <willdeacon@google.com>
*/
#ifndef _ASM_ARM64_ANDROID_ERRATUM_PGTABLE_H
#define _ASM_ARM64_ANDROID_ERRATUM_PGTABLE_H
#ifndef __ASM_PGTABLE_H
#error "Please don't include this header directly."
#endif
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
extern void pkvm_host_set_stage2_memattr(phys_addr_t addr, bool force_nc);
extern __init int pkvm_register_early_nc_mappings(void);
DECLARE_STATIC_KEY_FALSE(pkvm_force_nc);
static inline bool prot_needs_stage2_update(pgprot_t prot)
{
pteval_t val = pgprot_val(prot);
if (!static_branch_unlikely(&pkvm_force_nc))
return 0;
return (val & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC);
}
static inline void arm64_update_cacheable_aliases(pte_t *ptep, pte_t pte)
{
pte_t old_pte = READ_ONCE(*ptep);
bool force_nc;
if (!static_branch_unlikely(&pkvm_force_nc))
return;
if (pte_valid(old_pte) == pte_valid(pte))
return;
if (!pte_valid(pte)) {
force_nc = false;
pte = old_pte;
} else {
force_nc = true;
}
if ((pte_val(pte) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC))
pkvm_host_set_stage2_memattr(__pte_to_phys(pte), force_nc);
}
#else
static inline void arm64_update_cacheable_aliases(pte_t *ptep, pte_t pte) { }
static inline bool prot_needs_stage2_update(pgprot_t prot) { return false; }
#endif /* CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC */
#endif /* _ASM_ARM64_ANDROID_ERRATUM_PGTABLE_H */


@ -103,6 +103,9 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_rb_swap_reader_page,
__KVM_HOST_SMCCC_FUNC___pkvm_rb_update_footers,
__KVM_HOST_SMCCC_FUNC___pkvm_enable_event,
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
__KVM_HOST_SMCCC_FUNC___pkvm_host_set_stage2_memattr,
#endif
/*
* Start of the dynamically registered hypercalls. Start a bit


@ -320,6 +320,8 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
__func__, pte_val(old_pte), pte_val(pte));
}
#include <asm/android_erratum_pgtable.h>
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@ -348,6 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
__check_racy_pte_update(mm, ptep, pte);
arm64_update_cacheable_aliases(ptep, pte);
set_pte(ptep, pte);
}
@ -531,6 +534,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
WARN_ON(prot_needs_stage2_update(__pgprot(pmd_val(pmd))));
page_table_check_pmd_set(mm, addr, pmdp, pmd);
return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}
@ -538,6 +542,7 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
WARN_ON(prot_needs_stage2_update(__pgprot(pud_val(pud))));
page_table_check_pud_set(mm, addr, pudp, pud);
return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}
@ -933,7 +938,10 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
pte_t pte;
arm64_update_cacheable_aliases(ptep, __pte(0));
pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
page_table_check_pte_clear(mm, address, pte);


@ -9,6 +9,9 @@
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
if (prot_needs_stage2_update(prot))
return false;
/*
* SW table walks can't handle removal of intermediate entries.
*/
@ -19,6 +22,9 @@ static inline bool arch_vmap_pud_supported(pgprot_t prot)
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
if (prot_needs_stage2_update(prot))
return false;
/* See arch_vmap_pud_supported() */
return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}
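/*
 * Hedged reading of the two early returns added above: the stage-2
 * attribute tracking introduced by this series operates on individual
 * pages, so huge PUD/PMD vmap mappings whose protections would need a
 * stage-2 update are refused up front and fall back to page-sized
 * mappings.
 */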


@ -66,7 +66,7 @@
.long .Lefi_header_end - .L_head // SizeOfHeaders
.long 0 // CheckSum
.short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
.short 0 // DllCharacteristics
.short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT // DllCharacteristics
.quad 0 // SizeOfStackReserve
.quad 0 // SizeOfStackCommit
.quad 0 // SizeOfHeapReserve


@ -121,6 +121,10 @@ KVM_NVHE_ALIAS(__hyp_event_ids_end);
/* pKVM static key */
KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
KVM_NVHE_ALIAS(pkvm_force_nc);
#endif
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */


@ -69,4 +69,11 @@ config PROTECTED_NVHE_STACKTRACE
If unsure, or not using protected nVHE (pKVM), say N.
config PKVM_MODULE_PATH
string "Path to pKVM modules"
default ""
help
Directory where the pKVM modules are found. If empty, the modules
will be searched for in the default path /lib/modules/<uname>.
endif # VIRTUALIZATION


@ -23,6 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC) += android_erratum_pgtable.o
kvm-$(CONFIG_TRACING) += hyp_events.o hyp_trace.o


@ -0,0 +1,108 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 - Google LLC
* Author: Will Deacon <willdeacon@google.com>
*/
#include <asm/kvm_host.h>
#include <asm/pgtable.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/memblock.h>
DEFINE_STATIC_KEY_FALSE(pkvm_force_nc);
static int __init early_pkvm_force_nc_cfg(char *arg)
{
static_branch_enable(&pkvm_force_nc);
return 0;
}
early_param("kvm-arm.force_nc", early_pkvm_force_nc_cfg);
/*
* Update the stage-2 memory attributes (cacheability) for a page, usually
* in response to mapping or unmapping a normal non-cacheable region at stage-1.
*
* If 'force_nc' is set, the stage-2 entry is immediately made non-cacheable
* (and cleaned+invalidated to the PoC); otherwise the entry is unmapped and the
* cacheability is determined by the stage-1 attribute of the next access
* (with no cache maintenance being performed).
*/
struct pkvm_host_nc_region {
phys_addr_t start;
phys_addr_t end;
};
#define PKVM_HOST_MAX_EARLY_NC_REGIONS 8
static struct pkvm_host_nc_region
pkvm_host_early_nc_regions[PKVM_HOST_MAX_EARLY_NC_REGIONS];
static void pkvm_host_track_early_nc_mapping(phys_addr_t addr)
{
static int idx /*= 0*/;
struct pkvm_host_nc_region *reg = &pkvm_host_early_nc_regions[idx];
if (reg->start == reg->end) {
reg->start = addr;
} else if (reg->end != addr) {
if (WARN_ON(idx == PKVM_HOST_MAX_EARLY_NC_REGIONS - 1))
return;
reg = &pkvm_host_early_nc_regions[++idx];
reg->start = addr;
}
reg->end = addr + PAGE_SIZE;
}
void pkvm_host_set_stage2_memattr(phys_addr_t addr, bool force_nc)
{
int err;
if (kvm_get_mode() != KVM_MODE_PROTECTED)
return;
/*
* Non-memory regions or carveouts marked as "no-map" are handled
* entirely by their corresponding driver, which should avoid the
* creation of a cacheable alias in the first place.
*/
if (!memblock_is_map_memory(addr))
return;
if (!is_pkvm_initialized()) {
if (!WARN_ON_ONCE(!force_nc))
pkvm_host_track_early_nc_mapping(addr);
return;
}
err = kvm_call_hyp_nvhe(__pkvm_host_set_stage2_memattr, addr, force_nc);
WARN_ON(err && err != -EAGAIN);
}
EXPORT_SYMBOL_GPL(pkvm_host_set_stage2_memattr);
int __init pkvm_register_early_nc_mappings(void)
{
int i;
if (!is_pkvm_initialized())
return 0;
for (i = 0; i < PKVM_HOST_MAX_EARLY_NC_REGIONS; ++i) {
struct pkvm_host_nc_region *reg = &pkvm_host_early_nc_regions[i];
if (reg->start == reg->end)
return 0;
while (reg->start != reg->end) {
int err;
err = kvm_call_hyp_nvhe(__pkvm_host_set_stage2_memattr, reg->start, true);
if (err)
return err;
reg->start += PAGE_SIZE;
}
}
return 0;
}
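A hedged usage note: given the early_param() registration above, the workaround is armed from the bootloader by adding the bare token to the kernel command line:

    kvm-arm.force_nc

Once armed, Normal-NC mappings created before pKVM is initialized are recorded and replayed by pkvm_register_early_nc_mappings(); later ones are forwarded to the hypervisor immediately.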


@ -16,6 +16,7 @@ extern const struct pkvm_module_ops module_ops;
int hyp_create_pcpu_fixmap(void);
void *hyp_fixmap_map(phys_addr_t phys);
void *hyp_fixmap_map_nc(phys_addr_t phys);
void hyp_fixmap_unmap(void);
void hyp_poison_page(phys_addr_t phys);


@ -152,6 +152,12 @@ static inline bool pkvm_ipa_range_has_pvmfw(struct pkvm_hyp_vm *vm,
return ipa_end > pkvm->pvmfw_load_addr && ipa_start < pvmfw_load_end;
}
static inline void pkvm_set_max_sve_vq(void)
{
sve_cond_update_zcr_vq(sve_vq_from_vl(kvm_host_sve_max_vl) - 1,
SYS_ZCR_EL2);
}
int pkvm_load_pvmfw_pages(struct pkvm_hyp_vm *vm, u64 ipa, phys_addr_t phys,
u64 size);
void pkvm_poison_pvmfw_pages(void);


@ -688,10 +688,9 @@ static void fpsimd_host_restore(void)
if (system_supports_sve()) {
struct kvm_host_sve_state *sve_state = get_host_sve_state(vcpu);
u64 vq_len = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR);
sve_cond_update_zcr_vq(vq_len, SYS_ZCR_EL2);
pkvm_set_max_sve_vq();
__sve_restore_state(sve_state->sve_regs +
sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr);
@ -1264,6 +1263,17 @@ static void handle___pkvm_enable_event(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_enable_event(id, enable);
}
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
extern int __pkvm_host_set_stage2_memattr(phys_addr_t phys, bool force_nc);
static void handle___pkvm_host_set_stage2_memattr(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
DECLARE_REG(bool, force_nc, host_ctxt, 2);
cpu_reg(host_ctxt, 1) = __pkvm_host_set_stage2_memattr(phys, force_nc);
}
#endif
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@ -1316,6 +1326,9 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_rb_swap_reader_page),
HANDLE_FUNC(__pkvm_rb_update_footers),
HANDLE_FUNC(__pkvm_enable_event),
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
HANDLE_FUNC(__pkvm_host_set_stage2_memattr),
#endif
};
unsigned long pkvm_priv_hcall_limit __ro_after_init = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
@ -1328,7 +1341,7 @@ int reset_pkvm_priv_hcall_limit(void)
return -EACCES;
addr = hyp_fixmap_map(__hyp_pa(&pkvm_priv_hcall_limit));
*addr = KVM_HOST_SMCCC_FUNC(__pkvm_prot_finalize);
*addr = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
hyp_fixmap_unmap();
return 0;
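/*
 * Hedged reading of the one-line fix above: pkvm_priv_hcall_limit holds a
 * raw index into the hypercall table (see its initializer earlier in this
 * file), so it must be reset with the plain enum value;
 * KVM_HOST_SMCCC_FUNC() would have encoded the full SMCCC function ID.
 */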


@ -14,6 +14,7 @@
#include <hyp/adjust_pc.h>
#include <nvhe/iommu.h>
#include <nvhe/mm.h>
#include <nvhe/modules.h>
#include <nvhe/pkvm.h>
#define DRV_ID(drv_addr) ((unsigned long)drv_addr)
@ -469,6 +470,8 @@ int __pkvm_iommu_finalize(int err)
if (!ret && err)
pkvm_handle_system_misconfiguration(NO_DMA_ISOLATION);
__pkvm_close_late_module_registration();
return ret;
}


@ -539,23 +539,10 @@ static inline bool range_included(struct kvm_mem_range *child,
return parent->start <= child->start && child->end <= parent->end;
}
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range,
u32 level)
{
struct kvm_mem_range cur;
kvm_pte_t pte;
u32 level;
int ret;
hyp_assert_lock_held(&host_mmu.lock);
ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
if (ret)
return ret;
if (kvm_pte_valid(pte))
return -EAGAIN;
if (pte)
return -EPERM;
do {
u64 granule = kvm_granule_size(level);
@ -641,15 +628,141 @@ static bool host_stage2_pte_is_counted(kvm_pte_t pte, u32 level)
return (pte & KVM_HOST_S2_DEFAULT_MASK) != KVM_HOST_S2_DEFAULT_MMIO_PTE;
}
static int host_stage2_idmap(u64 addr)
#define DEFERRED_MEMATTR_NOTE (1ULL << 24)
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr);
int __pkvm_host_set_stage2_memattr(phys_addr_t phys, bool force_nc)
{
kvm_pte_t pte;
int ret = 0;
if (!static_branch_unlikely(&pkvm_force_nc))
return -ENOENT;
phys = ALIGN_DOWN(phys, PAGE_SIZE);
hyp_spin_lock(&host_mmu.lock);
ret = kvm_pgtable_get_leaf(&host_mmu.pgt, phys, &pte, NULL);
if (ret)
goto unlock;
if (!addr_is_memory(phys)) {
ret = -EIO;
goto unlock;
}
if (!kvm_pte_valid(pte) && pte) {
switch (pte) {
case DEFERRED_MEMATTR_NOTE:
break;
default:
ret = -EPERM;
}
} else if (host_get_page_state(pte, phys) != PKVM_PAGE_OWNED) {
ret = -EPERM;
}
if (ret)
goto unlock;
if (force_nc) {
ret = host_stage2_idmap_locked(phys, PAGE_SIZE,
PKVM_HOST_MEM_PROT |
KVM_PGTABLE_PROT_NC,
false);
if (ret)
goto unlock;
kvm_flush_dcache_to_poc(hyp_fixmap_map_nc(phys), PAGE_SIZE);
hyp_fixmap_unmap();
} else {
ret = kvm_pgtable_stage2_annotate(&host_mmu.pgt, phys,
PAGE_SIZE, &host_s2_pool,
DEFERRED_MEMATTR_NOTE);
}
unlock:
hyp_spin_unlock(&host_mmu.lock);
return ret;
}
static int handle_memattr_annotation(struct kvm_vcpu_fault_info *fault,
u64 addr, enum kvm_pgtable_prot *prot,
struct kvm_mem_range *range)
{
u64 par, oldpar;
/* If the S1 MMU is disabled, treat the access as cacheable */
if (unlikely(!(read_sysreg(sctlr_el1) & SCTLR_ELx_M)))
return 0;
/* If we took a fault on a PTW, then treat it as cacheable */
if (fault->esr_el2 & ESR_ELx_S1PTW)
return 0;
oldpar = read_sysreg_par();
if (!__kvm_at("s1e1r", fault->far_el2))
par = read_sysreg_par();
else
par = SYS_PAR_EL1_F;
write_sysreg(oldpar, par_el1);
if (unlikely(par & SYS_PAR_EL1_F))
return -EAGAIN;
if ((par >> 56) == MAIR_ATTR_NORMAL_NC) {
range->start = ALIGN_DOWN(addr, PAGE_SIZE);
range->end = range->start + PAGE_SIZE;
*prot |= KVM_PGTABLE_PROT_NC;
}
return 0;
}
#else
static int handle_memattr_annotation(struct kvm_vcpu_fault_info *fault,
u64 addr, enum kvm_pgtable_prot *prot,
struct kvm_mem_range *range)
{
return -EPERM;
}
#endif
static int host_stage2_idmap(struct kvm_vcpu_fault_info *fault, u64 addr)
{
struct kvm_mem_range range;
bool is_memory = !!find_mem_range(addr, &range);
enum kvm_pgtable_prot prot = default_host_prot(is_memory);
kvm_pte_t pte;
u32 level;
int ret;
hyp_assert_lock_held(&host_mmu.lock);
ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
if (ret)
return ret;
if (kvm_pte_valid(pte))
return -EAGAIN;
if (pte) {
if (!is_memory)
return -EPERM;
switch (pte) {
case DEFERRED_MEMATTR_NOTE:
ret = handle_memattr_annotation(fault, addr, &prot,
&range);
if (ret)
return ret;
break;
default:
return -EPERM;
}
}
/*
* Adjust against IOMMU devices first. host_stage2_adjust_range() should
* be called last for proper alignment.
@ -661,7 +774,7 @@ static int host_stage2_idmap(u64 addr)
return ret;
}
ret = host_stage2_adjust_range(addr, &range);
ret = host_stage2_adjust_range(addr, &range, level);
if (ret)
return ret;
@ -750,6 +863,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
esr = read_sysreg_el2(SYS_ESR);
BUG_ON(!__get_fault_info(esr, &fault));
fault.esr_el2 = esr;
addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
addr |= fault.far_el2 & FAR_MASK;
@ -763,7 +877,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
/* If not handled, attempt to map the page. */
if (ret == -EPERM)
ret = host_stage2_idmap(addr);
ret = host_stage2_idmap(&fault, addr);
host_unlock_component();


@ -309,12 +309,29 @@ void *hyp_fixmap_map(phys_addr_t phys)
return (void *)slot->addr + offset_in_page(phys);
}
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
void *hyp_fixmap_map_nc(phys_addr_t phys)
{
struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
kvm_pte_t pte, *ptep = slot->ptep;
pte = *ptep;
pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID |
FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, MT_NORMAL_NC);
WRITE_ONCE(*ptep, pte);
dsb(ishst);
return (void *)slot->addr;
}
static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
kvm_pte_t *ptep = slot->ptep;
u64 addr = slot->addr;
WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
/* Zap the memory type too. MT_NORMAL is 0 so the fixmap is cacheable by default */
WRITE_ONCE(*ptep, *ptep & ~(KVM_PTE_VALID | KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX));
/*
* Irritatingly, the architecture requires that we use inner-shareable


@ -199,10 +199,9 @@ static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
*/
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
struct kvm_host_sve_state *sve_state = get_host_sve_state(vcpu);
u64 vq_len = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
sve_cond_update_zcr_vq(vq_len, SYS_ZCR_EL2);
pkvm_set_max_sve_vq();
__sve_save_state(sve_state->sve_regs +
sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr);


@ -668,14 +668,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
CONFIG_PGTABLE_LEVELS),
.mm_ops = &kvm_user_mm_ops,
};
unsigned long flags;
kvm_pte_t pte = 0; /* Keep GCC quiet... */
u32 level = ~0;
int ret;
/*
* Disable IRQs so that we hazard against a concurrent
* teardown of the userspace page tables (which relies on
* IPI-ing threads).
*/
local_irq_save(flags);
ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
VM_BUG_ON(ret);
VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
VM_BUG_ON(!(pte & PTE_VALID));
local_irq_restore(flags);
if (ret)
return ret;
/*
* Not seeing an error, but not updating level? Something went
* deeply wrong...
*/
if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
return -EFAULT;
/* Oops, the userspace PTs are gone... Replay the fault */
if (!kvm_pte_valid(pte))
return -EAGAIN;
return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
}
@ -1167,7 +1186,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
*
* Returns the size of the mapping.
*/
static unsigned long
static long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long hva, kvm_pfn_t *pfnp,
phys_addr_t *ipap)
@ -1179,8 +1198,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* sure that the HVA and IPA are sufficiently aligned and that the
* block map is contained within the memslot.
*/
if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
int sz = get_user_mapping_size(kvm, hva);
if (sz < 0)
return sz;
if (sz < PMD_SIZE)
return PAGE_SIZE;
/*
* The address we faulted on is backed by a transparent huge
* page. However, because we map the compound huge page and
@ -1411,7 +1437,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool logging_active = memslot_is_logging(memslot);
bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
struct kvm_pgtable *pgt;
@ -1568,6 +1594,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
hva, &pfn,
&fault_ipa);
if (vma_pagesize < 0) {
ret = vma_pagesize;
goto out_unlock;
}
}
if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {


@ -514,6 +514,11 @@ static int __init finalize_pkvm(void)
pkvm_firmware_rmem_clear();
}
#ifdef CONFIG_ANDROID_ARM64_WORKAROUND_DMA_BEYOND_POC
if (!ret)
ret = pkvm_register_early_nc_mappings();
#endif
return ret;
}
device_initcall_sync(finalize_pkvm);
@ -601,7 +606,6 @@ early_param("kvm-arm.protected_modules", early_pkvm_modules_cfg);
static void free_modprobe_argv(struct subprocess_info *info)
{
kfree(info->argv[3]);
kfree(info->argv);
}
@ -611,7 +615,8 @@ static void free_modprobe_argv(struct subprocess_info *info)
* security is enforced by making sure this can be called only when pKVM is
* enabled but not yet completely initialized.
*/
static int __init pkvm_request_early_module(char *module_name)
static int __init __pkvm_request_early_module(char *module_name,
char *module_path)
{
char *modprobe_path = CONFIG_MODPROBE_PATH;
struct subprocess_info *info;
@ -622,6 +627,7 @@ static int __init pkvm_request_early_module(char *module_name)
NULL
};
char **argv;
int idx = 0;
if (!is_protected_kvm_enabled())
return -EACCES;
@ -629,41 +635,56 @@ static int __init pkvm_request_early_module(char *module_name)
if (static_branch_likely(&kvm_protected_mode_initialized))
return -EACCES;
argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
argv = kmalloc(sizeof(char *) * 7, GFP_KERNEL);
if (!argv)
return -ENOMEM;
module_name = kstrdup(module_name, GFP_KERNEL);
if (!module_name)
goto free_argv;
argv[0] = modprobe_path;
argv[1] = "-q";
argv[2] = "--";
argv[3] = module_name;
argv[4] = NULL;
argv[idx++] = modprobe_path;
argv[idx++] = "-q";
if (*module_path != '\0') {
argv[idx++] = "-d";
argv[idx++] = module_path;
}
argv[idx++] = "--";
argv[idx++] = module_name;
argv[idx++] = NULL;
info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
NULL, free_modprobe_argv, NULL);
if (!info)
goto free_module_name;
goto err;
/* Even with CONFIG_STATIC_USERMODEHELPER we really want this path */
info->path = modprobe_path;
return call_usermodehelper_exec(info, UMH_WAIT_PROC | UMH_KILLABLE);
free_module_name:
kfree(module_name);
free_argv:
err:
kfree(argv);
return -ENOMEM;
}
static int __init pkvm_request_early_module(char *module_name, char *module_path)
{
int err = __pkvm_request_early_module(module_name, module_path);
if (!err)
return 0;
/* Already tried the default path */
if (*module_path == '\0')
return err;
pr_info("loading %s from %s failed, fallback to the default path\n",
module_name, module_path);
return __pkvm_request_early_module(module_name, "");
}
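/*
 * Hedged aside: with CONFIG_PKVM_MODULE_PATH="/lib/modules/", the argv
 * assembled above is equivalent to running
 *
 *     modprobe -q -d /lib/modules/ -- <module_name>
 *
 * and on failure the wrapper retries without "-d", letting modprobe fall
 * back to the default /lib/modules/<uname> search path.
 */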
int __init pkvm_load_early_modules(void)
{
char *token, *buf = early_pkvm_modules;
char *module_path = CONFIG_PKVM_MODULE_PATH;
int err;
while (true) {
@ -673,7 +694,7 @@ int __init pkvm_load_early_modules(void)
break;
if (*token) {
err = pkvm_request_early_module(token);
err = pkvm_request_early_module(token, module_path);
if (err) {
pr_err("Failed to load pkvm module %s: %d\n",
token, err);
@ -773,6 +794,7 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
{ &mod->data, KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W },
};
void *start, *end, *hyp_va;
struct arm_smccc_res res;
kvm_nvhe_reloc_t *endrel;
int ret, i, secs_first;
size_t offset, size;
@ -802,12 +824,14 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
end = secs_map[ARRAY_SIZE(secs_map) - 1].sec->end;
size = end - start;
hyp_va = (void *)kvm_call_hyp_nvhe(__pkvm_alloc_module_va, size >> PAGE_SHIFT);
if (!hyp_va) {
arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__pkvm_alloc_module_va),
size >> PAGE_SHIFT, &res);
if (res.a0 != SMCCC_RET_SUCCESS || !res.a1) {
kvm_err("Failed to allocate hypervisor VA space for EL2 module\n");
module_put(this);
return -ENOMEM;
return res.a0 == SMCCC_RET_SUCCESS ? -ENOMEM : -EPERM;
}
hyp_va = (void *)res.a1;
/*
* The token can be used for other calls related to this module.


@ -735,6 +735,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
return true;
}
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
u64 *val)
{
u64 idx;
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
/* PMCCNTR_EL0 */
idx = ARMV8_PMU_CYCLE_IDX;
else
/* PMEVCNTRn_EL0 */
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
*val = kvm_pmu_get_counter_value(vcpu, idx);
return 0;
}
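/*
 * Hedged worked example of the decoding above, assuming the architectural
 * PMEVCNTRn_EL0 encoding (CRn = 14, CRm = 0b10nn, Op2 = n[2:0]):
 * PMEVCNTR10_EL0 has CRm = 9 and Op2 = 2, so
 * ((9 & 3) << 3) | (2 & 7) = (1 << 3) | 2 = 10, recovering the counter
 * index.
 */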
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@ -951,7 +967,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
.reset = reset_pmevcntr, \
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
@ -1600,7 +1616,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ PMU_SYS_REG(SYS_PMCEID1_EL0),
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
.access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
.access = access_pmu_evcntr, .reset = reset_unknown,
.reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),


@ -5,6 +5,8 @@
#include <asm/bmips.h>
#include <asm/io.h>
bool bmips_rac_flush_disable;
void arch_sync_dma_for_cpu_all(void)
{
void __iomem *cbr = BMIPS_GET_CBR();
@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void)
boot_cpu_type() != CPU_BMIPS4380)
return;
if (unlikely(bmips_rac_flush_disable))
return;
/* Flush stale data out of the readahead cache */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);


@ -35,6 +35,8 @@
#define REG_BCM6328_OTP ((void __iomem *)CKSEG1ADDR(0x1000062c))
#define BCM6328_TP1_DISABLED BIT(9)
extern bool bmips_rac_flush_disable;
static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
struct bmips_quirk {
@ -104,6 +106,12 @@ static void bcm6358_quirks(void)
* disable SMP for now
*/
bmips_smp_enabled = 0;
/*
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
}
static void bcm6368_quirks(void)


@ -163,6 +163,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
*/
}
static inline bool __pte_protnone(unsigned long pte)
{
return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
}
static inline bool __pte_flags_need_flush(unsigned long oldval,
unsigned long newval)
{
@ -179,8 +184,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
/*
* We do not expect kernel mappings or non-PTEs or not-present PTEs.
*/
VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));


@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
static int ppr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!target->thread.regs)
return -EINVAL;
return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
}
@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
if (!target->thread.regs)
return -EINVAL;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->ppr, 0, sizeof(u64));
}


@ -857,6 +857,13 @@ int pseries_vas_dlpar_cpu(void)
{
int new_nr_creds, rc;
/*
* NX-GZIP is not enabled. Nothing to do for DLPAR event
*/
if (!copypaste_feat)
return 0;
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
(u64)virt_to_phys(&hv_cop_caps));
@ -1013,6 +1020,7 @@ static int __init pseries_vas_init(void)
* Linux supports user space COPY/PASTE only with Radix
*/
if (!radix_enabled()) {
copypaste_feat = false;
pr_err("API is supported only with radix page tables\n");
return -ENOTSUPP;
}


@ -278,7 +278,7 @@ config ARCH_RV64I
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
select SWIOTLB if MMU
endchoice


@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
return;
delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
if (delta_ns) {
hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
t->next_set = true;
}
hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
t->next_set = true;
}
static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)


@ -162,7 +162,7 @@ vdso_prepare: prepare0
ifdef CONFIG_EXPOLINE_EXTERN
modules_prepare: expoline_prepare
expoline_prepare:
expoline_prepare: scripts
$(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
endif
endif


@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
"4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
: "a" (empty_zero_page), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
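/*
 * Hedged note on the constraint change above: the added '&' marks the
 * operands as early-clobber, so the compiler cannot place an input
 * (e.g. empty_zero_page or the spec value) in a register the asm
 * statement writes before all inputs have been consumed.
 */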


@ -109,6 +109,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_AREAS=16
# CONFIG_ZONE_DMA is not set
CONFIG_ZONE_DEVICE=y
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
@ -445,6 +446,7 @@ CONFIG_HID_WIIMOTE=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PCI_RENESAS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
@ -466,6 +468,7 @@ CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_UAC2=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_UVC=y


@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
obj-$(CONFIG_XEN_PV_DOM0) += vga.o
obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_XEN_EFI) += efi.o


@ -1389,7 +1389,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
x86_platform.set_legacy_features =
xen_dom0_set_legacy_features;
xen_init_vga(info, xen_start_info->console.dom0.info_size);
xen_init_vga(info, xen_start_info->console.dom0.info_size,
&boot_params.screen_info);
xen_start_info->console.domU.mfn = 0;
xen_start_info->console.domU.evtchn = 0;


@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params)
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
if (xen_initial_domain()) {
struct xen_platform_op op = {
.cmd = XENPF_get_dom0_console,
};
int ret = HYPERVISOR_platform_op(&op);
if (ret > 0)
xen_init_vga(&op.u.dom0_console,
min(ret * sizeof(char),
sizeof(op.u.dom0_console)),
&boot_params->screen_info);
}
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)


@ -9,10 +9,9 @@
#include "xen-ops.h"
void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
struct screen_info *screen_info)
{
struct screen_info *screen_info = &boot_params.screen_info;
/* This is drawn from a dump from vgacon:startup in
* standard Linux. */
screen_info->orig_video_mode = 3;


@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu)
struct dom0_vga_console_info;
#ifdef CONFIG_XEN_PV_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
struct screen_info *);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
size_t size, struct screen_info *si)
{
}
#endif


@ -541,7 +541,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
size_t len;
size_t len, off = 0;
if (!sp)
sp = stack_pointer(task);
@ -550,9 +550,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
printk("%sStack:\n", loglvl);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
sp, len, false);
while (off < len) {
u8 line[STACK_DUMP_LINE_SIZE];
size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
STACK_DUMP_LINE_SIZE : len - off;
__memcpy(line, (u8 *)sp + off, line_len);
print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
line, line_len, false);
off += STACK_DUMP_LINE_SIZE;
}
show_trace(task, sp, loglvl);
}
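/*
 * Hedged note on the loop above: bouncing each line through a local
 * buffer with the uninstrumented __memcpy() lets print_hex_dump() show
 * the raw stack bytes without tripping KASAN on poisoned redzones.
 */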


@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
KMI_GENERATION=3
KMI_GENERATION=4
LLVM=1
DEPMOD=depmod


@ -456,85 +456,67 @@ static void acpi_bus_osc_negotiate_usb_control(void)
Notification Handling
-------------------------------------------------------------------------- */
/*
* acpi_bus_notify
* ---------------
* Callback for all 'system-level' device notifications (values 0x00-0x7F).
/**
* acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
* @handle: Target ACPI object.
* @type: Notification type.
* @data: Ignored.
*
* This only handles notifications related to device hotplug.
*/
static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
{
struct acpi_device *adev;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
bool hotplug_event = false;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_WAKE:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
break;
return;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
hotplug_event = true;
break;
case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
/* TBD: Exactly what does 'light' mean? */
break;
return;
case ACPI_NOTIFY_FREQUENCY_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a frequency mismatch\n");
break;
return;
case ACPI_NOTIFY_BUS_MODE_MISMATCH:
acpi_handle_err(handle, "Device cannot be configured due "
"to a bus mode mismatch\n");
break;
return;
case ACPI_NOTIFY_POWER_FAULT:
acpi_handle_err(handle, "Device has suffered a power fault\n");
break;
return;
default:
acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
break;
}
adev = acpi_get_acpi_dev(handle);
if (!adev)
goto err;
if (adev->dev.driver) {
struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
if (driver && driver->ops.notify &&
(driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
driver->ops.notify(adev, type);
}
if (!hotplug_event) {
acpi_put_acpi_dev(adev);
return;
}
if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
adev = acpi_get_acpi_dev(handle);
if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
acpi_put_acpi_dev(adev);
err:
acpi_evaluate_ost(handle, type, ost_code, NULL);
acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
}
static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
@ -559,42 +541,51 @@ static u32 acpi_device_fixed_event(void *data)
return ACPI_INTERRUPT_HANDLED;
}
static int acpi_device_install_notify_handler(struct acpi_device *device)
static int acpi_device_install_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
acpi_status status;
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event,
device);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
status =
acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event,
device);
else
status = acpi_install_notify_handler(device->handle,
ACPI_DEVICE_NOTIFY,
} else {
u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
status = acpi_install_notify_handler(device->handle, type,
acpi_notify_device,
device);
}
if (ACPI_FAILURE(status))
return -EINVAL;
return 0;
}
static void acpi_device_remove_notify_handler(struct acpi_device *device)
static void acpi_device_remove_notify_handler(struct acpi_device *device,
struct acpi_driver *acpi_drv)
{
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
acpi_device_fixed_event);
else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
acpi_device_fixed_event);
else
acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
} else {
u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
acpi_remove_notify_handler(device->handle, type,
acpi_notify_device);
}
}
/* Handle events targeting \_SB device (at present only graceful shutdown) */
@ -1036,7 +1027,7 @@ static int acpi_device_probe(struct device *dev)
acpi_drv->name, acpi_dev->pnp.bus_id);
if (acpi_drv->ops.notify) {
ret = acpi_device_install_notify_handler(acpi_dev);
ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
if (ret) {
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
@ -1059,7 +1050,7 @@ static void acpi_device_remove(struct device *dev)
struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
if (acpi_drv->ops.notify)
acpi_device_remove_notify_handler(acpi_dev);
acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
if (acpi_drv->ops.remove)
acpi_drv->ops.remove(acpi_dev);
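With the rework above, a driver that opts in via ACPI_DRIVER_ALL_NOTIFY_EVENTS now receives system-level (0x00-0x7F) notifications through its own handler installed at probe time, instead of having acpi_bus_notify() forward them. A minimal hedged sketch (driver and callback names are hypothetical):

static void my_notify(struct acpi_device *adev, u32 event)
{
	/* Sees device events and, with the flag below, system events too. */
	dev_info(&adev->dev, "ACPI event 0x%x\n", event);
}

static struct acpi_driver my_acpi_driver = {
	.name  = "my_acpi_driver",		/* hypothetical */
	.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,	/* opt in to 0x00-0x7F */
	.ops   = { .notify = my_notify },
};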


@ -714,6 +714,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
},
},
{
.callback = video_detect_force_native,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
},
},
/*
* Desktops which falsely report a backlight and which our heuristics


@ -36,6 +36,8 @@
#include <trace/hooks/cpuidle_psci.h>
#include <trace/hooks/vmscan.h>
#include <trace/hooks/avc.h>
#include <trace/hooks/creds.h>
#include <trace/hooks/module.h>
#include <trace/hooks/selinux.h>
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/remoteproc.h>
@ -55,6 +57,7 @@
#include <trace/hooks/traps.h>
#include <trace/hooks/thermal.h>
#include <trace/hooks/audio_usboffload.h>
#include <trace/hooks/typec.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@ -110,6 +113,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_uic_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_tm_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_check_int_errors);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sdev);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_clock_scaling);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_alloc_insert_iova);
@ -129,6 +133,14 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_replace);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_lookup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_commit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_exit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_override_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_revert_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_core_rw_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_init_rw_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_permit_before_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_module_permit_after_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_is_initialized);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_shmem_get_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_mmap_file);
@ -168,3 +180,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpci_override_toggling);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_get_vbus);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_store_partner_src_caps);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_get_timer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_modify_src_caps);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_log);


@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
}
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
loop_global_unlock(lo, is_loop);
if (partscan)
loop_reread_partitions(lo);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, loop_configure);
error = 0;
done:
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
return error;
return 0;
out_unlock:
loop_global_unlock(lo, is_loop);
@ -1130,7 +1130,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
fput(file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
goto done;
return error;
}
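/*
 * Hedged reading of the reordering above: uevents are uncorked before the
 * partition rescan rather than at function exit, so LOOP_CONFIGURE now
 * emits uevents for partitions discovered during the scan.
 */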
static void __loop_clr_fd(struct loop_device *lo, bool release)


@ -656,7 +656,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
}
}
static void ubq_complete_io_cmd(struct ublk_io *io, int res)
static void ubq_complete_io_cmd(struct ublk_io *io, int res,
unsigned issue_flags)
{
/* mark this cmd owned by ublksrv */
io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
@ -668,7 +669,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
/* tell ublksrv one io request is coming */
io_uring_cmd_done(io->cmd, res, 0);
io_uring_cmd_done(io->cmd, res, 0, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@ -685,7 +686,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
}
static inline void __ublk_rq_task_work(struct request *req)
static inline void __ublk_rq_task_work(struct request *req,
unsigned issue_flags)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
int tag = req->tag;
@ -723,7 +725,7 @@ static inline void __ublk_rq_task_work(struct request *req)
pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
__func__, io->cmd->cmd_op, ubq->q_id,
req->tag, io->flags);
ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
return;
}
/*
@ -761,17 +763,18 @@ static inline void __ublk_rq_task_work(struct request *req)
mapped_bytes >> 9;
}
ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
unsigned issue_flags)
{
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
struct ublk_rq_data *data, *tmp;
io_cmds = llist_reverse_order(io_cmds);
llist_for_each_entry_safe(data, tmp, io_cmds, node)
__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
}
static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
@ -783,12 +786,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
}
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
{
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
ublk_forward_io_cmds(ubq);
ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_rq_task_work_fn(struct callback_head *work)
@ -797,8 +800,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
struct ublk_rq_data, work);
struct request *req = blk_mq_rq_from_pdu(data);
struct ublk_queue *ubq = req->mq_hctx->driver_data;
unsigned issue_flags = IO_URING_F_UNLOCKED;
ublk_forward_io_cmds(ubq);
ublk_forward_io_cmds(ubq, issue_flags);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@ -1052,7 +1056,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_ACTIVE)
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
IO_URING_F_UNLOCKED);
}
/* all io commands are canceled */
@ -1295,7 +1300,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
return -EIOCBQUEUED;
out:
io_uring_cmd_done(cmd, ret, 0);
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
return -EIOCBQUEUED;
@ -2053,7 +2058,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
break;
}
out:
io_uring_cmd_done(cmd, ret, 0);
io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
return -EIOCBQUEUED;
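The thread running through this change: io_uring_cmd_done() now takes the issue_flags under which the completion runs, and callers must forward what they were given rather than assuming an unlocked context (IO_URING_F_UNLOCKED is used only where no io_uring submission context is held, as in ublk_rq_task_work_fn() and the cancel path above). A hedged sketch of a conforming task-work callback (names hypothetical):

static void my_cmd_task_work(struct io_uring_cmd *cmd, unsigned issue_flags)
{
	/* Forward the flags this callback was invoked with. */
	io_uring_cmd_done(cmd, 0, 0, issue_flags);
}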


@ -6,7 +6,6 @@
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
@ -100,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
filp->private_data = ctx;
if (device_user_pasid_enabled(idxd)) {
sva = iommu_sva_bind_device(dev, current->mm, NULL);
sva = iommu_sva_bind_device(dev, current->mm);
if (IS_ERR(sva)) {
rc = PTR_ERR(sva);
dev_err(dev, "pasid allocation failed: %d\n", rc);
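A hedged sketch of the flags-free SVA flow this driver now uses (the helper name is hypothetical; the calls mirror the pattern visible in the hunks above and below):

static int bind_current_mm(struct device *dev, struct iommu_sva **out,
			   u32 *pasid)
{
	struct iommu_sva *sva = iommu_sva_bind_device(dev, current->mm);

	if (IS_ERR(sva))
		return PTR_ERR(sva);
	*pasid = iommu_sva_get_pasid(sva);
	if (*pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}
	*out = sva;	/* caller releases with iommu_sva_unbind_device() */
	return 0;
}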


@ -14,7 +14,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
@ -502,29 +501,7 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
int flags;
unsigned int pasid;
struct iommu_sva *sva;
flags = SVM_FLAG_SUPERVISOR_MODE;
sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
if (IS_ERR(sva)) {
dev_warn(&idxd->pdev->dev,
"iommu sva bind failed: %ld\n", PTR_ERR(sva));
return PTR_ERR(sva);
}
pasid = iommu_sva_get_pasid(sva);
if (pasid == IOMMU_PASID_INVALID) {
iommu_sva_unbind_device(sva);
return -ENODEV;
}
idxd->sva = sva;
idxd->pasid = pasid;
dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
return 0;
return -EOPNOTSUPP;
}
static void idxd_disable_system_pasid(struct idxd_device *idxd)


@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
*/
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
if ((adev->flags & AMD_IS_APU) &&
adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU-enabled APUs */
return false;
if ((adev->flags & AMD_IS_APU) &&
amdgpu_acpi_is_s3_active(adev))
return false;
if (amdgpu_sriov_vf(adev))


@ -1298,14 +1298,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
args->n_success = i+1;
}
mutex_unlock(&p->mutex);
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@ -1321,9 +1321,9 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
kfree(devices_arr);
return err;
@ -1337,6 +1337,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
@ -1389,16 +1390,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
}
args->n_success = i+1;
}
mutex_unlock(&p->mutex);
if (kfd_flush_tlb_after_unmap(pdd->dev)) {
flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
}
mutex_unlock(&p->mutex);
if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
@ -1414,9 +1418,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
kfree(devices_arr);
return err;
}


@ -295,7 +295,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch)
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->npages;
struct device *dev = adev->dev;
@ -305,19 +305,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
r = svm_range_vram_node_new(adev, prange, true);
if (r) {
dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
goto out;
}
amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
@ -397,14 +391,14 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
migrate->dst[i + 3] = 0;
}
#endif
out:
return r;
}
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end, uint32_t trigger)
uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@ -457,7 +451,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
else
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
@ -505,6 +499,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
uint64_t ttm_res_offset;
unsigned long cpages = 0;
long r = 0;
@ -526,6 +521,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(adev, prange, true);
if (r) {
dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
ttm_res_offset = prange->offset << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
@ -534,18 +536,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
break;
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
ttm_res_offset += next - addr;
addr = next;
}
if (cpages)
prange->actual_loc = best_loc;
else
svm_range_vram_node_free(prange);
return r < 0 ? r : 0;
}


@ -77,6 +77,7 @@ static int kfd_init(void)
static void kfd_exit(void)
{
kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();


@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);


@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
mmu_notifier_put(&p->mmu_notifier);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
return;
mutex_lock(&kfd_processes_mutex);
/*
* Do early return if table is empty.
*
* This could potentially happen if this function is called concurrently
* by mmu_notifier and by kfd_cleanup_processes.
*
*/
if (hash_empty(kfd_processes_table)) {
mutex_unlock(&kfd_processes_mutex);
return;
}
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
mmu_notifier_put(&p->mmu_notifier);
kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.free_notifier = kfd_process_free_notifier,
};
/*
* This code handles the case when the driver is being unloaded before all
* mm_structs are released. We need to safely free the kfd_process and
* avoid race conditions with mmu_notifier that might try to free them.
*
*/
void kfd_cleanup_processes(void)
{
struct kfd_process *p;
struct hlist_node *p_temp;
unsigned int temp;
HLIST_HEAD(cleanup_list);
/*
* Move all remaining kfd_process from the process table to a
* temp list for processing. Once done, the mmu_notifier release
* callback will not find the kfd_process in the table and will return
* early, avoiding double-free issues.
*/
mutex_lock(&kfd_processes_mutex);
hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
hash_del_rcu(&p->kfd_processes);
synchronize_srcu(&kfd_processes_srcu);
hlist_add_head(&p->kfd_processes, &cleanup_list);
}
mutex_unlock(&kfd_processes_mutex);
hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
kfd_process_notifier_release_internal(p);
/*
* Ensures that all outstanding free_notifier callbacks get called, triggering
* the release of the kfd_process struct.
*/
mmu_notifier_synchronize();
}
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;


@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
cleanup:
if (dev->shared_resources.enable_mes)
uninit_queue(*q);
uninit_queue(*q);
*q = NULL;
return retval;
}


@ -208,6 +208,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
return false;
}
bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
DRM_INFO("Synaptics Cascaded MST hub\n");
return true;
}
}
return false;
}
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@ -231,6 +246,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)
return false;
@ -627,12 +646,25 @@ struct dsc_mst_fairness_params {
struct amdgpu_dm_connector *aconnector;
};
static int kbps_to_peak_pbn(int kbps)
static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
{
u8 link_coding_cap;
uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
if (link_coding_cap == DP_128b_132b_ENCODING)
fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
return fec_overhead_multiplier_x1000;
}
static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
peak_kbps *= 1006;
peak_kbps = div_u64(peak_kbps, 1000);
peak_kbps *= fec_overhead_multiplier_x1000;
peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}
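The net effect of the new helper pair is easiest to see with concrete numbers. Below is a small standalone C sketch of the updated math (the 1 Gbps stream rate is invented, and plain 64-bit arithmetic stands in for div_u64()/DIV64_U64_ROUND_UP()): the 0.6% downspread margin is applied first, then the FEC overhead multiplier (1031/1000 for 8b/10b, 1000/1000 for 128b/132b), and finally the usual 64/(54*8) PBN scaling with a round-up.

#include <stdint.h>
#include <stdio.h>

static int peak_pbn(int kbps, uint16_t fec_x1000)
{
	uint64_t peak = (uint64_t)kbps * 1006;	/* +0.6% downspread margin */

	peak *= fec_x1000;			/* FEC overhead, x1000 fixed point */
	peak /= 1000 * 1000;
	/* round-up division, as DIV64_U64_ROUND_UP() does in the kernel */
	return (int)((peak * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000));
}

int main(void)
{
	printf("%d\n", peak_pbn(1000000, 1031));	/* 8b/10b    -> 154 PBN */
	printf("%d\n", peak_pbn(1000000, 1000));	/* 128b/132b -> 150 PBN */
	return 0;
}

With the FEC overhead folded in, an 8b/10b link now reserves 154 PBN for a stream that the old code rounded to 150, which is exactly the headroom it used to under-allocate.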
@ -719,11 +751,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@ -819,6 +852,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled
@ -848,7 +882,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@ -861,7 +895,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@ -890,6 +924,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
memset(params, 0, sizeof(params));
@ -951,7 +986,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try no compression */
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@ -970,7 +1005,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
/* Try max compression */
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@ -978,7 +1013,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,


@ -34,6 +34,21 @@
#define SYNAPTICS_RC_OFFSET 0x4BC
#define SYNAPTICS_RC_DATA 0x4C0
#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
/**
* Panamera MST Hub detection
* Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
* Check from beginning of branch device vendor specific field (050Ch)
*/
#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
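For clarity, the two detection macros combine as shown in the sketch below, which uses invented DPCD bytes. The 4-byte vendor read issued by is_synaptics_cascaded_panamera() starts at 0x50C, so data[2] corresponds to DPCD 0x50E; a value of 0x5A there, together with a Panamera device-name byte (high nibble 0x5 at index 4), identifies the cascaded hub:

#include <stdio.h>

#define SYNAPTICS_CASCADED_HUB_ID 0x5A
#define IS_SYNAPTICS_PANAMERA(name) ((((int)(name)[4] & 0xF0) == 0x50) ? 1 : 0)
#define IS_SYNAPTICS_CASCADED_PANAMERA(name, data) \
	((IS_SYNAPTICS_PANAMERA(name) && ((int)(data)[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)

int main(void)
{
	char name[6] = { 0, 0, 0, 0, 0x53, 0 };			/* invented branch_dev_name */
	unsigned char data[4] = { 0x00, 0x00, 0x5A, 0x00 };	/* DPCD 0x50C..0x50F */

	printf("%d\n", IS_SYNAPTICS_CASCADED_PANAMERA(name, data));	/* prints 1 */
	return 0;
}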


@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma)
{
return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
int ret;
ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
if (!ret) {
/* Drop the reference acquired by drm_gem_mmap_obj(). */
drm_gem_object_put(&etnaviv_obj->base);
}
return ret;
}
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {


@ -499,6 +499,22 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
icl_load_csc_matrix(crtc_state);
}
static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
/*
* Possibly related to display WA #1184, SKL CSC loses the latched
* CSC coeff/offset register values if the CSC registers are disarmed
* between DC5 exit and PSR exit. This will cause the plane(s) to
* output all black (until CSC_MODE is rearmed and properly latched).
* Once PSR exit (and proper register latching) has occurred the
* danger is over. Thus when PSR is enabled the CSC coeff/offset
* register programming will be performed from skl_color_commit_arm()
* which is called after PSR exit.
*/
if (!crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
}
static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
ilk_load_csc_matrix(crtc_state);
@ -541,6 +557,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val = 0;
if (crtc_state->has_psr)
ilk_load_csc_matrix(crtc_state);
/*
* We don't (yet) allow userspace to control the pipe background color,
* so force it to black, but apply pipe gamma and CSC appropriately
@ -2171,7 +2190,7 @@ static const struct intel_color_funcs icl_color_funcs = {
static const struct intel_color_funcs glk_color_funcs = {
.color_check = glk_color_check,
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
@ -2179,7 +2198,7 @@ static const struct intel_color_funcs glk_color_funcs = {
static const struct intel_color_funcs skl_color_funcs = {
.color_check = ivb_color_check,
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_noarm = skl_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
.read_luts = NULL,


@ -7124,6 +7124,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
if (!modeset &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@ -7500,8 +7502,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
/*
* During full modesets we write a lot of registers, wait
* for PLLs, etc. Doing that while DC states are enabled
* is not a good idea.
*
* During fastsets and other updates we also need to
* disable DC states due to the following scenario:
* 1. DC5 exit and PSR exit happen
* 2. Some or all _noarm() registers are written
* 3. Due to some long delay PSR is re-entered
* 4. DC5 entry -> DMC saves the already written new
* _noarm() registers and the old not yet written
* _arm() registers
* 5. DC5 exit -> DMC restores a mixture of old and
* new register values and arms the update
* 6. PSR exit -> hardware latches a mixture of old and
* new register values -> corrupted frame, or worse
* 7. New _arm() registers are finally written
* 8. Hardware finally latches a complete set of new
* register values, and subsequent frames will be OK again
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
intel_atomic_prepare_plane_clear_colors(state);
@ -7640,8 +7662,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*


@ -300,6 +300,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
vm->pte_encode = gen8_ggtt_pte_encode;
dpt->obj = dpt_obj;
dpt->obj->is_dpt = true;
return &dpt->vm;
}
@ -308,5 +309,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
dpt->obj->is_dpt = false;
i915_vm_put(&dpt->vm);
}


@ -440,9 +440,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assume safe mode\n",
"Port %s: PHY in TCCOLD, assume not owned\n",
dig_port->tc_port_name);
return true;
return false;
}
return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);


@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
memcpy(map, data, size);
i915_gem_object_unpin_map(obj);
i915_gem_object_flush_map(obj);
__i915_gem_object_release_map(obj);
return obj;
}


@ -319,7 +319,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->frontbuffer);
return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
}
static inline unsigned int


@ -491,6 +491,9 @@ struct drm_i915_gem_object {
*/
unsigned int cache_dirty:1;
/* @is_dpt: Object houses a display page table (DPT) */
unsigned int is_dpt:1;
/**
* @read_domains: Read memory domains.
*


@ -356,7 +356,7 @@ static const struct dpu_caps sc8180x_dpu_caps = {
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
.qseed_type = DPU_SSPP_SCALER_QSEED4,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
.ubwc_version = DPU_HW_UBWC_VER_40,
.has_src_split = true,
@ -855,22 +855,22 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
};
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
static const struct dpu_sspp_cfg sm8250_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
@ -1180,6 +1180,13 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
-1),
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
static struct dpu_pingpong_cfg qcm2290_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@ -1203,13 +1210,6 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
};
static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
/*************************************************************
* DSC sub blocks config
*************************************************************/


@ -14,7 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spinlock.h>
#define SE_I2C_TX_TRANS_LEN 0x26c


@ -168,7 +168,13 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
raw_local_irq_disable();
/*
* The lockdep hardirqs state may be changed to 'on' with timer
* tick interrupt followed by __do_softirq(). Use local_irq_disable()
* to keep the hardirqs state correct.
*/
local_irq_disable();
return ret;
}


@ -779,9 +779,6 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
input_report_key(dev, BTN_C, data[8]);
input_report_key(dev, BTN_Z, data[9]);
/* Profile button has a value of 0-3, so it is reported as an axis */
if (xpad->mapping & MAP_PROFILE_BUTTON)
input_report_abs(dev, ABS_PROFILE, data[34]);
input_sync(dev);
}
@ -1059,6 +1056,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
(__u16) le16_to_cpup((__le16 *)(data + 8)));
}
/* Profile button has a value of 0-3, so it is reported as an axis */
if (xpad->mapping & MAP_PROFILE_BUTTON)
input_report_abs(dev, ABS_PROFILE, data[34]);
/* paddle handling */
/* based on SDL's SDL_hidapi_xboxone.c */
if (xpad->mapping & MAP_PADDLES) {


@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
x = y = z = 0;
/* Divide 4 since trackpoint's speed is too fast */
input_report_rel(dev2, REL_X, (char)x / 4);
input_report_rel(dev2, REL_Y, -((char)y / 4));
input_report_rel(dev2, REL_X, (s8)x / 4);
input_report_rel(dev2, REL_Y, -((s8)y / 4));
psmouse_report_standard_buttons(dev2, packet[3]);
@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
((packet[3] & 0x20) << 1);
z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
input_report_rel(dev2, REL_X, (char)x);
input_report_rel(dev2, REL_Y, -((char)y));
input_report_rel(dev2, REL_X, (s8)x);
input_report_rel(dev2, REL_Y, -((s8)y));
input_report_abs(dev2, ABS_PRESSURE, z);
psmouse_report_standard_buttons(dev2, packet[1]);
@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
if (reg < 0)
return reg;
x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
if (reg < 0)
return reg;
x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
x_electrode = 17 + x_electrode;
y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
y_electrode = 13 + y_electrode;
x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
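The char-to-s8 conversion above is not cosmetic: plain char has implementation-defined signedness, and on architectures where it is unsigned (arm64, s390, PowerPC) the old casts could never produce a negative delta. A minimal demo with an invented wire byte, using int8_t as the userspace equivalent of the kernel's s8:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char raw = 0xF8;	/* trackpoint delta of -8 on the wire */

	/* Where plain char is unsigned, (char)raw is 248 and the division
	 * loses the sign; int8_t is guaranteed signed, so the result is
	 * always -2. */
	printf("(char)raw / 4   = %d\n", (char)raw / 4);
	printf("(int8_t)raw / 4 = %d\n", (int8_t)raw / 4);
	return 0;
}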


@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
state->pressed = packet[0] >> 7;
finger1 = ((packet[0] >> 4) & 0x7) - 1;
if (finger1 < FOC_MAX_FINGERS) {
state->fingers[finger1].x += (char)packet[1];
state->fingers[finger1].y += (char)packet[2];
state->fingers[finger1].x += (s8)packet[1];
state->fingers[finger1].y += (s8)packet[2];
} else {
psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
finger1);
@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
*/
finger2 = ((packet[3] >> 4) & 0x7) - 1;
if (finger2 < FOC_MAX_FINGERS) {
state->fingers[finger2].x += (char)packet[4];
state->fingers[finger2].y += (char)packet[5];
state->fingers[finger2].x += (s8)packet[4];
state->fingers[finger2].y += (s8)packet[5];
}
}


@ -610,6 +610,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Fujitsu Lifebook A574/H */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Gigabyte M912 */
.matches = {
@ -1116,6 +1124,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
/*
* Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
* the keyboard very laggy for ~5 seconds after boot and
* sometimes also after resume.
* However both are required for the keyboard to not fail
* completely sometimes after boot or resume.
*/
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
@ -1123,6 +1145,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
/*
* Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
* the keyboard very laggy for ~5 seconds after boot and
* sometimes also after resume.
* However both are required for the keyboard to not fail
* completely sometimes after boot or resume.
*/
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),


@ -124,10 +124,18 @@ static const unsigned long goodix_irq_flags[] = {
static const struct dmi_system_id nine_bytes_report[] = {
#if defined(CONFIG_DMI) && defined(CONFIG_X86)
{
.ident = "Lenovo YogaBook",
/* YB1-X91L/F and YB1-X90L/F */
/* Lenovo Yoga Book X90F / X90L */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
}
},
{
/* Lenovo Yoga Book X91F / X91L */
.matches = {
/* Non exact match to match F + L versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
}
},
#endif


@ -29,7 +29,7 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o
obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_QCOM_IOMMU_UTIL) += qcom_iommu_util.o
qcom_iommu_util-y += qcom-iommu-util.o


@ -10,7 +10,7 @@
#include <linux/slab.h>
#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
@ -344,11 +344,6 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
if (!bond)
return ERR_PTR(-ENOMEM);
/* Allocate a PASID for this mm if necessary */
ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);
if (ret)
goto err_free_bond;
bond->mm = mm;
bond->sva.dev = dev;
refcount_set(&bond->refs, 1);
@ -367,42 +362,6 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
return ERR_PTR(ret);
}
struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_sva *handle;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return ERR_PTR(-EINVAL);
mutex_lock(&sva_lock);
handle = __arm_smmu_sva_bind(dev, mm);
mutex_unlock(&sva_lock);
return handle;
}
void arm_smmu_sva_unbind(struct iommu_sva *handle)
{
struct arm_smmu_bond *bond = sva_to_bond(handle);
mutex_lock(&sva_lock);
if (refcount_dec_and_test(&bond->refs)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
kfree(bond);
}
mutex_unlock(&sva_lock);
}
u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
struct arm_smmu_bond *bond = sva_to_bond(handle);
return bond->mm->pasid;
}
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
unsigned long reg, fld;
@ -550,3 +509,64 @@ void arm_smmu_sva_notifier_synchronize(void)
*/
mmu_notifier_synchronize();
}
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id)
{
struct mm_struct *mm = domain->mm;
struct arm_smmu_bond *bond = NULL, *t;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
break;
}
}
if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
kfree(bond);
}
mutex_unlock(&sva_lock);
}
static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id)
{
int ret = 0;
struct iommu_sva *handle;
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
handle = __arm_smmu_sva_bind(dev, mm);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
mutex_unlock(&sva_lock);
return ret;
}
static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
{
kfree(domain);
}
static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
.set_dev_pasid = arm_smmu_sva_set_dev_pasid,
.free = arm_smmu_sva_domain_free
};
struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
struct iommu_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
domain->ops = &arm_smmu_sva_domain_ops;
return domain;
}


@ -29,7 +29,7 @@
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
#include "../../iommu-sva-lib.h"
#include "../../iommu-sva.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@ -2009,6 +2009,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
if (type == IOMMU_DOMAIN_SVA)
return arm_smmu_sva_domain_alloc();
if (type != IOMMU_DOMAIN_UNMANAGED &&
type != IOMMU_DOMAIN_DMA &&
type != IOMMU_DOMAIN_DMA_FQ &&
@ -2838,6 +2841,17 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
struct iommu_domain *domain;
domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
if (WARN_ON(IS_ERR(domain)) || !domain)
return;
arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
}
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@ -2846,11 +2860,9 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.remove_dev_pasid = arm_smmu_remove_dev_pasid,
.dev_enable_feat = arm_smmu_dev_enable_feature,
.dev_disable_feat = arm_smmu_dev_disable_feature,
.sva_bind = arm_smmu_sva_bind,
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
.page_response = arm_smmu_page_response,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
@ -3543,6 +3555,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
/* SID/SSID sizes */
smmu->ssid_bits = FIELD_GET(IDR1_SSIDSIZE, reg);
smmu->sid_bits = FIELD_GET(IDR1_SIDSIZE, reg);
smmu->iommu.max_pasids = 1UL << smmu->ssid_bits;
/*
* If the SMMU supports fewer bits than would fill a single L2 stream


@ -754,11 +754,10 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void arm_smmu_sva_unbind(struct iommu_sva *handle);
u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(void);
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t id);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
@ -790,19 +789,17 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master
return false;
}
static inline struct iommu_sva *
arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
return ERR_PTR(-ENODEV);
}
static inline void arm_smmu_sva_unbind(struct iommu_sva *handle) {}
static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle)
{
return IOMMU_PASID_INVALID;
}
static inline void arm_smmu_sva_notifier_synchronize(void) {}
static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void)
{
return NULL;
}
static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev,
ioasid_t id)
{
}
#endif /* CONFIG_ARM_SMMU_V3_SVA */
#endif /* _ARM_SMMU_V3_H */


@ -1057,7 +1057,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
err = -EINVAL;
if (cap_sagaw(iommu->cap) == 0) {
if (!cap_sagaw(iommu->cap) &&
(!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
iommu->name);
drhd->ignored = 1;
@ -1105,6 +1106,13 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
/*
* A value of N in PSS field of eCap register indicates hardware
* supports PASID field of N+1 bits.
*/
if (pasid_supported(iommu))
iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
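The N+1 encoding is worth a concrete number (the PSS value below is invented): a PSS field of 19 means the hardware supports 20-bit PASIDs, and 2UL << 19 is the corresponding 1048576-entry space.

#include <stdio.h>

int main(void)
{
	unsigned int pss = 19;			/* example eCap.PSS reading */
	unsigned long max_pasids = 2UL << pss;	/* N+1 bits -> 2^20 */

	printf("%lu\n", max_pasids);		/* prints 1048576 */
	return 0;
}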
/*
* This is only for hotplug; at boot time intel_iommu_enabled won't
* be set yet. When intel_iommu_init() runs, it registers the units


@ -27,7 +27,7 @@
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "../iommu-sva.h"
#include "pasid.h"
#include "cap_audit.h"
@ -4189,6 +4189,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return domain;
case IOMMU_DOMAIN_IDENTITY:
return &si_domain->domain;
case IOMMU_DOMAIN_SVA:
return intel_svm_domain_alloc();
default:
return NULL;
}
@ -4746,6 +4748,28 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
}
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct iommu_domain *domain;
/* Domain type specific cleanup: */
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
if (domain) {
switch (domain->type) {
case IOMMU_DOMAIN_SVA:
intel_svm_remove_dev_pasid(dev, pasid);
break;
default:
/* should never reach here */
WARN_ON(1);
break;
}
}
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
}
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@ -4758,11 +4782,9 @@ const struct iommu_ops intel_iommu_ops = {
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.remove_dev_pasid = intel_iommu_remove_dev_pasid,
.pgsize_bitmap = SZ_4K,
#ifdef CONFIG_INTEL_IOMMU_SVM
.sva_bind = intel_svm_bind,
.sva_unbind = intel_svm_unbind,
.sva_get_pasid = intel_svm_get_pasid,
.page_response = intel_svm_page_response,
#endif
.default_domain_ops = &(const struct iommu_domain_ops) {


@ -480,8 +480,6 @@ enum {
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
#define VTD_FLAG_SVM_CAPABLE (1 << 2)
extern int intel_iommu_sm;
#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
@ -754,12 +752,10 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
u32 intel_svm_get_pasid(struct iommu_sva *handle);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
struct intel_svm_dev {
struct list_head list;
@ -784,6 +780,14 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
return NULL;
}
static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
}
#endif
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
@ -799,6 +803,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
extern const struct iommu_ops intel_iommu_ops;
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
@ -814,6 +819,7 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#define intel_iommu_sm (0)
#endif
static inline const char *decode_prq_descriptor(char *str, size_t size,


@ -24,7 +24,7 @@
#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva-lib.h"
#include "../iommu-sva.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
@ -299,19 +299,9 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
return 0;
}
static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
unsigned int flags)
{
ioasid_t max_pasid = dev_is_pci(dev) ?
pci_max_pasids(to_pci_dev(dev)) : intel_pasid_max_id;
return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
}
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
struct device *dev,
struct mm_struct *mm,
unsigned int flags)
struct mm_struct *mm)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_svm_dev *sdev;
@ -327,22 +317,18 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
svm->pasid = mm->pasid;
svm->mm = mm;
svm->flags = flags;
INIT_LIST_HEAD_RCU(&svm->devs);
if (!(flags & SVM_FLAG_SUPERVISOR_MODE)) {
svm->notifier.ops = &intel_mmuops;
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
kfree(svm);
return ERR_PTR(ret);
}
svm->notifier.ops = &intel_mmuops;
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
kfree(svm);
return ERR_PTR(ret);
}
ret = pasid_private_add(svm->pasid, svm);
if (ret) {
if (svm->notifier.ops)
mmu_notifier_unregister(&svm->notifier, mm);
mmu_notifier_unregister(&svm->notifier, mm);
kfree(svm);
return ERR_PTR(ret);
}
@ -377,9 +363,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
}
/* Setup the pasid table: */
sflags = (flags & SVM_FLAG_SUPERVISOR_MODE) ?
PASID_FLAG_SUPERVISOR_MODE : 0;
sflags |= cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
FLPT_DEFAULT_DID, sflags);
if (ret)
@ -393,8 +377,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
kfree(sdev);
free_svm:
if (list_empty(&svm->devs)) {
if (svm->notifier.ops)
mmu_notifier_unregister(&svm->notifier, mm);
mmu_notifier_unregister(&svm->notifier, mm);
pasid_private_remove(mm->pasid);
kfree(svm);
}
@ -787,67 +770,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
return IRQ_RETVAL(handled);
}
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
unsigned int flags = 0;
struct iommu_sva *sva;
int ret;
if (drvdata)
flags = *(unsigned int *)drvdata;
if (flags & SVM_FLAG_SUPERVISOR_MODE) {
if (!ecap_srs(iommu->ecap)) {
dev_err(dev, "%s: Supervisor PASID not supported\n",
iommu->name);
return ERR_PTR(-EOPNOTSUPP);
}
if (mm) {
dev_err(dev, "%s: Supervisor PASID with user provided mm\n",
iommu->name);
return ERR_PTR(-EINVAL);
}
mm = &init_mm;
}
mutex_lock(&pasid_mutex);
ret = intel_svm_alloc_pasid(dev, mm, flags);
if (ret) {
mutex_unlock(&pasid_mutex);
return ERR_PTR(ret);
}
sva = intel_svm_bind_mm(iommu, dev, mm, flags);
mutex_unlock(&pasid_mutex);
return sva;
}
void intel_svm_unbind(struct iommu_sva *sva)
{
struct intel_svm_dev *sdev = to_intel_svm_dev(sva);
mutex_lock(&pasid_mutex);
intel_svm_unbind_mm(sdev->dev, sdev->pasid);
mutex_unlock(&pasid_mutex);
}
u32 intel_svm_get_pasid(struct iommu_sva *sva)
{
struct intel_svm_dev *sdev;
u32 pasid;
mutex_lock(&pasid_mutex);
sdev = to_intel_svm_dev(sva);
pasid = sdev->pasid;
mutex_unlock(&pasid_mutex);
return pasid;
}
int intel_svm_page_response(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg)
@ -918,3 +840,50 @@ int intel_svm_page_response(struct device *dev,
out:
return ret;
}
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
mutex_lock(&pasid_mutex);
intel_svm_unbind_mm(dev, pasid);
mutex_unlock(&pasid_mutex);
}
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
struct iommu_sva *sva;
int ret = 0;
mutex_lock(&pasid_mutex);
sva = intel_svm_bind_mm(iommu, dev, mm);
if (IS_ERR(sva))
ret = PTR_ERR(sva);
mutex_unlock(&pasid_mutex);
return ret;
}
static void intel_svm_domain_free(struct iommu_domain *domain)
{
kfree(to_dmar_domain(domain));
}
static const struct iommu_domain_ops intel_svm_domain_ops = {
.set_dev_pasid = intel_svm_set_dev_pasid,
.free = intel_svm_domain_free
};
struct iommu_domain *intel_svm_domain_alloc(void)
{
struct dmar_domain *domain;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
domain->domain.ops = &intel_svm_domain_ops;
return &domain->domain;
}


@ -11,7 +11,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "iommu-sva-lib.h"
#include "iommu-sva.h"
/**
* struct iopf_queue - IO Page Fault queue
@ -69,69 +69,18 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
return iommu_page_response(dev, &resp);
}
static enum iommu_page_response_code
iopf_handle_single(struct iopf_fault *iopf)
{
vm_fault_t ret;
struct mm_struct *mm;
struct vm_area_struct *vma;
unsigned int access_flags = 0;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
struct iommu_fault_page_request *prm = &iopf->fault.prm;
enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
return status;
mm = iommu_sva_find(prm->pasid);
if (IS_ERR_OR_NULL(mm))
return status;
mmap_read_lock(mm);
vma = find_extend_vma(mm, prm->addr);
if (!vma)
/* Unmapped area */
goto out_put_mm;
if (prm->perm & IOMMU_FAULT_PERM_READ)
access_flags |= VM_READ;
if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
access_flags |= VM_WRITE;
fault_flags |= FAULT_FLAG_WRITE;
}
if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
access_flags |= VM_EXEC;
fault_flags |= FAULT_FLAG_INSTRUCTION;
}
if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
fault_flags |= FAULT_FLAG_USER;
if (access_flags & ~vma->vm_flags)
/* Access fault */
goto out_put_mm;
ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
IOMMU_PAGE_RESP_SUCCESS;
out_put_mm:
mmap_read_unlock(mm);
mmput(mm);
return status;
}
static void iopf_handle_group(struct work_struct *work)
static void iopf_handler(struct work_struct *work)
{
struct iopf_group *group;
struct iommu_domain *domain;
struct iopf_fault *iopf, *next;
enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
group = container_of(work, struct iopf_group, work);
domain = iommu_get_domain_for_dev_pasid(group->dev,
group->last_fault.fault.prm.pasid, 0);
if (!domain || !domain->iopf_handler)
status = IOMMU_PAGE_RESP_INVALID;
list_for_each_entry_safe(iopf, next, &group->faults, list) {
/*
@ -139,7 +88,8 @@ static void iopf_handle_group(struct work_struct *work)
* faults in the group if there is an error.
*/
if (status == IOMMU_PAGE_RESP_SUCCESS)
status = iopf_handle_single(iopf);
status = domain->iopf_handler(&iopf->fault,
domain->fault_data);
if (!(iopf->fault.prm.flags &
IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
@ -181,6 +131,13 @@ static void iopf_handle_group(struct work_struct *work)
* request completes, outstanding faults will have been dealt with by the time
* the PASID is freed.
*
* Any valid page fault will be eventually routed to an iommu domain and the
* page fault handler installed there will get called. The users of this
* handling framework should guarantee that the iommu domain could only be
* freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults
* have been flushed.
*
* Return: 0 on success and <0 on error.
*/
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
@ -235,7 +192,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
group->last_fault.fault = *fault;
INIT_LIST_HEAD(&group->faults);
list_add(&group->last_fault.list, &group->faults);
INIT_WORK(&group->work, iopf_handle_group);
INIT_WORK(&group->work, iopf_handler);
/* See if we have partial faults for this group */
list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {


@ -1,71 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Helpers for IOMMU drivers implementing SVA
*/
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "iommu-sva-lib.h"
static DEFINE_MUTEX(iommu_sva_lock);
static DECLARE_IOASID_SET(iommu_sva_pasid);
/**
* iommu_sva_alloc_pasid - Allocate a PASID for the mm
* @mm: the mm
* @min: minimum PASID value (inclusive)
* @max: maximum PASID value (inclusive)
*
* Try to allocate a PASID for this mm, or take a reference to the existing one
* provided it fits within the [@min, @max] range. On success the PASID is
* available in mm->pasid and will be available for the lifetime of the mm.
*
* Returns 0 on success and < 0 on error.
*/
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
{
int ret = 0;
ioasid_t pasid;
if (min == INVALID_IOASID || max == INVALID_IOASID ||
min == 0 || max < min)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (pasid_valid(mm->pasid)) {
if (mm->pasid < min || mm->pasid >= max)
ret = -EOVERFLOW;
goto out;
}
pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
if (!pasid_valid(pasid))
ret = -ENOMEM;
else
mm_pasid_set(mm, pasid);
out:
mutex_unlock(&iommu_sva_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
return mmget_not_zero(mm);
}
/**
* iommu_sva_find() - Find mm associated to the given PASID
* @pasid: Process Address Space ID assigned to the mm
*
* On success a reference to the mm is taken, and must be released with mmput().
*
* Returns the mm corresponding to this PASID, or an error if not found.
*/
struct mm_struct *iommu_sva_find(ioasid_t pasid)
{
return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
}
EXPORT_SYMBOL_GPL(iommu_sva_find);

drivers/iommu/iommu-sva.c (new file, 240 lines)

@ -0,0 +1,240 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Helpers for IOMMU drivers implementing SVA
*/
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>
#include "iommu-sva.h"
static DEFINE_MUTEX(iommu_sva_lock);
static DECLARE_IOASID_SET(iommu_sva_pasid);
/**
* iommu_sva_alloc_pasid - Allocate a PASID for the mm
* @mm: the mm
* @min: minimum PASID value (inclusive)
* @max: maximum PASID value (inclusive)
*
* Try to allocate a PASID for this mm, or take a reference to the existing one
* provided it fits within the [@min, @max] range. On success the PASID is
* available in mm->pasid and will be available for the lifetime of the mm.
*
* Returns 0 on success and < 0 on error.
*/
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
{
int ret = 0;
ioasid_t pasid;
if (min == INVALID_IOASID || max == INVALID_IOASID ||
min == 0 || max < min)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (pasid_valid(mm->pasid)) {
if (mm->pasid < min || mm->pasid >= max)
ret = -EOVERFLOW;
goto out;
}
pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
if (!pasid_valid(pasid))
ret = -ENOMEM;
else
mm_pasid_set(mm, pasid);
out:
mutex_unlock(&iommu_sva_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
return mmget_not_zero(mm);
}
/**
* iommu_sva_find() - Find mm associated to the given PASID
* @pasid: Process Address Space ID assigned to the mm
*
* On success a reference to the mm is taken, and must be released with mmput().
*
* Returns the mm corresponding to this PASID, or an error if not found.
*/
struct mm_struct *iommu_sva_find(ioasid_t pasid)
{
return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero);
}
EXPORT_SYMBOL_GPL(iommu_sva_find);
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to mm_users
*
* Create a bond between device and address space, allowing the device to
* access the mm using the PASID returned by iommu_sva_get_pasid(). If a
* bond already exists between @device and @mm, an additional internal
* reference is taken. Caller must call iommu_sva_unbind_device()
* to release each reference.
*
* iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
* initialize the required SVA features.
*
* On error, returns an ERR_PTR value.
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
struct iommu_domain *domain;
struct iommu_sva *handle;
ioasid_t max_pasids;
int ret;
max_pasids = dev->iommu->max_pasids;
if (!max_pasids)
return ERR_PTR(-EOPNOTSUPP);
/* Allocate mm->pasid if necessary. */
ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
if (ret)
return ERR_PTR(ret);
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
mutex_lock(&iommu_sva_lock);
/* Search for an existing domain. */
domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
IOMMU_DOMAIN_SVA);
if (IS_ERR(domain)) {
ret = PTR_ERR(domain);
goto out_unlock;
}
if (domain) {
domain->users++;
goto out;
}
/* Allocate a new domain and set it on device pasid. */
domain = iommu_sva_domain_alloc(dev, mm);
if (!domain) {
ret = -ENOMEM;
goto out_unlock;
}
ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
if (ret)
goto out_free_domain;
domain->users = 1;
out:
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
return handle;
out_free_domain:
iommu_domain_free(domain);
out_unlock:
mutex_unlock(&iommu_sva_lock);
kfree(handle);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
/**
* iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
* @handle: the handle returned by iommu_sva_bind_device()
*
* Put reference to a bond between device and address space. The device should
* not be issuing any more transactions for this PASID. All outstanding page
* requests for this PASID must have been flushed to the IOMMU.
*/
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_domain *domain = handle->domain;
ioasid_t pasid = domain->mm->pasid;
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
if (--domain->users == 0) {
iommu_detach_device_pasid(domain, dev, pasid);
iommu_domain_free(domain);
}
mutex_unlock(&iommu_sva_lock);
kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
struct iommu_domain *domain = handle->domain;
return domain->mm->pasid;
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
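Taken together, a hedged sketch of how a driver consumes the reworked interface (the mydev_* name is hypothetical and error handling is trimmed): the bind call has lost its drvdata argument, and the PASID is now derived from the domain's mm rather than from a driver callback.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

static int mydev_start_sva_job(struct device *dev)	/* hypothetical */
{
	struct iommu_sva *handle;
	u32 pasid;

	/* Must be enabled once before any bind, per the kerneldoc above. */
	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
		return -ENODEV;

	handle = iommu_sva_bind_device(dev, current->mm);	/* no drvdata */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... submit device work tagged with @pasid ... */

	iommu_sva_unbind_device(handle);	/* one unbind per bind */
	return 0;
}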
/*
* I/O page fault handler for SVA
*/
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
vm_fault_t ret;
struct vm_area_struct *vma;
struct mm_struct *mm = data;
unsigned int access_flags = 0;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
struct iommu_fault_page_request *prm = &fault->prm;
enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;
if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
return status;
if (!mmget_not_zero(mm))
return status;
mmap_read_lock(mm);
vma = find_extend_vma(mm, prm->addr);
if (!vma)
/* Unmapped area */
goto out_put_mm;
if (prm->perm & IOMMU_FAULT_PERM_READ)
access_flags |= VM_READ;
if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
access_flags |= VM_WRITE;
fault_flags |= FAULT_FLAG_WRITE;
}
if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
access_flags |= VM_EXEC;
fault_flags |= FAULT_FLAG_INSTRUCTION;
}
if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
fault_flags |= FAULT_FLAG_USER;
if (access_flags & ~vma->vm_flags)
/* Access fault */
goto out_put_mm;
ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
IOMMU_PAGE_RESP_SUCCESS;
out_put_mm:
mmap_read_unlock(mm);
mmput(mm);
return status;
}


@ -2,8 +2,8 @@
/*
* SVA library for IOMMU drivers
*/
#ifndef _IOMMU_SVA_LIB_H
#define _IOMMU_SVA_LIB_H
#ifndef _IOMMU_SVA_H
#define _IOMMU_SVA_H
#include <linux/ioasid.h>
#include <linux/mm_types.h>
@ -26,6 +26,8 @@ int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
@ -63,5 +65,11 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
return -ENODEV;
}
static inline enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
return IOMMU_PAGE_RESP_INVALID;
}
#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_LIB_H */
#endif /* _IOMMU_SVA_H */


@ -21,6 +21,7 @@
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@ -28,9 +29,12 @@
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
#include "dma-iommu.h"
#include "iommu-sva.h"
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@ -42,6 +46,7 @@ struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
struct list_head devices;
struct xarray pasid_array;
struct mutex mutex;
void *iommu_data;
void (*iommu_data_release)(void *iommu_data);
@ -278,6 +283,24 @@ static void dev_iommu_free(struct device *dev)
kfree(param);
}
static u32 dev_iommu_get_max_pasids(struct device *dev)
{
u32 max_pasids = 0, bits = 0;
int ret;
if (dev_is_pci(dev)) {
ret = pci_max_pasids(to_pci_dev(dev));
if (ret > 0)
max_pasids = ret;
} else {
ret = device_property_read_u32(dev, "pasid-num-bits", &bits);
if (!ret)
max_pasids = 1UL << bits;
}
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
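In practice the helper just clamps the device-side limit by the IOMMU-side one; the numbers below are invented. A platform device advertising "pasid-num-bits" = 8 behind an SMMU that registered 16 SSID bits ends up with min(1 << 8, 1 << 16) = 256 usable PASIDs:

#include <stdio.h>

static unsigned int clamp_max_pasids(unsigned int dev_max, unsigned int iommu_max)
{
	return dev_max < iommu_max ? dev_max : iommu_max;
}

int main(void)
{
	printf("%u\n", clamp_max_pasids(1u << 8, 1u << 16));	/* prints 256 */
	return 0;
}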
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
@ -313,6 +336,7 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
}
dev->iommu->iommu_dev = iommu_dev;
dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group)) {
@ -719,6 +743,7 @@ struct iommu_group *iommu_group_alloc(void)
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
INIT_LIST_HEAD(&group->entry);
xa_init(&group->pasid_array);
ret = ida_alloc(&iommu_group_ida, GFP_KERNEL);
if (ret < 0) {
@ -1932,6 +1957,8 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
if (domain->type == IOMMU_DOMAIN_SVA)
mmdrop(domain->mm);
iommu_put_dma_cookie(domain);
domain->ops->free(domain);
}
@ -2760,98 +2787,6 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to it
* @drvdata: opaque data pointer to pass to bind callback
*
* Create a bond between device and address space, allowing the device to access
* the mm using the returned PASID. If a bond already exists between @device and
* @mm, it is returned and an additional reference is taken. Caller must call
* iommu_sva_unbind_device() to release each reference.
*
* iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
* initialize the required SVA features.
*
* On error, returns an ERR_PTR value.
*/
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_group *group;
struct iommu_sva *handle = ERR_PTR(-EINVAL);
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (!ops->sva_bind)
return ERR_PTR(-ENODEV);
group = iommu_group_get(dev);
if (!group)
return ERR_PTR(-ENODEV);
/* Ensure device count and domain don't change while we're binding */
mutex_lock(&group->mutex);
/*
* To keep things simple, SVA currently doesn't support IOMMU groups
* with more than one device. Existing SVA-capable systems are not
* affected by the problems that required IOMMU groups (lack of ACS
* isolation, device ID aliasing and other hardware issues).
*/
if (iommu_group_device_count(group) != 1)
goto out_unlock;
handle = ops->sva_bind(dev, mm, drvdata);
out_unlock:
mutex_unlock(&group->mutex);
iommu_group_put(group);
return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
/**
* iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
* @handle: the handle returned by iommu_sva_bind_device()
*
* Put reference to a bond between device and address space. The device should
* not be issuing any more transactions for this PASID. All outstanding page
* requests for this PASID must have been flushed to the IOMMU.
*/
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_group *group;
struct device *dev = handle->dev;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (!ops->sva_unbind)
return;
group = iommu_group_get(dev);
if (!group)
return;
mutex_lock(&group->mutex);
ops->sva_unbind(handle);
mutex_unlock(&group->mutex);
iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
if (!ops->sva_get_pasid)
return IOMMU_PASID_INVALID;
return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
/*
* Changes the default domain of an iommu group that has *only* one device
*
@ -3121,7 +3056,8 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
if (group->owner || !iommu_is_default_domain(group)) {
if (group->owner || !iommu_is_default_domain(group) ||
!xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
}
@ -3152,7 +3088,7 @@ void iommu_device_unuse_default_domain(struct device *dev)
return;
mutex_lock(&group->mutex);
if (!WARN_ON(!group->owner_cnt))
if (!WARN_ON(!group->owner_cnt || !xa_empty(&group->pasid_array)))
group->owner_cnt--;
mutex_unlock(&group->mutex);
@ -3200,7 +3136,8 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
ret = -EPERM;
goto unlock_out;
} else {
if (group->domain && group->domain != group->default_domain) {
if ((group->domain && group->domain != group->default_domain) ||
!xa_empty(&group->pasid_array)) {
ret = -EBUSY;
goto unlock_out;
}
@ -3234,7 +3171,8 @@ void iommu_group_release_dma_owner(struct iommu_group *group)
int ret;
mutex_lock(&group->mutex);
if (WARN_ON(!group->owner_cnt || !group->owner))
if (WARN_ON(!group->owner_cnt || !group->owner ||
!xa_empty(&group->pasid_array)))
goto unlock_out;
group->owner_cnt = 0;
@ -3265,3 +3203,150 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
return user;
}
EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
static int __iommu_set_group_pasid(struct iommu_domain *domain,
struct iommu_group *group, ioasid_t pasid)
{
struct group_device *device;
int ret = 0;
list_for_each_entry(device, &group->devices, list) {
ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
if (ret)
break;
}
return ret;
}
static void __iommu_remove_group_pasid(struct iommu_group *group,
ioasid_t pasid)
{
struct group_device *device;
const struct iommu_ops *ops;
list_for_each_entry(device, &group->devices, list) {
ops = dev_iommu_ops(device->dev);
ops->remove_dev_pasid(device->dev, pasid);
}
}
/*
* iommu_attach_device_pasid() - Attach a domain to pasid of device
* @domain: the iommu domain.
* @dev: the attached device.
* @pasid: the pasid of the device.
*
* Return: 0 on success, or an error.
*/
int iommu_attach_device_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct iommu_group *group;
void *curr;
int ret;
if (!domain->ops->set_dev_pasid)
return -EOPNOTSUPP;
group = iommu_group_get(dev);
if (!group)
return -ENODEV;
mutex_lock(&group->mutex);
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
goto out_unlock;
}
ret = __iommu_set_group_pasid(domain, group, pasid);
if (ret) {
__iommu_remove_group_pasid(group, pasid);
xa_erase(&group->pasid_array, pasid);
}
out_unlock:
mutex_unlock(&group->mutex);
iommu_group_put(group);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
/*
* iommu_detach_device_pasid() - Detach the domain from pasid of device
* @domain: the iommu domain.
* @dev: the attached device.
* @pasid: the pasid of the device.
*
* The @domain must have been attached to @pasid of the @dev with
* iommu_attach_device_pasid().
*/
void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid)
{
struct iommu_group *group = iommu_group_get(dev);
mutex_lock(&group->mutex);
__iommu_remove_group_pasid(group, pasid);
WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
mutex_unlock(&group->mutex);
iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
/*
* iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev
* @dev: the queried device
* @pasid: the pasid of the device
* @type: matched domain type, 0 for any match
*
* This is a variant of iommu_get_domain_for_dev(). It returns the existing
* domain attached to pasid of a device. Callers must hold a lock around this
* function, and both iommu_attach/detach_dev_pasid() whenever a domain of
* type is being manipulated. This API does not internally resolve races with
* attach/detach.
*
* Return: attached domain on success, NULL otherwise.
*/
struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
ioasid_t pasid,
unsigned int type)
{
struct iommu_domain *domain;
struct iommu_group *group;
group = iommu_group_get(dev);
if (!group)
return NULL;
xa_lock(&group->pasid_array);
domain = xa_load(&group->pasid_array, pasid);
if (type && domain && domain->type != type)
domain = ERR_PTR(-EBUSY);
xa_unlock(&group->pasid_array);
iommu_group_put(group);
return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm)
{
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_domain *domain;
domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
if (!domain)
return NULL;
domain->type = IOMMU_DOMAIN_SVA;
mmgrab(mm);
domain->mm = mm;
domain->iopf_handler = iommu_sva_handle_iopf;
domain->fault_data = mm;
return domain;
}

Some files were not shown because too many files have changed in this diff.