This is the 5.4.69 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl91u0cACgkQONu9yGCS
aT7KmhAAvuW3edfAfzD/F5h4vHaa9rMRmtvp2/FwefBoE4LEi3F6p2gBrUZMA3ds
DNQ8Nheafeqd63wFkfE//TXYR0rYTxTxa0jTrhtuJCUZ4+anRyG00fEbHPOxvMnJ
aPwQQVNOfCaUAvRbFdQ4RbuIm5chhX8Bml0ZtqvsAAFJ9XkCh1UPF0VHtSrS7PRL
lRMBlamLgZqU72naaJaFY2nMp+pvMFPZrzkR7tpv0Z1bqxuJp6L2n/EmcHpmTOJy
Ze+Wvt1wKk8Ep5Vql5ekXt5lEiInjacwsJZXbb5HfHO++Y+1b+ABt1kSjJx+R3/q
2Qdztq+9Eoj0N1A4gXdVFoZHqKihhbD49k8YqX4qO5ujTzqgnNyHGSEXyIKvaU6z
b3b12IvjbcMhM1zm3qvFfrVbbQI3kJf66zSi9NAwsZHlsvxRzslALR8I7mila4r5
fVOyfGoZxFs44FNW9JG7I85/isAxgg0ogYraMZbk8gmhTtb1ZaN+r7kJeXuTpzOg
UBAIDYPclMyZeny6tn1/qFuzNGYQQ0R9kxFcTC21Cf2zNLWHNfwCL1vE3Ob+ROIS
IHcsce6IqWQKGlD8UPjkZiXTLfqCAVi51PsGTVrnidXfa1IBOuvDsVqlghPsjHSD
30N4VB++9Gbw7LFEP4e33cOZLBLjDEdYd4VuoQFYywDZ3cy6xXo=
=OoZD
-----END PGP SIGNATURE-----

Merge 5.4.69 into android11-5.4-lts

Changes in 5.4.69
    kernel/sysctl-test: Add null pointer test for sysctl.c:proc_dointvec()
    scsi: lpfc: Fix pt2pt discovery on SLI3 HBAs
    scsi: mpt3sas: Free diag buffer without any status check
    selinux: allow labeling before policy is loaded
    media: mc-device.c: fix memleak in media_device_register_entity
    drm/amd/display: Do not double-buffer DTO adjustments
    drm/amdkfd: Fix race in gfx10 context restore handler
    dma-fence: Serialise signal enabling (dma_fence_enable_sw_signaling)
    scsi: qla2xxx: Add error handling for PLOGI ELS passthrough
    ath10k: fix array out-of-bounds access
    ath10k: fix memory leak for tpc_stats_final
    PCI/IOV: Serialize sysfs sriov_numvfs reads vs writes
    mm: fix double page fault on arm64 if PTE_AF is cleared
    scsi: aacraid: fix illegal IO beyond last LBA
    m68k: q40: Fix info-leak in rtc_ioctl
    xfs: fix inode fork extent count overflow
    gma/gma500: fix a memory disclosure bug due to uninitialized bytes
    ASoC: kirkwood: fix IRQ error handling
    soundwire: intel/cadence: fix startup sequence
    media: smiapp: Fix error handling at NVM reading
    drm/amd/display: Free gamma after calculating legacy transfer function
    xfs: properly serialise fallocate against AIO+DIO
    leds: mlxreg: Fix possible buffer overflow
    dm table: do not allow request-based DM to stack on partitions
    PM / devfreq: tegra30: Fix integer overflow on CPU's freq max out
    scsi: fnic: fix use after free
    scsi: lpfc: Fix kernel crash at lpfc_nvme_info_show during remote port bounce
    powerpc/64s: Always disable branch profiling for prom_init.o
    net: silence data-races on sk_backlog.tail
    dax: Fix alloc_dax_region() compile warning
    iomap: Fix overflow in iomap_page_mkwrite
    f2fs: avoid kernel panic on corruption test
    clk/ti/adpll: allocate room for terminating null
    drm/amdgpu/powerplay: fix AVFS handling with custom powerplay table
    ice: Fix to change Rx/Tx ring descriptor size via ethtool with DCBx
    mtd: cfi_cmdset_0002: don't free cfi->cfiq in error path of cfi_amdstd_setup()
    mfd: mfd-core: Protect against NULL call-back function pointer
    drm/amdgpu/powerplay/smu7: fix AVFS handling with custom powerplay table
    tpm_crb: fix fTPM on AMD Zen+ CPUs
    tracing: Verify if trace array exists before destroying it.
    tracing: Adding NULL checks for trace_array descriptor pointer
    bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
    dmaengine: mediatek: hsdma_probe: fixed a memory leak when devm_request_irq fails
    x86/kdump: Always reserve the low 1M when the crashkernel option is specified
    RDMA/qedr: Fix potential use after free
    RDMA/i40iw: Fix potential use after free
    PCI: Avoid double hpmemsize MMIO window assignment
    fix dget_parent() fastpath race
    xfs: fix attr leaf header freemap.size underflow
    RDMA/iw_cgxb4: Fix an error handling path in 'c4iw_connect()'
    ubi: Fix producing anchor PEBs
    mmc: core: Fix size overflow for mmc partitions
    gfs2: clean up iopen glock mess in gfs2_create_inode
    scsi: pm80xx: Cleanup command when a reset times out
    mt76: do not use devm API for led classdev
    mt76: add missing locking around ampdu action
    debugfs: Fix !DEBUG_FS debugfs_create_automount
    SUNRPC: Capture completion of all RPC tasks
    CIFS: Use common error handling code in smb2_ioctl_query_info()
    CIFS: Properly process SMB3 lease breaks
    f2fs: stop GC when the victim becomes fully valid
    ASoC: max98090: remove msleep in PLL unlocked workaround
    xtensa: fix system_call interaction with ptrace
    s390: avoid misusing CALL_ON_STACK for task stack setup
    xfs: fix realtime file data space leak
    drm/amdgpu: fix calltrace during kmd unload(v3)
    arm64: insn: consistently handle exit text
    selftests/bpf: De-flake test_tcpbpf
    kernel/notifier.c: intercept duplicate registrations to avoid infinite loops
    kernel/sys.c: avoid copying possible padding bytes in copy_to_user
    KVM: arm/arm64: vgic: Fix potential double free dist->spis in __kvm_vgic_destroy()
    module: Remove accidental change of module_enable_x()
    xfs: fix log reservation overflows when allocating large rt extents
    ALSA: hda: enable regmap internal locking
    tipc: fix link overflow issue at socket shutdown
    vcc_seq_next should increase position index
    neigh_stat_seq_next() should increase position index
    rt_cpu_seq_next should increase position index
    ipv6_route_seq_next should increase position index
    drm/mcde: Handle pending vblank while disabling display
    seqlock: Require WRITE_ONCE surrounding raw_seqcount_barrier
    drm/scheduler: Avoid accessing freed bad job.
    media: ti-vpe: cal: Restrict DMA to avoid memory corruption
    opp: Replace list_kref with a local counter
    scsi: qla2xxx: Fix stuck session in GNL
    scsi: lpfc: Fix incomplete NVME discovery when target
    sctp: move trace_sctp_probe_path into sctp_outq_sack
    ACPI: EC: Reference count query handlers under lock
    scsi: ufs: Make ufshcd_add_command_trace() easier to read
    scsi: ufs: Fix a race condition in the tracing code
    drm/amd/display: Initialize DSC PPS variables to 0
    i2c: tegra: Prevent interrupt triggering after transfer timeout
    btrfs: tree-checker: Check leaf chunk item size
    dmaengine: zynqmp_dma: fix burst length configuration
    s390/cpum_sf: Use kzalloc and minor changes
    nfsd: Fix a soft lockup race in nfsd_file_mark_find_or_create()
    powerpc/eeh: Only dump stack once if an MMIO loop is detected
    Bluetooth: btrtl: Use kvmalloc for FW allocations
    tracing: Set kernel_stack's caller size properly
    ARM: 8948/1: Prevent OOB access in stacktrace
    ar5523: Add USB ID of SMCWUSBT-G2 wireless adapter
    ceph: ensure we have a new cap before continuing in fill_inode
    selftests/ftrace: fix glob selftest
    tools/power/x86/intel_pstate_tracer: changes for python 3 compatibility
    Bluetooth: Fix refcount use-after-free issue
    mm/swapfile.c: swap_next should increase position index
    mm: pagewalk: fix termination condition in walk_pte_range()
    Bluetooth: prefetch channel before killing sock
    KVM: fix overflow of zero page refcount with ksm running
    ALSA: hda: Clear RIRB status before reading WP
    skbuff: fix a data race in skb_queue_len()
    nfsd: Fix a perf warning
    drm/amd/display: fix workaround for incorrect double buffer register for DLG ADL and TTU
    audit: CONFIG_CHANGE don't log internal bookkeeping as an event
    selinux: sel_avc_get_stat_idx should increase position index
    scsi: lpfc: Fix RQ buffer leakage when no IOCBs available
    scsi: lpfc: Fix release of hwq to clear the eq relationship
    scsi: lpfc: Fix coverity errors in fmdi attribute handling
    drm/omap: fix possible object reference leak
    locking/lockdep: Decrement IRQ context counters when removing lock chain
    clk: stratix10: use do_div() for 64-bit calculation
    crypto: chelsio - This fixes the kernel panic which occurs during a libkcapi test
    mt76: clear skb pointers from rx aggregation reorder buffer during cleanup
    mt76: fix handling full tx queues in mt76_dma_tx_queue_skb_raw
    ALSA: usb-audio: Don't create a mixer element with bogus volume range
    perf test: Fix test trace+probe_vfs_getname.sh on s390
    RDMA/rxe: Fix configuration of atomic queue pair attributes
    KVM: x86: fix incorrect comparison in trace event
    KVM: nVMX: Hold KVM's srcu lock when syncing vmcs12->shadow
    dmaengine: stm32-mdma: use vchan_terminate_vdesc() in .terminate_all
    media: staging/imx: Missing assignment in imx_media_capture_device_register()
    x86/pkeys: Add check for pkey "overflow"
    bpf: Remove recursion prevention from rcu free callback
    dmaengine: stm32-dma: use vchan_terminate_vdesc() in .terminate_all
    dmaengine: tegra-apb: Prevent race conditions on channel's freeing
    soundwire: bus: disable pm_runtime in sdw_slave_delete
    drm/amd/display: dal_ddc_i2c_payloads_create can fail causing panic
    drm/omap: dss: Cleanup DSS ports on initialisation failure
    iavf: use tc_cls_can_offload_and_chain0() instead of chain check
    firmware: arm_sdei: Use cpus_read_lock() to avoid races with cpuhp
    random: fix data races at timer_rand_state
    bus: hisi_lpc: Fixup IO ports addresses to avoid use-after-free in host removal
    ASoC: SOF: ipc: check ipc return value before data copy
    media: go7007: Fix URB type for interrupt handling
    Bluetooth: guard against controllers sending zero'd events
    timekeeping: Prevent 32bit truncation in scale64_check_overflow()
    powerpc/book3s64: Fix error handling in mm_iommu_do_alloc()
    drm/amd/display: fix image corruption with ODM 2:1 DSC 2 slice
    ext4: fix a data race at inode->i_disksize
    perf jevents: Fix leak of mapfile memory
    mm: avoid data corruption on CoW fault into PFN-mapped VMA
    drm/amdgpu: increase atombios cmd timeout
    ARM: OMAP2+: Handle errors for cpu_pm
    drm/amd/display: Stop if retimer is not available
    clk: imx: Fix division by zero warning on pfdv2
    cpu-topology: Fix the potential data corruption
    s390/irq: replace setup_irq() by request_irq()
    perf cs-etm: Swap packets for instruction samples
    perf cs-etm: Correct synthesizing instruction samples
    ath10k: use kzalloc to read for ath10k_sdio_hif_diag_read
    scsi: aacraid: Disabling TM path and only processing IOP reset
    Bluetooth: L2CAP: handle l2cap config request during open state
    media: tda10071: fix unsigned sign extension overflow
    tty: sifive: Finish transmission before changing the clock
    xfs: don't ever return a stale pointer from __xfs_dir3_free_read
    xfs: mark dir corrupt when lookup-by-hash fails
    ext4: mark block bitmap corrupted when found instead of BUGON
    tpm: ibmvtpm: Wait for buffer to be set before proceeding
    rtc: sa1100: fix possible race condition
    rtc: ds1374: fix possible race condition
    nfsd: Don't add locks to closed or closing open stateids
    RDMA/cm: Remove a race freeing timewait_info
    intel_th: Disallow multi mode on devices where it's broken
    KVM: PPC: Book3S HV: Treat TM-related invalid form instructions on P9 like the valid ones
    drm/msm: fix leaks if initialization fails
    drm/msm/a5xx: Always set an OPP supported hardware value
    tracing: Use address-of operator on section symbols
    thermal: rcar_thermal: Handle probe error gracefully
    KVM: LAPIC: Mark hrtimer for period or oneshot mode to expire in hard interrupt context
    perf parse-events: Fix 3 use after frees found with clang ASAN
    btrfs: do not init a reloc root if we aren't relocating
    btrfs: free the reloc_control in a consistent way
    r8169: improve RTL8168b FIFO overflow workaround
    serial: 8250_port: Don't service RX FIFO if throttled
    serial: 8250_omap: Fix sleeping function called from invalid context during probe
    serial: 8250: 8250_omap: Terminate DMA before pushing data on RX timeout
    perf cpumap: Fix snprintf overflow check
    net: axienet: Convert DMA error handler to a work queue
    net: axienet: Propagate failure of DMA descriptor setup
    cpufreq: powernv: Fix frame-size-overflow in powernv_cpufreq_work_fn
    tools: gpio-hammer: Avoid potential overflow in main
    exec: Add exec_update_mutex to replace cred_guard_mutex
    exec: Fix a deadlock in strace
    selftests/ptrace: add test cases for dead-locks
    kernel/kcmp.c: Use new infrastructure to fix deadlocks in execve
    proc: Use new infrastructure to fix deadlocks in execve
    proc: io_accounting: Use new infrastructure to fix deadlocks in execve
    perf: Use new infrastructure to fix deadlocks in execve
    nvme-multipath: do not reset on unknown status
    nvme: Fix ctrl use-after-free during sysfs deletion
    nvme: Fix controller creation races with teardown flow
    brcmfmac: Fix double freeing in the fmac usb data path
    xfs: prohibit fs freezing when using empty transactions
    RDMA/rxe: Set sys_image_guid to be aligned with HW IB devices
    IB/iser: Always check sig MR before putting it to the free pool
    scsi: hpsa: correct race condition in offload enabled
    SUNRPC: Fix a potential buffer overflow in 'svc_print_xprts()'
    svcrdma: Fix leak of transport addresses
    netfilter: nf_tables: silence a RCU-list warning in nft_table_lookup()
    PCI: Use ioremap(), not phys_to_virt() for platform ROM
    ubifs: ubifs_jnl_write_inode: Fix a memory leak bug
    ubifs: ubifs_add_orphan: Fix a memory leak bug
    ubifs: Fix out-of-bounds memory access caused by abnormal value of node_len
    ALSA: usb-audio: Fix case when USB MIDI interface has more than one extra endpoint descriptor
    PCI: pciehp: Fix MSI interrupt race
    NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests()
    drm/amdgpu/vcn2.0: stall DPG when WPTR/RPTR reset
    powerpc/perf: Implement a global lock to avoid races between trace, core and thread imc events.
    mm/kmemleak.c: use address-of operator on section symbols
    mm/filemap.c: clear page error before actual read
    mm/swapfile: fix data races in try_to_unuse()
    mm/vmscan.c: fix data races using kswapd_classzone_idx
    SUNRPC: Don't start a timer on an already queued rpc task
    nvmet-rdma: fix double free of rdma queue
    workqueue: Remove the warning in wq_worker_sleeping()
    drm/amdgpu/sriov add amdgpu_amdkfd_pre_reset in gpu reset
    mm/mmap.c: initialize align_offset explicitly for vm_unmapped_area
    ALSA: hda: Skip controller resume if not needed
    scsi: qedi: Fix termination timeouts in session logout
    serial: uartps: Wait for tx_empty in console setup
    btrfs: fix setting last_trans for reloc roots
    KVM: Remove CREATE_IRQCHIP/SET_PIT2 race
    perf stat: Force error in fallback on :k events
    bdev: Reduce time holding bd_mutex in sync in blkdev_close()
    drivers: char: tlclk.c: Avoid data race between init and interrupt handler
    KVM: arm64: vgic-v3: Retire all pending LPIs on vcpu destroy
    KVM: arm64: vgic-its: Fix memory leak on the error path of vgic_add_lpi()
    net: openvswitch: use u64 for meter bucket
    scsi: aacraid: Fix error handling paths in aac_probe_one()
    staging:r8188eu: avoid skb_clone for amsdu to msdu conversion
    sparc64: vcc: Fix error return code in vcc_probe()
    arm64: cpufeature: Relax checks for AArch32 support at EL[0-2]
    sched/fair: Eliminate bandwidth race between throttling and distribution
    dpaa2-eth: fix error return code in setup_dpni()
    dt-bindings: sound: wm8994: Correct required supplies based on actual implementaion
    devlink: Fix reporter's recovery condition
    atm: fix a memory leak of vcc->user_back
    media: venus: vdec: Init registered list unconditionally
    perf mem2node: Avoid double free related to realloc
    mm/slub: fix incorrect interpretation of s->offset
    i2c: tegra: Restore pinmux on system resume
    power: supply: max17040: Correct voltage reading
    phy: samsung: s5pv210-usb2: Add delay after reset
    Bluetooth: Handle Inquiry Cancel error after Inquiry Complete
    USB: EHCI: ehci-mv: fix error handling in mv_ehci_probe()
    KVM: x86: handle wrap around 32-bit address space
    tipc: fix memory leak in service subscripting
    tty: serial: samsung: Correct clock selection logic
    ALSA: hda: Fix potential race in unsol event handler
    drm/exynos: dsi: Remove bridge node reference in error handling path in probe function
    ipmi:bt-bmc: Fix error handling and status check
    powerpc/traps: Make unrecoverable NMIs die instead of panic
    svcrdma: Fix backchannel return code
    fuse: don't check refcount after stealing page
    fuse: update attr_version counter on fuse_notify_inval_inode()
    USB: EHCI: ehci-mv: fix less than zero comparison of an unsigned int
    coresight: etm4x: Fix use-after-free of per-cpu etm drvdata
    arm64: acpi: Make apei_claim_sea() synchronise with APEI's irq work
    scsi: cxlflash: Fix error return code in cxlflash_probe()
    arm64/cpufeature: Drop TraceFilt feature exposure from ID_DFR0 register
    drm/amdkfd: fix restore worker race condition
    e1000: Do not perform reset in reset_task if we are already down
    drm/nouveau/debugfs: fix runtime pm imbalance on error
    drm/nouveau: fix runtime pm imbalance on error
    drm/nouveau/dispnv50: fix runtime pm imbalance on error
    printk: handle blank console arguments passed in.
    usb: dwc3: Increase timeout for CmdAct cleared by device controller
    btrfs: don't force read-only after error in drop snapshot
    btrfs: fix double __endio_write_update_ordered in direct I/O
    gpio: rcar: Fix runtime PM imbalance on error
    vfio/pci: fix memory leaks of eventfd ctx
    KVM: PPC: Book3S HV: Close race with page faults around memslot flushes
    perf evsel: Fix 2 memory leaks
    perf trace: Fix the selection for architectures to generate the errno name tables
    perf stat: Fix duration_time value for higher intervals
    perf util: Fix memory leak of prefix_if_not_in
    perf metricgroup: Free metric_events on error
    perf kcore_copy: Fix module map when there are no modules loaded
    PCI: tegra194: Fix runtime PM imbalance on error
    ASoC: img-i2s-out: Fix runtime PM imbalance on error
    wlcore: fix runtime pm imbalance in wl1271_tx_work
    wlcore: fix runtime pm imbalance in wlcore_regdomain_config
    mtd: rawnand: gpmi: Fix runtime PM imbalance on error
    mtd: rawnand: omap_elm: Fix runtime PM imbalance on error
    PCI: tegra: Fix runtime PM imbalance on error
    ceph: fix potential race in ceph_check_caps
    mm/swap_state: fix a data race in swapin_nr_pages
    mm: memcontrol: fix stat-corrupting race in charge moving
    rapidio: avoid data race between file operation callbacks and mport_cdev_add().
    mtd: parser: cmdline: Support MTD names containing one or more colons
    x86/speculation/mds: Mark mds_user_clear_cpu_buffers() __always_inline
    NFS: nfs_xdr_status should record the procedure name
    vfio/pci: Clear error and request eventfd ctx after releasing
    cifs: Fix double add page to memcg when cifs_readpages
    nvme: fix possible deadlock when I/O is blocked
    mac80211: skip mpath lookup also for control port tx
    scsi: libfc: Handling of extra kref
    scsi: libfc: Skip additional kref updating work event
    selftests/x86/syscall_nt: Clear weird flags after each test
    vfio/pci: fix racy on error and request eventfd ctx
    btrfs: qgroup: fix data leak caused by race between writeback and truncate
    perf tests: Fix test 68 zstd compression for s390
    scsi: qla2xxx: Retry PLOGI on FC-NVMe PRLI failure
    ubi: fastmap: Free unused fastmap anchor peb during detach
    mt76: fix LED link time failure
    opp: Increase parsed_static_opps in _of_add_opp_table_v1()
    perf parse-events: Use strcmp() to compare the PMU name
    ALSA: hda: Always use jackpoll helper for jack update after resume
    ALSA: hda: Workaround for spurious wakeups on some Intel platforms
    net: openvswitch: use div_u64() for 64-by-32 divisions
    nvme: explicitly update mpath disk capacity on revalidation
    device_cgroup: Fix RCU list debugging warning
    ASoC: pcm3168a: ignore 0 Hz settings
    ASoC: wm8994: Skip setting of the WM8994_MICBIAS register for WM1811
    ASoC: wm8994: Ensure the device is resumed in wm89xx_mic_detect functions
    ASoC: Intel: bytcr_rt5640: Add quirk for MPMAN Converter9 2-in-1
    RISC-V: Take text_mutex in ftrace_init_nop()
    i2c: aspeed: Mask IRQ status to relevant bits
    s390/init: add missing __init annotations
    lockdep: fix order in trace_hardirqs_off_caller()
    EDAC/ghes: Check whether the driver is on the safe list correctly
    drm/amdkfd: fix a memory leak issue
    drm/amd/display: update nv1x stutter latencies
    drm/amdgpu/dc: Require primary plane to be enabled whenever the CRTC is on
    i2c: core: Call i2c_acpi_install_space_handler() before i2c_acpi_register_devices()
    objtool: Fix noreturn detection for ignored functions
    ieee802154: fix one possible memleak in ca8210_dev_com_init
    ieee802154/adf7242: check status of adf7242_read_reg
    clocksource/drivers/h8300_timer8: Fix wrong return value in h8300_8timer_init()
    mwifiex: Increase AES key storage size to 256 bits
    batman-adv: bla: fix type misuse for backbone_gw hash indexing
    atm: eni: fix the missed pci_disable_device() for eni_init_one()
    batman-adv: mcast/TT: fix wrongly dropped or rerouted packets
    netfilter: conntrack: nf_conncount_init is failing with IPv6 disabled
    mac802154: tx: fix use-after-free
    bpf: Fix clobbering of r2 in bpf_gen_ld_abs
    drm/vc4/vc4_hdmi: fill ASoC card owner
    net: qed: Disable aRFS for NPAR and 100G
    net: qede: Disable aRFS for NPAR and 100G
    net: qed: RDMA personality shouldn't fail VF load
    drm/sun4i: sun8i-csc: Secondary CSC register correction
    batman-adv: Add missing include for in_interrupt()
    nvme-tcp: fix kconfig dependency warning when !CRYPTO
    batman-adv: mcast: fix duplicate mcast packets in BLA backbone from LAN
    batman-adv: mcast: fix duplicate mcast packets in BLA backbone from mesh
    batman-adv: mcast: fix duplicate mcast packets from BLA backbone to mesh
    bpf: Fix a rcu warning for bpffs map pretty-print
    lib80211: fix unmet direct dependendices config warning when !CRYPTO
    ALSA: asihpi: fix iounmap in error handler
    regmap: fix page selection for noinc reads
    regmap: fix page selection for noinc writes
    MIPS: Add the missing 'CPU_1074K' into __get_cpu_type()
    regulator: axp20x: fix LDO2/4 description
    KVM: x86: Reset MMU context if guest toggles CR4.SMAP or CR4.PKE
    KVM: SVM: Add a dedicated INVD intercept routine
    mm: validate pmd after splitting
    arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback
    x86/ioapic: Unbreak check_timer()
    scsi: lpfc: Fix initial FLOGI failure due to BBSCN not supported
    ALSA: usb-audio: Add delay quirk for H570e USB headsets
    ALSA: hda/realtek - Couldn't detect Mic if booting with headset plugged
    ALSA: hda/realtek: Enable front panel headset LED on Lenovo ThinkStation P520
    lib/string.c: implement stpcpy
    tracing: fix double free
    s390/dasd: Fix zero write for FBA devices
    kprobes: Fix to check probe enabled before disarm_kprobe_ftrace()
    kprobes: tracing/kprobes: Fix to kill kprobes on initmem after boot
    btrfs: fix overflow when copying corrupt csums for a message
    dmabuf: fix NULL pointer dereference in dma_buf_release()
    mm, THP, swap: fix allocating cluster for swapfile by mistake
    mm/gup: fix gup_fast with dynamic page table folding
    s390/zcrypt: Fix ZCRYPT_PERDEV_REQCNT ioctl
    KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
    dm: fix bio splitting and its bio completion order for regular IO
    kprobes: Fix compiler warning for !CONFIG_KPROBES_ON_FTRACE
    ata: define AC_ERR_OK
    ata: make qc_prep return ata_completion_errors
    ata: sata_mv, avoid trigerrable BUG_ON
    Linux 5.4.69

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2a26b4f6fd89b641fa80e339ee72089da51a1415
commit e772bef401
@@ -14,9 +14,15 @@ Required properties:
 
   - #gpio-cells : Must be 2. The first cell is the pin number and the
     second cell is used to specify optional parameters (currently unused).
 
-  - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
-    SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
-    in Documentation/devicetree/bindings/regulator/regulator.txt
+  - power supplies for the device, as covered in
+    Documentation/devicetree/bindings/regulator/regulator.txt, depending
+    on compatible:
+    - for wlf,wm1811 and wlf,wm8958:
+      AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
+      DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
+    - for wlf,wm8994:
+      AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
+      SPKVDD1-supply, SPKVDD2-supply
 
 Optional properties:
@@ -73,11 +79,11 @@ wm8994: codec@1a {
 
     lineout1-se;
 
+    AVDD1-supply = <&regulator>;
     AVDD2-supply = <&regulator>;
     CPVDD-supply = <&regulator>;
-    DBVDD1-supply = <&regulator>;
-    DBVDD2-supply = <&regulator>;
-    DBVDD3-supply = <&regulator>;
+    DBVDD-supply = <&regulator>;
+    DCVDD-supply = <&regulator>;
     SPKVDD1-supply = <&regulator>;
    SPKVDD2-supply = <&regulator>;
 };
@@ -250,7 +250,7 @@ High-level taskfile hooks
 
 ::
 
-    void (*qc_prep) (struct ata_queued_cmd *qc);
+    enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc);
     int (*qc_issue) (struct ata_queued_cmd *qc);
 
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 68
+SUBLEVEL = 69
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -204,7 +204,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
     return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
 }
@@ -236,16 +236,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
     return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
 }
 
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
 }
 
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
 }
 
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+    return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
 static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
@@ -116,6 +116,8 @@ static int save_trace(struct stackframe *frame, void *d)
         return 0;
 
     regs = (struct pt_regs *)frame->sp;
+    if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+        return 0;
 
     trace->entries[trace->nr_entries++] = regs->ARM_pc;
 
@@ -64,14 +64,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
 
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
+    unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
 #ifdef CONFIG_KALLSYMS
     printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
     printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
 
-    if (in_entry_text(from))
-        dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+    if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+        dump_mem("", "Exception stack", frame + 4, end);
 }
 
 void dump_backtrace_stm(u32 *stack, u32 instruction)
@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
                 int index)
 {
     struct omap3_idle_statedata *cx = &omap3_idle_data[index];
+    int error;
 
     if (omap_irq_pending() || need_resched())
         goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
      * Call idle CPU PM enter notifier chain so that
      * VFP context is saved.
      */
-    if (cx->mpu_state == PWRDM_POWER_OFF)
-        cpu_pm_enter();
+    if (cx->mpu_state == PWRDM_POWER_OFF) {
+        error = cpu_pm_enter();
+        if (error)
+            goto out_clkdm_set;
+    }
 
     /* Execute ARM wfi */
     omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
         pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
         cpu_pm_exit();
 
+out_clkdm_set:
     /* Re-allow idle for C1 */
     if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
         clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
     struct idle_statedata *cx = state_ptr + index;
     u32 mpuss_can_lose_context = 0;
+    int error;
 
     /*
      * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
          * Call idle CPU PM enter notifier chain so that
          * VFP and per CPU interrupt context is saved.
          */
-        cpu_pm_enter();
+        error = cpu_pm_enter();
+        if (error)
+            goto cpu_pm_out;
 
         if (dev->cpu == 0) {
             pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
              * Call idle CPU cluster PM enter notifier chain
              * to save GIC and wakeupgen context.
              */
-            if (mpuss_can_lose_context)
-                cpu_cluster_pm_enter();
+            if (mpuss_can_lose_context) {
+                error = cpu_cluster_pm_enter();
+                if (error)
+                    goto cpu_cluster_pm_out;
+            }
         }
 
     omap4_enter_lowpower(dev->cpu, cx->cpu_state);
     cpu_done[dev->cpu] = true;
 
+cpu_cluster_pm_out:
     /* Wakeup CPU1 only if it is not offlined */
     if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
 
@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
         }
     }
 
-    /*
-     * Call idle CPU PM exit notifier chain to restore
-     * VFP and per CPU IRQ context.
-     */
-    cpu_pm_exit();
-
     /*
      * Call idle CPU cluster PM exit notifier chain
      * to restore GIC and wakeupgen context.
@@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
     if (dev->cpu == 0 && mpuss_can_lose_context)
         cpu_cluster_pm_exit();
 
+    /*
+     * Call idle CPU PM exit notifier chain to restore
+     * VFP and per CPU IRQ context.
+     */
+    cpu_pm_exit();
+
+cpu_pm_out:
     tick_broadcast_exit();
 
 fail:
@@ -194,6 +194,7 @@ void omap_sram_idle(void)
     int per_next_state = PWRDM_POWER_ON;
     int core_next_state = PWRDM_POWER_ON;
     u32 sdrc_pwr = 0;
+    int error;
 
     mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
     switch (mpu_next_state) {
@@ -222,8 +223,11 @@ void omap_sram_idle(void)
     pwrdm_pre_transition(NULL);
 
     /* PER */
-    if (per_next_state == PWRDM_POWER_OFF)
-        cpu_cluster_pm_enter();
+    if (per_next_state == PWRDM_POWER_OFF) {
+        error = cpu_cluster_pm_enter();
+        if (error)
+            return;
+    }
 
     /* CORE */
     if (core_next_state < PWRDM_POWER_ON) {
@@ -299,7 +299,7 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
     return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
 {
     return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
@@ -307,7 +307,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
     return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
-        kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+        kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -336,6 +336,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
     return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+    return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
 static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
     return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
@@ -373,6 +378,9 @@ static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
+    if (kvm_vcpu_abt_iss1tw(vcpu))
+        return true;
+
     if (kvm_vcpu_trap_is_iabt(vcpu))
         return false;
 
@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
+extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/irq_work.h>
 #include <linux/memblock.h>
 #include <linux/of_fdt.h>
 #include <linux/smp.h>
@@ -269,6 +270,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 int apei_claim_sea(struct pt_regs *regs)
 {
     int err = -ENOENT;
+    bool return_to_irqs_enabled;
     unsigned long current_flags;
 
     if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
@@ -276,6 +278,12 @@ int apei_claim_sea(struct pt_regs *regs)
 
     current_flags = local_daif_save_flags();
 
+    /* current_flags isn't useful here as daif doesn't tell us about pNMI */
+    return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());
+
+    if (regs)
+        return_to_irqs_enabled = interrupts_enabled(regs);
+
     /*
      * SEA can interrupt SError, mask it and describe this as an NMI so
      * that APEI defers the handling.
@@ -284,6 +292,23 @@ int apei_claim_sea(struct pt_regs *regs)
     nmi_enter();
     err = ghes_notify_sea();
     nmi_exit();
+
+    /*
+     * APEI NMI-like notifications are deferred to irq_work. Unless
+     * we interrupted irqs-masked code, we can do that now.
+     */
+    if (!err) {
+        if (return_to_irqs_enabled) {
+            local_daif_restore(DAIF_PROCCTX_NOIRQ);
+            __irq_enter();
+            irq_work_run();
+            __irq_exit();
+        } else {
+            pr_warn_ratelimited("APEI work queued but not completed");
+            err = -EINPROGRESS;
+        }
+    }
 
     local_daif_restore(current_flags);
 
     return err;
@@ -160,11 +160,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
     S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
     S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
-    /* Linux doesn't care about the EL3 */
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+    ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+    ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+    ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
     ARM64_FTR_END,
 };
 
@@ -320,7 +319,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
-    ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+    /* [31:28] TraceFilt */
     S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),    /* PerfMon */
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
     ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
@@ -719,9 +718,6 @@ void update_cpu_features(int cpu,
     taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
 
-    /*
-     * EL3 is not our concern.
-     */
     taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
     taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
@@ -21,6 +21,7 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
 #define AARCH64_INSN_N_BIT	BIT(22)
@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
+static bool is_exit_text(unsigned long addr)
+{
+    /* discarded with init text/data */
+    return system_state < SYSTEM_RUNNING &&
+        addr >= (unsigned long)__exittext_begin &&
+        addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+    return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
     unsigned long uintaddr = (uintptr_t) addr;
-    bool module = !core_kernel_text(uintaddr);
+    bool image = is_image_text(uintaddr);
     struct page *page;
 
-    if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-        page = vmalloc_to_page(addr);
-    else if (!module)
+    if (image)
         page = phys_to_page(__pa_symbol(addr));
+    else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+        page = vmalloc_to_page(addr);
     else
         return addr;
 
@@ -170,9 +170,12 @@ SECTIONS
     __inittext_begin = .;
 
     INIT_TEXT_SECTION(8)
+
+    __exittext_begin = .;
     .exit.text : {
         ARM_EXIT_KEEP(EXIT_TEXT)
     }
+    __exittext_end = .;
 
     . = ALIGN(4);
     .altinstructions : {
@@ -496,7 +496,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
             kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
             kvm_vcpu_dabt_isvalid(vcpu) &&
             !kvm_vcpu_dabt_isextabt(vcpu) &&
-            !kvm_vcpu_dabt_iss1tw(vcpu);
+            !kvm_vcpu_abt_iss1tw(vcpu);
 
         if (valid) {
             int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -654,11 +654,13 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 
     inf = esr_to_fault_info(esr);
 
-    /*
-     * Return value ignored as we rely on signal merging.
-     * Future patches will make this more robust.
-     */
-    apei_claim_sea(regs);
+    if (user_mode(regs) && apei_claim_sea(regs) == 0) {
+        /*
+         * APEI claimed this as a firmware-first notification.
+         * Some processing deferred to task_work before ret_to_user().
+         */
+        return 0;
+    }
 
     if (esr & ESR_ELx_FnV)
         siaddr = NULL;
@@ -264,6 +264,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
 {
     int tmp = Q40_RTC_CTRL;
 
+    pll->pll_ctrl = 0;
     pll->pll_value = tmp & Q40_RTC_PLL_MASK;
     if (tmp & Q40_RTC_PLL_SIGN)
         pll->pll_value = -pll->pll_value;
@@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
     case CPU_34K:
     case CPU_1004K:
     case CPU_74K:
+    case CPU_1074K:
     case CPU_M14KC:
     case CPU_M14KEC:
     case CPU_INTERAPTIV:
@@ -150,4 +150,7 @@
 
 #define KVM_INST_FETCH_FAILED	-1
 
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
 #endif /* __POWERPC_KVM_ASM_H__ */
@@ -19,6 +19,7 @@ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
 CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
@@ -36,7 +37,6 @@ KASAN_SANITIZE_btext.o := n
 ifdef CONFIG_KASAN
 CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
-CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
 CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
 endif
 
@@ -503,7 +503,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
     rc = 1;
     if (pe->state & EEH_PE_ISOLATED) {
         pe->check_count++;
-        if (pe->check_count % EEH_MAX_FAILS == 0) {
+        if (pe->check_count == EEH_MAX_FAILS) {
             dn = pci_device_to_OF_node(dev);
             if (dn)
                 location = of_get_property(dn, "ibm,loc-code",
@@ -510,11 +510,11 @@ void system_reset_exception(struct pt_regs *regs)
 #ifdef CONFIG_PPC_BOOK3S_64
     BUG_ON(get_paca()->in_nmi == 0);
     if (get_paca()->in_nmi > 1)
-        nmi_panic(regs, "Unrecoverable nested System Reset");
+        die("Unrecoverable nested System Reset", regs, SIGABRT);
 #endif
     /* Must die if the interrupt is not recoverable */
     if (!(regs->msr & MSR_RI))
-        nmi_panic(regs, "Unrecoverable System Reset");
+        die("Unrecoverable System Reset", regs, SIGABRT);
 
     if (saved_hsrrs) {
         mtspr(SPRN_HSRR0, hsrr0);
@@ -858,7 +858,7 @@ void machine_check_exception(struct pt_regs *regs)
 
     /* Must die if the interrupt is not recoverable */
     if (!(regs->msr & MSR_RI))
-        nmi_panic(regs, "Unrecoverable Machine check");
+        die("Unrecoverable Machine check", regs, SIGBUS);
 
     return;
 
@@ -1104,6 +1104,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
                      kvm->arch.lpid);
         gpa += PAGE_SIZE;
     }
+    /*
+     * Increase the mmu notifier sequence number to prevent any page
+     * fault that read the memslot earlier from writing a PTE.
+     */
+    kvm->mmu_notifier_seq++;
     spin_unlock(&kvm->mmu_lock);
 }
 
@@ -3,6 +3,8 @@
  * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */
 
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_ppc.h>
@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
     u64 newmsr, bescr;
     int ra, rs;
 
-    switch (instr & 0xfc0007ff) {
+    /*
+     * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+     * in these instructions, so masking bit 31 out doesn't change these
+     * instructions. For treclaim., tsr., and trechkpt. instructions if bit
+     * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
+     * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
+     * 31 is an acceptable way to handle these invalid forms that have
+     * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
+     * bit 31 set) can generate a softpatch interrupt. Hence both forms
+     * are handled below for these instructions so they behave the same way.
+     */
+    switch (instr & PO_XOP_OPCODE_MASK) {
     case PPC_INST_RFID:
         /* XXX do we need to check for PR=0 here? */
         newmsr = vcpu->arch.shregs.srr1;
@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr = newmsr;
         return RESUME_GUEST;
 
-    case PPC_INST_TSR:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
         /* check for PR=1 and arch 2.06 bit set in PCR */
         if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
             /* generate an illegal instruction interrupt */
@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr = msr;
         return RESUME_GUEST;
 
-    case PPC_INST_TRECLAIM:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
         /* check for TM disabled in the HFSCR or MSR */
         if (!(vcpu->arch.hfscr & HFSCR_TM)) {
             /* generate an illegal instruction interrupt */
@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
         return RESUME_GUEST;
 
-    case PPC_INST_TRECHKPT:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
         /* XXX do we need to check for PR=0 here? */
         /* check for TM disabled in the HFSCR or MSR */
         if (!(vcpu->arch.hfscr & HFSCR_TM)) {
@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
     }
 
     /* What should we do here? We didn't recognize the instruction */
-    WARN_ON_ONCE(1);
     kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+    pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
 
     return RESUME_GUEST;
 }
@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
     u64 newmsr, msr, bescr;
     int rs;
 
-    switch (instr & 0xfc0007ff) {
+    /*
+     * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+     * in these instructions, so masking bit 31 out doesn't change these
+     * instructions. For the tsr. instruction if bit 31 = 0 then it is per
+     * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
+     * Forms, informs specifically that ignoring bit 31 is an acceptable way
+     * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
+     * for emulation purposes both forms (w/ and wo/ bit 31 set) can
+     * generate a softpatch interrupt. Hence both forms are handled below
+     * for tsr. to make them behave the same way.
+     */
+    switch (instr & PO_XOP_OPCODE_MASK) {
     case PPC_INST_RFID:
         /* XXX do we need to check for PR=0 here? */
         newmsr = vcpu->arch.shregs.srr1;
@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
         vcpu->arch.shregs.msr = newmsr;
         return 1;
 
-    case PPC_INST_TSR:
+    /* ignore bit 31, see comment above */
+    case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
         /* we know the MSR has the TS field = S (0b01) here */
         msr = vcpu->arch.shregs.msr;
         /* check for PR=1 and arch 2.06 bit set in PCR */
@@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
         goto free_exit;
     }
 
-    pageshift = PAGE_SHIFT;
-    for (i = 0; i < entries; ++i) {
-        struct page *page = mem->hpages[i];
-
-        /*
-         * Allow to use larger than 64k IOMMU pages. Only do that
-         * if we are backed by hugetlb.
-         */
-        if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
-            pageshift = page_shift(compound_head(page));
-        mem->pageshift = min(mem->pageshift, pageshift);
-        /*
-         * We don't need struct page reference any more, switch
-         * to physical address.
-         */
-        mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
-    }
-
 good_exit:
     atomic64_set(&mem->mapped, 1);
     mem->used = 1;
@@ -158,6 +140,27 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
         }
     }
 
+    if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+        /*
+         * Allow to use larger than 64k IOMMU pages. Only do that
+         * if we are backed by hugetlb. Skip device memory as it is not
+         * backed with page structs.
+         */
+        pageshift = PAGE_SHIFT;
+        for (i = 0; i < entries; ++i) {
+            struct page *page = mem->hpages[i];
+
+            if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+                pageshift = page_shift(compound_head(page));
+            mem->pageshift = min(mem->pageshift, pageshift);
+            /*
+             * We don't need struct page reference any more, switch
+             * to physical address.
+             */
+            mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+        }
+    }
+
     list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
     mutex_unlock(&mem_list_mutex);
@@ -44,6 +44,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem);
 static struct imc_pmu_ref *trace_imc_refc;
 static int trace_imc_mem_size;
 
+/*
+ * Global data structure used to avoid races between thread,
+ * core and trace-imc
+ */
+static struct imc_pmu_ref imc_global_refc = {
+    .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+    .id = 0,
+    .refc = 0,
+};
+
 static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
 {
     return container_of(event->pmu, struct imc_pmu, pmu);
@@ -698,6 +708,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
             return -EINVAL;
 
         ref->refc = 0;
+        /*
+         * Reduce the global reference count, if this is the
+         * last cpu in this core and core-imc event running
+         * in this cpu.
+         */
+        mutex_lock(&imc_global_refc.lock);
+        if (imc_global_refc.id == IMC_DOMAIN_CORE)
+            imc_global_refc.refc--;
+
+        mutex_unlock(&imc_global_refc.lock);
     }
     return 0;
 }
@@ -710,6 +730,23 @@ static int core_imc_pmu_cpumask_init(void)
                  ppc_core_imc_cpu_offline);
 }
 
+static void reset_global_refc(struct perf_event *event)
+{
+    mutex_lock(&imc_global_refc.lock);
+    imc_global_refc.refc--;
+
+    /*
+     * If no other thread is running any
+     * event for this domain(thread/core/trace),
+     * set the global id to zero.
+     */
+    if (imc_global_refc.refc <= 0) {
+        imc_global_refc.refc = 0;
+        imc_global_refc.id = 0;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+}
+
 static void core_imc_counters_release(struct perf_event *event)
 {
     int rc, core_id;
@@ -759,6 +796,8 @@ static void core_imc_counters_release(struct perf_event *event)
         ref->refc = 0;
     }
     mutex_unlock(&ref->lock);
+
+    reset_global_refc(event);
 }
 
 static int core_imc_event_init(struct perf_event *event)
@@ -819,6 +858,29 @@ static int core_imc_event_init(struct perf_event *event)
     ++ref->refc;
     mutex_unlock(&ref->lock);
 
+    /*
+     * Since the system can run either in accumulation or trace-mode
+     * of IMC at a time, core-imc events are allowed only if no other
+     * trace/thread imc events are enabled/monitored.
+     *
+     * Take the global lock, and check the refc.id
+     * to know whether any other trace/thread imc
+     * events are running.
+     */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+        /*
+         * No other trace/thread imc events are running in
+         * the system, so set the refc.id to core-imc.
+         */
+        imc_global_refc.id = IMC_DOMAIN_CORE;
+        imc_global_refc.refc++;
+    } else {
+        mutex_unlock(&imc_global_refc.lock);
+        return -EBUSY;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+
     event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
     event->destroy = core_imc_counters_release;
     return 0;
@@ -877,7 +939,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)
 
 static int ppc_thread_imc_cpu_offline(unsigned int cpu)
 {
-    mtspr(SPRN_LDBAR, 0);
+    /*
+     * Set the bit 0 of LDBAR to zero.
+     *
+     * If bit 0 of LDBAR is unset, it will stop posting
+     * the counter data to memory.
+     * For thread-imc, bit 0 of LDBAR will be set to 1 in the
+     * event_add function. So reset this bit here, to stop the updates
+     * to memory in the cpu_offline path.
+     */
+    mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
+    /* Reduce the refc if thread-imc event running on this cpu */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+        imc_global_refc.refc--;
+    mutex_unlock(&imc_global_refc.lock);
+
     return 0;
 }
 
@@ -916,7 +994,22 @@ static int thread_imc_event_init(struct perf_event *event)
     if (!target)
         return -EINVAL;
 
+    mutex_lock(&imc_global_refc.lock);
+    /*
+     * Check if any other trace/core imc events are running in the
+     * system, if not set the global id to thread-imc.
+     */
+    if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
+        imc_global_refc.id = IMC_DOMAIN_THREAD;
+        imc_global_refc.refc++;
+    } else {
+        mutex_unlock(&imc_global_refc.lock);
+        return -EBUSY;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+
     event->pmu->task_ctx_nr = perf_sw_context;
+    event->destroy = reset_global_refc;
     return 0;
 }
 
@@ -1063,10 +1156,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
     int core_id;
     struct imc_pmu_ref *ref;
 
-    mtspr(SPRN_LDBAR, 0);
-
     core_id = smp_processor_id() / threads_per_core;
     ref = &core_imc_refc[core_id];
+    if (!ref) {
+        pr_debug("imc: Failed to get event reference count\n");
+        return;
+    }
 
     mutex_lock(&ref->lock);
     ref->refc--;
@@ -1082,6 +1177,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
         ref->refc = 0;
     }
     mutex_unlock(&ref->lock);
+
+    /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
+    mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
     /*
      * Take a snapshot and calculate the delta and update
      * the event counter values.
@@ -1133,7 +1232,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)
 
 static int ppc_trace_imc_cpu_offline(unsigned int cpu)
 {
-    mtspr(SPRN_LDBAR, 0);
+    /*
+     * No need to set bit 0 of LDBAR to zero, as
+     * it is set to zero for imc trace-mode
+     *
+     * Reduce the refc if any trace-imc event running
+     * on this cpu.
+     */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+        imc_global_refc.refc--;
+    mutex_unlock(&imc_global_refc.lock);
+
     return 0;
 }
 
@@ -1226,15 +1336,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
     local_mem = get_trace_imc_event_base_addr();
     ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
 
-    if (core_imc_refc)
-        ref = &core_imc_refc[core_id];
+    /* trace-imc reference count */
+    if (trace_imc_refc)
+        ref = &trace_imc_refc[core_id];
     if (!ref) {
-        /* If core-imc is not enabled, use trace-imc reference count */
-        if (trace_imc_refc)
-            ref = &trace_imc_refc[core_id];
-        if (!ref)
-            return -EINVAL;
+        pr_debug("imc: Failed to get the event reference count\n");
+        return -EINVAL;
     }
 
     mtspr(SPRN_LDBAR, ldbar_value);
     mutex_lock(&ref->lock);
     if (ref->refc == 0) {
@@ -1242,13 +1351,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
                 get_hard_smp_processor_id(smp_processor_id()))) {
             mutex_unlock(&ref->lock);
             pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
-            mtspr(SPRN_LDBAR, 0);
             return -EINVAL;
         }
     }
     ++ref->refc;
     mutex_unlock(&ref->lock);
-
     return 0;
 }
 
@@ -1274,16 +1381,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
     int core_id = smp_processor_id() / threads_per_core;
     struct imc_pmu_ref *ref = NULL;
 
-    if (core_imc_refc)
-        ref = &core_imc_refc[core_id];
+    if (trace_imc_refc)
+        ref = &trace_imc_refc[core_id];
     if (!ref) {
-        /* If core-imc is not enabled, use trace-imc reference count */
-        if (trace_imc_refc)
-            ref = &trace_imc_refc[core_id];
-        if (!ref)
-            return;
+        pr_debug("imc: Failed to get event reference count\n");
+        return;
     }
-    mtspr(SPRN_LDBAR, 0);
 
     mutex_lock(&ref->lock);
     ref->refc--;
     if (ref->refc == 0) {
@@ -1297,6 +1401,7 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
         ref->refc = 0;
     }
     mutex_unlock(&ref->lock);
+
    trace_imc_event_stop(event, flags);
 }
 
@@ -1314,10 +1419,30 @@ static int trace_imc_event_init(struct perf_event *event)
     if (event->attr.sample_period == 0)
         return -ENOENT;
 
+    /*
+     * Take the global lock, and make sure
+     * no other thread is running any core/thread imc
+     * events
+     */
+    mutex_lock(&imc_global_refc.lock);
+    if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+        /*
+         * No core/thread imc events are running in the
+         * system, so set the refc.id to trace-imc.
+         */
+        imc_global_refc.id = IMC_DOMAIN_TRACE;
+        imc_global_refc.refc++;
+    } else {
+        mutex_unlock(&imc_global_refc.lock);
+        return -EBUSY;
+    }
+    mutex_unlock(&imc_global_refc.lock);
+
     event->hw.idx = -1;
     target = event->hw.target;
 
     event->pmu->task_ctx_nr = perf_hw_context;
+    event->destroy = reset_global_refc;
     return 0;
 }
 
@@ -1429,10 +1554,10 @@ static void cleanup_all_core_imc_memory(void)
 static void thread_imc_ldbar_disable(void *dummy)
 {
     /*
-     * By Zeroing LDBAR, we disable thread-imc
-     * updates.
+     * By setting 0th bit of LDBAR to zero, we disable thread-imc
+     * updates to memory.
      */
-    mtspr(SPRN_LDBAR, 0);
+    mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 }
 
 void thread_imc_disable(void)
@@ -63,4 +63,11 @@ do { \
 * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
 */
 #define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
 #endif

@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 	return __ftrace_modify_call(rec->ip, addr, false);
 }
 
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+	int out;
+
+	ftrace_arch_code_modify_prepare();
+	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	ftrace_arch_code_modify_post_process();
+
+	return out;
+}
+
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,

@@ -1247,26 +1247,46 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
 {
-	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
-		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
-	return (p4d_t *) pgd;
+	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
+	return (p4d_t *) pgdp;
 }
+#define p4d_offset_lockless p4d_offset_lockless
+
+static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
+{
+	return p4d_offset_lockless(pgdp, *pgdp, address);
+}
 
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
 {
-	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
-		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
-	return (pud_t *) p4d;
+	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+		return (pud_t *) p4d_deref(p4d) + pud_index(address);
+	return (pud_t *) p4dp;
 }
+#define pud_offset_lockless pud_offset_lockless
+
+static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
+{
+	return pud_offset_lockless(p4dp, *p4dp, address);
+}
+#define pud_offset pud_offset
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
 {
-	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
-		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
-	return (pmd_t *) pud;
+	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+		return (pmd_t *) pud_deref(pud) + pmd_index(address);
+	return (pmd_t *) pudp;
 }
+#define pmd_offset_lockless pmd_offset_lockless
+
+static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
+{
+	return pmd_offset_lockless(pudp, *pudp, address);
+}
+#define pmd_offset pmd_offset
 
 static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
 {

@@ -111,4 +111,15 @@ struct stack_frame {
 	r2; \
 })
 
+#define CALL_ON_STACK_NORETURN(fn, stack) \
+({ \
+	asm volatile( \
+		"	la	15,0(%[_stack])\n" \
+		"	xc	%[_bc](8,15),%[_bc](15)\n" \
+		"	brasl	14,%[_fn]\n" \
+		::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+		  [_stack] "a" (stack), [_fn] "X" (fn)); \
+	BUG(); \
+})
+
 #endif /* _ASM_S390_STACKTRACE_H */

@@ -294,11 +294,6 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction external_interrupt = {
-	.name = "EXT",
-	.handler = do_ext_interrupt,
-};
-
 void __init init_ext_interrupts(void)
 {
 	int idx;
@@ -308,7 +303,8 @@ void __init init_ext_interrupts(void)
 
 	irq_set_chip_and_handler(EXT_INTERRUPT,
 				 &dummy_irq_chip, handle_percpu_irq);
-	setup_irq(EXT_INTERRUPT, &external_interrupt);
+	if (request_irq(EXT_INTERRUPT, do_ext_interrupt, 0, "EXT", NULL))
+		panic("Failed to register EXT interrupt\n");
 }
 
 static DEFINE_SPINLOCK(irq_subclass_lock);

@@ -1429,8 +1429,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
 	idx = aux->empty_mark + 1;
 	for (i = 0; i < range_scan; i++, idx++) {
 		te = aux_sdb_trailer(aux, idx);
-		te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
-		te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
+		te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
+			       SDB_TE_ALERT_REQ_MASK);
 		te->overflow = 0;
 	}
 	/* Save the position of empty SDBs */
@@ -1477,8 +1477,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
 	te = aux_sdb_trailer(aux, alert_index);
 	do {
 		orig_flags = te->flags;
-		orig_overflow = te->overflow;
-		*overflow = orig_overflow;
+		*overflow = orig_overflow = te->overflow;
 		if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
 			/*
 			 * SDB is already set by hardware.
@@ -1712,7 +1711,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
 	}
 
 	/* Allocate aux_buffer struct for the event */
-	aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+	aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
 	if (!aux)
 		goto no_aux;
 	sfb = &aux->sfb;

@@ -356,7 +356,6 @@ early_initcall(async_stack_realloc);
 
 void __init arch_call_rest_init(void)
 {
-	struct stack_frame *frame;
 	unsigned long stack;
 
 	stack = stack_alloc();
@@ -369,13 +368,7 @@ void __init arch_call_rest_init(void)
 	set_task_stack_end_magic(current);
 	stack += STACK_INIT_OFFSET;
 	S390_lowcore.kernel_stack = stack;
-	frame = (struct stack_frame *) stack;
-	memset(frame, 0, sizeof(*frame));
-	/* Branch to rest_init on the new stack, never returns */
-	asm volatile(
-		"	la	15,0(%[_frame])\n"
-		"	jg	rest_init\n"
-		: : [_frame] "a" (frame));
+	CALL_ON_STACK_NORETURN(rest_init, stack);
 }
 
 static void __init setup_lowcore_dat_off(void)
@@ -634,7 +627,7 @@ static struct notifier_block kdump_mem_nb = {
 /*
  * Make sure that the area behind memory_end is protected
  */
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
 {
 	if (memory_end_set)
 		memblock_reserve(memory_end, ULONG_MAX);
@@ -643,7 +636,7 @@ static void reserve_memory_end(void)
 /*
  * Make sure that oldmem, where the dump is stored, is protected
 */
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	if (OLDMEM_BASE)
@@ -655,7 +648,7 @@ static void reserve_oldmem(void)
 /*
  * Make sure that oldmem, where the dump is stored, is protected
 */
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	if (OLDMEM_BASE)

@@ -878,7 +878,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
 	S390_lowcore.restart_source = -1UL;
 	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
 	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
-	CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+	CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
 }
 
 /* Upping and downing of CPUs */

@@ -10,4 +10,10 @@ int crash_setup_memmap_entries(struct kimage *image,
 		struct boot_params *params);
 void crash_smp_send_stop(void);
 
+#ifdef CONFIG_KEXEC_CORE
+void __init crash_reserve_low_1M(void);
+#else
+static inline void __init crash_reserve_low_1M(void) { }
+#endif
+
 #endif /* _ASM_X86_CRASH_H */

@@ -320,7 +320,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
-static inline void mds_clear_cpu_buffers(void)
+static __always_inline void mds_clear_cpu_buffers(void)
 {
 	static const u16 ds = __KERNEL_DS;
 
@@ -341,7 +341,7 @@ static inline void mds_clear_cpu_buffers(void)
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
-static inline void mds_user_clear_cpu_buffers(void)
+static __always_inline void mds_user_clear_cpu_buffers(void)
 {
 	if (static_branch_likely(&mds_user_clear))
 		mds_clear_cpu_buffers();

@@ -4,6 +4,11 @@
 
 #define ARCH_DEFAULT_PKEY 0
 
+/*
+ * If more than 16 keys are ever supported, a thorough audit
+ * will be necessary to ensure that the types that store key
+ * numbers and masks have sufficient capacity.
+ */
 #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,

@@ -2256,6 +2256,7 @@ static inline void __init check_timer(void)
 	legacy_pic->init(0);
 	legacy_pic->make_irq(0);
 	apic_write(APIC_LVT0, APIC_DM_EXTINT);
+	legacy_pic->unmask(0);
 
 	unlock_ExtINT_logic();
 

@@ -24,6 +24,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/hardirq.h>
@@ -39,6 +40,7 @@
 #include <asm/virtext.h>
 #include <asm/intel_pt.h>
 #include <asm/crash.h>
+#include <asm/cmdline.h>
 
 /* Used while preparing memory map entries for second kernel */
 struct crash_memmap_data {
@@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
 	rcu_read_unlock();
 }
 
+/*
+ * When the crashkernel option is specified, only use the low
+ * 1M for the real mode trampoline.
+ */
+void __init crash_reserve_low_1M(void)
+{
+	if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
+		return;
+
+	memblock_reserve(0, 1<<20);
+	pr_info("Reserving the low 1M of memory for crashkernel\n");
+}
+
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)

@@ -895,8 +895,6 @@ const void *get_xsave_field_ptr(int xfeature_nr)
 
 #ifdef CONFIG_ARCH_HAS_PKEYS
 
-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
 /*
 * This will go out and modify PKRU register to set the access
 * rights for @pkey to @init_val.
@@ -915,6 +913,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return -EINVAL;
 
+	/*
+	 * This code should only be called with valid 'pkey'
+	 * values originating from in-kernel users. Complain
+	 * if a bad value is observed.
+	 */
+	WARN_ON_ONCE(pkey >= arch_max_pkey());
+
 	/* Set the bits we need in PKRU: */
 	if (init_val & PKEY_DISABLE_ACCESS)
 		new_pkru_bits |= PKRU_AD_BIT;

@@ -5836,6 +5836,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	ctxt->eip = ctxt->_eip;
+	if (ctxt->mode != X86EMUL_MODE_PROT64)
+		ctxt->eip = (u32)ctxt->_eip;
 
 done:
 	if (rc == X86EMUL_PROPAGATE_FAULT) {

@@ -1684,7 +1684,7 @@ static void start_sw_period(struct kvm_lapic *apic)
 
 	hrtimer_start(&apic->lapic_timer.timer,
 		      apic->lapic_timer.target_expiration,
-		      HRTIMER_MODE_ABS);
+		      HRTIMER_MODE_ABS_HARD);
 }
 
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)

@@ -339,7 +339,7 @@ TRACE_EVENT(
 		/* These depend on page entry type, so compute them now. */
 		__field(bool, r)
 		__field(bool, x)
-		__field(u8, u)
+		__field(signed char, u)
 	),
 
 	TP_fast_assign(

@@ -787,9 +787,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
 			return 0;
 	} else {
-		if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
-			pr_err("%s: ip 0x%lx next 0x%llx\n",
-			       __func__, kvm_rip_read(vcpu), svm->next_rip);
 		kvm_rip_write(vcpu, svm->next_rip);
 	}
 	svm_set_interrupt_shadow(vcpu, 0);
@@ -3970,6 +3967,12 @@ static int iret_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int invd_interception(struct vcpu_svm *svm)
+{
+	/* Treat an INVD instruction as a NOP and just skip it. */
+	return kvm_skip_emulated_instruction(&svm->vcpu);
+}
+
 static int invlpg_interception(struct vcpu_svm *svm)
 {
 	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
@@ -4822,7 +4825,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_RDPMC]			= rdpmc_interception,
 	[SVM_EXIT_CPUID]			= cpuid_interception,
 	[SVM_EXIT_IRET]				= iret_interception,
-	[SVM_EXIT_INVD]				= emulate_on_interception,
+	[SVM_EXIT_INVD]				= invd_interception,
 	[SVM_EXIT_PAUSE]			= pause_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,

@@ -1130,6 +1130,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 					   vmx->guest_msrs[i].mask);
 
 	}
 
+	if (vmx->nested.need_vmcs12_to_shadow_sync)
+		nested_sync_vmcs12_to_shadow(vcpu);
+
 	if (vmx->guest_state_loaded)
 		return;
 
@@ -1537,7 +1541,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
 
 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-	unsigned long rip;
+	unsigned long rip, orig_rip;
 
 	/*
 	 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
@@ -1549,8 +1553,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	 */
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
 	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
-		rip = kvm_rip_read(vcpu);
-		rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+		orig_rip = kvm_rip_read(vcpu);
+		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+#ifdef CONFIG_X86_64
+		/*
+		 * We need to mask out the high 32 bits of RIP if not in 64-bit
+		 * mode, but just finding out that we are in 64-bit mode is
+		 * quite expensive. Only do it if there was a carry.
+		 */
+		if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
+			rip = (u32)rip;
+#endif
 		kvm_rip_write(vcpu, rip);
 	} else {
 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
@@ -6486,8 +6499,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
 	}
 
-	if (vmx->nested.need_vmcs12_to_shadow_sync)
-		nested_sync_vmcs12_to_shadow(vcpu);
+	/*
+	 * We did this in prepare_switch_to_guest, because it needs to
+	 * be within srcu_read_lock.
+	 */
+	WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
 
 	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);

@@ -973,6 +973,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
 				   X86_CR4_SMEP;
+	unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE;
 
 	if (kvm_valid_cr4(vcpu, cr4))
 		return 1;
@@ -1000,7 +1001,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
 		return 1;
 
-	if (((cr4 ^ old_cr4) & pdptr_bits) ||
+	if (((cr4 ^ old_cr4) & mmu_role_bits) ||
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
@@ -5050,10 +5051,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
 			goto out;
 		mutex_lock(&kvm->lock);
 		r = -ENXIO;
 		if (!kvm->arch.vpit)
-			goto out;
+			goto set_pit_out;
 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
+set_pit_out:
 		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_GET_PIT2: {
@@ -5073,10 +5077,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
 			goto out;
 		mutex_lock(&kvm->lock);
 		r = -ENXIO;
 		if (!kvm->arch.vpit)
-			goto out;
+			goto set_pit2_out;
 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
+set_pit2_out:
 		mutex_unlock(&kvm->lock);
 		break;
 	}
 	case KVM_REINJECT_CONTROL: {

@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);

@@ -8,6 +8,7 @@
 #include <asm/pgtable.h>
 #include <asm/realmode.h>
 #include <asm/tlbflush.h>
+#include <asm/crash.h>
 
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
@@ -34,6 +35,7 @@ void __init reserve_real_mode(void)
 
 	memblock_reserve(mem, size);
 	set_real_mode_mem(mem);
+	crash_reserve_low_1M();
 }
 
 static void __init setup_real_mode(void)

@@ -1897,6 +1897,7 @@ ENTRY(system_call)
 
 	mov	a6, a2
 	call4	do_syscall_trace_enter
+	beqz	a6, .Lsyscall_exit
 	l32i	a7, a2, PT_SYSCALL
 
 1:
@@ -1911,8 +1912,6 @@ ENTRY(system_call)
 
 	addx4	a4, a7, a4
 	l32i	a4, a4, 0
-	movi	a5, sys_ni_syscall;
-	beq	a4, a5, 1f
 
 	/* Load args: arg0 - arg5 are passed via regs. */
 
@@ -1932,6 +1931,7 @@ ENTRY(system_call)
 
 	s32i	a6, a2, PT_AREG2
 	bnez	a3, 1f
+.Lsyscall_exit:
 	abi_ret(4)
 
 1:

@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
 	return ret;
 }
 
-void do_syscall_trace_enter(struct pt_regs *regs)
+void do_syscall_trace_leave(struct pt_regs *regs);
+int do_syscall_trace_enter(struct pt_regs *regs)
 {
-	if (regs->syscall == NO_SYSCALL)
-		regs->areg[2] = -ENOSYS;
-
 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
+	    tracehook_report_syscall_entry(regs)) {
+		regs->areg[2] = -ENOSYS;
 		regs->syscall = NO_SYSCALL;
+		return 0;
+	}
+
+	if (regs->syscall == NO_SYSCALL) {
+		do_syscall_trace_leave(regs);
+		return 0;
+	}
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
 		trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+	return 1;
 }
 
 void do_syscall_trace_leave(struct pt_regs *regs)

@@ -1043,29 +1043,21 @@ void acpi_ec_unblock_transactions(void)
 /* --------------------------------------------------------------------------
                                 Event Management
    -------------------------------------------------------------------------- */
-static struct acpi_ec_query_handler *
-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
-{
-	if (handler)
-		kref_get(&handler->kref);
-	return handler;
-}
-
 static struct acpi_ec_query_handler *
 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
 {
 	struct acpi_ec_query_handler *handler;
-	bool found = false;
 
 	mutex_lock(&ec->mutex);
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
-			found = true;
-			break;
+			kref_get(&handler->kref);
+			mutex_unlock(&ec->mutex);
+			return handler;
 		}
 	}
 	mutex_unlock(&ec->mutex);
-	return found ? acpi_ec_get_query_handler(handler) : NULL;
+	return NULL;
 }
 
 static void acpi_ec_query_handler_release(struct kref *kref)

@@ -56,7 +56,7 @@ struct acard_sg {
 	__le32 size; /* bit 31 (EOT) max==0x10000 (64k) */
 };
 
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int acard_ahci_port_start(struct ata_port *ap);
 static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -210,7 +210,7 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
 	return si;
 }
 
-static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
@@ -248,6 +248,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+	return AC_ERR_OK;
 }
 
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)

@@ -57,7 +57,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
 static void ahci_freeze(struct ata_port *ap);
 static void ahci_thaw(struct ata_port *ap);
@@ -1624,7 +1624,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
 	return sata_pmp_qc_defer_cmd_switch(qc);
 }
 
-static void ahci_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct ahci_port_priv *pp = ap->private_data;
@@ -1660,6 +1660,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
 
 	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
+
+	return AC_ERR_OK;
 }
 
 static void ahci_fbs_dec_intr(struct ata_port *ap)

@@ -4978,7 +4978,10 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
 	return ATA_DEFER_LINK;
 }
 
-void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
+enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
+{
+	return AC_ERR_OK;
+}
 
 /**
  *	ata_sg_init - Associate command with scatter-gather table.
@@ -5465,7 +5468,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 		return;
 	}
 
-	ap->ops->qc_prep(qc);
+	qc->err_mask |= ap->ops->qc_prep(qc);
+	if (unlikely(qc->err_mask))
+		goto err;
 	trace_ata_qc_issue(qc);
 	qc->err_mask |= ap->ops->qc_issue(qc);
 	if (unlikely(qc->err_mask))

@@ -2679,12 +2679,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	ata_bmdma_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
 
@@ -2697,12 +2699,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
  *	LOCKING:
  *	spin_lock_irqsave(host lock)
  */
-void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
+enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	ata_bmdma_fill_sg_dumb(qc);
+
+	return AC_ERR_OK;
 }
 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
 

@@ -510,7 +510,7 @@ static int pata_macio_cable_detect(struct ata_port *ap)
 	return ATA_CBL_PATA40;
 }
 
-static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 {
 	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
 	struct ata_port *ap = qc->ap;
@@ -523,7 +523,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 		   __func__, qc, qc->flags, write, qc->dev->devno);
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	table = (struct dbdma_cmd *) priv->dma_table_cpu;
 
@@ -568,6 +568,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
 	table->command = cpu_to_le16(DBDMA_STOP);
 
 	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
+
+	return AC_ERR_OK;
 }
 
 

@@ -44,25 +44,27 @@ static void pxa_ata_dma_irq(void *d)
 /*
 * Prepare taskfile for submission.
 */
-static void pxa_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pata_pxa_data *pd = qc->ap->private_data;
 	struct dma_async_tx_descriptor *tx;
 	enum dma_transfer_direction dir;
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
 	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
 				     DMA_PREP_INTERRUPT);
 	if (!tx) {
 		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
-		return;
+		return AC_ERR_OK;
 	}
 	tx->callback = pxa_ata_dma_irq;
 	tx->callback_param = pd;
 	pd->dma_cookie = dmaengine_submit(tx);
+
+	return AC_ERR_OK;
 }
 
 /*

@@ -116,7 +116,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
 				const struct pci_device_id *ent);
 static int adma_port_start(struct ata_port *ap);
 static void adma_port_stop(struct ata_port *ap);
-static void adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int adma_qc_issue(struct ata_queued_cmd *qc);
 static int adma_check_atapi_dma(struct ata_queued_cmd *qc);
 static void adma_freeze(struct ata_port *ap);
@@ -295,7 +295,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
 	return i;
 }
 
-static void adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct adma_port_priv *pp = qc->ap->private_data;
 	u8 *buf = pp->pkt;
@@ -306,7 +306,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 
 	adma_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		return;
+		return AC_ERR_OK;
 
 	buf[i++] = 0;	/* Response flags */
 	buf[i++] = 0;	/* reserved */
@@ -371,6 +371,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
 		printk("%s\n", obuf);
 	}
 #endif
+	return AC_ERR_OK;
 }
 
 static inline void adma_packet_start(struct ata_queued_cmd *qc)

@@ -502,7 +502,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
 	return num_prde;
 }
 
-static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct sata_fsl_port_priv *pp = ap->private_data;
@@ -548,6 +548,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 
 	VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
 		desc_info, ttl_dwords, num_prde);
+
+	return AC_ERR_OK;
 }
 
 static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)

@@ -478,7 +478,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
 	prd[-1].flags |= PRD_END;
 }
 
-static void inic_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct inic_port_priv *pp = qc->ap->private_data;
 	struct inic_pkt *pkt = pp->pkt;
@@ -538,6 +538,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc)
 		inic_fill_sg(prd, qc);
 
 	pp->cpb_tbl[0] = pp->pkt_dma;
+
+	return AC_ERR_OK;
 }
 
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)

@@ -592,8 +592,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static int mv_qc_defer(struct ata_queued_cmd *qc);
-static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
 static int mv_hardreset(struct ata_link *link, unsigned int *class,
 			unsigned long deadline);
@@ -2031,7 +2031,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
 *      LOCKING:
 *      Inherited from caller.
 */
-static void mv_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct mv_port_priv *pp = ap->private_data;
@@ -2043,15 +2043,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	switch (tf->protocol) {
 	case ATA_PROT_DMA:
 		if (tf->command == ATA_CMD_DSM)
-			return;
+			return AC_ERR_OK;
 		/* fall-thru */
 	case ATA_PROT_NCQ:
 		break;	/* continue below */
 	case ATA_PROT_PIO:
 		mv_rw_multi_errata_sata24(qc);
-		return;
+		return AC_ERR_OK;
 	default:
-		return;
+		return AC_ERR_OK;
 	}
 
 	/* Fill in command request block
@@ -2098,12 +2098,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
 		 * of which are defined/used by Linux. If we get here, this
 		 * driver needs work.
-		 *
-		 * FIXME: modify libata to give qc_prep a return value and
-		 * return error here.
 		 */
-		BUG_ON(tf->command);
-		break;
+		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
+				tf->command);
+		return AC_ERR_INVALID;
 	}
 	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
 	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
@@ -2116,8 +2114,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 	mv_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 /**
@@ -2132,7 +2132,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 *      LOCKING:
 *      Inherited from caller.
 */
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
+static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct mv_port_priv *pp = ap->private_data;
@@ -2143,9 +2143,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 
 	if ((tf->protocol != ATA_PROT_DMA) &&
 	    (tf->protocol != ATA_PROT_NCQ))
-		return;
+		return AC_ERR_OK;
 	if (tf->command == ATA_CMD_DSM)
-		return;  /* use bmdma for this */
+		return AC_ERR_OK;  /* use bmdma for this */
 
 	/* Fill in Gen IIE command request block */
 	if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2186,8 +2186,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	     );
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 	mv_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 /**

@@ -297,7 +297,7 @@ static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static int nv_adma_slave_config(struct scsi_device *sdev);
 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
 static void nv_adma_irq_clear(struct ata_port *ap);
@@ -319,7 +319,7 @@ static void nv_mcp55_freeze(struct ata_port *ap);
 static void nv_swncq_error_handler(struct ata_port *ap);
 static int nv_swncq_slave_config(struct scsi_device *sdev);
 static int nv_swncq_port_start(struct ata_port *ap);
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
@@ -1344,7 +1344,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 	return 1;
 }
 
-static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
 	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
@@ -1356,7 +1356,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 			(qc->flags & ATA_QCFLAG_DMAMAP));
 		nv_adma_register_mode(qc->ap);
 		ata_bmdma_qc_prep(qc);
-		return;
+		return AC_ERR_OK;
 	}
 
 	cpb->resp_flags = NV_CPB_RESP_DONE;
@@ -1388,6 +1388,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 	cpb->ctl_flags = ctl_flags;
 	wmb();
 	cpb->resp_flags = 0;
+
+	return AC_ERR_OK;
 }
 
 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
@@ -1950,17 +1952,19 @@ static int nv_swncq_port_start(struct ata_port *ap)
 	return 0;
 }
 
-static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (qc->tf.protocol != ATA_PROT_NCQ) {
 		ata_bmdma_qc_prep(qc);
-		return;
+		return AC_ERR_OK;
 	}
 
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	nv_swncq_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)

@@ -139,7 +139,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va
 static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int pdc_common_port_start(struct ata_port *ap);
 static int pdc_sata_port_start(struct ata_port *ap);
-static void pdc_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -633,7 +633,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
 	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
-static void pdc_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct pdc_port_priv *pp = qc->ap->private_data;
 	unsigned int i;
@@ -665,6 +665,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
 	default:
 		break;
 	}
+
+	return AC_ERR_OK;
 }
 
 static int pdc_is_sataii_tx4(unsigned long flags)

@@ -100,7 +100,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int qs_port_start(struct ata_port *ap);
 static void qs_host_stop(struct ata_host *host);
-static void qs_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
 static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
 static void qs_freeze(struct ata_port *ap);
@@ -260,7 +260,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
 	return si;
 }
 
-static void qs_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct qs_port_priv *pp = qc->ap->private_data;
 	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
@@ -272,7 +272,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 
 	qs_enter_reg_mode(qc->ap);
 	if (qc->tf.protocol != ATA_PROT_DMA)
-		return;
+		return AC_ERR_OK;
 
 	nelem = qs_fill_sg(qc);
 
@@ -295,6 +295,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
 
 	/* frame information structure (FIS) */
 	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);
+
+	return AC_ERR_OK;
 }
 
 static inline void qs_packet_start(struct ata_queued_cmd *qc)

@@ -550,12 +550,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
 	prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND);
 }
 
-static void sata_rcar_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	sata_rcar_bmdma_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)

@@ -103,7 +103,7 @@ static void sil_dev_config(struct ata_device *dev);
 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
-static void sil_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc);
 static void sil_bmdma_setup(struct ata_queued_cmd *qc);
 static void sil_bmdma_start(struct ata_queued_cmd *qc);
 static void sil_bmdma_stop(struct ata_queued_cmd *qc);
@@ -317,12 +317,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
 		last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
 }
 
-static void sil_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc)
 {
 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
-		return;
+		return AC_ERR_OK;
 
 	sil_fill_sg(qc);
+
+	return AC_ERR_OK;
 }
 
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)

@@ -326,7 +326,7 @@ static void sil24_dev_config(struct ata_device *dev);
 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
 static int sil24_qc_defer(struct ata_queued_cmd *qc);
-static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
 static void sil24_pmp_attach(struct ata_port *ap);
@@ -830,7 +830,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc)
 	return ata_std_qc_defer(qc);
 }
 
-static void sil24_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct sil24_port_priv *pp = ap->private_data;
@@ -874,6 +874,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 
 	if (qc->flags & ATA_QCFLAG_DMAMAP)
 		sil24_fill_sg(qc, sge);
+
+	return AC_ERR_OK;
 }
 
 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)

@@ -202,7 +202,7 @@ static void pdc_error_handler(struct ata_port *ap);
 static void pdc_freeze(struct ata_port *ap);
 static void pdc_thaw(struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static unsigned int pdc20621_dimm_init(struct ata_host *host);
@@ -530,7 +530,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
 	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
 }
 
-static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
+static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
 {
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
@@ -542,6 +542,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
 	default:
 		break;
 	}
+
+	return AC_ERR_OK;
 }
 
 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,

@@ -2245,7 +2245,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
 
 	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
 	if (rc < 0)
-		goto out;
+		goto err_disable;
 
 	rc = -ENOMEM;
 	eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);

@@ -298,7 +298,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 static int __init parse_core(struct device_node *core, int package_id,
 			     int core_id)
 {
-	char name[10];
+	char name[20];
 	bool leaf = true;
 	int i = 0;
 	int cpu;
@@ -345,7 +345,7 @@ static int __init parse_core(struct device_node *core, int package_id,
 
 static int __init parse_cluster(struct device_node *cluster, int depth)
 {
-	char name[10];
+	char name[20];
 	bool leaf = true;
 	bool has_cores = false;
 	struct device_node *c;

@@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len);
+		      const void *val, size_t val_len, bool noinc);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
@@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 
 	map->cache_bypass = true;
 
-	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
+	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
 	if (ret)
 		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
 			base, cur - map->reg_stride, ret);
 
@@ -1468,7 +1468,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
 }
 
 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
-				  const void *val, size_t val_len)
+				  const void *val, size_t val_len, bool noinc)
 {
 	struct regmap_range_node *range;
 	unsigned long flags;
@@ -1527,7 +1527,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 				win_residue, val_len / map->format.val_bytes);
 			ret = _regmap_raw_write_impl(map, reg, val,
 						     win_residue *
-						     map->format.val_bytes);
+						     map->format.val_bytes, noinc);
 			if (ret != 0)
 				return ret;
 
@@ -1541,7 +1541,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
 			win_residue = range->window_len - win_offset;
 		}
 
-		ret = _regmap_select_page(map, &reg, range, val_num);
+		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
 		if (ret != 0)
 			return ret;
 	}
@@ -1749,7 +1749,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				      map->work_buf +
 				      map->format.reg_bytes +
 				      map->format.pad_bytes,
-				      map->format.val_bytes);
+				      map->format.val_bytes,
+				      false);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1843,7 +1844,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-		      const void *val, size_t val_len)
+		      const void *val, size_t val_len, bool noinc)
 {
 	size_t val_bytes = map->format.val_bytes;
 	size_t val_count = val_len / val_bytes;
@@ -1864,7 +1865,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	/* Write as many bytes as possible with chunk_size */
 	for (i = 0; i < chunk_count; i++) {
-		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
 		if (ret)
 			return ret;
 
@@ -1875,7 +1876,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	/* Write remaining bytes */
 	if (val_len)
-		ret = _regmap_raw_write_impl(map, reg, val, val_len);
+		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
 
 	return ret;
 }
@@ -1908,7 +1909,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
 	map->lock(map->lock_arg);
 
-	ret = _regmap_raw_write(map, reg, val, val_len);
+	ret = _regmap_raw_write(map, reg, val, val_len, false);
 
 	map->unlock(map->lock_arg);
 
@@ -1966,7 +1967,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
 			write_len = map->max_raw_write;
 		else
 			write_len = val_len;
-		ret = _regmap_raw_write(map, reg, val, write_len);
+		ret = _regmap_raw_write(map, reg, val, write_len, true);
 		if (ret)
 			goto out_unlock;
 		val = ((u8 *)val) + write_len;
@@ -2443,7 +2444,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 
 	map->async = true;
 
-	ret = _regmap_raw_write(map, reg, val, val_len);
+	ret = _regmap_raw_write(map, reg, val, val_len, false);
 
 	map->async = false;
 
@@ -2454,7 +2455,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
 
 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
-			    unsigned int val_len)
+			    unsigned int val_len, bool noinc)
 {
 	struct regmap_range_node *range;
 	int ret;
@@ -2467,7 +2468,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	range = _regmap_range_lookup(map, reg);
 	if (range) {
 		ret = _regmap_select_page(map, &reg, range,
-					  val_len / map->format.val_bytes);
+					  noinc ? 1 : val_len / map->format.val_bytes);
 		if (ret != 0)
 			return ret;
 	}
@@ -2505,7 +2506,7 @@ static int _regmap_bus_read(void *context, unsigned int reg,
 	if (!map->format.parse_val)
 		return -EINVAL;
 
-	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
+	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
 	if (ret == 0)
 		*val = map->format.parse_val(work_val);
 
@@ -2621,7 +2622,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
 		/* Read bytes that fit into whole chunks */
 		for (i = 0; i < chunk_count; i++) {
-			ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
 			if (ret != 0)
 				goto out;
 
@@ -2632,7 +2633,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
 		/* Read remaining bytes */
 		if (val_len) {
-			ret = _regmap_raw_read(map, reg, val, val_len);
+			ret = _regmap_raw_read(map, reg, val, val_len, false);
 			if (ret != 0)
 				goto out;
 		}
@@ -2707,7 +2708,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
 			read_len = map->max_raw_read;
 		else
 			read_len = val_len;
-		ret = _regmap_raw_read(map, reg, val, read_len);
+		ret = _regmap_raw_read(map, reg, val, read_len, true);
 		if (ret)
 			goto out_unlock;
 		val = ((u8 *)val) + read_len;

@@ -370,11 +370,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
 	 * the end.
 	 */
 	len = patch_length;
-	buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
-		      GFP_KERNEL);
+	buf = kvmalloc(patch_length, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
+	memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
 	memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
 
 	*_buf = buf;
@@ -460,8 +460,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
 	if (ret < 0)
 		return ret;
 	ret = fw->size;
-	*buff = kmemdup(fw->data, ret, GFP_KERNEL);
-	if (!*buff)
+	*buff = kvmalloc(fw->size, GFP_KERNEL);
+	if (*buff)
+		memcpy(*buff, fw->data, ret);
+	else
 		ret = -ENOMEM;
 
 	release_firmware(fw);
@@ -499,14 +501,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
 		goto out;
 
 	if (btrtl_dev->cfg_len > 0) {
-		tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+		tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
 		if (!tbuff) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
 		memcpy(tbuff, fw_data, ret);
-		kfree(fw_data);
+		kvfree(fw_data);
 
 		memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
 		ret += btrtl_dev->cfg_len;
@@ -519,14 +521,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
 	ret = rtl_download_firmware(hdev, fw_data, ret);
 
 out:
-	kfree(fw_data);
+	kvfree(fw_data);
 	return ret;
 }
 
 void btrtl_free(struct btrtl_device_info *btrtl_dev)
 {
-	kfree(btrtl_dev->fw_data);
-	kfree(btrtl_dev->cfg_data);
+	kvfree(btrtl_dev->fw_data);
+	kvfree(btrtl_dev->cfg_data);
 	kfree(btrtl_dev);
 }
 EXPORT_SYMBOL_GPL(btrtl_free);

@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
 	return 0;
 }
 
+/*
+ * Released firmware describes the IO port max address as 0x3fff, which is
+ * the max host bus address. Fixup to a proper range. This will probably
+ * never be fixed in firmware.
+ */
+static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
+					       struct resource *r)
+{
+	if (r->end != 0x3fff)
+		return;
+
+	if (r->start == 0xe4)
+		r->end = 0xe4 + 0x04 - 1;
+	else if (r->start == 0x2f8)
+		r->end = 0x2f8 + 0x08 - 1;
+	else
+		dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
+			 r);
+}
+
 /*
 * hisi_lpc_acpi_set_io_res - set the resources for a child
 * @child: the device node to be updated the I/O resource
@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
 		return -ENOMEM;
 	}
 	count = 0;
-	list_for_each_entry(rentry, &resource_list, node)
-		resources[count++] = *rentry->res;
+	list_for_each_entry(rentry, &resource_list, node) {
+		resources[count] = *rentry->res;
+		hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
+		count++;
+	}
 
 	acpi_dev_free_resource_list(&resource_list);
 

@@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
     struct device *dev = &pdev->dev;
     int rc;
 
-    bt_bmc->irq = platform_get_irq(pdev, 0);
-    if (!bt_bmc->irq)
-        return -ENODEV;
+    bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+    if (bt_bmc->irq < 0)
+        return bt_bmc->irq;
 
     rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
                   DEVICE_NAME, bt_bmc);
     if (rc < 0) {
         dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
-        bt_bmc->irq = 0;
+        bt_bmc->irq = rc;
         return rc;
     }
 
@@ -479,7 +479,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
 
     bt_bmc_config_irq(bt_bmc, pdev);
 
-    if (bt_bmc->irq) {
+    if (bt_bmc->irq >= 0) {
         dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
     } else {
         dev_info(dev, "No IRQ; using timer\n");
@@ -505,7 +505,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
     struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
 
     misc_deregister(&bt_bmc->miscdev);
-    if (!bt_bmc->irq)
+    if (bt_bmc->irq < 0)
         del_timer_sync(&bt_bmc->poll_timer);
     return 0;
 }
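
The bt-bmc change leans on platform_get_irq_optional(), which returns a negative errno when no IRQ is wired up and a positive number for a valid IRQ, so the driver can keep the raw return value and test its sign later instead of overloading 0. A hedged sketch of the idiom, with a hypothetical helper:

#include <linux/platform_device.h>

/* Sketch: store the raw return value and test its sign later. */
static int example_config_irq(struct platform_device *pdev, int *irq_out)
{
    int irq = platform_get_irq_optional(pdev, 0);

    if (irq < 0)
        return irq;     /* no IRQ (or a real error): propagate the errno */

    *irq_out = irq;     /* valid IRQ numbers are > 0 */
    return 0;
}
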
@@ -1142,14 +1142,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
      * We take into account the first, second and third-order deltas
      * in order to make our estimate.
      */
-    delta = sample.jiffies - state->last_time;
-    state->last_time = sample.jiffies;
+    delta = sample.jiffies - READ_ONCE(state->last_time);
+    WRITE_ONCE(state->last_time, sample.jiffies);
 
-    delta2 = delta - state->last_delta;
-    state->last_delta = delta;
+    delta2 = delta - READ_ONCE(state->last_delta);
+    WRITE_ONCE(state->last_delta, delta);
 
-    delta3 = delta2 - state->last_delta2;
-    state->last_delta2 = delta2;
+    delta3 = delta2 - READ_ONCE(state->last_delta2);
+    WRITE_ONCE(state->last_delta2, delta2);
 
     if (delta < 0)
         delta = -delta;
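
add_timer_randomness() can run concurrently on several CPUs; the state fields are only entropy-estimation statistics, so the race itself is tolerated, but READ_ONCE()/WRITE_ONCE() stop the compiler from tearing or fusing the accesses and silence KCSAN. The shape of the annotation, reduced to a sketch with illustrative names:

#include <linux/compiler.h>

struct stats { long last; };

/* Sketch: a benign lockless update made tear-free for the compiler. */
static long update_delta(struct stats *s, long sample)
{
    long delta = sample - READ_ONCE(s->last);

    WRITE_ONCE(s->last, sample);
    return delta;
}
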
@@ -777,18 +777,22 @@ static int __init tlclk_init(void)
 {
     int ret;
 
-    ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
-    if (ret < 0) {
-        printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
-        return ret;
-    }
-    tlclk_major = ret;
+    telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
 
     alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
     if (!alarm_events) {
         ret = -ENOMEM;
         goto out1;
     }
 
+    ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+    if (ret < 0) {
+        printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+        kfree(alarm_events);
+        return ret;
+    }
+    tlclk_major = ret;
+
     /* Read telecom clock IRQ number (Set by BIOS) */
     if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
         printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
         ret = -EBUSY;
         goto out2;
     }
-    telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
 
     if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
         printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ static int __init tlclk_init(void)
     release_region(TLCLK_BASE, 8);
 out2:
     kfree(alarm_events);
-out1:
     unregister_chrdev(tlclk_major, "telco_clock");
+out1:
     return ret;
 }
 
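
The tlclk reordering is the standard init/unwind discipline: acquire resources in one order, and make each error label undo exactly the steps that succeeded before the failure point. A generic sketch of that pattern, with hypothetical setup_a()/setup_b() steps stubbed out for illustration:

/* Hypothetical setup/teardown steps, stubbed so the sketch compiles. */
static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static void teardown_a(void) { }

static int example_init(void)
{
    int ret;

    ret = setup_a();
    if (ret)
        return ret;     /* nothing to unwind yet */

    ret = setup_b();
    if (ret)
        goto undo_a;    /* only A succeeded, so only A is undone */

    return 0;

undo_a:
    teardown_a();
    return ret;
}
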
@@ -22,6 +22,7 @@
 #include "tpm.h"
 
 #define ACPI_SIG_TPM2 "TPM2"
+#define TPM_CRB_MAX_RESOURCES 3
 
 static const guid_t crb_acpi_start_guid =
     GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
@@ -91,7 +92,6 @@ enum crb_status {
 struct crb_priv {
     u32 sm;
     const char *hid;
-    void __iomem *iobase;
     struct crb_regs_head __iomem *regs_h;
     struct crb_regs_tail __iomem *regs_t;
     u8 __iomem *cmd;
@@ -434,21 +434,27 @@ static const struct tpm_class_ops tpm_crb = {
 
 static int crb_check_resource(struct acpi_resource *ares, void *data)
 {
-    struct resource *io_res = data;
+    struct resource *iores_array = data;
     struct resource_win win;
     struct resource *res = &(win.res);
+    int i;
 
     if (acpi_dev_resource_memory(ares, res) ||
         acpi_dev_resource_address_space(ares, &win)) {
-        *io_res = *res;
-        io_res->name = NULL;
+        for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
+            if (resource_type(iores_array + i) != IORESOURCE_MEM) {
+                iores_array[i] = *res;
+                iores_array[i].name = NULL;
+                break;
+            }
+        }
     }
 
     return 1;
 }
 
-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
-                 struct resource *io_res, u64 start, u32 size)
+static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
                 void __iomem **iobase_ptr, u64 start, u32 size)
 {
     struct resource new_res = {
         .start  = start,
@@ -460,10 +466,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
     if (start != new_res.start)
         return (void __iomem *) ERR_PTR(-EINVAL);
 
-    if (!resource_contains(io_res, &new_res))
+    if (!iores)
         return devm_ioremap_resource(dev, &new_res);
 
-    return priv->iobase + (new_res.start - io_res->start);
+    if (!*iobase_ptr) {
+        *iobase_ptr = devm_ioremap_resource(dev, iores);
+        if (IS_ERR(*iobase_ptr))
+            return *iobase_ptr;
+    }
+
+    return *iobase_ptr + (new_res.start - iores->start);
 }
 
 /*
@@ -490,9 +502,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
 static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
               struct acpi_table_tpm2 *buf)
 {
-    struct list_head resources;
-    struct resource io_res;
+    struct list_head acpi_resource_list;
+    struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
+    void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
     struct device *dev = &device->dev;
+    struct resource *iores;
+    void __iomem **iobase_ptr;
+    int i;
     u32 pa_high, pa_low;
     u64 cmd_pa;
     u32 cmd_size;
@@ -501,21 +517,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
     u32 rsp_size;
     int ret;
 
-    INIT_LIST_HEAD(&resources);
-    ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
-                     &io_res);
+    INIT_LIST_HEAD(&acpi_resource_list);
+    ret = acpi_dev_get_resources(device, &acpi_resource_list,
                     crb_check_resource, iores_array);
     if (ret < 0)
         return ret;
-    acpi_dev_free_resource_list(&resources);
+    acpi_dev_free_resource_list(&acpi_resource_list);
 
-    if (resource_type(&io_res) != IORESOURCE_MEM) {
+    if (resource_type(iores_array) != IORESOURCE_MEM) {
         dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
         return -EINVAL;
+    } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+           IORESOURCE_MEM) {
+        dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+        memset(iores_array + TPM_CRB_MAX_RESOURCES,
+               0, sizeof(*iores_array));
+        iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
     }
 
-    priv->iobase = devm_ioremap_resource(dev, &io_res);
-    if (IS_ERR(priv->iobase))
-        return PTR_ERR(priv->iobase);
+    iores = NULL;
+    iobase_ptr = NULL;
+    for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+        if (buf->control_address >= iores_array[i].start &&
+            buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
+            iores_array[i].end) {
+            iores = iores_array + i;
+            iobase_ptr = iobase_array + i;
+            break;
+        }
+    }
+
+    priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
+                   sizeof(struct crb_regs_tail));
+
+    if (IS_ERR(priv->regs_t))
+        return PTR_ERR(priv->regs_t);
 
     /* The ACPI IO region starts at the head area and continues to include
      * the control area, as one nice sane region except for some older
@@ -523,9 +559,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
      */
     if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
         (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
-        if (buf->control_address == io_res.start +
+        if (iores &&
+            buf->control_address == iores->start +
             sizeof(*priv->regs_h))
-            priv->regs_h = priv->iobase;
+            priv->regs_h = *iobase_ptr;
         else
             dev_warn(dev, FW_BUG "Bad ACPI memory layout");
     }
@@ -534,13 +571,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
     if (ret)
         return ret;
 
-    priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
-                   sizeof(struct crb_regs_tail));
-    if (IS_ERR(priv->regs_t)) {
-        ret = PTR_ERR(priv->regs_t);
-        goto out_relinquish_locality;
-    }
-
     /*
      * PTT HW bug w/a: wake up the device to access
      * possibly not retained registers.
@@ -552,13 +582,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
     pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
     pa_low  = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
     cmd_pa = ((u64)pa_high << 32) | pa_low;
-    cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
-                      ioread32(&priv->regs_t->ctrl_cmd_size));
+    cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
+
+    iores = NULL;
+    iobase_ptr = NULL;
+    for (i = 0; iores_array[i].end; ++i) {
+        if (cmd_pa >= iores_array[i].start &&
+            cmd_pa <= iores_array[i].end) {
+            iores = iores_array + i;
+            iobase_ptr = iobase_array + i;
+            break;
+        }
+    }
+
+    if (iores)
+        cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
 
     dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
         pa_high, pa_low, cmd_size);
 
-    priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
+    priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
     if (IS_ERR(priv->cmd)) {
         ret = PTR_ERR(priv->cmd);
         goto out;
@@ -566,11 +609,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
 
     memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
     rsp_pa = le64_to_cpu(__rsp_pa);
-    rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
-                      ioread32(&priv->regs_t->ctrl_rsp_size));
+    rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
+
+    iores = NULL;
+    iobase_ptr = NULL;
+    for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
+        if (rsp_pa >= iores_array[i].start &&
+            rsp_pa <= iores_array[i].end) {
+            iores = iores_array + i;
+            iobase_ptr = iobase_array + i;
+            break;
+        }
+    }
+
+    if (iores)
+        rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
 
     if (cmd_pa != rsp_pa) {
-        priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+        priv->rsp = crb_map_res(dev, iores, iobase_ptr,
+                    rsp_pa, rsp_size);
         ret = PTR_ERR_OR_ZERO(priv->rsp);
         goto out;
     }
@@ -581,6 +581,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
      */
     while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
         ibmvtpm_crq_process(crq, ibmvtpm);
+        wake_up_interruptible(&ibmvtpm->crq_queue.wq);
         crq->valid = 0;
         smp_wmb();
     }
@@ -628,6 +629,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
     }
 
     crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
+    init_waitqueue_head(&crq_q->wq);
     ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
                          CRQ_RES_BUF_SIZE,
                          DMA_BIDIRECTIONAL);
@@ -680,6 +682,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
     if (rc)
         goto init_irq_cleanup;
 
+    if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+                ibmvtpm->rtce_buf != NULL,
+                HZ)) {
+        dev_err(dev, "CRQ response timed out\n");
+        goto init_irq_cleanup;
+    }
+
     return tpm_chip_register(chip);
 init_irq_cleanup:
     do {
@@ -26,6 +26,7 @@ struct ibmvtpm_crq_queue {
     struct ibmvtpm_crq *crq_addr;
     u32 index;
     u32 num_entry;
+    wait_queue_head_t wq;
 };
 
 struct ibmvtpm_dev {
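
The ibmvtpm hunks add a waitqueue so probe can block until the interrupt path has published rtce_buf; wait_event_timeout() returns 0 on timeout and nonzero once the condition holds. A self-contained sketch of the handshake, with illustrative names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(resp_wq);
static void *resp_buf;  /* set by the interrupt path */

/* Interrupt side: publish the buffer, then wake the waiter. */
static void on_response(void *buf)
{
    resp_buf = buf;
    wake_up_interruptible(&resp_wq);
}

/* Probe side: give the response one second (HZ jiffies) to arrive. */
static int wait_for_response(void)
{
    if (!wait_event_timeout(resp_wq, resp_buf != NULL, HZ))
        return -ETIMEDOUT;
    return 0;
}
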
@@ -139,6 +139,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
     u32 val;
     u8 frac;
 
+    if (!rate)
+        return -EINVAL;
+
+    /* PFD can NOT change rate without gating */
+    WARN_ON(clk_pfdv2_is_enabled(hw));
+
     tmp = tmp * 18 + rate / 2;
     do_div(tmp, rate);
     frac = tmp;
@@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
     /* read VCO1 reg for numerator and denominator */
     reg = readl(socfpgaclk->hw.reg);
     refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
-    vco_freq = (unsigned long long)parent_rate / refdiv;
+
+    vco_freq = parent_rate;
+    do_div(vco_freq, refdiv);
 
     /* Read mdiv and fdiv from the fdbck register */
     reg = readl(socfpgaclk->hw.reg + 0x4);
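
The socfpga change replaces an open-coded 64-bit division, which on 32-bit targets can emit a libgcc helper the kernel does not link, with do_div(), which divides its 64-bit first argument in place by a 32-bit divisor and returns the remainder. A sketch of the idiom, under those assumptions:

#include <linux/types.h>
#include <asm/div64.h>  /* do_div() */

/* Sketch: divide a 64-bit rate by a 32-bit divider portably. */
static u64 scale_rate(u64 rate, u32 div)
{
    do_div(rate, div);  /* rate /= div; the remainder is returned */
    return rate;
}
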
@@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
         if (err)
             return NULL;
     } else {
-        const char *base_name = "adpll";
-        char *buf;
-
-        buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
-                    strlen(postfix), GFP_KERNEL);
-        if (!buf)
-            return NULL;
-        sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
-        name = buf;
+        name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
+                      d->pa, postfix);
     }
 
     return name;
@@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node)
         return PTR_ERR(clk);
     }
 
-    ret = ENXIO;
+    ret = -ENXIO;
     base = of_iomap(node, 0);
     if (!base) {
         pr_err("failed to map registers for clockevent\n");
@@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
 void powernv_cpufreq_work_fn(struct work_struct *work)
 {
     struct chip *chip = container_of(work, struct chip, throttle);
+    struct cpufreq_policy *policy;
     unsigned int cpu;
     cpumask_t mask;
 
@@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
     chip->restore = false;
     for_each_cpu(cpu, &mask) {
         int index;
-        struct cpufreq_policy policy;
 
-        cpufreq_get_policy(&policy, cpu);
-        index = cpufreq_table_find_index_c(&policy, policy.cur);
-        powernv_cpufreq_target_index(&policy, index);
-        cpumask_andnot(&mask, &mask, policy.cpus);
+        policy = cpufreq_cpu_get(cpu);
+        if (!policy)
+            continue;
+        index = cpufreq_table_find_index_c(policy, policy->cur);
+        powernv_cpufreq_target_index(policy, index);
+        cpumask_andnot(&mask, &mask, policy->cpus);
+        cpufreq_cpu_put(policy);
     }
 out:
     put_online_cpus();
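
The powernv fix swaps a stack copy of the policy for cpufreq_cpu_get(), which returns the live policy with a reference held, or NULL if the CPU has none; every successful get must be balanced by cpufreq_cpu_put(). The pattern, as a sketch:

#include <linux/cpufreq.h>

/* Sketch: read a CPU's current frequency under a policy reference. */
static unsigned int read_cur_freq(unsigned int cpu)
{
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
    unsigned int cur = 0;

    if (policy) {
        cur = policy->cur;
        cpufreq_cpu_put(policy);    /* drop the reference */
    }
    return cur;
}
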
@@ -2480,8 +2480,9 @@ int chcr_aead_dma_map(struct device *dev,
     else
         reqctx->b0_dma = 0;
     if (req->src == req->dst) {
-        error = dma_map_sg(dev, req->src, sg_nents(req->src),
-                   DMA_BIDIRECTIONAL);
+        error = dma_map_sg(dev, req->src,
+                   sg_nents_for_len(req->src, dst_size),
+                   DMA_BIDIRECTIONAL);
         if (!error)
             goto err;
     } else {
@@ -1437,7 +1437,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                       csk->wr_max_credits))
             sk->sk_write_space(sk);
 
-        if (copied >= target && !sk->sk_backlog.tail)
+        if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
             break;
 
         if (copied) {
@@ -1470,7 +1470,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                 break;
             }
         }
-        if (sk->sk_backlog.tail) {
+        if (READ_ONCE(sk->sk_backlog.tail)) {
             release_sock(sk);
             lock_sock(sk);
             chtls_cleanup_rbuf(sk, copied);
@@ -1615,7 +1615,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
                 break;
         }
 
-        if (sk->sk_backlog.tail) {
+        if (READ_ONCE(sk->sk_backlog.tail)) {
             /* Do not sleep, just process backlog. */
             release_sock(sk);
             lock_sock(sk);
@@ -1743,7 +1743,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                       csk->wr_max_credits))
             sk->sk_write_space(sk);
 
-        if (copied >= target && !sk->sk_backlog.tail)
+        if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
             break;
 
         if (copied) {
@@ -1774,7 +1774,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
             }
         }
 
-        if (sk->sk_backlog.tail) {
+        if (READ_ONCE(sk->sk_backlog.tail)) {
             release_sock(sk);
             lock_sock(sk);
             chtls_cleanup_rbuf(sk, copied);
@@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
 
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
         struct resource *res, int target_node, unsigned int align,
-        unsigned long pfn_flags)
+        unsigned long long pfn_flags)
 {
     struct dax_region *dax_region;
 
@@ -11,7 +11,7 @@ struct dax_region;
 void dax_region_put(struct dax_region *dax_region);
 struct dax_region *alloc_dax_region(struct device *parent, int region_id,
         struct resource *res, int target_node, unsigned int align,
-        unsigned long flags);
+        unsigned long long flags);
 
 enum dev_dax_subsys {
     DEV_DAX_BUS,
@@ -32,7 +32,7 @@ struct dax_region {
     struct device *dev;
     unsigned int align;
     struct resource res;
-    unsigned long pfn_flags;
+    unsigned long long pfn_flags;
 };
 
 /**
@@ -68,6 +68,8 @@
 
 #define KHZ 1000
 
+#define KHZ_MAX (ULONG_MAX / KHZ)
+
 /* Assume that the bus is saturated if the utilization is 25% */
 #define BUS_SATURATION_RATIO 25
 
@@ -169,7 +171,7 @@ struct tegra_actmon_emc_ratio {
 };
 
 static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
-    { 1400000, ULONG_MAX },
+    { 1400000, KHZ_MAX },
     { 1200000, 750000 },
     { 1100000, 600000 },
     { 1000000, 500000 },
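
The tegra30-devfreq table previously stored ULONG_MAX kHz, which wraps as soon as the value is scaled by KHZ; capping entries at ULONG_MAX / KHZ keeps any later multiplication in range. The arithmetic, as a sketch:

#include <linux/limits.h>

#define EX_KHZ      1000UL
#define EX_KHZ_MAX  (ULONG_MAX / EX_KHZ)

/* Sketch: a table entry capped so that (entry * KHZ) cannot wrap. */
static unsigned long to_hz(unsigned long khz)
{
    if (khz > EX_KHZ_MAX)   /* would overflow: saturate instead */
        khz = EX_KHZ_MAX;
    return khz * EX_KHZ;
}
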
@@ -59,6 +59,8 @@ static void dma_buf_release(struct dentry *dentry)
     struct dma_buf *dmabuf;
 
     dmabuf = dentry->d_fsdata;
+    if (unlikely(!dmabuf))
+        return;
 
     BUG_ON(dmabuf->vmapping_counter);
 
@@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
 }
 EXPORT_SYMBOL(dma_fence_free);
 
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+    bool was_set;
+
+    lockdep_assert_held(fence->lock);
+
+    was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+                   &fence->flags);
+
+    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+        return false;
+
+    if (!was_set && fence->ops->enable_signaling) {
+        trace_dma_fence_enable_signal(fence);
+
+        if (!fence->ops->enable_signaling(fence)) {
+            dma_fence_signal_locked(fence);
+            return false;
+        }
+    }
+
+    return true;
+}
+
 /**
  * dma_fence_enable_sw_signaling - enable signaling on fence
  * @fence: the fence to enable
@@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
     unsigned long flags;
 
-    if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-                  &fence->flags) &&
-        !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
-        fence->ops->enable_signaling) {
-        trace_dma_fence_enable_signal(fence);
+    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+        return;
 
-        spin_lock_irqsave(fence->lock, flags);
-
-        if (!fence->ops->enable_signaling(fence))
-            dma_fence_signal_locked(fence);
-
-        spin_unlock_irqrestore(fence->lock, flags);
-    }
+    spin_lock_irqsave(fence->lock, flags);
+    __dma_fence_enable_signaling(fence);
+    spin_unlock_irqrestore(fence->lock, flags);
 }
 EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 
@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 {
     unsigned long flags;
     int ret = 0;
-    bool was_set;
 
     if (WARN_ON(!fence || !func))
         return -EINVAL;
@@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 
     spin_lock_irqsave(fence->lock, flags);
 
-    was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-                   &fence->flags);
-
-    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-        ret = -ENOENT;
-    else if (!was_set && fence->ops->enable_signaling) {
-        trace_dma_fence_enable_signal(fence);
-
-        if (!fence->ops->enable_signaling(fence)) {
-            dma_fence_signal_locked(fence);
-            ret = -ENOENT;
-        }
-    }
-
-    if (!ret) {
+    if (__dma_fence_enable_signaling(fence)) {
         cb->func = func;
         list_add_tail(&cb->node, &fence->cb_list);
-    } else
+    } else {
         INIT_LIST_HEAD(&cb->node);
+        ret = -ENOENT;
+    }
 
     spin_unlock_irqrestore(fence->lock, flags);
 
     return ret;
@@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
     struct default_wait_cb cb;
     unsigned long flags;
     signed long ret = timeout ? timeout : 1;
-    bool was_set;
 
     if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
         return ret;
@@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
         goto out;
     }
 
-    was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-                   &fence->flags);
-
-    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+    if (!__dma_fence_enable_signaling(fence))
         goto out;
 
-    if (!was_set && fence->ops->enable_signaling) {
-        trace_dma_fence_enable_signal(fence);
-
-        if (!fence->ops->enable_signaling(fence)) {
-            dma_fence_signal_locked(fence);
-            goto out;
-        }
-    }
-
     if (!timeout) {
         ret = 0;
         goto out;
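
The dma-fence refactor hoists logic that was duplicated across three call sites into one helper that must run under fence->lock, with lockdep_assert_held() documenting and enforcing that contract; the callers shrink to lock, call, unlock. The structure, reduced to a generic sketch with illustrative names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Sketch: obj->lock is assumed initialised with spin_lock_init(). */
struct obj { spinlock_t lock; int armed; };

/* Shared logic factored out; the caller must hold obj->lock. */
static bool __obj_arm(struct obj *o)
{
    lockdep_assert_held(&o->lock);
    if (o->armed)
        return false;
    o->armed = 1;
    return true;
}

static void obj_arm(struct obj *o)
{
    unsigned long flags;

    spin_lock_irqsave(&o->lock, flags);
    __obj_arm(o);
    spin_unlock_irqrestore(&o->lock, flags);
}
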
@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
     if (err) {
         dev_err(&pdev->dev,
             "request_irq failed with err %d\n", err);
-        goto err_unregister;
+        goto err_free;
     }
 
     platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
 
     return 0;
 
+err_free:
+    of_dma_controller_free(pdev->dev.of_node);
 err_unregister:
     dma_async_device_unregister(dd);
 
@@ -488,8 +488,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
 
     spin_lock_irqsave(&chan->vchan.lock, flags);
 
-    if (chan->busy) {
-        stm32_dma_stop(chan);
+    if (chan->desc) {
+        vchan_terminate_vdesc(&chan->desc->vdesc);
+        if (chan->busy)
+            stm32_dma_stop(chan);
         chan->desc = NULL;
     }
 
@@ -545,6 +547,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
         if (!vdesc)
             return;
 
+        list_del(&vdesc->node);
+
         chan->desc = to_stm32_dma_desc(vdesc);
         chan->next_sg = 0;
     }
@@ -622,7 +626,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
     } else {
         chan->busy = false;
         if (chan->next_sg == chan->desc->num_sgs) {
-            list_del(&chan->desc->vdesc.node);
             vchan_cookie_complete(&chan->desc->vdesc);
             chan->desc = NULL;
         }
@@ -1127,6 +1127,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
         return;
     }
 
+    list_del(&vdesc->node);
+
     chan->desc = to_stm32_mdma_desc(vdesc);
     hwdesc = chan->desc->node[0].hwdesc;
     chan->curr_hwdesc = 0;
@@ -1242,8 +1244,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
     LIST_HEAD(head);
 
     spin_lock_irqsave(&chan->vchan.lock, flags);
-    if (chan->busy) {
-        stm32_mdma_stop(chan);
+    if (chan->desc) {
+        vchan_terminate_vdesc(&chan->desc->vdesc);
+        if (chan->busy)
+            stm32_mdma_stop(chan);
         chan->desc = NULL;
     }
     vchan_get_all_descriptors(&chan->vchan, &head);
@@ -1331,7 +1335,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
 
 static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
 {
-    list_del(&chan->desc->vdesc.node);
     vchan_cookie_complete(&chan->desc->vdesc);
     chan->desc = NULL;
     chan->busy = false;
@@ -1287,8 +1287,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
 
     dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
 
-    if (tdc->busy)
-        tegra_dma_terminate_all(dc);
+    tegra_dma_terminate_all(dc);
 
     spin_lock_irqsave(&tdc->lock, flags);
     list_splice_init(&tdc->pending_sg_req, &sg_req_list);
(Some files were not shown because too many files changed in this diff.)