This is the 5.4.198 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmKos2QACgkQONu9yGCS
aT4QYQ//WQD/rHjO021lbo/z4eZbWUxjDiQNisJQY4MTCnIJgPYROJ6YLBLL2+of
VwDdZ0yQNpf3hBA3qgTZ8RgaBinVf+WNAk37Ap/3VFXTExxgyGCx7p/PG+Jx9Jk4
qd9YPHZCu8g9rQjJoex95fd8Fedu47tzBSd88MoAKiLz90JsNbYUZb+gqdRrLAYc
6krd7zm7T8Grk31xUWOl/tlUSxveuUuz6QQr5mwPmSyspz4gQXsBlrKSrNSWmk0o
qtqgqUCypvpKTF7RYiEoS3F8wy4XvWpGsET+W79SJ84inVx3EMsZKXB9GsWVZZgI
fm3eFjn10NcgA+lvc7TJpwKg0f5g8uHW/06FcfYwgBhbI+otCFDLQkkHtViN0wY2
gks3PLPsYJdAZTlwIvjNY0XY7wRqjS7Ta1pf+d1po1EndEFAyH76KJaIGCzdVKb4
OeSEy4Xw8HxmuCO+mrUtRVRqV3Y7x88GuJC359iDKYdDpc+Z21FcvaVcgrR5cy2V
A7ICKIfNyArgNmWnXQ6UBXqS1rDcoyfJe+0CYyRRdgDO/ON48Mx8FIW9YJrSrMeS
XEx6cw6VKZ7hE1G71us/ITOOeUlHO93V7Ju+oOcx9Fgew8TZ0mdNMliOFUFaNWPb
iAG+zZD0jwP5iyx0KFfOJyyuoovEtjBh9ZgVIF5BP3Ry1xRHuHY=
=oE7B
-----END PGP SIGNATURE-----

Merge 5.4.198 into android11-5.4-lts

Changes in 5.4.198
    binfmt_flat: do not stop relocating GOT entries prematurely on riscv
    ALSA: hda/realtek - Fix microphone noise on ASUS TUF B550M-PLUS
    USB: serial: option: add Quectel BG95 modem
    USB: new quirk for Dell Gen 2 devices
    usb: core: hcd: Add support for deferring roothub registration
    perf/x86/intel: Fix event constraints for ICL
    ptrace/um: Replace PT_DTRACE with TIF_SINGLESTEP
    ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP
    ptrace: Reimplement PTRACE_KILL by always sending SIGKILL
    btrfs: add "0x" prefix for unsupported optional features
    btrfs: repair super block num_devices automatically
    drm/virtio: fix NULL pointer dereference in virtio_gpu_conn_get_modes
    mwifiex: add mutex lock for call in mwifiex_dfs_chan_sw_work_queue
    b43legacy: Fix assigning negative value to unsigned variable
    b43: Fix assigning negative value to unsigned variable
    ipw2x00: Fix potential NULL dereference in libipw_xmit()
    ipv6: fix locking issues with loops over idev->addr_list
    fbcon: Consistently protect deferred_takeover with console_lock()
    ACPICA: Avoid cache flush inside virtual machines
    drm/komeda: return early if drm_universal_plane_init() fails.
    ALSA: jack: Access input_dev under mutex
    spi: spi-rspi: Remove setting {src,dst}_{addr,addr_width} based on DMA direction
    tools/power turbostat: fix ICX DRAM power numbers
    drm/amd/pm: fix double free in si_parse_power_table()
    ath9k: fix QCA9561 PA bias level
    media: venus: hfi: avoid null dereference in deinit
    media: pci: cx23885: Fix the error handling in cx23885_initdev()
    media: cx25821: Fix the warning when removing the module
    md/bitmap: don't set sb values if can't pass sanity check
    mmc: jz4740: Apply DMA engine limits to maximum segment size
    scsi: megaraid: Fix error check return value of register_chrdev()
    drm/plane: Move range check for format_count earlier
    drm/amd/pm: fix the compile warning
    arm64: compat: Do not treat syscall number as ESR_ELx for a bad syscall
    drm: msm: fix error check return value of irq_of_parse_and_map()
    ipv6: Don't send rs packets to the interface of ARPHRD_TUNNEL
    net/mlx5: fs, delete the FTE when there are no rules attached to it
    ASoC: dapm: Don't fold register value changes into notifications
    mlxsw: spectrum_dcb: Do not warn about priority changes
    drm/amdgpu/ucode: Remove firmware load type check in amdgpu_ucode_free_bo
    HID: bigben: fix slab-out-of-bounds Write in bigben_probe
    ASoC: tscs454: Add endianness flag in snd_soc_component_driver
    s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES
    spi: stm32-qspi: Fix wait_cmd timeout in APM mode
    dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATIOMIC
    ACPI: PM: Block ASUS B1400CEAE from suspend to idle by default
    ipmi:ssif: Check for NULL msg when handling events and messages
    ipmi: Fix pr_fmt to avoid compilation issues
    rtlwifi: Use pr_warn instead of WARN_ONCE
    media: coda: limit frame interval enumeration to supported encoder frame sizes
    media: cec-adap.c: fix is_configuring state
    openrisc: start CPU timer early in boot
    nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags
    ASoC: rt5645: Fix errorenous cleanup order
    nbd: Fix hung on disconnect request if socket is closed before
    net: phy: micrel: Allow probing without .driver_data
    media: exynos4-is: Fix compile warning
    ASoC: max98357a: remove dependency on GPIOLIB
    hwmon: Make chip parameter for with_info API mandatory
    rxrpc: Return an error to sendmsg if call failed
    eth: tg3: silence the GCC 12 array-bounds warning
    selftests/bpf: fix btf_dump/btf_dump due to recent clang change
    IB/rdmavt: add missing locks in rvt_ruc_loopback
    ARM: dts: ox820: align interrupt controller node name with dtschema
    PM / devfreq: rk3399_dmc: Disable edev on remove()
    fs: jfs: fix possible NULL pointer dereference in dbFree()
    ARM: OMAP1: clock: Fix UART rate reporting algorithm
    powerpc/fadump: Fix fadump to work with a different endian capture kernel
    fat: add ratelimit to fat*_ent_bread()
    ARM: versatile: Add missing of_node_put in dcscb_init
    ARM: dts: exynos: add atmel,24c128 fallback to Samsung EEPROM
    ARM: hisi: Add missing of_node_put after of_find_compatible_node
    PCI: Avoid pci_dev_lock() AB/BA deadlock with sriov_numvfs_store()
    tracing: incorrect isolate_mote_t cast in mm_vmscan_lru_isolate
    powerpc/xics: fix refcount leak in icp_opal_init()
    powerpc/powernv: fix missing of_node_put in uv_init()
    macintosh/via-pmu: Fix build failure when CONFIG_INPUT is disabled
    powerpc/iommu: Add missing of_node_put in iommu_init_early_dart
    RDMA/hfi1: Prevent panic when SDMA is disabled
    drm: fix EDID struct for old ARM OABI format
    ath9k: fix ar9003_get_eepmisc
    drm/edid: fix invalid EDID extension block filtering
    drm/bridge: adv7511: clean up CEC adapter when probe fails
    ASoC: mediatek: Fix error handling in mt8173_max98090_dev_probe
    ASoC: mediatek: Fix missing of_node_put in mt2701_wm8960_machine_probe
    x86/delay: Fix the wrong asm constraint in delay_loop()
    drm/mediatek: Fix mtk_cec_mask()
    drm/vc4: txp: Don't set TXP_VSTART_AT_EOF
    drm/vc4: txp: Force alpha to be 0xff if it's disabled
    bpf: Fix excessive memory allocation in stack_map_alloc()
    nl80211: show SSID for P2P_GO interfaces
    drm/komeda: Fix an undefined behavior bug in komeda_plane_add()
    drm: mali-dp: potential dereference of null pointer
    spi: spi-ti-qspi: Fix return value handling of wait_for_completion_timeout
    NFC: NULL out the dev->rfkill to prevent UAF
    efi: Add missing prototype for efi_capsule_setup_info
    drbd: fix duplicate array initializer
    HID: hid-led: fix maximum brightness for Dream Cheeky
    HID: elan: Fix potential double free in elan_input_configured
    drm/bridge: Fix error handling in analogix_dp_probe
    sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq
    spi: img-spfi: Fix pm_runtime_get_sync() error checking
    cpufreq: Fix possible race in cpufreq online error path
    ath9k_htc: fix potential out of bounds access with invalid rxstatus->rs_keyix
    inotify: show inotify mask flags in proc fdinfo
    fsnotify: fix wrong lockdep annotations
    of: overlay: do not break notify on NOTIFY_{OK|STOP}
    scsi: ufs: core: Exclude UECxx from SFR dump list
    x86/pm: Fix false positive kmemleak report in msr_build_context()
    x86/speculation: Add missing prototype for unpriv_ebpf_notify()
    ASoC: rk3328: fix disabling mclk on pclk probe failure
    perf tools: Add missing headers needed by util/data.h
    drm/msm/disp/dpu1: set vbif hw config to NULL to avoid use after memory free during pm runtime resume
    drm/msm/dsi: fix error checks and return values for DSI xmit functions
    drm/msm/hdmi: check return value after calling platform_get_resource_byname()
    drm/msm/hdmi: fix error check return value of irq_of_parse_and_map()
    drm/rockchip: vop: fix possible null-ptr-deref in vop_bind()
    virtio_blk: fix the discard_granularity and discard_alignment queue limits
    x86: Fix return value of __setup handlers
    irqchip/exiu: Fix acknowledgment of edge triggered interrupts
    irqchip/aspeed-i2c-ic: Fix irq_of_parse_and_map() return value
    x86/mm: Cleanup the control_va_addr_alignment() __setup handler
    regulator: core: Fix enable_count imbalance with EXCLUSIVE_GET
    drm/msm/mdp5: Return error code in mdp5_pipe_release when deadlock is detected
    drm/msm/mdp5: Return error code in mdp5_mixer_release when deadlock is detected
    drm/msm: return an error pointer in msm_gem_prime_get_sg_table()
    media: uvcvideo: Fix missing check to determine if element is found in list
    iomap: iomap_write_failed fix
    Revert "cpufreq: Fix possible race in cpufreq online error path"
    perf/amd/ibs: Use interrupt regs ip for stack unwinding
    ASoC: fsl: Fix refcount leak in imx_sgtl5000_probe
    ASoC: mxs-saif: Fix refcount leak in mxs_saif_probe
    regulator: pfuze100: Fix refcount leak in pfuze_parse_regulators_dt
    scripts/faddr2line: Fix overlapping text section failures
    media: aspeed: Fix an error handling path in aspeed_video_probe()
    media: st-delta: Fix PM disable depth imbalance in delta_probe
    media: exynos4-is: Change clk_disable to clk_disable_unprepare
    media: pvrusb2: fix array-index-out-of-bounds in pvr2_i2c_core_init
    media: vsp1: Fix offset calculation for plane cropping
    Bluetooth: fix dangling sco_conn and use-after-free in sco_sock_timeout
    m68k: math-emu: Fix dependencies of math emulation support
    sctp: read sk->sk_bound_dev_if once in sctp_rcv()
    media: ov7670: remove ov7670_power_off from ov7670_remove
    ext4: reject the 'commit' option on ext2 filesystems
    drm/msm/a6xx: Fix refcount leak in a6xx_gpu_init
    drm: msm: fix possible memory leak in mdp5_crtc_cursor_set()
    thermal/drivers/broadcom: Fix potential NULL dereference in sr_thermal_probe
    ASoC: wm2000: fix missing clk_disable_unprepare() on error in wm2000_anc_transition()
    NFC: hci: fix sleep in atomic context bugs in nfc_hci_hcp_message_tx
    rxrpc: Fix listen() setting the bar too high for the prealloc rings
    rxrpc: Don't try to resend the request if we're receiving the reply
    rxrpc: Fix overlapping ACK accounting
    rxrpc: Don't let ack.previousPacket regress
    rxrpc: Fix decision on when to generate an IDLE ACK
    net/smc: postpone sk_refcnt increment in connect()
    arm64: dts: rockchip: Move drive-impedance-ohm to emmc phy on rk3399
    ARM: dts: suniv: F1C100: fix watchdog compatible
    soc: qcom: smp2p: Fix missing of_node_put() in smp2p_parse_ipc
    soc: qcom: smsm: Fix missing of_node_put() in smsm_parse_ipc
    PCI: cadence: Fix find_first_zero_bit() limit
    PCI: rockchip: Fix find_first_zero_bit() limit
    KVM: nVMX: Leave most VM-Exit info fields unmodified on failed VM-Entry
    can: xilinx_can: mark bit timing constants as const
    ARM: dts: bcm2835-rpi-zero-w: Fix GPIO line name for Wifi/BT
    ARM: dts: bcm2837-rpi-cm3-io3: Fix GPIO line names for SMPS I2C
    ARM: dts: bcm2837-rpi-3-b-plus: Fix GPIO line name of power LED
    ARM: dts: bcm2835-rpi-b: Fix GPIO line names
    misc: ocxl: fix possible double free in ocxl_file_register_afu
    crypto: marvell/cesa - ECB does not IV
    arm: mediatek: select arch timer for mt7629
    powerpc/fadump: fix PT_LOAD segment for boot memory area
    mfd: ipaq-micro: Fix error check return value of platform_get_irq()
    scsi: fcoe: Fix Wstringop-overflow warnings in fcoe_wwn_from_mac()
    firmware: arm_scmi: Fix list protocols enumeration in the base protocol
    nvdimm: Allow overwrite in the presence of disabled dimms
    pinctrl: mvebu: Fix irq_of_parse_and_map() return value
    drivers/base/node.c: fix compaction sysfs file leak
    dax: fix cache flush on PMD-mapped pages
    powerpc/8xx: export 'cpm_setbrg' for modules
    powerpc/idle: Fix return value of __setup() handler
    powerpc/4xx/cpm: Fix return value of __setup() handler
    proc: fix dentry/inode overinstantiating under /proc/${pid}/net
    ipc/mqueue: use get_tree_nodev() in mqueue_get_tree()
    PCI: imx6: Fix PERST# start-up sequence
    tty: fix deadlock caused by calling printk() under tty_port->lock
    crypto: cryptd - Protect per-CPU resource by disabling BH.
    Input: sparcspkr - fix refcount leak in bbc_beep_probe
    powerpc/64: Only WARN if __pa()/__va() called with bad addresses
    powerpc/perf: Fix the threshold compare group constraint for power9
    macintosh: via-pmu and via-cuda need RTC_LIB
    powerpc/fsl_rio: Fix refcount leak in fsl_rio_setup
    mfd: davinci_voicecodec: Fix possible null-ptr-deref davinci_vc_probe()
    mailbox: forward the hrtimer if not queued and under a lock
    RDMA/hfi1: Prevent use of lock before it is initialized
    Input: stmfts - do not leave device disabled in stmfts_input_open
    f2fs: fix dereference of stale list iterator after loop body
    iommu/mediatek: Add list_del in mtk_iommu_remove
    i2c: at91: use dma safe buffers
    i2c: at91: Initialize dma_buf in at91_twi_xfer()
    NFS: Do not report EINTR/ERESTARTSYS as mapping errors
    NFS: Do not report flush errors in nfs_write_end()
    NFS: Don't report errors from nfs_pageio_complete() more than once
    NFSv4/pNFS: Do not fail I/O when we fail to allocate the pNFS layout
    video: fbdev: clcdfb: Fix refcount leak in clcdfb_of_vram_setup
    dmaengine: stm32-mdma: remove GISR1 register
    iommu/amd: Increase timeout waiting for GA log enablement
    perf c2c: Use stdio interface if slang is not supported
    perf jevents: Fix event syntax error caused by ExtSel
    f2fs: fix to avoid f2fs_bug_on() in dec_valid_node_count()
    f2fs: fix to do sanity check on block address in f2fs_do_zero_range()
    f2fs: fix to clear dirty inode in f2fs_evict_inode()
    f2fs: fix deadloop in foreground GC
    f2fs: don't need inode lock for system hidden quota
    f2fs: fix fallocate to use file_modified to update permissions consistently
    wifi: mac80211: fix use-after-free in chanctx code
    iwlwifi: mvm: fix assert 1F04 upon reconfig
    fs-writeback: writeback_sb_inodes:Recalculate 'wrote' according skipped pages
    efi: Do not import certificates from UEFI Secure Boot for T2 Macs
    bfq: Split shared queues on move between cgroups
    bfq: Update cgroup information before merging bio
    bfq: Track whether bfq_group is still online
    netfilter: nf_tables: disallow non-stateful expression in sets earlier
    ext4: fix use-after-free in ext4_rename_dir_prepare
    ext4: fix warning in ext4_handle_inode_extension
    ext4: fix bug_on in ext4_writepages
    ext4: verify dir block before splitting it
    ext4: avoid cycles in directory h-tree
    ACPI: property: Release subnode properties with data nodes
    tracing: Fix potential double free in create_var_ref()
    PCI/PM: Fix bridge_d3_blacklist[] Elo i2 overwrite of Gigabyte X299
    PCI: qcom: Fix runtime PM imbalance on probe errors
    PCI: qcom: Fix unbalanced PHY init on probe errors
    mm, compaction: fast_find_migrateblock() should return pfn in the target zone
    dlm: fix plock invalid read
    dlm: fix missing lkb refcount handling
    ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock
    scsi: dc395x: Fix a missing check on list iterator
    scsi: ufs: qcom: Add a readl() to make sure ref_clk gets enabled
    drm/amdgpu/cs: make commands with 0 chunks illegal behaviour.
    drm/etnaviv: check for reaped mapping in etnaviv_iommu_unmap_gem
    drm/nouveau/clk: Fix an incorrect NULL check on list iterator
    drm/bridge: analogix_dp: Grab runtime PM reference for DP-AUX
    md: fix an incorrect NULL check in does_sb_need_changing
    md: fix an incorrect NULL check in md_reload_sb
    mtd: cfi_cmdset_0002: Move and rename chip_check/chip_ready/chip_good_for_write
    media: coda: Fix reported H264 profile
    media: coda: Add more H264 levels for CODA960
    Kconfig: Add option for asm goto w/ tied outputs to workaround clang-13 bug
    RDMA/hfi1: Fix potential integer multiplication overflow errors
    irqchip/armada-370-xp: Do not touch Performance Counter Overflow on A375, A38x, A39x
    irqchip: irq-xtensa-mx: fix initial IRQ affinity
    mac80211: upgrade passive scan to active scan on DFS channels after beacon rx
    um: chan_user: Fix winch_tramp() return value
    um: Fix out-of-bounds read in LDT setup
    iommu/msm: Fix an incorrect NULL check on list iterator
    nodemask.h: fix compilation error with GCC12
    hugetlb: fix huge_pmd_unshare address update
    rtl818x: Prevent using not initialized queues
    ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control
    carl9170: tx: fix an incorrect use of list iterator
    serial: pch: don't overwrite xmit->buf[0] by x_char
    tilcdc: tilcdc_external: fix an incorrect NULL check on list iterator
    gma500: fix an incorrect NULL check on list iterator
    arm64: dts: qcom: ipq8074: fix the sleep clock frequency
    phy: qcom-qmp: fix struct clk leak on probe errors
    ARM: pxa: maybe fix gpio lookup tables
    docs/conf.py: Cope with removal of language=None in Sphinx 5.0.0
    dt-bindings: gpio: altera: correct interrupt-cells
    blk-iolatency: Fix inflight count imbalances and IO hangs on offline
    phy: qcom-qmp: fix reset-controller leak on probe errors
    Kconfig: add config option for asm goto w/ outputs
    RDMA/rxe: Generate a completion for unsupported/invalid opcode
    MIPS: IP27: Remove incorrect `cpu_has_fpu' override
    bfq: Avoid merging queues with different parents
    bfq: Drop pointless unlock-lock pair
    bfq: Remove pointless bfq_init_rq() calls
    bfq: Get rid of __bio_blkcg() usage
    bfq: Make sure bfqg for which we are queueing requests is online
    block: fix bio_clone_blkg_association() to associate with proper blkcg_gq
    md: bcache: check the return value of kzalloc() in detached_dev_do_request()
    pcmcia: db1xxx_ss: restrict to MIPS_DB1XXX boards
    staging: greybus: codecs: fix type confusion of list iterator variable
    iio: adc: ad7124: Remove shift from scan_type
    tty: goldfish: Use tty_port_destroy() to destroy port
    tty: serial: owl: Fix missing clk_disable_unprepare() in owl_uart_probe
    tty: serial: fsl_lpuart: fix potential bug when using both of_alias_get_id and ida_simple_get
    usb: usbip: fix a refcount leak in stub_probe()
    usb: usbip: add missing device lock on tweak configuration cmd
    USB: storage: karma: fix rio_karma_init return
    usb: musb: Fix missing of_node_put() in omap2430_probe
    staging: fieldbus: Fix the error handling path in anybuss_host_common_probe()
    pwm: lp3943: Fix duty calculation in case period was clamped
    rpmsg: qcom_smd: Fix irq_of_parse_and_map() return value
    usb: dwc3: pci: Fix pm_runtime_get_sync() error checking
    firmware: stratix10-svc: fix a missing check on list iterator
    iio: adc: stmpe-adc: Fix wait_for_completion_timeout return value check
    iio: adc: sc27xx: fix read big scale voltage not right
    iio: adc: sc27xx: Fine tune the scale calibration values
    rpmsg: qcom_smd: Fix returning 0 if irq_of_parse_and_map() fails
    phy: qcom-qmp: fix pipe-clock imbalance on power-on failure
    serial: sifive: Report actual baud base rather than fixed 115200
    coresight: cpu-debug: Replace mutex with mutex_trylock on panic notifier
    soc: rockchip: Fix refcount leak in rockchip_grf_init
    clocksource/drivers/riscv: Events are stopped during CPU suspend
    rtc: mt6397: check return value after calling platform_get_resource()
    serial: meson: acquire port->lock in startup()
    serial: 8250_fintek: Check SER_RS485_RTS_* only with RS485
    serial: digicolor-usart: Don't allow CS5-6
    serial: rda-uart: Don't allow CS5-6
    serial: txx9: Don't allow CS5-6
    serial: sh-sci: Don't allow CS5-6
    serial: sifive: Sanitize CSIZE and c_iflag
    serial: st-asc: Sanitize CSIZE and correct PARENB for CS7
    serial: stm32-usart: Correct CSIZE, bits, and parity
    firmware: dmi-sysfs: Fix memory leak in dmi_sysfs_register_handle
    bus: ti-sysc: Fix warnings for unbind for serial
    driver: base: fix UAF when driver_attach failed
    driver core: fix deadlock in __device_attach
    watchdog: ts4800_wdt: Fix refcount leak in ts4800_wdt_probe
    ASoC: fsl_sai: Fix FSL_SAI_xDR/xFR definition
    clocksource/drivers/oxnas-rps: Fix irq_of_parse_and_map() return value
    s390/crypto: fix scatterwalk_unmap() callers in AES-GCM
    net: sched: fixed barrier to prevent skbuff sticking in qdisc backlog
    net: ethernet: mtk_eth_soc: out of bounds read in mtk_hwlro_get_fdir_entry()
    net: dsa: mv88e6xxx: Fix refcount leak in mv88e6xxx_mdios_register
    modpost: fix removing numeric suffixes
    jffs2: fix memory leak in jffs2_do_fill_super
    ubi: ubi_create_volume: Fix use-after-free when volume creation failed
    nfp: only report pause frame configuration for physical device
    net/mlx5: Don't use already freed action pointer
    net/mlx5e: Update netdev features after changing XDP state
    net: sched: add barrier to fix packet stuck problem for lockless qdisc
    tcp: tcp_rtx_synack() can be called from process context
    afs: Fix infinite loop found by xfstest generic/676
    tipc: check attribute length for bearer name
    perf c2c: Fix sorting in percent_rmt_hitm_cmp()
    mips: cpc: Fix refcount leak in mips_cpc_default_phys_base
    tracing: Fix sleeping function called from invalid context on RT kernel
    tracing: Avoid adding tracer option before update_tracer_options
    f2fs: remove WARN_ON in f2fs_is_valid_blkaddr
    i2c: cadence: Increase timeout per message if necessary
    m68knommu: set ZERO_PAGE() to the allocated zeroed page
    m68knommu: fix undefined reference to `_init_sp'
    dmaengine: zynqmp_dma: In struct zynqmp_dma_chan fix desc_size data type
    NFSv4: Don't hold the layoutget locks across multiple RPC calls
    video: fbdev: pxa3xx-gcu: release the resources correctly in pxa3xx_gcu_probe/remove()
    xprtrdma: treat all calls not a bcall when bc_serv is NULL
    netfilter: nat: really support inet nat without l3 address
    ata: pata_octeon_cf: Fix refcount leak in octeon_cf_probe
    netfilter: nf_tables: memleak flow rule from commit path
    xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
    af_unix: Fix a data-race in unix_dgram_peer_wake_me().
    bpf, arm64: Clear prog->jited_len along prog->jited
    net: dsa: lantiq_gswip: Fix refcount leak in gswip_gphy_fw_list
    net/mlx4_en: Fix wrong return value on ioctl EEPROM query failure
    SUNRPC: Fix the calculation of xdr->end in xdr_get_next_encode_buffer()
    net: mdio: unexport __init-annotated mdio_bus_init()
    net: xfrm: unexport __init-annotated xfrm4_protocol_init()
    net: ipv6: unexport __init-annotated seg6_hmac_init()
    net/mlx5: Rearm the FW tracer after each tracer event
    net/mlx5: fs, fail conflicting actions
    ip_gre: test csum_start instead of transport header
    net: altera: Fix refcount leak in altera_tse_mdio_create
    drm: imx: fix compiler warning with gcc-12
    iio: dummy: iio_simple_dummy: check the return value of kstrdup()
    iio: st_sensors: Add a local lock for protecting odr
    lkdtm/usercopy: Expand size of "out of frame" object
    tty: synclink_gt: Fix null-pointer-dereference in slgt_clean()
    tty: Fix a possible resource leak in icom_probe
    drivers: staging: rtl8192u: Fix deadlock in ieee80211_beacons_stop()
    drivers: staging: rtl8192e: Fix deadlock in rtllib_beacons_stop()
    USB: host: isp116x: check return value after calling platform_get_resource()
    drivers: tty: serial: Fix deadlock in sa1100_set_termios()
    drivers: usb: host: Fix deadlock in oxu_bus_suspend()
    USB: hcd-pci: Fully suspend across freeze/thaw cycle
    usb: dwc2: gadget: don't reset gadget's driver->bus
    misc: rtsx: set NULL intfdata when probe fails
    extcon: Modify extcon device to be created after driver data is set
    clocksource/drivers/sp804: Avoid error on multiple instances
    staging: rtl8712: fix uninit-value in usb_read8() and friends
    staging: rtl8712: fix uninit-value in r871xu_drv_init()
    serial: msm_serial: disable interrupts in __msm_console_write()
    kernfs: Separate kernfs_pr_cont_buf and rename_lock.
    watchdog: wdat_wdt: Stop watchdog when rebooting the system
    md: protect md_unregister_thread from reentrancy
    scsi: myrb: Fix up null pointer access on myrb_cleanup()
    Revert "net: af_key: add check for pfkey_broadcast in function pfkey_process"
    ceph: allow ceph.dir.rctime xattr to be updatable
    drm/radeon: fix a possible null pointer dereference
    modpost: fix undefined behavior of is_arm_mapping_symbol()
    x86/cpu: Elide KCSAN for cpu_has() and friends
    nbd: call genl_unregister_family() first in nbd_cleanup()
    nbd: fix race between nbd_alloc_config() and module removal
    nbd: fix io hung while disconnecting device
    s390/gmap: voluntarily schedule during key setting
    cifs: version operations for smb20 unneeded when legacy support disabled
    nodemask: Fix return values to be unsigned
    vringh: Fix loop descriptors check in the indirect cases
    scripts/gdb: change kernel config dumping method
    ALSA: hda/conexant - Fix loopback issue with CX20632
    cifs: return errors during session setup during reconnects
    ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files
    mmc: block: Fix CQE recovery reset success
    nfc: st21nfca: fix incorrect validating logic in EVT_TRANSACTION
    nfc: st21nfca: fix memory leaks in EVT_TRANSACTION handling
    ixgbe: fix bcast packets Rx on VF after promisc removal
    ixgbe: fix unexpected VLAN Rx in promisc mode on VF
    Input: bcm5974 - set missing URB_NO_TRANSFER_DMA_MAP urb flag
    powerpc/32: Fix overread/overwrite of thread_struct via ptrace
    md/raid0: Ignore RAID0 layout if the second zone has only one device
    mtd: cfi_cmdset_0002: Use chip_ready() for write on S29GL064N
    tcp: fix tcp_mtup_probe_success vs wrong snd_cwnd
    Linux 5.4.198

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I05615e33dbd0029f93c9724c9abc9cb9035122d2
commit a778a36923
@@ -107,13 +107,14 @@ Description:
 		described in ATA8 7.16 and 7.17. Only valid if
 		the device is not a PM.
 
-		pio_mode:  (RO) Transfer modes supported by the device when
-		           in PIO mode. Mostly used by PATA device.
+		pio_mode:  (RO) PIO transfer mode used by the device.
+		           Mostly used by PATA devices.
 
-		xfer_mode: (RO) Current transfer mode
+		xfer_mode: (RO) Current transfer mode. Mostly used by
+		           PATA devices.
 
-		dma_mode:  (RO) Transfer modes supported by the device when
-		           in DMA mode. Mostly used by PATA device.
+		dma_mode:  (RO) DMA transfer mode used by the device.
+		           Mostly used by PATA devices.
 
 		class:     (RO) Device class. Can be "ata" for disk,
 		           "atapi" for packet device, "pmp" for PM, or

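These attributes are plain sysfs files, so the renamed semantics are easy to check from userspace. A minimal sketch in C — the /sys/class/ata_device/dev1.0 path is an assumption for illustration and is not guaranteed to exist on a given machine:

    /* Dump the mode-reporting ATA sysfs attributes for one device. */
    #include <stdio.h>

    int main(void)
    {
        const char *attrs[] = { "pio_mode", "dma_mode", "xfer_mode" };
        char path[128], buf[64];

        for (int i = 0; i < 3; i++) {
            snprintf(path, sizeof(path),
                     "/sys/class/ata_device/dev1.0/%s", attrs[i]);
            FILE *f = fopen(path, "r");
            if (!f)
                continue; /* attribute absent or no such device */
            if (fgets(buf, sizeof(buf), f))
                printf("%s: %s", attrs[i], buf);
            fclose(f);
        }
        return 0;
    }
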
@@ -98,7 +98,7 @@ finally:
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:

@@ -9,8 +9,9 @@ Required properties:
   - The second cell is reserved and is currently unused.
 - gpio-controller : Marks the device node as a GPIO controller.
 - interrupt-controller: Mark the device node as an interrupt controller
-- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
+- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
   - The first cell is the GPIO offset number within the GPIO controller.
+  - The second cell is the interrupt trigger type and level flags.
 - interrupts: Specify the interrupt.
 - altr,interrupt-type: Specifies the interrupt trigger type the GPIO
   hardware is synthesized. This field is required if the Altera GPIO controller

@@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 {
 	altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
 	#gpio-cells = <2>;
 	gpio-controller;
-	#interrupt-cells = <1>;
+	#interrupt-cells = <2>;
 	interrupt-controller;
 };

@@ -72,7 +72,7 @@ hwmon_device_register_with_info is the most comprehensive and preferred means
 to register a hardware monitoring device. It creates the standard sysfs
 attributes in the hardware monitoring core, letting the driver focus on reading
 from and writing to the chip instead of having to bother with sysfs attributes.
-The parent device parameter cannot be NULL with non-NULL chip info. Its
+The parent device parameter as well as the chip parameter must not be NULL. Its
 parameters are described in more detail below.
 
 devm_hwmon_device_register_with_info is similar to

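To make the amended sentence concrete: with this change a driver must pass both a non-NULL parent device and a non-NULL chip description to (devm_)hwmon_device_register_with_info(). A minimal sketch against the in-kernel API — the "foo" names and the constant reading are hypothetical, not a real driver:

    #include <linux/hwmon.h>

    static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
                                  u32 attr, int channel)
    {
        return 0444; /* everything read-only */
    }

    static int foo_read(struct device *dev, enum hwmon_sensor_types type,
                        u32 attr, int channel, long *val)
    {
        *val = 42000; /* temp1_input in millidegrees Celsius */
        return 0;
    }

    static const struct hwmon_ops foo_ops = {
        .is_visible = foo_is_visible,
        .read = foo_read,
    };

    static const struct hwmon_channel_info *foo_channel_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
        NULL
    };

    static const struct hwmon_chip_info foo_chip = {
        .ops = &foo_ops,
        .info = foo_channel_info,
    };

    /* In probe(): dev and &foo_chip must both be non-NULL now. */
    /* hwmon = devm_hwmon_device_register_with_info(dev, "foo", NULL,
     *                                              &foo_chip, NULL); */
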
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 197
+SUBLEVEL = 198
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 

@@ -53,18 +53,17 @@
 			  "GPIO18",
 			  "NC", /* GPIO19 */
 			  "NC", /* GPIO20 */
-			  "GPIO21",
+			  "CAM_GPIO0",
 			  "GPIO22",
 			  "GPIO23",
 			  "GPIO24",
 			  "GPIO25",
 			  "NC", /* GPIO26 */
-			  "CAM_GPIO0",
-			  /* Binary number representing build/revision */
-			  "CONFIG0",
-			  "CONFIG1",
-			  "CONFIG2",
-			  "CONFIG3",
+			  "GPIO27",
+			  "GPIO28",
+			  "GPIO29",
+			  "GPIO30",
+			  "GPIO31",
 			  "NC", /* GPIO32 */
 			  "NC", /* GPIO33 */
 			  "NC", /* GPIO34 */

@@ -74,16 +74,18 @@
 			  "GPIO27",
 			  "SDA0",
 			  "SCL0",
-			  "NC", /* GPIO30 */
-			  "NC", /* GPIO31 */
-			  "NC", /* GPIO32 */
-			  "NC", /* GPIO33 */
-			  "NC", /* GPIO34 */
-			  "NC", /* GPIO35 */
-			  "NC", /* GPIO36 */
-			  "NC", /* GPIO37 */
-			  "NC", /* GPIO38 */
-			  "NC", /* GPIO39 */
+			  /* Used by BT module */
+			  "CTS0",
+			  "RTS0",
+			  "TXD0",
+			  "RXD0",
+			  /* Used by Wifi */
+			  "SD1_CLK",
+			  "SD1_CMD",
+			  "SD1_DATA0",
+			  "SD1_DATA1",
+			  "SD1_DATA2",
+			  "SD1_DATA3",
 			  "CAM_GPIO1", /* GPIO40 */
 			  "WL_ON", /* GPIO41 */
 			  "NC", /* GPIO42 */

@@ -43,7 +43,7 @@
 	#gpio-cells = <2>;
 	gpio-line-names = "BT_ON",
 			  "WL_ON",
-			  "STATUS_LED_R",
+			  "PWR_LED_R",
 			  "LAN_RUN",
 			  "",
 			  "CAM_GPIO0",

@@ -63,8 +63,8 @@
 			  "GPIO43",
 			  "GPIO44",
 			  "GPIO45",
-			  "GPIO46",
-			  "GPIO47",
+			  "SMPS_SCL",
+			  "SMPS_SDA",
 			  /* Used by eMMC */
 			  "SD_CLK_R",
 			  "SD_CMD_R",

@@ -128,7 +128,7 @@
 	samsung,i2c-max-bus-freq = <20000>;
 
 	eeprom@50 {
-		compatible = "samsung,s524ad0xd1";
+		compatible = "samsung,s524ad0xd1", "atmel,24c128";
 		reg = <0x50>;
 	};
 
@@ -287,7 +287,7 @@
 	samsung,i2c-max-bus-freq = <20000>;
 
 	eeprom@51 {
-		compatible = "samsung,s524ad0xd1";
+		compatible = "samsung,s524ad0xd1", "atmel,24c128";
 		reg = <0x51>;
 	};
 

@@ -287,7 +287,7 @@
 		clocks = <&armclk>;
 	};
 
-	gic: gic@1000 {
+	gic: interrupt-controller@1000 {
 		compatible = "arm,arm11mp-gic";
 		interrupt-controller;
 		#interrupt-cells = <3>;

@@ -104,8 +104,10 @@
 
 		wdt: watchdog@1c20ca0 {
 			compatible = "allwinner,suniv-f1c100s-wdt",
-				     "allwinner,sun4i-a10-wdt";
+				     "allwinner,sun6i-a31-wdt";
 			reg = <0x01c20ca0 0x20>;
+			interrupts = <16>;
+			clocks = <&osc32k>;
 		};
 
 		uart0: serial@1c25000 {

@@ -67,14 +67,17 @@ static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus)
 		}
 		ctrl_base = of_iomap(np, 0);
 		if (!ctrl_base) {
+			of_node_put(np);
 			pr_err("failed to map address\n");
 			return;
 		}
 		if (of_property_read_u32(np, "smp-offset", &offset) < 0) {
+			of_node_put(np);
 			pr_err("failed to find smp-offset property\n");
 			return;
 		}
 		ctrl_base += offset;
+		of_node_put(np);
 	}
 }
 
@@ -160,6 +163,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	if (WARN_ON(!node))
 		return -1;
 	ctrl_base = of_iomap(node, 0);
+	of_node_put(node);
 
 	/* set the secondary core boot from DDR */
 	remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL);

@@ -30,6 +30,7 @@ config MACH_MT7623
 config MACH_MT7629
 	bool "MediaTek MT7629 SoCs support"
 	default ARCH_MEDIATEK
+	select HAVE_ARM_ARCH_TIMER
 
 config MACH_MT8127
 	bool "MediaTek MT8127 SoCs support"

@@ -41,7 +41,7 @@ static DEFINE_SPINLOCK(clockfw_lock);
 unsigned long omap1_uart_recalc(struct clk *clk)
 {
 	unsigned int val = __raw_readl(clk->enable_reg);
-	return val & clk->enable_bit ? 48000000 : 12000000;
+	return val & 1 << clk->enable_bit ? 48000000 : 12000000;
 }
 
 unsigned long omap1_sossi_recalc(struct clk *clk)

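The one-liner above is easy to misread: clk->enable_bit holds a bit number, not a ready-made mask, so the old code tested the wrong bits. A standalone illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x04;        /* register contents */
        unsigned int enable_bit = 2;    /* a bit number, not a mask */

        printf("wrong: %u\n", val & enable_bit);         /* 0x04 & 0x02 -> 0 */
        printf("right: %u\n", val & (1u << enable_bit)); /* 0x04 & 0x04 -> 4 */
        return 0;
    }
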
@@ -355,13 +355,13 @@ static struct platform_device cm_x300_spi_gpio = {
 static struct gpiod_lookup_table cm_x300_spi_gpiod_table = {
 	.dev_id = "spi_gpio",
 	.table = {
-		GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL,
+		GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE,
 			    "sck", GPIO_ACTIVE_HIGH),
-		GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN,
+		GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE,
 			    "mosi", GPIO_ACTIVE_HIGH),
-		GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT,
+		GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE,
 			    "miso", GPIO_ACTIVE_HIGH),
-		GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS,
+		GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE,
 			    "cs", GPIO_ACTIVE_HIGH),
 		{ },
 	},

@@ -675,7 +675,7 @@ static struct platform_device bq24022 = {
 static struct gpiod_lookup_table bq24022_gpiod_table = {
 	.dev_id = "gpio-regulator",
 	.table = {
-		GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
+		GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE,
 			    NULL, GPIO_ACTIVE_HIGH),
 		GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
 			    "enable", GPIO_ACTIVE_LOW),

@@ -295,9 +295,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = {
 	.table = {
 		GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT,
 			    "cd", GPIO_ACTIVE_LOW),
-		GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP,
+		GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE,
 			    "wp", GPIO_ACTIVE_LOW),
-		GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON,
+		GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE,
 			    "power", GPIO_ACTIVE_HIGH),
 		{ },
 	},

@@ -143,6 +143,7 @@ static int __init dcscb_init(void)
 	if (!node)
 		return -ENODEV;
 	dcscb_base = of_iomap(node, 0);
+	of_node_put(node);
 	if (!dcscb_base)
 		return -EADDRNOTAVAIL;
 	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);

@@ -225,6 +225,7 @@ config ARCH_STRATIX10
 
 config ARCH_SYNQUACER
 	bool "Socionext SynQuacer SoC Family"
+	select IRQ_FASTEOI_HIERARCHY_HANDLERS
 
 config ARCH_TEGRA
 	bool "NVIDIA Tegra SoC Family"

@@ -482,7 +482,7 @@
 	clocks {
 		sleep_clk: sleep_clk {
 			compatible = "fixed-clock";
-			clock-frequency = <32000>;
+			clock-frequency = <32768>;
 			#clock-cells = <0>;
 		};
 

@@ -1447,6 +1447,7 @@
 		reg = <0xf780 0x24>;
 		clocks = <&sdhci>;
 		clock-names = "emmcclk";
+		drive-impedance-ohm = <50>;
 		#phy-cells = <0>;
 		status = "disabled";
 	};
@@ -1457,7 +1458,6 @@
 		clock-names = "refclk";
 		#phy-cells = <1>;
 		resets = <&cru SRST_PCIEPHY>;
-		drive-impedance-ohm = <50>;
 		reset-names = "phy";
 		status = "disabled";
 	};

@@ -115,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno)
 		 (compat_thumb_mode(regs) ? 2 : 4);
 
 	arm64_notify_die("Oops - bad compat syscall(2)", regs,
-			 SIGILL, ILL_ILLTRP, addr, scno);
+			 SIGILL, ILL_ILLTRP, addr, 0);
 	return 0;
 }

@@ -973,6 +973,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 			bpf_jit_binary_free(header);
 			prog->bpf_func = NULL;
 			prog->jited = 0;
+			prog->jited_len = 0;
 			goto out_off;
 		}
 		bpf_jit_binary_lock_ro(header);

@@ -309,7 +309,7 @@ comment "Processor Specific Options"
 
 config M68KFPU_EMU
 	bool "Math emulation support"
-	depends on MMU
+	depends on M68KCLASSIC && FPU
 	help
 	  At some point in the future, this will cause floating-point math
 	  instructions to be emulated by the kernel on machines that lack a

@@ -320,6 +320,7 @@ comment "Machine Options"
 
 config UBOOT
 	bool "Support for U-Boot command line parameters"
+	depends on COLDFIRE
 	help
 	  If you say Y here kernel will try to collect command
 	  line parameters from the initial u-boot stack.

@@ -42,7 +42,8 @@ extern void paging_init(void);
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-#define ZERO_PAGE(vaddr)	(virt_to_page(0))
+extern void *empty_zero_page;
+#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...

@@ -28,7 +28,6 @@
 #define cpu_has_6k_cache	0
 #define cpu_has_8k_cache	0
 #define cpu_has_tx39_cache	0
-#define cpu_has_fpu		1
 #define cpu_has_nofpuex		0
 #define cpu_has_32fpr		1
 #define cpu_has_counter		1

@@ -27,6 +27,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
 	cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
 	if (cpc_node) {
 		err = of_address_to_resource(cpc_node, 0, &res);
+		of_node_put(cpc_node);
 		if (!err)
 			return res.start;
 	}

@@ -23,6 +23,7 @@ static inline cycles_t get_cycles(void)
 {
 	return mfspr(SPR_TTCR);
 }
+#define get_cycles get_cycles
 
 /* This isn't really used any more */
 #define CLOCK_TICK_RATE 1000

@@ -521,6 +521,15 @@ _start:
 	l.ori	r3,r0,0x1
 	l.mtspr	r0,r3,SPR_SR
 
+	/*
+	 * Start the TTCR as early as possible, so that the RNG can make use of
+	 * measurements of boot time from the earliest opportunity. Especially
+	 * important is that the TTCR does not return zero by the time we reach
+	 * rand_initialize().
+	 */
+	l.movhi	r3,hi(SPR_TTMR_CR)
+	l.mtspr	r0,r3,SPR_TTMR
+
 	CLEAR_GPR(r1)
 	CLEAR_GPR(r2)
 	CLEAR_GPR(r3)

@@ -216,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn)
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
 #ifdef CONFIG_PPC64
+
+#define VIRTUAL_WARN_ON(x)	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
+
 /*
  * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
  * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
@@ -223,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn)
  */
 #define __va(x) \
 ({ \
-	VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
+	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \
 	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
 })
 
 #define __pa(x) \
 ({ \
-	VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
+	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \
 	(unsigned long)(x) & 0x0fffffffffffffffUL; \
 })
 

@@ -835,7 +835,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
 					   sizeof(struct fadump_memory_range));
 	return 0;
 }
-
 static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
 					u64 base, u64 end)
 {
@@ -854,7 +853,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
 		start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
 		size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
 
-		if ((start + size) == base)
+		/*
+		 * Boot memory area needs separate PT_LOAD segment(s) as it
+		 * is moved to a different location at the time of crash.
+		 * So, fold only if the region is not boot memory area.
+		 */
+		if ((start + size) == base && start >= fw_dump.boot_mem_top)
 			is_adjacent = true;
 	}
 	if (!is_adjacent) {

@@ -37,7 +37,7 @@ static int __init powersave_off(char *arg)
 {
 	ppc_md.power_save = NULL;
 	cpuidle_disable = IDLE_POWERSAVE_OFF;
-	return 0;
+	return 1;
 }
 __setup("powersave=off", powersave_off);
 

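This fix (and the matching 4xx/cpm one further down) restores the __setup() convention: the handler returns 1 once it has consumed the option, while returning 0 makes the kernel treat "powersave=off" as unrecognized and forward it to init. A sketch of the convention with a hypothetical option name:

    #include <linux/init.h>
    #include <linux/types.h>

    static bool example_disabled;

    static int __init example_off(char *arg)
    {
        example_disabled = true;
        return 1; /* consumed: don't pass "example=off" on to init */
    }
    __setup("example=off", example_off);
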
@@ -3014,8 +3014,13 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
-				       sizeof(long));
+				if (IS_ENABLED(CONFIG_PPC32)) {
+					// On 32-bit the index we are passed refers to 32-bit words
+					tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
+				} else {
+					memcpy(&tmp, &child->thread.TS_FPR(fpidx),
+					       sizeof(long));
+				}
 			else
 				tmp = child->thread.fp_state.fpscr;
 		}
@@ -3047,8 +3052,13 @@ long arch_ptrace(struct task_struct *child, long request,
 
 			flush_fp_to_thread(child);
 			if (fpidx < (PT_FPSCR - PT_FPR0))
-				memcpy(&child->thread.TS_FPR(fpidx), &data,
-				       sizeof(long));
+				if (IS_ENABLED(CONFIG_PPC32)) {
+					// On 32-bit the index we are passed refers to 32-bit words
+					((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
+				} else {
+					memcpy(&child->thread.TS_FPR(fpidx), &data,
+					       sizeof(long));
+				}
 			else
 				child->thread.fp_state.fpscr = data;
 			ret = 0;
@@ -3398,4 +3408,7 @@ void __init pt_regs_check(void)
 		     offsetof(struct user_pt_regs, result));
 
 	BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
+
+	// ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
 }

@@ -326,7 +326,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
 			mask  |= CNST_THRESH_MASK;
 			value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
-		}
+		} else if (event_is_threshold(event))
+			return -1;
 	} else {
 		/*
 		 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,

@@ -327,6 +327,6 @@ late_initcall(cpm_init);
 static int __init cpm_powersave_off(char *arg)
 {
 	cpm.powersave_off = 1;
-	return 0;
+	return 1;
 }
 __setup("powersave=off", cpm_powersave_off);

@@ -292,6 +292,7 @@ cpm_setbrg(uint brg, uint rate)
 		out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
 			      CPM_BRG_EN | CPM_BRG_DIV16);
 }
+EXPORT_SYMBOL(cpm_setbrg);
 
 struct cpm_ioport16 {
 	__be16 dir, par, odr_sor, dat, intr;

@@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
 	addr = be64_to_cpu(addr);
 	pr_debug("Kernel metadata addr: %llx\n", addr);
 	opal_fdm_active = (void *)addr;
-	if (opal_fdm_active->registered_regions == 0)
+	if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
 		return;
 
 	ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
@@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf);
 static void opal_fadump_update_config(struct fw_dump *fadump_conf,
 				      const struct opal_fadump_mem_struct *fdm)
 {
-	pr_debug("Boot memory regions count: %d\n", fdm->region_cnt);
+	pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
 
 	/*
 	 * The destination address of the first boot memory region is the
 	 * destination address of boot memory regions.
 	 */
-	fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest;
+	fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
 	pr_debug("Destination address of boot memory regions: %#016llx\n",
 		 fadump_conf->boot_mem_dest_addr);
 
-	fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr;
+	fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
 }
 
 /*
@@ -126,9 +126,9 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
 	fadump_conf->boot_memory_size = 0;
 
 	pr_debug("Boot memory regions:\n");
-	for (i = 0; i < fdm->region_cnt; i++) {
-		base = fdm->rgn[i].src;
-		size = fdm->rgn[i].size;
+	for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
+		base = be64_to_cpu(fdm->rgn[i].src);
+		size = be64_to_cpu(fdm->rgn[i].size);
 		pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
 
 		fadump_conf->boot_mem_addr[i] = base;
@@ -143,7 +143,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
 	 * Start address of reserve dump area (permanent reservation) for
 	 * re-registering FADump after dump capture.
 	 */
-	fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest;
+	fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
 
 	/*
 	 * Rarely, but it can so happen that system crashes before all
@@ -155,13 +155,14 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
 	 * Hope the memory that could not be preserved only has pages
 	 * that are usually filtered out while saving the vmcore.
 	 */
-	if (fdm->region_cnt > fdm->registered_regions) {
+	if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
 		pr_warn("Not all memory regions were saved!!!\n");
 		pr_warn(" Unsaved memory regions:\n");
-		i = fdm->registered_regions;
-		while (i < fdm->region_cnt) {
+		i = be16_to_cpu(fdm->registered_regions);
+		while (i < be16_to_cpu(fdm->region_cnt)) {
 			pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
-				i, fdm->rgn[i].src, fdm->rgn[i].size);
+				i, be64_to_cpu(fdm->rgn[i].src),
+				be64_to_cpu(fdm->rgn[i].size));
 			i++;
 		}
 
@@ -170,7 +171,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
 	}
 
 	fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
-	fadump_conf->boot_mem_regs_cnt = fdm->region_cnt;
+	fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
 	opal_fadump_update_config(fadump_conf, fdm);
 }
 
@@ -178,35 +179,38 @@
 static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
 {
 	fdm->version = OPAL_FADUMP_VERSION;
-	fdm->region_cnt = 0;
-	fdm->registered_regions = 0;
-	fdm->fadumphdr_addr = 0;
+	fdm->region_cnt = cpu_to_be16(0);
+	fdm->registered_regions = cpu_to_be16(0);
+	fdm->fadumphdr_addr = cpu_to_be64(0);
 }
 
 static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
 {
 	u64 addr = fadump_conf->reserve_dump_area_start;
+	u16 reg_cnt;
 	int i;
 
 	opal_fdm = __va(fadump_conf->kernel_metadata);
 	opal_fadump_init_metadata(opal_fdm);
 
 	/* Boot memory regions */
+	reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
 	for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
-		opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i];
-		opal_fdm->rgn[i].dest = addr;
-		opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i];
+		opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
+		opal_fdm->rgn[i].dest = cpu_to_be64(addr);
+		opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
 
-		opal_fdm->region_cnt++;
+		reg_cnt++;
 		addr += fadump_conf->boot_mem_sz[i];
 	}
+	opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
 
 	/*
 	 * Kernel metadata is passed to f/w and retrieved in capture kerenl.
	 * So, use it to save fadump header address instead of calculating it.
 	 */
-	opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest +
-				    fadump_conf->boot_memory_size);
+	opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
					       fadump_conf->boot_memory_size);
 
 	opal_fadump_update_config(fadump_conf, opal_fdm);
 
@@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void)
 static int opal_fadump_register(struct fw_dump *fadump_conf)
 {
 	s64 rc = OPAL_PARAMETER;
+	u16 registered_regs;
 	int i, err = -EIO;
 
-	for (i = 0; i < opal_fdm->region_cnt; i++) {
+	registered_regs = be16_to_cpu(opal_fdm->registered_regions);
+	for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
 		rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
-				       opal_fdm->rgn[i].src,
-				       opal_fdm->rgn[i].dest,
-				       opal_fdm->rgn[i].size);
+				       be64_to_cpu(opal_fdm->rgn[i].src),
+				       be64_to_cpu(opal_fdm->rgn[i].dest),
+				       be64_to_cpu(opal_fdm->rgn[i].size));
 		if (rc != OPAL_SUCCESS)
 			break;
 
-		opal_fdm->registered_regions++;
+		registered_regs++;
 	}
+	opal_fdm->registered_regions = cpu_to_be16(registered_regs);
 
 	switch (rc) {
 	case OPAL_SUCCESS:
@@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
 	case OPAL_RESOURCE:
 		/* If MAX regions limit in f/w is hit, warn and proceed. */
 		pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
-			(opal_fdm->region_cnt - opal_fdm->registered_regions));
+			(be16_to_cpu(opal_fdm->region_cnt) -
+			 be16_to_cpu(opal_fdm->registered_regions)));
 		fadump_conf->dump_registered = 1;
 		err = 0;
 		break;
@@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
 	 * If some regions were registered before OPAL_MPIPL_ADD_RANGE
||||||
* OPAL call failed, unregister all regions.
|
* OPAL call failed, unregister all regions.
|
||||||
*/
|
*/
|
||||||
if ((err < 0) && (opal_fdm->registered_regions > 0))
|
if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
|
||||||
opal_fadump_unregister(fadump_conf);
|
opal_fadump_unregister(fadump_conf);
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf)
|
|||||||
return -EIO;
|
return -EIO;
|
||||||
}
|
}
|
||||||
|
|
||||||
opal_fdm->registered_regions = 0;
|
opal_fdm->registered_regions = cpu_to_be16(0);
|
||||||
fadump_conf->dump_registered = 0;
|
fadump_conf->dump_registered = 0;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -563,19 +571,20 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf,
|
|||||||
else
|
else
|
||||||
fdm_ptr = opal_fdm;
|
fdm_ptr = opal_fdm;
|
||||||
|
|
||||||
for (i = 0; i < fdm_ptr->region_cnt; i++) {
|
for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
|
||||||
/*
|
/*
|
||||||
* Only regions that are registered for MPIPL
|
* Only regions that are registered for MPIPL
|
||||||
* would have dump data.
|
* would have dump data.
|
||||||
*/
|
*/
|
||||||
if ((fadump_conf->dump_active) &&
|
if ((fadump_conf->dump_active) &&
|
||||||
(i < fdm_ptr->registered_regions))
|
(i < be16_to_cpu(fdm_ptr->registered_regions)))
|
||||||
dumped_bytes = fdm_ptr->rgn[i].size;
|
dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
|
||||||
|
|
||||||
seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
|
seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
|
||||||
fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
|
be64_to_cpu(fdm_ptr->rgn[i].src),
|
||||||
|
be64_to_cpu(fdm_ptr->rgn[i].dest));
|
||||||
seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
|
seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
|
||||||
fdm_ptr->rgn[i].size, dumped_bytes);
|
be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Dump is active. Show reserved area start address. */
|
/* Dump is active. Show reserved area start address. */
|
||||||
@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
|
|||||||
{
|
{
|
||||||
const __be32 *prop;
|
const __be32 *prop;
|
||||||
unsigned long dn;
|
unsigned long dn;
|
||||||
|
__be64 be_addr;
|
||||||
u64 addr = 0;
|
u64 addr = 0;
|
||||||
int i, len;
|
int i, len;
|
||||||
s64 ret;
|
s64 ret;
|
||||||
@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
|
|||||||
if (!prop)
|
if (!prop)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
|
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
|
||||||
if ((ret != OPAL_SUCCESS) || !addr) {
|
if ((ret != OPAL_SUCCESS) || !be_addr) {
|
||||||
pr_err("Failed to get Kernel metadata (%lld)\n", ret);
|
pr_err("Failed to get Kernel metadata (%lld)\n", ret);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
addr = be64_to_cpu(addr);
|
addr = be64_to_cpu(be_addr);
|
||||||
pr_debug("Kernel metadata addr: %llx\n", addr);
|
pr_debug("Kernel metadata addr: %llx\n", addr);
|
||||||
|
|
||||||
opal_fdm_active = __va(addr);
|
opal_fdm_active = __va(addr);
|
||||||
@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Kernel regions not registered with f/w for MPIPL */
|
/* Kernel regions not registered with f/w for MPIPL */
|
||||||
if (opal_fdm_active->registered_regions == 0) {
|
if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
|
||||||
opal_fdm_active = NULL;
|
opal_fdm_active = NULL;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
|
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
|
||||||
if (addr) {
|
if (be_addr) {
|
||||||
addr = be64_to_cpu(addr);
|
addr = be64_to_cpu(be_addr);
|
||||||
pr_debug("CPU metadata addr: %llx\n", addr);
|
pr_debug("CPU metadata addr: %llx\n", addr);
|
||||||
opal_cpu_metadata = __va(addr);
|
opal_cpu_metadata = __va(addr);
|
||||||
}
|
}
|
||||||
|
@ -31,14 +31,14 @@
|
|||||||
* OPAL FADump kernel metadata
|
* OPAL FADump kernel metadata
|
||||||
*
|
*
|
||||||
* The address of this structure will be registered with f/w for retrieving
|
* The address of this structure will be registered with f/w for retrieving
|
||||||
* and processing during crash dump.
|
* in the capture kernel to process the crash dump.
|
||||||
*/
|
*/
|
||||||
struct opal_fadump_mem_struct {
|
struct opal_fadump_mem_struct {
|
||||||
u8 version;
|
u8 version;
|
||||||
u8 reserved[3];
|
u8 reserved[3];
|
||||||
u16 region_cnt; /* number of regions */
|
__be16 region_cnt; /* number of regions */
|
||||||
u16 registered_regions; /* Regions registered for MPIPL */
|
__be16 registered_regions; /* Regions registered for MPIPL */
|
||||||
u64 fadumphdr_addr;
|
__be64 fadumphdr_addr;
|
||||||
struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];
|
struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];
|
||||||
} __packed;
|
} __packed;
|
||||||
|
|
||||||
@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
|
|||||||
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
|
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
|
||||||
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
|
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
|
||||||
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
|
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
|
||||||
reg_entry->reg_val);
|
(u64)(reg_entry->reg_val));
|
||||||
opal_fadump_set_regval_regnum(regs,
|
opal_fadump_set_regval_regnum(regs,
|
||||||
be32_to_cpu(reg_entry->reg_type),
|
be32_to_cpu(reg_entry->reg_type),
|
||||||
be32_to_cpu(reg_entry->reg_num),
|
be32_to_cpu(reg_entry->reg_num),
|
||||||
|
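Illustrative aside (editor's sketch, not part of the series): the fadump hunks above route every multi-byte field of the firmware-shared structure through cpu_to_be*/be*_to_cpu, because the structure is kept in big-endian byte order regardless of the kernel's own endianness. A minimal userspace demonstration of why mixing raw and converted accesses breaks on little-endian hosts, using the glibc <endian.h> helpers rather than the kernel byteorder API:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stored big-endian, as the firmware side expects. */
	uint16_t region_cnt = htobe16(3);

	/* Correct: convert back before using the value. */
	printf("decoded: %u\n", be16toh(region_cnt));

	/* The class of bug fixed above: reading the raw field.
	 * On a little-endian host this prints 768 (0x0300), not 3. */
	printf("raw:     %u\n", region_cnt);
	return 0;
}

On big-endian hosts both lines agree, which is exactly why such bugs survive until a differently-ordered capture kernel reads the data.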
@@ -55,6 +55,7 @@ static int __init uv_init(void)
 		return -ENODEV;
 
 	uv_memcons = memcons_init(node, "memcons");
+	of_node_put(node);
 	if (!uv_memcons)
 		return -ENOENT;
 
@@ -403,9 +403,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
 	}
 
 	/* Initialize the DART HW */
-	if (dart_init(dn) != 0)
+	if (dart_init(dn) != 0) {
+		of_node_put(dn);
 		return;
-
+	}
 	/*
	 * U4 supports a DART bypass, we use it for 64-bit capable devices to
	 * improve performance. However, that only works for devices connected
@@ -418,6 +419,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
 
 	/* Setup pci_dma ops */
 	set_pci_dma_ops(&dma_iommu_ops);
+	of_node_put(dn);
 }
 
 #ifdef CONFIG_PM
@@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev)
 	if (rc) {
 		dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
			rmu_node);
+		of_node_put(rmu_node);
 		goto err_rmu;
 	}
+	of_node_put(rmu_node);
 	rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
 	if (!rmu_regs_win) {
 		dev_err(&dev->dev, "Unable to map rmu register window\n");
@@ -195,6 +195,7 @@ int icp_opal_init(void)
 
 	printk("XICS: Using OPAL ICP fallbacks\n");
 
+	of_node_put(np);
 	return 0;
 }
 
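Illustrative aside (editor's sketch, not part of the series): all five hunks above fix the same leak pattern. The of_find_*() helpers return a device node with its refcount raised, and the caller owns that reference, so of_node_put() must run on every exit path, error or success. A minimal kernel-style sketch of the pattern, with hypothetical "vendor,foo" compatible string:

#include <linux/of.h>

static int __init foo_probe(void)
{
	struct device_node *np;
	int ret = 0;

	/* of_find_compatible_node() takes a reference on the node. */
	np = of_find_compatible_node(NULL, NULL, "vendor,foo");
	if (!np)
		return -ENODEV;

	if (!of_device_is_available(np))
		ret = -ENODEV;

	/* Drop the reference on every path, success or error. */
	of_node_put(np);
	return ret;
}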
@@ -861,7 +861,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
 {
 	gw->walk_bytes_remain -= nbytes;
-	scatterwalk_unmap(&gw->walk);
+	scatterwalk_unmap(gw->walk_ptr);
 	scatterwalk_advance(&gw->walk, nbytes);
 	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
 	gw->walk_ptr = NULL;
@@ -936,7 +936,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
 		goto out;
 	}
 
-	scatterwalk_unmap(&gw->walk);
+	scatterwalk_unmap(gw->walk_ptr);
 	gw->walk_ptr = NULL;
 
 	gw->ptr = gw->buf;
@@ -52,10 +52,17 @@ static inline bool test_preempt_need_resched(void)
 
 static inline void __preempt_count_add(int val)
 {
-	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
-		__atomic_add_const(val, &S390_lowcore.preempt_count);
-	else
-		__atomic_add(val, &S390_lowcore.preempt_count);
+	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
+	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
+		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
+			__atomic_add_const(val, &S390_lowcore.preempt_count);
+			return;
+		}
+	}
+	__atomic_add(val, &S390_lowcore.preempt_count);
 }
 
 static inline void __preempt_count_sub(int val)
@@ -2579,6 +2579,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
 	return 0;
 }
 
+/*
+ * Give a chance to schedule after setting a key to 256 pages.
+ * We only hold the mm lock, which is a rwsem and the kvm srcu.
+ * Both can sleep.
+ */
+static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
+				  unsigned long next, struct mm_walk *walk)
+{
+	cond_resched();
+	return 0;
+}
+
 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
				      unsigned long hmask, unsigned long next,
				      struct mm_walk *walk)
@@ -2601,12 +2613,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 	end = start + HPAGE_SIZE - 1;
 	__storage_key_init_range(start, end);
 	set_bit(PG_arch_1, &page->flags);
+	cond_resched();
 	return 0;
 }
 
 static const struct mm_walk_ops enable_skey_walk_ops = {
 	.hugetlb_entry		= __s390_enable_skey_hugetlb,
 	.pte_entry		= __s390_enable_skey_pte,
+	.pmd_entry		= __s390_enable_skey_pmd,
 };
 
 int s390_enable_skey(void)
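Illustrative aside (editor's sketch, not part of the series): the preempt.h hunk above sidesteps the gcc 12 problem by gating the __builtin_constant_p() fast path behind IS_ENABLED(), which collapses to a plain compile-time 0 or 1 so the dead branch is discarded without any #ifdef. A small sketch of the idiom with a generic counter (the CONFIG symbol is the real one from the hunk, everything else is hypothetical):

#include <linux/kconfig.h>

static inline void add_count(int val, int *counter)
{
	/* IS_ENABLED(CONFIG_X) is 1 for =y/=m and 0 otherwise, so this
	 * whole block vanishes at compile time when the instrumentation
	 * that confuses __builtin_constant_p() is turned on. */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val)) {
			*counter += val;	/* fast constant path */
			return;
		}
	}
	__atomic_fetch_add(counter, val, __ATOMIC_RELAXED);
}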
@@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
		       unsigned long *stack_out)
 {
 	struct winch_data data;
-	int fds[2], n, err;
+	int fds[2], n, err, pid;
 	char c;
 
 	err = os_pipe(fds, 1, 1);
@@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
	 * problem with /dev/net/tun, which if held open by this
	 * thread, prevents the TUN/TAP device from being reused.
	 */
-	err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
-	if (err < 0) {
+	pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
+	if (pid < 0) {
+		err = pid;
 		printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
		       -err);
 		goto out_close;
@@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
 		goto out_close;
 	}
 
-	return err;
+	return pid;
 
 out_close:
 	close(fds[1]);
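Illustrative aside (editor's sketch, not part of the series): the winch_tramp() fix above is a return-value mix-up. The helper returns a pid on success, but the old code parked it in err, and by the time the function returned, err could have been overwritten by a later call. Keeping "handle on success" and "errno on failure" in separate variables avoids the clobber; a generic sketch with hypothetical stub helpers:

extern int spawn_helper_thread(void);	/* >= 0: pid, < 0: -errno */
extern int wait_for_ack(void);		/* 0 or -errno */

static int start_helper(void)
{
	int pid, err;

	pid = spawn_helper_thread();
	if (pid < 0) {
		err = pid;
		goto out;
	}

	err = wait_for_ack();	/* may legitimately rewrite err */
	if (err < 0)
		goto out;

	return pid;		/* success: return the pid, not err */
out:
	return err;
}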
@@ -63,6 +63,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK	7
 #define TIF_NOTIFY_RESUME	8
 #define TIF_SECCOMP		9 /* secure computing */
+#define TIF_SINGLESTEP		10 /* single stepping userspace */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
@@ -70,5 +71,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_MEMDIE		(1 << TIF_MEMDIE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 
 #endif
@@ -44,7 +44,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 {
 	PT_REGS_IP(regs) = eip;
 	PT_REGS_SP(regs) = esp;
-	current->ptrace &= ~PT_DTRACE;
+	clear_thread_flag(TIF_SINGLESTEP);
 #ifdef SUBARCH_EXECVE1
 	SUBARCH_EXECVE1(regs->regs);
 #endif
@@ -380,7 +380,7 @@ int singlestepping(void * t)
 {
 	struct task_struct *task = t ? t : current;
 
-	if (!(task->ptrace & PT_DTRACE))
+	if (!test_thread_flag(TIF_SINGLESTEP))
 		return 0;
 
 	if (task->thread.singlestep_syscall)
@@ -12,7 +12,7 @@
 
 void user_enable_single_step(struct task_struct *child)
 {
-	child->ptrace |= PT_DTRACE;
+	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 	child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
@@ -22,7 +22,7 @@ void user_enable_single_step(struct task_struct *child)
 
 void user_disable_single_step(struct task_struct *child)
 {
-	child->ptrace &= ~PT_DTRACE;
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
 	child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
@@ -121,7 +121,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code)
 }
 
 /*
- * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
+ * XXX Check TIF_SINGLESTEP for singlestepping check and
 * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
 */
 int syscall_trace_enter(struct pt_regs *regs)
@@ -145,7 +145,7 @@ void syscall_trace_leave(struct pt_regs *regs)
 	audit_syscall_exit(regs);
 
 	/* Fake a debug trap */
-	if (ptraced & PT_DTRACE)
+	if (test_thread_flag(TIF_SINGLESTEP))
 		send_sigtrap(&regs->regs, 0);
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	unsigned long sp;
 	int err;
 
-	if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+	if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED))
 		singlestep = 1;
 
 	/* Did we come from a system call? */
@@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs)
	 * on the host. The tracing thread will check this flag and
	 * PTRACE_SYSCALL if necessary.
	 */
-	if (current->ptrace & PT_DTRACE)
+	if (test_thread_flag(TIF_SINGLESTEP))
 		current->thread.singlestep_syscall =
			is_syscall(PT_REGS_IP(&current->thread.regs));
 
@@ -323,7 +323,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 static __init int vdso_setup(char *s)
 {
 	vdso64_enabled = simple_strtoul(s, NULL, 0);
-	return 0;
+	return 1;
 }
 __setup("vdso=", vdso_setup);
 
@@ -312,6 +312,16 @@ static int perf_ibs_init(struct perf_event *event)
 	hwc->config_base = perf_ibs->msr;
 	hwc->config = config;
 
+	/*
	 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
	 * recorded as part of interrupt regs. Thus we need to use rip from
	 * interrupt regs while unwinding call stack. Setting _EARLY flag
	 * makes sure we unwind call-stack before perf sample rip is set to
	 * IbsOpRip.
	 */
+	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+		event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
+
 	return 0;
 }
 
@@ -683,6 +693,14 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 		data.raw = &raw;
 	}
 
+	/*
	 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
	 * recorded as part of interrupt regs. Thus we need to use rip from
	 * interrupt regs while unwinding call stack.
	 */
+	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+		data.callchain = perf_callchain(event, iregs);
+
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
 	if (throttle) {
@@ -250,7 +250,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
 	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
 	INTEL_EVENT_CONSTRAINT(0x32, 0xf),	/* SW_PREFETCH_ACCESS.* */
-	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
+	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
 	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),	/* CYCLE_ACTIVITY.STALLS_TOTAL */
 	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),	/* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
@@ -13,7 +13,19 @@
 
 /* Asm macros */
 
-#define ACPI_FLUSH_CPU_CACHE()	wbinvd()
+/*
+ * ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states.
+ * It is required to prevent data loss.
+ *
+ * While running inside virtual machine, the kernel can bypass cache flushing.
+ * Changing sleep state in a virtual machine doesn't affect the host system
+ * sleep state and cannot lead to data loss.
+ */
+#define ACPI_FLUSH_CPU_CACHE()					\
+do {								\
+	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))	\
+		wbinvd();					\
+} while (0)
 
 int __acpi_acquire_global_lock(unsigned int *lock);
 int __acpi_release_global_lock(unsigned int *lock);
@@ -49,7 +49,7 @@ extern const char * const x86_power_flags[32];
 extern const char * const x86_bug_flags[NBUGINTS*32];
 
 #define test_cpu_cap(c, bit)						\
-	 test_bit(bit, (unsigned long *)((c)->x86_capability))
+	 arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
 
 /*
 * There are 32 bits/features in each mask word.  The high bits
@@ -21,7 +21,6 @@ struct saved_context {
 #endif
 	unsigned long cr0, cr2, cr3, cr4;
 	u64 misc_enable;
-	bool misc_enable_saved;
 	struct saved_msrs saved_msrs;
 	struct desc_ptr gdt_desc;
 	struct desc_ptr idt;
@@ -30,6 +29,7 @@ struct saved_context {
 	unsigned long tr;
 	unsigned long safety;
 	unsigned long return_address;
+	bool misc_enable_saved;
 } __attribute__((packed));
 
 /* routines for saving/restoring kernel state */
@@ -14,9 +14,13 @@
 * Image of the saved processor state, used by the low level ACPI suspend to
 * RAM code and by the low level hibernation code.
 *
- * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
- * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
- * still work as required.
+ * If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
+ * and make sure that __save/__restore_processor_state(), defined in
+ * arch/x86/power/cpu.c, still work as required.
+ *
+ * Because the structure is packed, make sure to avoid unaligned members. For
+ * optimisation purposes but also because tools like kmemleak only search for
+ * pointers that are aligned.
 */
 struct saved_context {
 	struct pt_regs regs;
@@ -36,7 +40,6 @@ struct saved_context {
 
 	unsigned long cr0, cr2, cr3, cr4;
 	u64 misc_enable;
-	bool misc_enable_saved;
 	struct saved_msrs saved_msrs;
 	unsigned long efer;
 	u16 gdt_pad;			/* Unused */
@@ -48,6 +51,7 @@ struct saved_context {
 	unsigned long tr;
 	unsigned long safety;
 	unsigned long return_address;
+	bool misc_enable_saved;
 } __attribute__((packed));
 
 #define loaddebug(thread,register) \
@@ -168,7 +168,7 @@ static __init int setup_apicpmtimer(char *s)
 {
 	apic_calibrate_pmtmr = 1;
 	notsc_setup(NULL);
-	return 0;
+	return 1;
 }
 __setup("apicpmtimer", setup_apicpmtimer);
 #endif
@@ -97,7 +97,7 @@ static bool ring3mwait_disabled __read_mostly;
 static int __init ring3mwait_disable(char *__unused)
 {
 	ring3mwait_disabled = true;
-	return 0;
+	return 1;
 }
 __setup("ring3mwait=disable", ring3mwait_disable);
 
@@ -175,8 +175,7 @@ void set_task_blockstep(struct task_struct *task, bool on)
	 *
	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
	 * task is current or it can't be running, otherwise we can race
-	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
-	 * PTRACE_KILL is not safe.
+	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced().
	 */
 	local_irq_disable();
 	debugctl = get_debugctlmsr();
@@ -70,9 +70,6 @@ static int __init control_va_addr_alignment(char *str)
 	if (*str == 0)
 		return 1;
 
-	if (*str == '=')
-		str++;
-
 	if (!strcmp(str, "32"))
 		va_align.flags = ALIGN_VA_32;
 	else if (!strcmp(str, "64"))
@@ -82,11 +79,11 @@ static int __init control_va_addr_alignment(char *str)
 	else if (!strcmp(str, "on"))
 		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
 	else
-		return 0;
+		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);
 
 	return 1;
 }
-__setup("align_va_addr", control_va_addr_alignment);
+__setup("align_va_addr=", control_va_addr_alignment);
 
 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
|
|||||||
/* update exit information fields: */
|
/* update exit information fields: */
|
||||||
vmcs12->vm_exit_reason = exit_reason;
|
vmcs12->vm_exit_reason = exit_reason;
|
||||||
vmcs12->exit_qualification = exit_qualification;
|
vmcs12->exit_qualification = exit_qualification;
|
||||||
vmcs12->vm_exit_intr_info = exit_intr_info;
|
|
||||||
|
|
||||||
vmcs12->idt_vectoring_info_field = 0;
|
|
||||||
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
|
|
||||||
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
|
||||||
|
* and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
|
||||||
|
* exit info fields are unmodified.
|
||||||
|
*/
|
||||||
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
|
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
|
||||||
vmcs12->launch_state = 1;
|
vmcs12->launch_state = 1;
|
||||||
|
|
||||||
@ -3763,8 +3763,13 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
|||||||
* Transfer the event that L0 or L1 may wanted to inject into
|
* Transfer the event that L0 or L1 may wanted to inject into
|
||||||
* L2 to IDT_VECTORING_INFO_FIELD.
|
* L2 to IDT_VECTORING_INFO_FIELD.
|
||||||
*/
|
*/
|
||||||
|
vmcs12->idt_vectoring_info_field = 0;
|
||||||
vmcs12_save_pending_event(vcpu, vmcs12);
|
vmcs12_save_pending_event(vcpu, vmcs12);
|
||||||
|
|
||||||
|
vmcs12->vm_exit_intr_info = exit_intr_info;
|
||||||
|
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
|
||||||
|
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* According to spec, there's no need to store the guest's
|
* According to spec, there's no need to store the guest's
|
||||||
* MSRs if the exit is due to a VM-entry failure that occurs
|
* MSRs if the exit is due to a VM-entry failure that occurs
|
||||||
|
@ -43,8 +43,8 @@ static void delay_loop(unsigned long loops)
|
|||||||
" jnz 2b \n"
|
" jnz 2b \n"
|
||||||
"3: dec %0 \n"
|
"3: dec %0 \n"
|
||||||
|
|
||||||
: /* we don't need output */
|
: "+a" (loops)
|
||||||
:"a" (loops)
|
:
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -75,7 +75,7 @@ int pat_debug_enable;
|
|||||||
static int __init pat_debug_setup(char *str)
|
static int __init pat_debug_setup(char *str)
|
||||||
{
|
{
|
||||||
pat_debug_enable = 1;
|
pat_debug_enable = 1;
|
||||||
return 0;
|
return 1;
|
||||||
}
|
}
|
||||||
__setup("debugpat", pat_debug_setup);
|
__setup("debugpat", pat_debug_setup);
|
||||||
|
|
||||||
|
@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
|
|||||||
{
|
{
|
||||||
long res;
|
long res;
|
||||||
void *stub_addr;
|
void *stub_addr;
|
||||||
|
|
||||||
|
BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
|
||||||
|
|
||||||
res = syscall_stub_data(mm_idp, (unsigned long *)desc,
|
res = syscall_stub_data(mm_idp, (unsigned long *)desc,
|
||||||
(sizeof(*desc) + sizeof(long) - 1) &
|
sizeof(*desc) / sizeof(long),
|
||||||
~(sizeof(long) - 1),
|
|
||||||
addr, &stub_addr);
|
addr, &stub_addr);
|
||||||
if (!res) {
|
if (!res) {
|
||||||
unsigned long args[] = { func,
|
unsigned long args[] = { func,
|
||||||
|
@ -225,12 +225,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
|||||||
|
|
||||||
void user_enable_single_step(struct task_struct *child)
|
void user_enable_single_step(struct task_struct *child)
|
||||||
{
|
{
|
||||||
child->ptrace |= PT_SINGLESTEP;
|
set_tsk_thread_flag(child, TIF_SINGLESTEP);
|
||||||
}
|
}
|
||||||
|
|
||||||
void user_disable_single_step(struct task_struct *child)
|
void user_disable_single_step(struct task_struct *child)
|
||||||
{
|
{
|
||||||
child->ptrace &= ~PT_SINGLESTEP;
|
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -465,7 +465,7 @@ static void do_signal(struct pt_regs *regs)
|
|||||||
/* Set up the stack frame */
|
/* Set up the stack frame */
|
||||||
ret = setup_frame(&ksig, sigmask_to_save(), regs);
|
ret = setup_frame(&ksig, sigmask_to_save(), regs);
|
||||||
signal_setup_done(ret, &ksig, 0);
|
signal_setup_done(ret, &ksig, 0);
|
||||||
if (current->ptrace & PT_SINGLESTEP)
|
if (test_thread_flag(TIF_SINGLESTEP))
|
||||||
task_pt_regs(current)->icountlevel = 1;
|
task_pt_regs(current)->icountlevel = 1;
|
||||||
|
|
||||||
return;
|
return;
|
||||||
@ -491,7 +491,7 @@ static void do_signal(struct pt_regs *regs)
|
|||||||
/* If there's no signal to deliver, we just restore the saved mask. */
|
/* If there's no signal to deliver, we just restore the saved mask. */
|
||||||
restore_saved_sigmask();
|
restore_saved_sigmask();
|
||||||
|
|
||||||
if (current->ptrace & PT_SINGLESTEP)
|
if (test_thread_flag(TIF_SINGLESTEP))
|
||||||
task_pt_regs(current)->icountlevel = 1;
|
task_pt_regs(current)->icountlevel = 1;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@@ -536,6 +536,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
	 */
 	bfqg->bfqd = bfqd;
 	bfqg->active_entities = 0;
+	bfqg->online = true;
 	bfqg->rq_pos_tree = RB_ROOT;
 }
 
@@ -564,28 +565,11 @@ static void bfq_group_set_parent(struct bfq_group *bfqg,
 	entity->sched_data = &parent->sched_data;
 }
 
-static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
-					 struct blkcg *blkcg)
+static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
 {
-	struct blkcg_gq *blkg;
-
-	blkg = blkg_lookup(blkcg, bfqd->queue);
-	if (likely(blkg))
-		return blkg_to_bfqg(blkg);
-	return NULL;
-}
-
-struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
-				     struct blkcg *blkcg)
-{
-	struct bfq_group *bfqg, *parent;
+	struct bfq_group *parent;
 	struct bfq_entity *entity;
 
-	bfqg = bfq_lookup_bfqg(bfqd, blkcg);
-
-	if (unlikely(!bfqg))
-		return NULL;
-
 	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
@@ -602,8 +586,24 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 			bfq_group_set_parent(curr_bfqg, parent);
 		}
 	}
+}
 
-	return bfqg;
+struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
+{
+	struct blkcg_gq *blkg = bio->bi_blkg;
+	struct bfq_group *bfqg;
+
+	while (blkg) {
+		bfqg = blkg_to_bfqg(blkg);
+		if (bfqg->online) {
+			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
+			return bfqg;
+		}
+		blkg = blkg->parent;
+	}
+	bio_associate_blkg_from_css(bio,
+				    &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
+	return bfqd->root_group;
 }
 
 /**
|
|||||||
* Move bic to blkcg, assuming that bfqd->lock is held; which makes
|
* Move bic to blkcg, assuming that bfqd->lock is held; which makes
|
||||||
* sure that the reference to cgroup is valid across the call (see
|
* sure that the reference to cgroup is valid across the call (see
|
||||||
* comments in bfq_bic_update_cgroup on this issue)
|
* comments in bfq_bic_update_cgroup on this issue)
|
||||||
*
|
|
||||||
* NOTE: an alternative approach might have been to store the current
|
|
||||||
* cgroup in bfqq and getting a reference to it, reducing the lookup
|
|
||||||
* time here, at the price of slightly more complex code.
|
|
||||||
*/
|
*/
|
||||||
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
||||||
struct bfq_io_cq *bic,
|
struct bfq_io_cq *bic,
|
||||||
struct blkcg *blkcg)
|
struct bfq_group *bfqg)
|
||||||
{
|
{
|
||||||
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
|
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
|
||||||
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
|
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
|
||||||
struct bfq_group *bfqg;
|
|
||||||
struct bfq_entity *entity;
|
struct bfq_entity *entity;
|
||||||
|
|
||||||
bfqg = bfq_find_set_group(bfqd, blkcg);
|
|
||||||
|
|
||||||
if (unlikely(!bfqg))
|
|
||||||
bfqg = bfqd->root_group;
|
|
||||||
|
|
||||||
if (async_bfqq) {
|
if (async_bfqq) {
|
||||||
entity = &async_bfqq->entity;
|
entity = &async_bfqq->entity;
|
||||||
|
|
||||||
@ -708,9 +698,39 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (sync_bfqq) {
|
if (sync_bfqq) {
|
||||||
entity = &sync_bfqq->entity;
|
if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
|
||||||
if (entity->sched_data != &bfqg->sched_data)
|
/* We are the only user of this bfqq, just move it */
|
||||||
bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
|
if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
|
||||||
|
bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
|
||||||
|
} else {
|
||||||
|
struct bfq_queue *bfqq;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The queue was merged to a different queue. Check
|
||||||
|
* that the merge chain still belongs to the same
|
||||||
|
* cgroup.
|
||||||
|
*/
|
||||||
|
for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
|
||||||
|
if (bfqq->entity.sched_data !=
|
||||||
|
&bfqg->sched_data)
|
||||||
|
break;
|
||||||
|
if (bfqq) {
|
||||||
|
/*
|
||||||
|
* Some queue changed cgroup so the merge is
|
||||||
|
* not valid anymore. We cannot easily just
|
||||||
|
* cancel the merge (by clearing new_bfqq) as
|
||||||
|
* there may be other processes using this
|
||||||
|
* queue and holding refs to all queues below
|
||||||
|
* sync_bfqq->new_bfqq. Similarly if the merge
|
||||||
|
* already happened, we need to detach from
|
||||||
|
* bfqq now so that we cannot merge bio to a
|
||||||
|
* request from the old cgroup.
|
||||||
|
*/
|
||||||
|
bfq_put_cooperator(sync_bfqq);
|
||||||
|
bfq_release_process_ref(bfqd, sync_bfqq);
|
||||||
|
bic_set_bfqq(bic, NULL, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return bfqg;
|
return bfqg;
|
||||||
@ -719,20 +739,24 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
|
|||||||
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
|
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
|
||||||
{
|
{
|
||||||
struct bfq_data *bfqd = bic_to_bfqd(bic);
|
struct bfq_data *bfqd = bic_to_bfqd(bic);
|
||||||
struct bfq_group *bfqg = NULL;
|
struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
|
||||||
uint64_t serial_nr;
|
uint64_t serial_nr;
|
||||||
|
|
||||||
rcu_read_lock();
|
serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
|
||||||
serial_nr = __bio_blkcg(bio)->css.serial_nr;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check whether blkcg has changed. The condition may trigger
|
* Check whether blkcg has changed. The condition may trigger
|
||||||
* spuriously on a newly created cic but there's no harm.
|
* spuriously on a newly created cic but there's no harm.
|
||||||
*/
|
*/
|
||||||
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
|
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
|
||||||
goto out;
|
return;
|
||||||
|
|
||||||
bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
|
/*
|
||||||
|
* New cgroup for this process. Make sure it is linked to bfq internal
|
||||||
|
* cgroup hierarchy.
|
||||||
|
*/
|
||||||
|
bfq_link_bfqg(bfqd, bfqg);
|
||||||
|
__bfq_bic_change_cgroup(bfqd, bic, bfqg);
|
||||||
/*
|
/*
|
||||||
* Update blkg_path for bfq_log_* functions. We cache this
|
* Update blkg_path for bfq_log_* functions. We cache this
|
||||||
* path, and update it here, for the following
|
* path, and update it here, for the following
|
||||||
@ -785,8 +809,6 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
|
|||||||
*/
|
*/
|
||||||
blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
|
blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
|
||||||
bic->blkcg_serial_nr = serial_nr;
|
bic->blkcg_serial_nr = serial_nr;
|
||||||
out:
|
|
||||||
rcu_read_unlock();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -914,6 +936,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
|
|||||||
|
|
||||||
put_async_queues:
|
put_async_queues:
|
||||||
bfq_put_async_queues(bfqd, bfqg);
|
bfq_put_async_queues(bfqd, bfqg);
|
||||||
|
bfqg->online = false;
|
||||||
|
|
||||||
spin_unlock_irqrestore(&bfqd->lock, flags);
|
spin_unlock_irqrestore(&bfqd->lock, flags);
|
||||||
/*
|
/*
|
||||||
@ -1402,7 +1425,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
|
|||||||
bfq_end_wr_async_queues(bfqd, bfqd->root_group);
|
bfq_end_wr_async_queues(bfqd, bfqd->root_group);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
|
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
|
||||||
{
|
{
|
||||||
return bfqd->root_group;
|
return bfqd->root_group;
|
||||||
}
|
}
|
||||||
|
@ -2227,10 +2227,17 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
|
|||||||
|
|
||||||
spin_lock_irq(&bfqd->lock);
|
spin_lock_irq(&bfqd->lock);
|
||||||
|
|
||||||
if (bic)
|
if (bic) {
|
||||||
|
/*
|
||||||
|
* Make sure cgroup info is uptodate for current process before
|
||||||
|
* considering the merge.
|
||||||
|
*/
|
||||||
|
bfq_bic_update_cgroup(bic, bio);
|
||||||
|
|
||||||
bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
|
bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
|
||||||
else
|
} else {
|
||||||
bfqd->bio_bfqq = NULL;
|
bfqd->bio_bfqq = NULL;
|
||||||
|
}
|
||||||
bfqd->bio_bic = bic;
|
bfqd->bio_bic = bic;
|
||||||
|
|
||||||
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
|
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
|
||||||
@ -2260,8 +2267,6 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
|
|||||||
return ELEVATOR_NO_MERGE;
|
return ELEVATOR_NO_MERGE;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct bfq_queue *bfq_init_rq(struct request *rq);
|
|
||||||
|
|
||||||
static void bfq_request_merged(struct request_queue *q, struct request *req,
|
static void bfq_request_merged(struct request_queue *q, struct request *req,
|
||||||
enum elv_merge type)
|
enum elv_merge type)
|
||||||
{
|
{
|
||||||
@ -2270,7 +2275,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
|
|||||||
blk_rq_pos(req) <
|
blk_rq_pos(req) <
|
||||||
blk_rq_pos(container_of(rb_prev(&req->rb_node),
|
blk_rq_pos(container_of(rb_prev(&req->rb_node),
|
||||||
struct request, rb_node))) {
|
struct request, rb_node))) {
|
||||||
struct bfq_queue *bfqq = bfq_init_rq(req);
|
struct bfq_queue *bfqq = RQ_BFQQ(req);
|
||||||
struct bfq_data *bfqd;
|
struct bfq_data *bfqd;
|
||||||
struct request *prev, *next_rq;
|
struct request *prev, *next_rq;
|
||||||
|
|
||||||
@ -2322,8 +2327,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
|
|||||||
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
|
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
|
||||||
struct request *next)
|
struct request *next)
|
||||||
{
|
{
|
||||||
struct bfq_queue *bfqq = bfq_init_rq(rq),
|
struct bfq_queue *bfqq = RQ_BFQQ(rq),
|
||||||
*next_bfqq = bfq_init_rq(next);
|
*next_bfqq = RQ_BFQQ(next);
|
||||||
|
|
||||||
if (!bfqq)
|
if (!bfqq)
|
||||||
return;
|
return;
|
||||||
@ -2502,6 +2507,14 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
|
|||||||
if (process_refs == 0 || new_process_refs == 0)
|
if (process_refs == 0 || new_process_refs == 0)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure merged queues belong to the same parent. Parents could
|
||||||
|
* have changed since the time we decided the two queues are suitable
|
||||||
|
* for merging.
|
||||||
|
*/
|
||||||
|
if (new_bfqq->entity.parent != bfqq->entity.parent)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
|
bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
|
||||||
new_bfqq->pid);
|
new_bfqq->pid);
|
||||||
|
|
||||||
@ -4914,7 +4927,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
|
|||||||
bfqg_and_blkg_put(bfqg);
|
bfqg_and_blkg_put(bfqg);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bfq_put_cooperator(struct bfq_queue *bfqq)
|
void bfq_put_cooperator(struct bfq_queue *bfqq)
|
||||||
{
|
{
|
||||||
struct bfq_queue *__bfqq, *next;
|
struct bfq_queue *__bfqq, *next;
|
||||||
|
|
||||||
@ -5145,14 +5158,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
|
|||||||
struct bfq_queue *bfqq;
|
struct bfq_queue *bfqq;
|
||||||
struct bfq_group *bfqg;
|
struct bfq_group *bfqg;
|
||||||
|
|
||||||
rcu_read_lock();
|
bfqg = bfq_bio_bfqg(bfqd, bio);
|
||||||
|
|
||||||
bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
|
|
||||||
if (!bfqg) {
|
|
||||||
bfqq = &bfqd->oom_bfqq;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!is_sync) {
|
if (!is_sync) {
|
||||||
async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
|
async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
|
||||||
ioprio);
|
ioprio);
|
||||||
@ -5196,7 +5202,6 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
|
|||||||
out:
|
out:
|
||||||
bfqq->ref++; /* get a process reference to this queue */
|
bfqq->ref++; /* get a process reference to this queue */
|
||||||
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
|
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
|
||||||
rcu_read_unlock();
|
|
||||||
return bfqq;
|
return bfqq;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5499,6 +5504,8 @@ static inline void bfq_update_insert_stats(struct request_queue *q,
|
|||||||
unsigned int cmd_flags) {}
|
unsigned int cmd_flags) {}
|
||||||
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
|
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
|
||||||
|
|
||||||
|
static struct bfq_queue *bfq_init_rq(struct request *rq);
|
||||||
|
|
||||||
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
|
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
|
||||||
bool at_head)
|
bool at_head)
|
||||||
{
|
{
|
||||||
@ -5509,17 +5516,14 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
|
|||||||
unsigned int cmd_flags;
|
unsigned int cmd_flags;
|
||||||
|
|
||||||
spin_lock_irq(&bfqd->lock);
|
spin_lock_irq(&bfqd->lock);
|
||||||
|
bfqq = bfq_init_rq(rq);
|
||||||
if (blk_mq_sched_try_insert_merge(q, rq)) {
|
if (blk_mq_sched_try_insert_merge(q, rq)) {
|
||||||
spin_unlock_irq(&bfqd->lock);
|
spin_unlock_irq(&bfqd->lock);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irq(&bfqd->lock);
|
|
||||||
|
|
||||||
blk_mq_sched_request_inserted(rq);
|
blk_mq_sched_request_inserted(rq);
|
||||||
|
|
||||||
spin_lock_irq(&bfqd->lock);
|
|
||||||
bfqq = bfq_init_rq(rq);
|
|
||||||
if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
|
if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
|
||||||
if (at_head)
|
if (at_head)
|
||||||
list_add(&rq->queuelist, &bfqd->dispatch);
|
list_add(&rq->queuelist, &bfqd->dispatch);
|
||||||
|
@ -896,6 +896,8 @@ struct bfq_group {
|
|||||||
|
|
||||||
/* reference counter (see comments in bfq_bic_update_cgroup) */
|
/* reference counter (see comments in bfq_bic_update_cgroup) */
|
||||||
int ref;
|
int ref;
|
||||||
|
/* Is bfq_group still online? */
|
||||||
|
bool online;
|
||||||
|
|
||||||
struct bfq_entity entity;
|
struct bfq_entity entity;
|
||||||
struct bfq_sched_data sched_data;
|
struct bfq_sched_data sched_data;
|
||||||
@ -949,6 +951,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
|
|||||||
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
||||||
bool compensate, enum bfqq_expiration reason);
|
bool compensate, enum bfqq_expiration reason);
|
||||||
void bfq_put_queue(struct bfq_queue *bfqq);
|
void bfq_put_queue(struct bfq_queue *bfqq);
|
||||||
|
void bfq_put_cooperator(struct bfq_queue *bfqq);
|
||||||
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
|
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
|
||||||
void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
|
void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
|
||||||
void bfq_schedule_dispatch(struct bfq_data *bfqd);
|
void bfq_schedule_dispatch(struct bfq_data *bfqd);
|
||||||
@ -975,8 +978,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
|||||||
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
|
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
|
||||||
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
|
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
|
||||||
void bfq_end_wr_async(struct bfq_data *bfqd);
|
void bfq_end_wr_async(struct bfq_data *bfqd);
|
||||||
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
|
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
|
||||||
struct blkcg *blkcg);
|
|
||||||
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
|
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
|
||||||
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
|
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
|
||||||
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
|
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
|
||||||
|
@ -2184,7 +2184,7 @@ void bio_clone_blkg_association(struct bio *dst, struct bio *src)
|
|||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
|
||||||
if (src->bi_blkg)
|
if (src->bi_blkg)
|
||||||
__bio_associate_blkg(dst, src->bi_blkg);
|
bio_associate_blkg_from_css(dst, &bio_blkcg(src)->css);
|
||||||
|
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
}
|
}
|
||||||
|
@ -86,7 +86,17 @@ struct iolatency_grp;
|
|||||||
struct blk_iolatency {
|
struct blk_iolatency {
|
||||||
struct rq_qos rqos;
|
struct rq_qos rqos;
|
||||||
struct timer_list timer;
|
struct timer_list timer;
|
||||||
atomic_t enabled;
|
|
||||||
|
/*
|
||||||
|
* ->enabled is the master enable switch gating the throttling logic and
|
||||||
|
* inflight tracking. The number of cgroups which have iolat enabled is
|
||||||
|
* tracked in ->enable_cnt, and ->enable is flipped on/off accordingly
|
||||||
|
* from ->enable_work with the request_queue frozen. For details, See
|
||||||
|
* blkiolatency_enable_work_fn().
|
||||||
|
*/
|
||||||
|
bool enabled;
|
||||||
|
atomic_t enable_cnt;
|
||||||
|
struct work_struct enable_work;
|
||||||
};
|
};
|
||||||
|
|
||||||
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
|
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
|
||||||
@ -94,11 +104,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
|
|||||||
return container_of(rqos, struct blk_iolatency, rqos);
|
return container_of(rqos, struct blk_iolatency, rqos);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
|
|
||||||
{
|
|
||||||
return atomic_read(&blkiolat->enabled) > 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct child_latency_info {
|
struct child_latency_info {
|
||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
|
|
||||||
@ -463,7 +468,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
|
|||||||
struct blkcg_gq *blkg = bio->bi_blkg;
|
struct blkcg_gq *blkg = bio->bi_blkg;
|
||||||
bool issue_as_root = bio_issue_as_root_blkg(bio);
|
bool issue_as_root = bio_issue_as_root_blkg(bio);
|
||||||
|
|
||||||
if (!blk_iolatency_enabled(blkiolat))
|
if (!blkiolat->enabled)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
while (blkg && blkg->parent) {
|
while (blkg && blkg->parent) {
|
||||||
@@ -593,7 +598,6 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
 	u64 window_start;
 	u64 now = ktime_to_ns(ktime_get());
 	bool issue_as_root = bio_issue_as_root_blkg(bio);
-	bool enabled = false;
 	int inflight = 0;
 
 	blkg = bio->bi_blkg;
@@ -604,8 +608,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
 	if (!iolat)
 		return;
 
-	enabled = blk_iolatency_enabled(iolat->blkiolat);
-	if (!enabled)
+	if (!iolat->blkiolat->enabled)
 		return;
 
 	while (blkg && blkg->parent) {
@@ -643,6 +646,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
 
 	del_timer_sync(&blkiolat->timer);
+	flush_work(&blkiolat->enable_work);
 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
 	kfree(blkiolat);
 }
@@ -714,6 +718,44 @@ static void blkiolatency_timer_fn(struct timer_list *t)
 	rcu_read_unlock();
 }
 
+/**
+ * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
+ * @work: enable_work of the blk_iolatency of interest
+ *
+ * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
+ * is relatively expensive as it involves walking up the hierarchy twice for
+ * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
+ * want to disable the in-flight tracking.
+ *
+ * We have to make sure that the counting is balanced - we don't want to leak
+ * the in-flight counts by disabling accounting in the completion path while IOs
+ * are in flight. This is achieved by ensuring that no IO is in flight by
+ * freezing the queue while flipping ->enabled. As this requires a sleepable
+ * context, ->enabled flipping is punted to this work function.
+ */
+static void blkiolatency_enable_work_fn(struct work_struct *work)
+{
+	struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
+						      enable_work);
+	bool enabled;
+
+	/*
+	 * There can only be one instance of this function running for @blkiolat
+	 * and it's guaranteed to be executed at least once after the latest
+	 * ->enabled_cnt modification. Acting on the latest ->enable_cnt is
+	 * sufficient.
+	 *
+	 * Also, we know @blkiolat is safe to access as ->enable_work is flushed
+	 * in blkcg_iolatency_exit().
+	 */
+	enabled = atomic_read(&blkiolat->enable_cnt);
+	if (enabled != blkiolat->enabled) {
+		blk_mq_freeze_queue(blkiolat->rqos.q);
+		blkiolat->enabled = enabled;
+		blk_mq_unfreeze_queue(blkiolat->rqos.q);
+	}
+}
+
 int blk_iolatency_init(struct request_queue *q)
 {
 	struct blk_iolatency *blkiolat;
@@ -739,17 +781,15 @@ int blk_iolatency_init(struct request_queue *q)
 	}
 
 	timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
+	INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
 
 	return 0;
 }
 
-/*
- * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
- * return 0.
- */
-static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 {
 	struct iolatency_grp *iolat = blkg_to_lat(blkg);
+	struct blk_iolatency *blkiolat = iolat->blkiolat;
 	u64 oldval = iolat->min_lat_nsec;
 
 	iolat->min_lat_nsec = val;
@@ -757,13 +797,15 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 	iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
 				    BLKIOLATENCY_MAX_WIN_SIZE);
 
-	if (!oldval && val)
-		return 1;
+	if (!oldval && val) {
+		if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
+			schedule_work(&blkiolat->enable_work);
+	}
 	if (oldval && !val) {
 		blkcg_clear_delay(blkg);
-		return -1;
+		if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
+			schedule_work(&blkiolat->enable_work);
 	}
-	return 0;
 }
 
 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
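The iolatency rework above is an instance of a reusable pattern: a shared atomic counter tracks how many users want a feature, and only the 0->1 and 1->0 edges schedule a single deferred worker, which flips a plain bool while the queue is frozen so no I/O ever observes a half-enabled state. A minimal user-space sketch of the edge-triggered part (hypothetical names; C11 atomics stand in for atomic_t, and a direct call stands in for schedule_work()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int enable_cnt;   /* how many users want the feature on */
static bool enabled;            /* master switch, flipped only by the worker */

/* Stands in for blkiolatency_enable_work_fn(): acts on the latest count. */
static void enable_work_fn(void)
{
	bool want = atomic_load(&enable_cnt) > 0;

	if (want != enabled) {
		/* the kernel freezes the queue here so no I/O is in flight */
		enabled = want;
		printf("master switch -> %s\n", enabled ? "on" : "off");
	}
}

static void user_enable(void)
{
	/* only the 0 -> 1 transition schedules the work */
	if (atomic_fetch_add(&enable_cnt, 1) == 0)
		enable_work_fn();
}

static void user_disable(void)
{
	/* only the 1 -> 0 transition schedules the work */
	if (atomic_fetch_sub(&enable_cnt, 1) == 1)
		enable_work_fn();
}

int main(void)
{
	user_enable();   /* switch flips on  */
	user_enable();   /* no-op, count 1 -> 2 */
	user_disable();  /* no-op, count 2 -> 1 */
	user_disable();  /* switch flips off */
	return 0;
}

The hot path then reads only the bool, which is why blkcg_iolatency_throttle() and blkcg_iolatency_done_bio() above test ->enabled directly instead of doing an atomic read per bio.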
@@ -795,7 +837,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 	u64 lat_val = 0;
 	u64 oldval;
 	int ret;
-	int enable = 0;
 
 	ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
 	if (ret)
@@ -830,41 +871,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
 	blkg = ctx.blkg;
 	oldval = iolat->min_lat_nsec;
 
-	enable = iolatency_set_min_lat_nsec(blkg, lat_val);
-	if (enable) {
-		if (!blk_get_queue(blkg->q)) {
-			ret = -ENODEV;
-			goto out;
-		}
-
-		blkg_get(blkg);
-	}
-
-	if (oldval != iolat->min_lat_nsec) {
+	iolatency_set_min_lat_nsec(blkg, lat_val);
+	if (oldval != iolat->min_lat_nsec)
 		iolatency_clear_scaling(blkg);
-	}
-
 	ret = 0;
 out:
 	blkg_conf_finish(&ctx);
-	if (ret == 0 && enable) {
-		struct iolatency_grp *tmp = blkg_to_lat(blkg);
-		struct blk_iolatency *blkiolat = tmp->blkiolat;
-
-		blk_mq_freeze_queue(blkg->q);
-
-		if (enable == 1)
-			atomic_inc(&blkiolat->enabled);
-		else if (enable == -1)
-			atomic_dec(&blkiolat->enabled);
-		else
-			WARN_ON_ONCE(1);
-
-		blk_mq_unfreeze_queue(blkg->q);
-
-		blkg_put(blkg);
-		blk_put_queue(blkg->q);
-	}
 	return ret ?: nbytes;
 }
 
@@ -1005,14 +1017,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
 {
 	struct iolatency_grp *iolat = pd_to_lat(pd);
 	struct blkcg_gq *blkg = lat_to_blkg(iolat);
-	struct blk_iolatency *blkiolat = iolat->blkiolat;
-	int ret;
 
-	ret = iolatency_set_min_lat_nsec(blkg, 0);
-	if (ret == 1)
-		atomic_inc(&blkiolat->enabled);
-	if (ret == -1)
-		atomic_dec(&blkiolat->enabled);
+	iolatency_set_min_lat_nsec(blkg, 0);
 	iolatency_clear_scaling(blkg);
 }
 
@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
 	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
 				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
 	struct cryptd_cpu_queue *cpu_queue;
 	refcount_t *refcnt;
 
-	cpu = get_cpu();
+	local_bh_disable();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
 
 	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
 	if (!refcount_read(refcnt))
-		goto out_put_cpu;
+		goto out;
 
 	refcount_inc(refcnt);
 
-out_put_cpu:
-	put_cpu();
+out:
+	local_bh_enable();
 
 	return err;
 }
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
 	local_bh_enable();
 
 	if (!req)
@@ -430,6 +430,16 @@ void acpi_init_properties(struct acpi_device *adev)
 		acpi_extract_apple_properties(adev);
 }
 
+static void acpi_free_device_properties(struct list_head *list)
+{
+	struct acpi_device_properties *props, *tmp;
+
+	list_for_each_entry_safe(props, tmp, list, list) {
+		list_del(&props->list);
+		kfree(props);
+	}
+}
+
 static void acpi_destroy_nondev_subnodes(struct list_head *list)
 {
 	struct acpi_data_node *dn, *next;
@@ -442,22 +452,18 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
 		wait_for_completion(&dn->kobj_done);
 		list_del(&dn->sibling);
 		ACPI_FREE((void *)dn->data.pointer);
+		acpi_free_device_properties(&dn->data.properties);
 		kfree(dn);
 	}
 }
 
 void acpi_free_properties(struct acpi_device *adev)
 {
-	struct acpi_device_properties *props, *tmp;
-
 	acpi_destroy_nondev_subnodes(&adev->data.subnodes);
 	ACPI_FREE((void *)adev->data.pointer);
 	adev->data.of_compatible = NULL;
 	adev->data.pointer = NULL;
-	list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
-		list_del(&props->list);
-		kfree(props);
-	}
+	acpi_free_device_properties(&adev->data.properties);
 }
 
 /**
@@ -374,6 +374,18 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
 		},
 	},
+	/*
+	 * ASUS B1400CEAE hangs on resume from suspend (see
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
+	 */
+	{
+		.callback = init_default_s3,
+		.ident = "ASUS B1400CEAE",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
+		},
+	},
 	{},
 };
 
@@ -196,7 +196,7 @@ static struct {
 	{ XFER_PIO_0,			"XFER_PIO_0" },
 	{ XFER_PIO_SLOW,		"XFER_PIO_SLOW" }
 };
-ata_bitfield_name_match(xfer,ata_xfer_names)
+ata_bitfield_name_search(xfer, ata_xfer_names)
 
 /*
  * ATA Port attributes
@@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
 			int i;
 			res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
 			if (!res_dma) {
+				put_device(&dma_dev->dev);
 				of_node_put(dma_node);
 				return -EINVAL;
 			}
 			cf_port->dma_base = (u64)devm_ioremap_nocache(&pdev->dev, res_dma->start,
							 resource_size(res_dma));
 			if (!cf_port->dma_base) {
+				put_device(&dma_dev->dev);
 				of_node_put(dma_node);
 				return -EINVAL;
 			}
@@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
 				irq = i;
 				irq_handler = octeon_cf_interrupt;
 			}
+			put_device(&dma_dev->dev);
 		}
 		of_node_put(dma_node);
 	}
@@ -620,7 +620,7 @@ int bus_add_driver(struct device_driver *drv)
 	if (drv->bus->p->drivers_autoprobe) {
 		error = driver_attach(drv);
 		if (error)
-			goto out_unregister;
+			goto out_del_list;
 	}
 	module_add_driver(drv->owner, drv);
 
@@ -647,6 +647,8 @@ int bus_add_driver(struct device_driver *drv)
 
 	return 0;
 
+out_del_list:
+	klist_del(&priv->knode_bus);
 out_unregister:
 	kobject_put(&priv->kobj);
 	/* drv->p is freed in driver_release() */
@@ -894,6 +894,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
 static int __device_attach(struct device *dev, bool allow_async)
 {
 	int ret = 0;
+	bool async = false;
 
 	device_lock(dev);
 	if (dev->p->dead) {
@@ -932,7 +933,7 @@ static int __device_attach(struct device *dev, bool allow_async)
 			 */
 			dev_dbg(dev, "scheduling asynchronous probe\n");
 			get_device(dev);
-			async_schedule_dev(__device_attach_async_helper, dev);
+			async = true;
 		} else {
 			pm_request_idle(dev);
 		}
@@ -942,6 +943,8 @@ static int __device_attach(struct device *dev, bool allow_async)
 	}
 out_unlock:
 	device_unlock(dev);
+	if (async)
+		async_schedule_dev(__device_attach_async_helper, dev);
 	return ret;
 }
 
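The __device_attach() change above is a deadlock avoidance fix: the async machinery may end up running its callback synchronously, and __device_attach_async_helper() takes the device lock itself, so scheduling it while still holding that lock could deadlock. The fix records the decision under the lock and acts only after dropping it. A condensed pthread sketch of the shape of the fix (names invented; a direct call stands in for the async scheduling):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for __device_attach_async_helper(), which re-takes the lock. */
static void async_probe(void)
{
	pthread_mutex_lock(&dev_lock);
	printf("async probe ran after the caller released the lock\n");
	pthread_mutex_unlock(&dev_lock);
}

static void device_attach(bool want_async)
{
	bool async = false;

	pthread_mutex_lock(&dev_lock);
	/* ... probing decisions happen here ... */
	if (want_async)
		async = true;	/* only record the decision under the lock */
	pthread_mutex_unlock(&dev_lock);

	/* Calling this above, inside the critical section, would self-deadlock
	 * whenever the callback runs synchronously. */
	if (async)
		async_probe();
}

int main(void)
{
	device_attach(true);
	return 0;
}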
@@ -647,6 +647,7 @@ static int register_node(struct node *node, int num)
  */
 void unregister_node(struct node *node)
 {
+	compaction_unregister_node(node);
 	hugetlb_unregister_node(node);	/* no-op, if memoryless node */
 	node_remove_accesses(node);
 	node_remove_caches(node);
@@ -3709,9 +3709,8 @@ const char *cmdname(enum drbd_packet cmd)
 	 * when we want to support more than
 	 * one PRO_VERSION */
 	static const char *cmdnames[] = {
+
 		[P_DATA]	        = "Data",
-		[P_WSAME]	        = "WriteSame",
-		[P_TRIM]	        = "Trim",
 		[P_DATA_REPLY]	        = "DataReply",
 		[P_RS_DATA_REPLY]	= "RSDataReply",
 		[P_BARRIER]	        = "Barrier",
@@ -3722,7 +3721,6 @@ const char *cmdname(enum drbd_packet cmd)
 		[P_DATA_REQUEST]	= "DataRequest",
 		[P_RS_DATA_REQUEST]     = "RSDataRequest",
 		[P_SYNC_PARAM]	        = "SyncParam",
-		[P_SYNC_PARAM89]	= "SyncParam89",
 		[P_PROTOCOL]            = "ReportProtocol",
 		[P_UUIDS]	        = "ReportUUIDs",
 		[P_SIZES]	        = "ReportSizes",
@@ -3730,6 +3728,7 @@ const char *cmdname(enum drbd_packet cmd)
 		[P_SYNC_UUID]           = "ReportSyncUUID",
 		[P_AUTH_CHALLENGE]      = "AuthChallenge",
 		[P_AUTH_RESPONSE]	= "AuthResponse",
+		[P_STATE_CHG_REQ]       = "StateChgRequest",
 		[P_PING]		= "Ping",
 		[P_PING_ACK]	        = "PingAck",
 		[P_RECV_ACK]	        = "RecvAck",
@@ -3740,24 +3739,26 @@ const char *cmdname(enum drbd_packet cmd)
 		[P_NEG_DREPLY]	        = "NegDReply",
 		[P_NEG_RS_DREPLY]	= "NegRSDReply",
 		[P_BARRIER_ACK]	        = "BarrierAck",
-		[P_STATE_CHG_REQ]       = "StateChgRequest",
 		[P_STATE_CHG_REPLY]     = "StateChgReply",
 		[P_OV_REQUEST]          = "OVRequest",
 		[P_OV_REPLY]            = "OVReply",
 		[P_OV_RESULT]           = "OVResult",
 		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
 		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
+		[P_SYNC_PARAM89]	= "SyncParam89",
 		[P_COMPRESSED_BITMAP]   = "CBitmap",
 		[P_DELAY_PROBE]         = "DelayProbe",
 		[P_OUT_OF_SYNC]		= "OutOfSync",
-		[P_RETRY_WRITE]		= "RetryWrite",
 		[P_RS_CANCEL]		= "RSCancel",
 		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
 		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
 		[P_RETRY_WRITE]		= "retry_write",
 		[P_PROTOCOL_UPDATE]	= "protocol_update",
+		[P_TRIM]	        = "Trim",
 		[P_RS_THIN_REQ]         = "rs_thin_req",
 		[P_RS_DEALLOCATED]      = "rs_deallocated",
+		[P_WSAME]	        = "WriteSame",
+		[P_ZEROES]	        = "Zeroes",
 
 	/* enum drbd_packet, but not commands - obsoleted flags:
 	 *	P_MAY_IGNORE
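Because cmdnames[] uses designated initializers, the drbd hunks above are safe to read as pure reordering plus additions: the bracketed enum index, not the position in the initializer list, picks the slot, so moving [P_TRIM] and [P_WSAME] changes nothing at runtime while [P_ZEROES] fills a previously empty slot. A tiny stand-alone reminder of the semantics, using an invented three-command enum:

#include <stdio.h>

enum pkt { P_DATA, P_PING, P_TRIM, P_MAX };

static const char *names[] = {
	[P_TRIM] = "Trim",	/* order in the initializer is irrelevant */
	[P_DATA] = "Data",
	[P_PING] = "Ping",
};

int main(void)
{
	for (int i = 0; i < P_MAX; i++)
		printf("%d -> %s\n", i, names[i]);	/* Data, Ping, Trim */
	return 0;
}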
@@ -865,11 +865,15 @@ static int wait_for_reconnect(struct nbd_device *nbd)
 	struct nbd_config *config = nbd->config;
 	if (!config->dead_conn_timeout)
 		return 0;
-	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
+
+	if (!wait_event_timeout(config->conn_wait,
+				test_bit(NBD_RT_DISCONNECTED,
+					 &config->runtime_flags) ||
+				atomic_read(&config->live_connections) > 0,
+				config->dead_conn_timeout))
 		return 0;
-	return wait_event_timeout(config->conn_wait,
-				  atomic_read(&config->live_connections) > 0,
-				  config->dead_conn_timeout) > 0;
+
+	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
 }
 
 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
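Before this change, wait_for_reconnect() sampled the DISCONNECTED flag once and then slept waiting only for a live connection, so a disconnect arriving mid-wait left the waiter asleep until the full dead_conn_timeout expired. Folding both conditions into a single wait (paired with the wake_up() added to nbd_disconnect_and_put() further down) closes that window. A rough pthread analog of the combined wait, as a sketch with made-up names (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t conn_wait = PTHREAD_COND_INITIALIZER;
static int live_connections;
static bool disconnected;

/* Wait until a connection comes back OR a disconnect is flagged. */
static bool wait_for_reconnect(unsigned int timeout_sec)
{
	struct timespec deadline;
	bool ok;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!disconnected && live_connections == 0) {
		if (pthread_cond_timedwait(&conn_wait, &lock, &deadline)) {
			pthread_mutex_unlock(&lock);
			return false;		/* timed out: give up */
		}
	}
	ok = !disconnected;			/* woken by a real event */
	pthread_mutex_unlock(&lock);
	return ok;
}

static void *disconnector(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	disconnected = true;			/* like nbd_disconnect_and_put() */
	pthread_cond_broadcast(&conn_wait);	/* ...with its new wake_up() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, disconnector, NULL);
	printf("reconnected: %d\n", wait_for_reconnect(5));	/* 0: disconnected */
	pthread_join(t, NULL);
	return 0;
}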
@@ -1340,7 +1344,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 				 struct block_device *bdev)
 {
-	sock_shutdown(nbd);
+	nbd_clear_sock(nbd);
 	__invalidate_device(bdev, true);
 	nbd_bdev_reset(bdev);
 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1453,15 +1457,20 @@ static struct nbd_config *nbd_alloc_config(void)
 {
 	struct nbd_config *config;
 
+	if (!try_module_get(THIS_MODULE))
+		return ERR_PTR(-ENODEV);
+
 	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
-	if (!config)
-		return NULL;
+	if (!config) {
+		module_put(THIS_MODULE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
 	config->blksize = NBD_DEF_BLKSIZE;
 	atomic_set(&config->live_connections, 0);
-	try_module_get(THIS_MODULE);
 	return config;
 }
 
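nbd_alloc_config() now reports why it failed through the returned pointer itself, using the kernel's ERR_PTR encoding: small negative errno values live at the very top of the address space, where no valid allocation can be, so a single return value carries either a pointer or an error code. A user-space sketch of the encoding and the calling convention (simplified; the real helpers live in include/linux/err.h):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Shape of the fixed nbd_alloc_config(): every failure returns ERR_PTR. */
static void *alloc_config(int fail_module_get)
{
	if (fail_module_get)
		return ERR_PTR(-ENODEV);

	void *config = calloc(1, 64);
	if (!config)
		return ERR_PTR(-ENOMEM);
	return config;
}

int main(void)
{
	void *cfg = alloc_config(1);

	if (IS_ERR(cfg)) {
		printf("alloc failed: %ld\n", PTR_ERR(cfg));	/* -19 */
		return 1;
	}
	free(cfg);
	return 0;
}

This is why the two callers below switch from `if (!config)` to IS_ERR()/PTR_ERR().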
@@ -1488,12 +1497,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
 		mutex_unlock(&nbd->config_lock);
 		goto out;
 	}
-	config = nbd->config = nbd_alloc_config();
-	if (!config) {
-		ret = -ENOMEM;
+	config = nbd_alloc_config();
+	if (IS_ERR(config)) {
+		ret = PTR_ERR(config);
 		mutex_unlock(&nbd->config_lock);
 		goto out;
 	}
+	nbd->config = config;
 	refcount_set(&nbd->config_refs, 1);
 	refcount_inc(&nbd->refs);
 	mutex_unlock(&nbd->config_lock);
@@ -1915,13 +1925,14 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 		nbd_put(nbd);
 		return -EINVAL;
 	}
-	config = nbd->config = nbd_alloc_config();
-	if (!nbd->config) {
+	config = nbd_alloc_config();
+	if (IS_ERR(config)) {
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
 		printk(KERN_ERR "nbd: couldn't allocate config\n");
-		return -ENOMEM;
+		return PTR_ERR(config);
 	}
+	nbd->config = config;
 	refcount_set(&nbd->config_refs, 1);
 	set_bit(NBD_RT_BOUND, &config->runtime_flags);
 
@@ -2014,6 +2025,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 	mutex_lock(&nbd->config_lock);
 	nbd_disconnect(nbd);
 	sock_shutdown(nbd);
+	wake_up(&nbd->config->conn_wait);
 	/*
 	 * Make sure recv thread has finished, so it does not drop the last
 	 * config ref and try to destroy the workqueue from inside the work
@@ -2441,6 +2453,12 @@ static void __exit nbd_cleanup(void)
 	struct nbd_device *nbd;
 	LIST_HEAD(del_list);
 
+	/*
+	 * Unregister netlink interface prior to waiting
+	 * for the completion of netlink commands.
+	 */
+	genl_unregister_family(&nbd_genl_family);
+
 	nbd_dbg_close();
 
 	mutex_lock(&nbd_index_mutex);
@@ -2450,13 +2468,15 @@ static void __exit nbd_cleanup(void)
 	while (!list_empty(&del_list)) {
 		nbd = list_first_entry(&del_list, struct nbd_device, list);
 		list_del_init(&nbd->list);
+		if (refcount_read(&nbd->config_refs))
+			printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
+					refcount_read(&nbd->config_refs));
 		if (refcount_read(&nbd->refs) != 1)
 			printk(KERN_ERR "nbd: possibly leaking a device\n");
 		nbd_put(nbd);
 	}
 
 	idr_destroy(&nbd_index_idr);
-	genl_unregister_family(&nbd_genl_family);
 	unregister_blkdev(NBD_MAJOR, "nbd");
 }
 
@@ -978,11 +978,12 @@ static int virtblk_probe(struct virtio_device *vdev)
 		blk_queue_io_opt(q, blk_size * opt_io_size);
 
 	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
-		q->limits.discard_granularity = blk_size;
-
 		virtio_cread(vdev, struct virtio_blk_config,
 			     discard_sector_alignment, &v);
-		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;
+		if (v)
+			q->limits.discard_granularity = v << SECTOR_SHIFT;
+		else
+			q->limits.discard_granularity = blk_size;
 
 		virtio_cread(vdev, struct virtio_blk_config,
 			     max_discard_sectors, &v);
@@ -2724,7 +2724,9 @@ static int sysc_remove(struct platform_device *pdev)
 	struct sysc *ddata = platform_get_drvdata(pdev);
 	int error;
 
-	cancel_delayed_work_sync(&ddata->idle_work);
+	/* Device can still be enabled, see deferred idle quirk in probe */
+	if (cancel_delayed_work_sync(&ddata->idle_work))
+		ti_sysc_idle(&ddata->idle_work.work);
 
 	error = pm_runtime_get_sync(ddata->dev);
 	if (error < 0) {
@@ -11,8 +11,8 @@
  * Copyright 2002 MontaVista Software Inc.
  */
 
-#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
-#define dev_fmt pr_fmt
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
 
 #include <linux/module.h>
 #include <linux/errno.h>
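The pr_fmt change above matters because the old definition expanded to a format string plus a trailing argument, so it only worked where the expansion was passed straight to a printf-style function; anything that needs pr_fmt(fmt) to be a single string literal (such as the dev_fmt wrapper on the next line) breaks. A small stand-alone illustration, assuming plain printf in place of printk:

#include <stdio.h>

/* Fragile form: expands to TWO things, a format plus an extra argument. */
#define pr_fmt_old(fmt) "%s" fmt, "IPMI message handler: "

/* Fixed form: expands to a single string literal via concatenation. */
#define pr_fmt_new(fmt) "IPMI message handler: " fmt

int main(void)
{
	/* Happens to work only because printf consumes the smuggled arg: */
	printf(pr_fmt_old("state = %d\n"), 3);

	/* A single literal composes with any wrapper macro that expects
	 * pr_fmt(fmt) to be just a string, which the old form cannot do: */
	printf(pr_fmt_new("state = %d\n"), 3);
	return 0;
}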
@@ -845,6 +845,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		break;
 
 	case SSIF_GETTING_EVENTS:
+		if (!msg) {
+			/* Should never happen, but just in case. */
+			dev_warn(&ssif_info->client->dev,
+				 "No message set while getting events\n");
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			break;
+		}
+
 		if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
 			/* Error getting event, probably done. */
 			msg->done(msg);
@@ -869,6 +877,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		break;
 
 	case SSIF_GETTING_MESSAGES:
+		if (!msg) {
+			/* Should never happen, but just in case. */
+			dev_warn(&ssif_info->client->dev,
+				 "No message set while getting messages\n");
+			ipmi_ssif_unlock_cond(ssif_info, flags);
+			break;
+		}
+
 		if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
 			/* Error getting event, probably done. */
 			msg->done(msg);
@@ -892,6 +908,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			deliver_recv_msg(ssif_info, msg);
 		}
 		break;
+
+	default:
+		/* Should never happen, but just in case. */
+		dev_warn(&ssif_info->client->dev,
+			 "Invalid state in message done handling: %d\n",
+			 ssif_info->ssif_state);
+		ipmi_ssif_unlock_cond(ssif_info, flags);
 	}
 
 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
@@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
 	}
 
 	rps->irq = irq_of_parse_and_map(np, 0);
-	if (rps->irq < 0) {
+	if (!rps->irq) {
 		ret = -EINVAL;
 		goto err_iomap;
 	}
@@ -26,7 +26,7 @@ static int riscv_clock_next_event(unsigned long delta,
 
 static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
 	.name			= "riscv_timer_clockevent",
-	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
 	.rating			= 100,
 	.set_next_event		= riscv_clock_next_event,
 };
@@ -215,6 +215,11 @@ static int __init sp804_of_init(struct device_node *np)
 	struct clk *clk1, *clk2;
 	const char *name = of_get_property(np, "compatible", NULL);
 
+	if (initialized) {
+		pr_debug("%pOF: skipping further SP804 timer device\n", np);
+		return 0;
+	}
+
 	base = of_iomap(np, 0);
 	if (!base)
 		return -ENXIO;
@@ -223,11 +228,6 @@ static int __init sp804_of_init(struct device_node *np)
 	writel(0, base + TIMER_CTRL);
 	writel(0, base + TIMER_2_BASE + TIMER_CTRL);
 
-	if (initialized || !of_device_is_available(np)) {
-		ret = -EINVAL;
-		goto err;
-	}
-
 	clk1 = of_clk_get(np, 0);
 	if (IS_ERR(clk1))
 		clk1 = NULL;
@@ -610,7 +610,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
 	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
 	.min_keysize = DES3_EDE_KEY_SIZE,
 	.max_keysize = DES3_EDE_KEY_SIZE,
-	.ivsize = DES3_EDE_BLOCK_SIZE,
 	.base = {
 		.cra_name = "ecb(des3_ede)",
 		.cra_driver_name = "mv-ecb-des3-ede",
@@ -485,6 +485,8 @@ static int rk3399_dmcfreq_remove(struct platform_device *pdev)
 {
 	struct rk3399_dmcfreq *dmcfreq = dev_get_drvdata(&pdev->dev);
 
+	devfreq_event_disable_edev(dmcfreq->edev);
+
 	/*
 	 * Before remove the opp table we need to unregister the opp notifier.
 	 */
@@ -40,7 +40,6 @@
 					 STM32_MDMA_SHIFT(mask))
 
 #define STM32_MDMA_GISR0		0x0000 /* MDMA Int Status Reg 1 */
-#define STM32_MDMA_GISR1		0x0004 /* MDMA Int Status Reg 2 */
 
 /* MDMA Channel x interrupt/status register */
 #define STM32_MDMA_CISR(x)		(0x40 + 0x40 * (x)) /* x = 0..62 */
@@ -196,7 +195,7 @@
 
 #define STM32_MDMA_MAX_BUF_LEN		128
 #define STM32_MDMA_MAX_BLOCK_LEN	65536
-#define STM32_MDMA_MAX_CHANNELS		63
+#define STM32_MDMA_MAX_CHANNELS		32
 #define STM32_MDMA_MAX_REQUESTS		256
 #define STM32_MDMA_MAX_BURST		128
 #define STM32_MDMA_VERY_HIGH_PRIORITY	0x11
@@ -1351,21 +1350,11 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 
 	/* Find out which channel generates the interrupt */
 	status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
-	if (status) {
-		id = __ffs(status);
-	} else {
-		status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
-		if (!status) {
-			dev_dbg(mdma2dev(dmadev), "spurious it\n");
-			return IRQ_NONE;
-		}
-		id = __ffs(status);
-		/*
-		 * As GISR0 provides status for channel id from 0 to 31,
-		 * so GISR1 provides status for channel id from 32 to 62
-		 */
-		id += 32;
+	if (!status) {
+		dev_dbg(mdma2dev(dmadev), "spurious it\n");
+		return IRQ_NONE;
 	}
+	id = __ffs(status);
 
 	chan = &dmadev->chan[id];
 	if (!chan) {
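With GISR1 gone, the stm32-mdma interrupt handler reduces to "read one status word, take the lowest set bit as the pending channel id". The kernel's __ffs() is the same operation as the compilers' __builtin_ctz() on a non-zero word; a sketch of the idiom (the loop is illustrative only - the driver services one channel per interrupt):

#include <stdio.h>

int main(void)
{
	unsigned int status = 0x00000a10;	/* channels 4, 9 and 11 pending */

	while (status) {
		int id = __builtin_ctz(status);	/* kernel: id = __ffs(status) */
		printf("servicing channel %d\n", id);
		status &= status - 1;		/* clear the bit we handled */
	}
	return 0;
}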
@@ -232,7 +232,7 @@ struct zynqmp_dma_chan {
 	bool is_dmacoherent;
 	struct tasklet_struct tasklet;
 	bool idle;
-	u32 desc_size;
+	size_t desc_size;
 	bool err;
 	u32 bus_width;
 	u32 src_burst_len;
@@ -489,7 +489,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
 	}
 
 	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
-					       (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+					       (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
+					       ZYNQMP_DMA_NUM_DESCS),
					       &chan->desc_pool_p, GFP_KERNEL);
 	if (!chan->desc_pool_v)
 		return -ENOMEM;
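The zynqmp_dma change is subtler than it looks: with desc_size declared u32, the multiplication inside dma_alloc_coherent()'s size argument is performed in 32 bits and can wrap before being widened to size_t. Declaring the field size_t (or casting one operand) makes the whole product 64-bit on 64-bit kernels. A stand-alone demonstration with synthetic sizes chosen to force the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t desc_size = 0x01000000;	/* 16 MiB per descriptor (made up) */
	uint32_t num_descs = 0x100;		/* 256 descriptors (made up) */

	size_t bad  = 2 * desc_size * num_descs;		/* wraps in u32 */
	size_t good = 2 * (size_t)desc_size * num_descs;	/* full product */

	printf("u32 math:    %zu bytes\n", bad);	/* 0 */
	printf("size_t math: %zu bytes\n", good);	/* 8589934592 */
	return 0;
}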
@@ -1230,19 +1230,14 @@ int extcon_dev_register(struct extcon_dev *edev)
 		edev->dev.type = &edev->extcon_dev_type;
 	}
 
-	ret = device_register(&edev->dev);
-	if (ret) {
-		put_device(&edev->dev);
-		goto err_dev;
-	}
-
 	spin_lock_init(&edev->lock);
-	edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
-				sizeof(*edev->nh), GFP_KERNEL);
-	if (!edev->nh) {
-		ret = -ENOMEM;
-		device_unregister(&edev->dev);
-		goto err_dev;
+	if (edev->max_supported) {
+		edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
+				   GFP_KERNEL);
+		if (!edev->nh) {
+			ret = -ENOMEM;
+			goto err_alloc_nh;
+		}
 	}
 
 	for (index = 0; index < edev->max_supported; index++)
@@ -1253,6 +1248,12 @@ int extcon_dev_register(struct extcon_dev *edev)
 	dev_set_drvdata(&edev->dev, edev);
 	edev->state = 0;
 
+	ret = device_register(&edev->dev);
+	if (ret) {
+		put_device(&edev->dev);
+		goto err_dev;
+	}
+
 	mutex_lock(&extcon_dev_list_lock);
 	list_add(&edev->entry, &extcon_dev_list);
 	mutex_unlock(&extcon_dev_list_lock);
@@ -1260,6 +1261,9 @@ int extcon_dev_register(struct extcon_dev *edev)
 	return 0;
 
 err_dev:
+	if (edev->max_supported)
+		kfree(edev->nh);
+err_alloc_nh:
 	if (edev->max_supported)
 		kfree(edev->extcon_dev_type.groups);
 err_alloc_groups:
@@ -1320,6 +1324,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
 	if (edev->max_supported) {
 		kfree(edev->extcon_dev_type.groups);
 		kfree(edev->cables);
+		kfree(edev->nh);
 	}
 
 	put_device(&edev->dev);
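The extcon fix does two things: device_register() moves after the notifier-array allocation, so a registered device never exists without its notifiers, and the error path gains a second label so each failure frees exactly what was allocated before it, newest first. The goto-ladder idiom in isolation, with hypothetical allocations standing in for the real ones:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Each step gets a label; a failure jumps to the label that frees
 * everything allocated so far (on success the caller keeps both). */
static int setup(bool fail_register)
{
	int ret = 0;
	char *groups, *nh;

	groups = malloc(32);
	if (!groups)
		return -1;

	nh = malloc(32);
	if (!nh) {
		ret = -1;
		goto err_alloc_nh;
	}

	if (fail_register) {		/* stands in for device_register() */
		ret = -1;
		goto err_dev;
	}

	return 0;

err_dev:
	free(nh);	/* undo the newest allocation first... */
err_alloc_nh:
	free(groups);	/* ...then fall through to the older ones */
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup(true));
	return 0;
}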
@@ -164,7 +164,7 @@ static int scmi_base_implementation_list_get(const struct scmi_handle *handle,
 			break;
 
 		loop_num_ret = le32_to_cpu(*num_ret);
-		if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
+		if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret) {
 			dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
 			break;
 		}
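The SCMI hunk is the classic overflow-safe bounds check: with tot_num_ret already known to be at most MAX_PROTOCOLS_IMP, rewriting `tot + loop > MAX` as `loop > MAX - tot` keeps every intermediate value in range even when the firmware-supplied count is absurd. A small demonstration with made-up numbers:

#include <stdio.h>

#define MAX_PROTOCOLS_IMP 16

int main(void)
{
	unsigned int tot_num_ret = 8;
	unsigned int loop_num_ret = 0xFFFFFFF8u;	/* bogus firmware reply */

	if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP)	/* sum wraps to 0 */
		puts("old check: rejected");
	else
		puts("old check: accepted bogus count");	/* this prints */

	if (loop_num_ret > MAX_PROTOCOLS_IMP - tot_num_ret)	/* no wrap possible */
		puts("new check: rejected");			/* this prints */
	return 0;
}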
@@ -603,7 +603,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
 			       "%d-%d", dh->type, entry->instance);
 
 	if (*ret) {
-		kfree(entry);
+		kobject_put(&entry->kobj);
 		return;
 	}
 
@@ -934,17 +934,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
 void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
 {
 	struct stratix10_svc_data_mem *pmem;
-	size_t size = 0;
 
 	list_for_each_entry(pmem, &svc_data_mem, node)
 		if (pmem->vaddr == kaddr) {
-			size = pmem->size;
-			break;
+			gen_pool_free(chan->ctrl->genpool,
+				      (unsigned long)kaddr, pmem->size);
+			pmem->vaddr = NULL;
+			list_del(&pmem->node);
+			return;
 		}
 
-	gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
-	pmem->vaddr = NULL;
-	list_del(&pmem->node);
+	list_del(&svc_data_mem);
 }
 EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
 
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user