Merge android-5.4-stable.32 (724ffa0) into msm-5.4

* refs/heads/tmp-724ffa0:
  Linux 5.4.32
  iommu/vt-d: Allow devices with RMRRs to use identity domain
  drm/i915: Fix ref->mutex deadlock in i915_active_wait()
  fbcon: fix null-ptr-deref in fbcon_switch
  blk-mq: Keep set->nr_hw_queues and set->map[].nr_queues in sync
  RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
  Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
  RDMA/siw: Fix passive connection establishment
  RDMA/cma: Teach lockdep about the order of rtnl and lock
  RDMA/ucma: Put a lock around every call to the rdma_cm layer
  include/uapi/linux/swab.h: fix userspace breakage, use __BITS_PER_LONG for swap
  ceph: canonicalize server path in place
  ceph: remove the extra slashes in the server path
  ARM: imx: only select ARM_ERRATA_814220 for ARMv7-A
  ARM: imx: Enable ARM_ERRATA_814220 for i.MX6UL and i.MX7D
  IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
  IB/hfi1: Fix memory leaks in sysfs registration and unregistration
  IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
  ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
  platform/x86: intel_int0002_vgpio: Use acpi_register_wakeup_handler()
  ACPI: PM: Add acpi_[un]register_wakeup_handler()
  hwrng: imx-rngc - fix an error path
  tools/accounting/getdelays.c: fix netlink attribute length
  slub: improve bit diffusion for freelist ptr obfuscation
  uapi: rename ext2_swab() to swab() and share globally in swab.h
  usb: dwc3: gadget: Wrap around when skip TRBs
  random: always use batched entropy for get_random_u{32,64}
  s390: prevent leaking kernel address in BEAR
  r8169: change back SG and TSO to be disabled by default
  mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
  tun: Don't put_page() for all negative return values from XDP program
  slcan: Don't transmit uninitialized stack data in padding
  net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
  net_sched: fix a missing refcnt in tcindex_init()
  net_sched: add a temporary refcnt for struct tcindex_data
  net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
  net: dsa: mt7530: fix null pointer dereferencing in port5 setup
  net: dsa: bcm_sf2: Ensure correct sub-node is parsed
  net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
  ipv6: don't auto-add link-local address to lag ports
  cxgb4: fix MPS index overwrite when setting MAC address
  net: phy: realtek: fix handling of RTL8105e-integrated PHY
  ANDROID: ABI/Whitelist: add display whitelist to unisoc whitelist
  ANDROID: Kconfig.gki Add SND_SOC_TOPOLOGY
  ANDROID: GKI: enable CONFIG_ARM_PSCI_CPUIDLE
  ANDROID: power: wakeup_reason: wake reason enhancements
  UPSTREAM: cpufreq: Make cpufreq_global_kobject static
  ANDROID: staging: ion: implement vmap for built-in heaps
  ANDROID: KABI: Update kABI after dropping CONFIG_PCIEPORTBUS from gki_defconfig
  ANDROID: gki_defconfig: Drop CONFIG_PCIEPORTBUS
  FROMLIST: power_supply: Add additional health properties to the header
  UPSTREAM: power: supply: core: Update sysfs-class-power ABI document
  ANDROID: GKI: update ABI for mmc changes
  UPSTREAM: mmc: sdhci-sprd: Set the missing MMC_CAP_WAIT_WHILE_BUSY flag
  UPSTREAM: mmc: host: hsq: Add missing MODULE_LICENSE() and MODULE_DESCRIPTION()
  UPSTREAM: mmc: host: sdhci-sprd: Add software queue support
  UPSTREAM: mmc: host: sdhci: Add a variable to defer to complete requests if needed
  UPSTREAM: mmc: host: sdhci: Add request_done ops for struct sdhci_ops
  UPSTREAM: mmc: core: Enable the MMC host software queue for the SD card
  UPSTREAM: mmc: Add MMC host software queue support
  FROMGIT: kbuild: mkcompile_h: Include $LD version in /proc/version
  ANDROID: ignore compiler tag __must_check for GENKSYMS
  UPSTREAM: mfd: sc27xx: Add USB charger type detection support
  UPSTREAM: Revert "gpio: eic-sprd: Use devm_platform_ioremap_resource()"
  UPSTREAM: pinctrl: sprd: Add pin high impedance mode support
  UPSTREAM: pinctrl: sprd: Use the correct pin output configuration
  UPSTREAM: nvmem: sprd: Determine double data programming from device data
  UPSTREAM: nvmem: sprd: Optimize the block lock operation
  UPSTREAM: nvmem: sprd: Fix the block lock operation
  UPSTREAM: dmaengine: sprd: Set request pending flag when DMA controller is active
  Linux 5.4.31
  mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
  padata: always acquire cpu_hotplug_lock before pinst->lock
  net: Fix Tx hash bound checking
  i2c: i801: Do not add ICH_RES_IO_SMI for the iTCO_wdt device
  watchdog: iTCO_wdt: Make ICH_RES_IO_SMI optional
  watchdog: iTCO_wdt: Export vendorsupport
  tcp: fix TFO SYNACK undo to avoid double-timestamp-undo
  IB/hfi1: Ensure pq is not left on waitlist
  rxrpc: Fix sendmsg(MSG_WAITALL) handling
  iwlwifi: dbg: don't abort if sending DBGC_SUSPEND_RESUME fails
  iwlwifi: yoyo: don't add TLV offset when reading FIFOs
  iwlwifi: consider HE capability when setting LDPC
  net/mlx5e: kTLS, Fix wrong value in record tracker enum
  soc: mediatek: knows_txdone needs to be set in Mediatek CMDQ helper
  ALSA: hda/ca0132 - Add Recon3Di quirk to handle integrated sound on EVGA X99 Classified motherboard
  Revert "dm: always call blk_queue_split() in dm_process_bio()"
  power: supply: axp288_charger: Add special handling for HP Pavilion x2 10
  extcon: axp288: Add wakeup support
  nvmem: check for NULL reg_read and reg_write before dereferencing
  mei: me: add cedar fork device ids
  coresight: do not use the BIT() macro in the UAPI header
  PCI: sysfs: Revert "rescan" file renames
  misc: pci_endpoint_test: Avoid using module parameter to determine irqtype
  misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
  misc: rtsx: set correct pcr_ops for rts522A
  brcmfmac: abort and release host after error
  padata: fix uninitialized return value in padata_replace()
  XArray: Fix xa_find_next for large multi-index entries
  net/mlx5e: kTLS, Fix TCP seq off-by-1 issue in TX resync flow
  tools/power turbostat: Fix 32-bit capabilities warning
  tools/power turbostat: Fix missing SYS_LPI counter on some Chromebooks
  tools/power turbostat: Fix gcc build warnings
  drm/amdgpu: fix typo for vcn1 idle check
  initramfs: restore default compression behavior
  drm/bochs: downgrade pci_request_region failure from error to warning
  drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
  kconfig: introduce m32-flag and m64-flag
  nvme-rdma: Avoid double freeing of async event data
  ANDROID: GKI: update ABI for SCHED_AUTOGROUP removal
  ANDROID: GKI: Remove SCHED_AUTOGROUP
  ANDROID: fscrypt: fall back to filesystem-layer crypto when needed
  ANDROID: block: require drivers to declare supported crypto key type(s)
  ANDROID: block: make blk_crypto_start_using_mode() properly check for support
  ANDROID: GKI: defconfig: modify debug configs
  ANDROID: kbuild: ensure __cfi_check is correctly aligned
  FROMLIST: kmod: make request_module() return an error when autoloading is disabled
  UPSTREAM: loop: Only freeze block queue when needed.
  UPSTREAM: loop: Only change blocksize when needed.
  ANDROID: GKI: Fix ABI for CMA page allocation
  ANDROID: GKI: mm: add cma pcp list
  ANDROID: GKI: cma: redirect page allocation to CMA
  ANDROID: Fix wq fp check for CFI builds
  ANDROID: Incremental fs: Fix remount
  ANDROID: Incremental fs: Protect get_fill_block, and add a field
  ANDROID: Incremental fs: Fix crash polling 0 size read_log
  ANDROID: Incremental fs: get_filled_blocks: better index_out
  UPSTREAM: sched/rt: cpupri_find: Trigger a full search as fallback
  UPSTREAM: sched/rt: Remove unnecessary push for unfit tasks
  UPSTREAM: sched/rt: Allow pulling unfitting task
  UPSTREAM: sched/rt: Optimize cpupri_find() on non-heterogenous systems
  UPSTREAM: sched/rt: Re-instate old behavior in select_task_rq_rt()
  UPSTREAM: sched/rt: cpupri_find: Implement fallback mechanism for !fit case
  ANDROID: staging: ion: move definition of attachment
  ANDROID: Incremental fs: Fix four resource bugs
  ANDROID: GKI: Removed cuttlefish configs

Conflicts:
	drivers/base/power/wakeup.c
	drivers/irqchip/irq-gic-v3.c
	include/linux/gfp.h
	include/linux/highmem.h
	include/linux/mmc/host.h
	include/linux/mmzone.h
	include/uapi/linux/coresight-stm.h
	kernel/sched/cpupri.c
	kernel/sched/rt.c
	mm/page_alloc.c
	scripts/module-lto.lds.S

Change-Id: I1845fe7d1d0cd48d91f689565324a794b1a29f10
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
commit 72fdae88b0
Author: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Date:   2020-06-23 10:37:13 -07:00

161 changed files with 88738 additions and 86910 deletions

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 30
SUBLEVEL = 32
EXTRAVERSION =
NAME = Kleptomaniac Octopus

File diff suppressed because it is too large.

@ -98,6 +98,7 @@
devm_free_irq
devm_gpiochip_add_data
devm_gpiod_get
devm_gpio_free
devm_gpio_request
devm_hwspin_lock_request_specific
__devm_iio_device_register
@ -189,6 +190,7 @@
gpiod_direction_output_raw
gpiod_get_raw_value
gpiod_set_raw_value
gpiod_set_raw_value_cansleep
gpiod_to_chip
gpiod_to_irq
gpio_free
@ -199,6 +201,7 @@
handle_level_irq
i2c_del_driver
i2c_register_driver
i2c_transfer_buffer_flags
i2c_smbus_read_byte_data
i2c_smbus_write_byte_data
ida_alloc_range
@ -220,6 +223,33 @@
input_register_device
__ioremap
iounmap
iommu_alloc_resv_region
iommu_attach_device
iommu_detach_device
iommu_device_register
iommu_device_unregister
iommu_dma_get_resv_regions
iommu_domain_alloc
iommu_domain_free
iommu_domain_get_attr
iommu_domain_set_attr
iommu_fwspec_add_ids
iommu_fwspec_free
iommu_get_dma_cookie
iommu_get_domain_for_dev
iommu_group_get
iommu_group_get_for_dev
iommu_group_get_iommudata
iommu_group_put
iommu_group_ref_get
iommu_group_remove_device
iommu_group_set_iommudata
iommu_map
iommu_map_sg
iommu_present
iommu_put_dma_cookie
iommu_set_fault_handler
iommu_unmap
ip_route_output_flow
irq_find_mapping
irq_get_irq_data
@ -263,6 +293,8 @@
ktime_get_real_seconds
ktime_get_real_ts64
kzfree
__list_add_valid
__list_del_entry_valid
memcpy
memset
misc_deregister
@ -435,6 +467,7 @@
seq_putc
seq_puts
seq_read
set_normalized_timespec64
set_user_nice
sg_init_table
sg_miter_next
@ -460,6 +493,7 @@
snd_pcm_hw_constraint_minmax
snd_pcm_lib_ioctl
snd_pcm_period_elapsed
snd_soc_add_component_controls
snd_soc_dapm_disable_pin
snd_soc_dapm_enable_pin
snd_soc_dapm_ignore_suspend
@ -826,6 +860,9 @@
# required by microarray_fp.ko
cdev_alloc
# required by mmc_hsq.ko
mmc_cqe_request_done
# required by musb_hdrc.ko
device_wakeup_enable
dev_printk
@ -947,6 +984,8 @@
memcmp
mutex_is_locked
of_devfreq_cooling_register_power
of_hwspin_lock_get_id_byname
of_modalias_node
on_each_cpu
prepare_to_wait
put_unused_fd
@ -1134,6 +1173,7 @@
mmc_of_parse
mmc_regulator_set_vqmmc
mmc_remove_host
mmc_request_done
__sdhci_add_host
sdhci_cleanup_host
sdhci_enable_clk
@ -1225,6 +1265,14 @@
# required by sipc_core.ko
gen_pool_destroy
mbox_chan_received_data
mbox_chan_txdone
mbox_client_txdone
mbox_controller_register
mbox_controller_unregister
mbox_free_channel
mbox_request_channel
mbox_send_message
register_syscore_ops
# required by sipx.ko
@ -1233,6 +1281,15 @@
hrtimer_start_range_ns
hrtimer_try_to_cancel
# required by snd-soc-aw881xx.ko
i2c_smbus_write_byte
# required by snd-soc-akm4377.ko
regcache_mark_dirty
regcache_sync
snd_soc_get_volsw
snd_soc_put_volsw
# required by snd-soc-sprd-audio-dma.ko
dmam_alloc_attrs
dmam_free_coherent
@ -1302,6 +1359,16 @@
snd_ctl_boolean_mono_info
snd_pcm_hw_constraint_list
# required by snd-soc-tfa98xx.ko
crc32_le
device_create_bin_file
device_remove_bin_file
msleep_interruptible
snd_pcm_format_width
snd_pcm_hw_constraint_mask64
snd_soc_dapm_add_routes
snd_soc_dapm_new_controls
# required by sprd-cpufreq-common.ko
dev_pm_opp_remove
@ -1328,6 +1395,7 @@
dma_get_sgtable_attrs
dma_mmap_attrs
dma_set_coherent_mask
drm_add_edid_modes
drm_atomic_helper_async_commit
drm_atomic_helper_check
drm_atomic_helper_cleanup_planes
@ -1350,17 +1418,33 @@
__drm_atomic_helper_plane_destroy_state
__drm_atomic_helper_plane_duplicate_state
drm_atomic_helper_prepare_planes
drm_atomic_helper_resume
drm_atomic_helper_set_config
drm_atomic_helper_shutdown
drm_atomic_helper_suspend
drm_atomic_helper_swap_state
drm_atomic_helper_update_legacy_modeset_state
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_dependencies
drm_atomic_helper_wait_for_fences
drm_atomic_helper_wait_for_vblanks
drm_atomic_private_obj_fini
drm_atomic_private_obj_init
drm_atomic_set_crtc_for_connector
drm_atomic_set_fence_for_plane
drm_atomic_set_mode_for_crtc
drm_atomic_state_alloc
drm_atomic_state_clear
drm_atomic_state_default_clear
drm_atomic_state_default_release
__drm_atomic_state_free
drm_bridge_attach
drm_connector_attach_content_protection_property
drm_connector_attach_encoder
drm_connector_cleanup
drm_connector_init
drm_connector_unregister
drm_connector_update_edid_property
drm_crtc_cleanup
__drm_crtc_commit_free
drm_crtc_handle_vblank
@ -1370,9 +1454,21 @@
drm_crtc_vblank_on
drm_display_mode_from_videomode
drm_display_mode_to_videomode
drm_dp_aux_register
drm_dp_aux_unregister
drm_dp_calc_pbn_mode
drm_dp_channel_eq_ok
drm_dp_clock_recovery_ok
drm_dp_dpcd_read
drm_dp_dpcd_read_link_status
drm_dp_dpcd_write
drm_encoder_cleanup
drm_encoder_init
drm_format_info
drm_gem_cma_prime_get_sg_table
drm_gem_cma_prime_mmap
drm_gem_cma_prime_vmap
drm_gem_cma_prime_vunmap
drm_gem_cma_vm_ops
drm_gem_create_mmap_offset
drm_gem_fb_create
@ -1383,12 +1479,19 @@
drm_gem_object_init
drm_gem_object_put_unlocked
drm_gem_object_release
drm_gem_prime_export
drm_gem_prime_fd_to_handle
drm_gem_prime_handle_to_fd
drm_gem_prime_import
drm_gem_vm_close
drm_get_edid
drm_hdcp_check_ksvs_revoked
drm_hdcp_update_content_protection
drm_helper_hpd_irq_event
drm_helper_probe_single_connector_modes
drm_kms_helper_poll_fini
drm_kms_helper_poll_init
drm_match_cea_mode
drm_mode_config_reset
drm_mode_create
drm_mode_crtc_set_gamma_size
@ -1628,7 +1731,22 @@
free_percpu_irq
irq_create_of_mapping
panic_notifier_list
register_virtio_device
register_virtio_driver
__request_percpu_irq
unregister_virtio_device
unregister_virtio_driver
virtqueue_add_inbuf
virtqueue_add_outbuf
virtqueue_detach_unused_buf
virtqueue_get_buf
virtqueue_get_vring_size
virtqueue_kick
virtqueue_kick_prepare
virtqueue_notify
vring_del_virtqueue
vring_interrupt
vring_new_virtqueue
wait_woken
woken_wake_function
@ -1757,3 +1875,4 @@
of_usb_host_tpl_support
pci_bus_type
__usb_create_hcd
usb_hcd_platform_shutdown

@ -520,6 +520,7 @@ config SOC_IMX6UL
bool "i.MX6 UltraLite support"
select PINCTRL_IMX6UL
select SOC_IMX6
select ARM_ERRATA_814220
help
This enables support for Freescale i.MX6 UltraLite processor.
@ -556,6 +557,7 @@ config SOC_IMX7D
select PINCTRL_IMX7D
select SOC_IMX7D_CA7 if ARCH_MULTI_V7
select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
select ARM_ERRATA_814220 if ARCH_MULTI_V7
help
This enables support for Freescale i.MX7 Dual processor.

@ -10,9 +10,11 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_UCLAMP_TASK_GROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
@ -21,7 +23,6 @@ CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
@ -60,6 +61,7 @@ CONFIG_PM_WAKELOCKS_LIMIT=0
CONFIG_ENERGY_MODEL=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_ARM_PSCI_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
@ -205,8 +207,6 @@ CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_RFKILL=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEAER is not set
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_QCOM=y
CONFIG_PCIE_KIRIN=y
@ -499,9 +499,10 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SOFTLOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_PANIC_TIMEOUT=5
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=-1
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_LIST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_STM=y

@ -73,8 +73,6 @@ CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_CMA=y
CONFIG_CMA_DIRECT_UTILIZATION=y
CONFIG_CMA_PCP_LISTS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_NET=y

@ -28,6 +28,7 @@ CONFIG_QTI_RPM_STATS_LOG=y
CONFIG_QTI_DDR_STATS_LOG=y
CONFIG_ARM_QCOM_LPM_CPUIDLE=y
# CONFIG_ARM_CPUIDLE is not set
# CONFIG_ARM_PSCI_CPUIDLE is not set
CONFIG_MSM_GLOBAL_SYNX=y
CONFIG_MSM_CVP=y
CONFIG_ION_POOL_AUTO_REFILL=y
@ -41,8 +42,6 @@ CONFIG_OVERRIDE_MEMORY_LIMIT=y
CONFIG_QCOM_MEM_BUF=y
CONFIG_VM_EVENT_COUNT_CLEAN_PAGE_RECLAIM=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_CMA_DIRECT_UTILIZATION=y
CONFIG_CMA_PCP_LISTS=y
CONFIG_DMA_COHERENT_HINT_CACHED=y
CONFIG_PRIORITIZE_OOM_TASKS=y
CONFIG_DMA_CONFIGURE_ALIGNMENT=y

@ -141,7 +141,9 @@ struct lowcore {
/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
__u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
__u32 return_lpswe; /* 0x0402 */
__u32 return_mcck_lpswe; /* 0x0406 */
__u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
/*
* 0xe00 contains the address of the IPL Parameter Information

@ -162,6 +162,7 @@ typedef struct thread_struct thread_struct;
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.fpu.regs = (void *) init_task.thread.fpu.fprs, \
.last_break = 1, \
}
/*

@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <uapi/asm/setup.h>
#include <linux/build_bug.h>
#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
@ -157,6 +158,12 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}
static inline u32 gen_lpswe(unsigned long addr)
{
BUILD_BUG_ON(addr > 0xfff);
return 0xb2b20000 | addr;
}
#else /* __ASSEMBLY__ */
#define IPL_DEVICE (IPL_DEVICE_OFFSET)

@ -125,6 +125,8 @@ int main(void)
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);

@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP
.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
jnz 2f
lgr %r14,%r9
cghi %r14,__LC_RETURN_LPSWE
je 0f
slg %r14,BASED(.Lcritical_start)
clg %r14,BASED(.Lcritical_length)
jhe 0f
jhe 1f
0:
lghi %r11,\savearea # inside critical section, do cleanup
brasl %r14,cleanup_critical
tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
jnz 2f
1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
jnz 2f
jnz 3f
CHECK_STACK \savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
1: UPDATE_VTIME %r14,%r15,\timer
j 4f
2: UPDATE_VTIME %r14,%r15,\timer
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
3: lg %r15,__LC_ASYNC_STACK # load async stack
4: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
.macro UPDATE_VTIME w1,w2,enter_timer
@ -401,7 +404,7 @@ ENTRY(system_call)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
b __LC_RETURN_LPSWE(%r0)
.Lsysc_done:
#
@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
srag %r11,%r10,12
jnz 0f
/* if __LC_LAST_BREAK is < 4096, it contains one of
* the lpswe addresses in lowcore. Set it to 1 (initial state)
* to prevent leaking that address to userspace.
*/
lghi %r10,1
0: lg %r12,__LC_CURRENT
lghi %r11,0
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
jnz 2f # -> fault in user space
jnz 3f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
slg %r14,BASED(.Lsie_critical_start)
clg %r14,BASED(.Lsie_critical_length)
jhe 0f
jhe 1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
lghi %r11,_PIF_GUEST_FAULT
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 1f # -> enabled, can't be a double fault
1: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 2f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
1: CHECK_STACK __LC_SAVE_AREA_SYNC
2: CHECK_STACK __LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
# CHECK_VMAP_STACK branches to stack_overflow or 4f
CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
# CHECK_VMAP_STACK branches to stack_overflow or 5f
CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 3f
jz 4f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: stg %r10,__THREAD_last_break(%r14)
4: lgr %r13,%r11
4: stg %r10,__THREAD_last_break(%r14)
5: lgr %r13,%r11
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
stg %r13,__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 5f
jz 6f
tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
5: REENABLE_IRQS
6: REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
@ -775,7 +785,7 @@ ENTRY(io_int_handler)
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
b __LC_RETURN_LPSWE(%r0)
.Lio_done:
#
@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_MCCK_PSW
b __LC_RETURN_MCCK_LPSWE
.Lmcck_panic:
lg %r15,__LC_NODAT_STACK
@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
#endif
ENTRY(cleanup_critical)
cghi %r9,__LC_RETURN_LPSWE
je .Lcleanup_lpswe
#if IS_ENABLED(CONFIG_KVM)
clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
jl 0f
@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
.Lcleanup_lpswe:
1: lmg %r8,%r9,__LC_RETURN_PSW
BR_EX %r14,%r11
.Lcleanup_sysc_restore_insn:

@ -105,6 +105,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
p->thread.system_timer = 0;
p->thread.hardirq_timer = 0;
p->thread.softirq_timer = 0;
p->thread.last_break = 1;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */

@ -73,6 +73,7 @@
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"
/*
@ -457,6 +458,8 @@ static void __init setup_lowcore_dat_off(void)
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;

@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
goto out_async;
if (vdso_alloc_per_cpu(lc))

@ -415,6 +415,10 @@ void __init vmem_map_init(void)
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
/* we need lowcore executable for our LPSWE instructions */
set_memory_x(0, 1);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}

@ -11,8 +11,10 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_UCLAMP_TASK_GROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
@ -21,7 +23,6 @@ CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
@ -435,6 +436,7 @@ CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SOFTLOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_LIST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y

@ -488,21 +488,13 @@ bool blk_crypto_queue_decrypt_bio(struct bio *bio)
return false;
}
/**
* blk_crypto_start_using_mode() - Start using a crypto algorithm on a device
* @mode_num: the blk_crypto_mode we want to allocate ciphers for.
* @data_unit_size: the data unit size that will be used
* @q: the request queue for the device
*
* Upper layers must call this function to ensure that the crypto API fallback
* has transforms for this algorithm, if they become necessary.
*
* Return: 0 on success and -err on error.
/*
* Prepare blk-crypto-fallback for the specified crypto mode.
* Returns -ENOPKG if the needed crypto API support is missing.
*/
int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
unsigned int data_unit_size,
struct request_queue *q)
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
struct blk_crypto_keyslot *slotp;
unsigned int i;
int err = 0;
@ -515,25 +507,20 @@ int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
if (likely(smp_load_acquire(&tfms_inited[mode_num])))
return 0;
/*
* If the keyslot manager of the request queue supports this
* crypto mode, then we don't need to allocate this mode.
*/
if (keyslot_manager_crypto_mode_supported(q->ksm, mode_num,
data_unit_size))
return 0;
mutex_lock(&tfms_init_lock);
if (likely(tfms_inited[mode_num]))
goto out;
for (i = 0; i < blk_crypto_num_keyslots; i++) {
slotp = &blk_crypto_keyslots[i];
slotp->tfms[mode_num] = crypto_alloc_skcipher(
blk_crypto_modes[mode_num].cipher_str,
0, 0);
slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
if (IS_ERR(slotp->tfms[mode_num])) {
err = PTR_ERR(slotp->tfms[mode_num]);
if (err == -ENOENT) {
pr_warn_once("Missing crypto API support for \"%s\"\n",
cipher_str);
err = -ENOPKG;
}
slotp->tfms[mode_num] = NULL;
goto out_free_tfms;
}
@ -559,7 +546,6 @@ int blk_crypto_start_using_mode(enum blk_crypto_mode_num mode_num,
mutex_unlock(&tfms_init_lock);
return err;
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
@ -615,9 +601,11 @@ int __init blk_crypto_fallback_init(void)
crypto_mode_supported[i] = 0xFFFFFFFF;
crypto_mode_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_ksm = keyslot_manager_create(NULL, blk_crypto_num_keyslots,
&blk_crypto_ksm_ll_ops,
crypto_mode_supported, NULL);
blk_crypto_ksm = keyslot_manager_create(
NULL, blk_crypto_num_keyslots,
&blk_crypto_ksm_ll_ops,
BLK_CRYPTO_FEATURE_STANDARD_KEYS,
crypto_mode_supported, NULL);
if (!blk_crypto_ksm)
return -ENOMEM;

@ -19,6 +19,8 @@ extern const struct blk_crypto_mode blk_crypto_modes[];
#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);
int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);
bool blk_crypto_queue_decrypt_bio(struct bio *bio);
@ -29,6 +31,13 @@ bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc);
#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
pr_warn_once("crypto API fallback is disabled\n");
return -ENOPKG;
}
static inline bool bio_crypt_fallback_crypted(const struct bio_crypt_ctx *bc)
{
return false;

@ -109,7 +109,8 @@ int blk_crypto_submit_bio(struct bio **bio_ptr)
/* Get device keyslot if supported */
if (keyslot_manager_crypto_mode_supported(q->ksm,
bc->bc_key->crypto_mode,
bc->bc_key->data_unit_size)) {
bc->bc_key->data_unit_size,
bc->bc_key->is_hw_wrapped)) {
err = bio_crypt_ctx_acquire_keyslot(bc, q->ksm);
if (!err)
return 0;
@ -232,6 +233,38 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key,
}
EXPORT_SYMBOL_GPL(blk_crypto_init_key);
/**
* blk_crypto_start_using_mode() - Start using blk-crypto on a device
* @crypto_mode: the crypto mode that will be used
* @data_unit_size: the data unit size that will be used
* @is_hw_wrapped_key: whether the key will be hardware-wrapped
* @q: the request queue for the device
*
* Upper layers must call this function to ensure that either the hardware
* supports the needed crypto settings, or the crypto API fallback has
* transforms for the needed mode allocated and ready to go.
*
* Return: 0 on success; -ENOPKG if the hardware doesn't support the crypto
* settings and blk-crypto-fallback is either disabled or the needed
* algorithm is disabled in the crypto API; or another -errno code.
*/
int blk_crypto_start_using_mode(enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size,
bool is_hw_wrapped_key,
struct request_queue *q)
{
if (keyslot_manager_crypto_mode_supported(q->ksm, crypto_mode,
data_unit_size,
is_hw_wrapped_key))
return 0;
if (is_hw_wrapped_key) {
pr_warn_once("hardware doesn't support wrapped keys\n");
return -EOPNOTSUPP;
}
return blk_crypto_fallback_start_using_mode(crypto_mode);
}
EXPORT_SYMBOL_GPL(blk_crypto_start_using_mode);
/**
* blk_crypto_evict_key() - Evict a key from any inline encryption hardware
* it may have been programmed into
@ -252,7 +285,8 @@ int blk_crypto_evict_key(struct request_queue *q,
{
if (q->ksm &&
keyslot_manager_crypto_mode_supported(q->ksm, key->crypto_mode,
key->data_unit_size))
key->data_unit_size,
key->is_hw_wrapped))
return keyslot_manager_evict_key(q->ksm, key);
return blk_crypto_fallback_evict_key(key);
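A hedged usage sketch for the reworked blk_crypto_start_using_mode() above; the foo_* caller, the AES-256-XTS mode constant, and the 4K data unit size are illustrative assumptions rather than part of this merge:

/* Hypothetical upper-layer caller; only the blk_crypto_start_using_mode()
 * signature and the -ENOPKG semantics come from the hunks above. */
static int foo_enable_inline_crypt(struct request_queue *q, bool hw_wrapped)
{
        int err;

        err = blk_crypto_start_using_mode(BLK_ENCRYPTION_MODE_AES_256_XTS,
                                          4096,        /* data unit size */
                                          hw_wrapped,  /* is_hw_wrapped_key */
                                          q);
        if (err == -ENOPKG)
                pr_warn("foo: no HW support and no crypto API fallback for this mode\n");
        return err;
}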

@ -3007,6 +3007,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
/*
* blk_mq_map_queues() and multiple .map_queues() implementations
* expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
* number of hardware queues.
*/
if (set->nr_maps == 1)
set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
if (set->ops->map_queues && !is_kdump_kernel()) {
int i;

@ -43,6 +43,7 @@ struct keyslot {
struct keyslot_manager {
unsigned int num_slots;
struct keyslot_mgmt_ll_ops ksm_ll_ops;
unsigned int features;
unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX];
void *ll_priv_data;
@ -135,6 +136,8 @@ static inline void keyslot_manager_hw_exit(struct keyslot_manager *ksm)
* @ksm_ll_ops: The struct keyslot_mgmt_ll_ops for the device that this keyslot
* manager will use to perform operations like programming and
* evicting keys.
* @features: The supported features as a bitmask of BLK_CRYPTO_FEATURE_* flags.
* Most drivers should set BLK_CRYPTO_FEATURE_STANDARD_KEYS here.
* @crypto_mode_supported: Array of size BLK_ENCRYPTION_MODE_MAX of
* bitmasks that represents whether a crypto mode
* and data unit size are supported. The i'th bit
@ -154,6 +157,7 @@ struct keyslot_manager *keyslot_manager_create(
struct device *dev,
unsigned int num_slots,
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
unsigned int features,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
@ -175,6 +179,7 @@ struct keyslot_manager *keyslot_manager_create(
ksm->num_slots = num_slots;
ksm->ksm_ll_ops = *ksm_ll_ops;
ksm->features = features;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;
@ -381,23 +386,24 @@ void keyslot_manager_put_slot(struct keyslot_manager *ksm, unsigned int slot)
}
/**
* keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode/data
* unit size combination is supported
* by a ksm.
* keyslot_manager_crypto_mode_supported() - Find out if a crypto_mode /
* data unit size / is_hw_wrapped_key
* combination is supported by a ksm.
* @ksm: The keyslot manager to check
* @crypto_mode: The crypto mode to check for.
* @data_unit_size: The data_unit_size for the mode.
* @is_hw_wrapped_key: Whether a hardware-wrapped key will be used.
*
* Calls and returns the result of the crypto_mode_supported function specified
* by the ksm.
*
* Context: Process context.
* Return: Whether or not this ksm supports the specified crypto_mode/
* data_unit_size combo.
* Return: Whether or not this ksm supports the specified crypto settings.
*/
bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
enum blk_crypto_mode_num crypto_mode,
unsigned int data_unit_size)
unsigned int data_unit_size,
bool is_hw_wrapped_key)
{
if (!ksm)
return false;
@ -405,6 +411,13 @@ bool keyslot_manager_crypto_mode_supported(struct keyslot_manager *ksm,
return false;
if (WARN_ON(!is_power_of_2(data_unit_size)))
return false;
if (is_hw_wrapped_key) {
if (!(ksm->features & BLK_CRYPTO_FEATURE_WRAPPED_KEYS))
return false;
} else {
if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS))
return false;
}
return ksm->crypto_mode_supported[crypto_mode] & data_unit_size;
}
@ -520,6 +533,7 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy);
* keyslot_manager_create_passthrough() - Create a passthrough keyslot manager
* @dev: Device for runtime power management (NULL if none)
* @ksm_ll_ops: The struct keyslot_mgmt_ll_ops
* @features: Bitmask of BLK_CRYPTO_FEATURE_* flags
* @crypto_mode_supported: Bitmasks for supported encryption modes
* @ll_priv_data: Private data passed as is to the functions in ksm_ll_ops.
*
@ -537,6 +551,7 @@ EXPORT_SYMBOL_GPL(keyslot_manager_destroy);
struct keyslot_manager *keyslot_manager_create_passthrough(
struct device *dev,
const struct keyslot_mgmt_ll_ops *ksm_ll_ops,
unsigned int features,
const unsigned int crypto_mode_supported[BLK_ENCRYPTION_MODE_MAX],
void *ll_priv_data)
{
@ -547,6 +562,7 @@ struct keyslot_manager *keyslot_manager_create_passthrough(
return NULL;
ksm->ksm_ll_ops = *ksm_ll_ops;
ksm->features = features;
memcpy(ksm->crypto_mode_supported, crypto_mode_supported,
sizeof(ksm->crypto_mode_supported));
ksm->ll_priv_data = ll_priv_data;
@ -575,11 +591,13 @@ void keyslot_manager_intersect_modes(struct keyslot_manager *parent,
if (child) {
unsigned int i;
parent->features &= child->features;
for (i = 0; i < ARRAY_SIZE(child->crypto_mode_supported); i++) {
parent->crypto_mode_supported[i] &=
child->crypto_mode_supported[i];
}
} else {
parent->features = 0;
memset(parent->crypto_mode_supported, 0,
sizeof(parent->crypto_mode_supported));
}
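A hedged driver-side sketch of the extended keyslot_manager_create() signature shown above; the foo_* names and the ops table are assumptions, while the features bitmask and the data-unit-size bitmask mirror the hunks (crypto_mode_supported[mode] & data_unit_size):

/* Hypothetical storage-host driver; foo_ksm_ops is a driver-defined
 * struct keyslot_mgmt_ll_ops and foo_num_keyslots a driver constant. */
static int foo_host_init_inline_crypt(struct foo_host *host)
{
        unsigned int crypto_modes[BLK_ENCRYPTION_MODE_MAX] = {};

        /* Bitmask of supported data unit sizes per mode (here 512B and 4K). */
        crypto_modes[BLK_ENCRYPTION_MODE_AES_256_XTS] = 512 | 4096;

        host->ksm = keyslot_manager_create(host->dev, foo_num_keyslots,
                                           &foo_ksm_ops,
                                           BLK_CRYPTO_FEATURE_STANDARD_KEYS,
                                           crypto_modes, host);
        return host->ksm ? 0 : -ENOMEM;
}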

@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void)
if (acpi_any_fixed_event_status_set())
return true;
/* Check wakeups from drivers sharing the SCI. */
if (acpi_check_wakeup_handlers())
return true;
/*
* If there are no EC events to process and at least one of the
* other enabled GPEs is active, the wakeup is regarded as a

@ -2,6 +2,7 @@
extern void acpi_enable_wakeup_devices(u8 sleep_state);
extern void acpi_disable_wakeup_devices(u8 sleep_state);
extern bool acpi_check_wakeup_handlers(void);
extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;

@ -12,6 +12,15 @@
#include "internal.h"
#include "sleep.h"
struct acpi_wakeup_handler {
struct list_head list_node;
bool (*wakeup)(void *context);
void *context;
};
static LIST_HEAD(acpi_wakeup_handler_head);
static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
/*
* We didn't lock acpi_device_lock in the file, because it invokes oops in
* suspend/resume and isn't really required as this is called in S-state. At
@ -96,3 +105,75 @@ int __init acpi_wakeup_device_init(void)
mutex_unlock(&acpi_device_lock);
return 0;
}
/**
* acpi_register_wakeup_handler - Register wakeup handler
* @wake_irq: The IRQ through which the device may receive wakeups
* @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
* @context: Context to pass to the handler when calling it
*
* Drivers which may share an IRQ with the SCI can use this to register
* a handler which returns true when the device they are managing wants
* to trigger a wakeup.
*/
int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
void *context)
{
struct acpi_wakeup_handler *handler;
/*
* If the device is not sharing its IRQ with the SCI, there is no
* need to register the handler.
*/
if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
return 0;
handler = kmalloc(sizeof(*handler), GFP_KERNEL);
if (!handler)
return -ENOMEM;
handler->wakeup = wakeup;
handler->context = context;
mutex_lock(&acpi_wakeup_handler_mutex);
list_add(&handler->list_node, &acpi_wakeup_handler_head);
mutex_unlock(&acpi_wakeup_handler_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
/**
* acpi_unregister_wakeup_handler - Unregister wakeup handler
* @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
* @context: Context passed to acpi_register_wakeup_handler()
*/
void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
void *context)
{
struct acpi_wakeup_handler *handler;
mutex_lock(&acpi_wakeup_handler_mutex);
list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
if (handler->wakeup == wakeup && handler->context == context) {
list_del(&handler->list_node);
kfree(handler);
break;
}
}
mutex_unlock(&acpi_wakeup_handler_mutex);
}
EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
bool acpi_check_wakeup_handlers(void)
{
struct acpi_wakeup_handler *handler;
/* No need to lock, nothing else is running when we're called. */
list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
if (handler->wakeup(handler->context))
return true;
}
return false;
}


@ -1342,6 +1342,8 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
dev_name(dev), callback, error);
goto Complete;
}
@ -1549,6 +1551,8 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
dev_name(dev), callback, error);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@ -1716,7 +1720,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
char suspend_abort[MAX_SUSPEND_ABORT_LEN];
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
@ -1740,9 +1743,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
async_error = -EBUSY;
goto Complete;
}
@ -1819,6 +1819,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_propagate_wakeup_to_parent(dev);
dpm_clear_superiors_direct_complete(dev);
} else {
log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
dev_name(dev), callback, error);
}
device_unlock(dev);
@ -2028,6 +2031,8 @@ int dpm_prepare(pm_message_t state)
}
pr_info("Device %s not prepared for power transition: code %d\n",
dev_name(dev), error);
log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
dev_name(dev), error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;

@ -15,7 +15,9 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/wakeup_reason.h>
#include <trace/events/power.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@ -886,6 +888,7 @@ bool pm_wakeup_pending(void)
{
unsigned long flags;
bool ret = false;
char suspend_abort[MAX_SUSPEND_ABORT_LEN];
raw_spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
@ -900,6 +903,10 @@ bool pm_wakeup_pending(void)
if (ret) {
pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
pr_info("PM: %s\n", suspend_abort);
}
return ret || atomic_read(&pm_abort_suspend) > 0;
@ -937,6 +944,7 @@ void pm_system_irq_wakeup(unsigned int irq_number)
else if (desc->action && desc->action->name)
name = desc->action->name;
log_irq_wakeup_reason(irq_number);
pr_warn("%s: %d triggered %s\n", __func__,
irq_number, name);

@ -214,7 +214,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
* LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
* will get updated by ioctl(LOOP_GET_STATUS)
*/
blk_mq_freeze_queue(lo->lo_queue);
if (lo->lo_state == Lo_bound)
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
if (use_dio) {
blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
@ -223,7 +224,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
}
blk_mq_unfreeze_queue(lo->lo_queue);
if (lo->lo_state == Lo_bound)
blk_mq_unfreeze_queue(lo->lo_queue);
}
static int
@ -1536,16 +1538,16 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
return -EINVAL;
if (lo->lo_queue->limits.logical_block_size != arg) {
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
}
if (lo->lo_queue->limits.logical_block_size == arg)
return 0;
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
/* kill_bdev should have truncated all the pages */
if (lo->lo_queue->limits.logical_block_size != arg &&
lo->lo_device->bd_inode->i_mapping->nrpages) {
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,

@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
return -ETIMEDOUT;
}
if (rngc->err_reg != 0)
if (rngc->err_reg != 0) {
imx_rngc_irq_mask_clear(rngc);
return -EIO;
}
return 0;
}

@ -2147,11 +2147,11 @@ struct batched_entropy {
/*
* Get a random word for internal kernel use only. The quality of the random
* number is either as good as RDRAND or as good as /dev/urandom, with the
* goal of being quite fast and not depleting entropy. In order to ensure
* number is good as /dev/urandom, but there is no backtrack protection, with
* the goal of being quite fast and not depleting entropy. In order to ensure
* that the randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
* wait_for_random_bytes() should be called and return 0 at least once at any
* point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@ -2164,15 +2164,6 @@ u64 get_random_u64(void)
struct batched_entropy *batch;
static void *previous;
#if BITS_PER_LONG == 64
if (arch_get_random_long((unsigned long *)&ret))
return ret;
#else
if (arch_get_random_long((unsigned long *)&ret) &&
arch_get_random_long((unsigned long *)&ret + 1))
return ret;
#endif
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u64);
@ -2197,9 +2188,6 @@ u32 get_random_u32(void)
struct batched_entropy *batch;
static void *previous;
if (arch_get_random_int(&ret))
return ret;
warn_unseeded_randomness(&previous);
batch = raw_cpu_ptr(&batched_entropy_u32);
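A hedged illustration of the calling pattern the comment above prescribes (the foo_* consumer is hypothetical; only wait_for_random_bytes() and get_random_u64() are real interfaces):

#include <linux/init.h>
#include <linux/random.h>
#include <linux/types.h>

static u64 foo_boot_cookie;

static int __init foo_init(void)
{
        int ret;

        /* Make sure the CRNG has been seeded once before trusting batched output. */
        ret = wait_for_random_bytes();
        if (ret)
                return ret;

        foo_boot_cookie = get_random_u64();
        return 0;
}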

@ -107,6 +107,8 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
static struct kobject *cpufreq_global_kobject;
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@ -2782,9 +2784,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
static int __init cpufreq_core_init(void)
{
if (cpufreq_disabled())

@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
return 0;
}
static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
{
struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
u32 reg, val, req_id;
if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
return;
/* The DMA request id always starts from 0. */
req_id = schan->dev_id - 1;
if (req_id < 32) {
reg = SPRD_DMA_GLB_REQ_PEND0_EN;
val = BIT(req_id);
} else {
reg = SPRD_DMA_GLB_REQ_PEND1_EN;
val = BIT(req_id - 32);
}
sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
}
static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
struct sprd_dma_desc *sdesc)
{
@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
*/
sprd_dma_set_chn_config(schan, schan->cur_desc);
sprd_dma_set_uid(schan);
sprd_dma_set_pending(schan, true);
sprd_dma_enable_chn(schan);
if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
@ -543,6 +566,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
sprd_dma_stop_and_disable(schan);
sprd_dma_set_pending(schan, false);
sprd_dma_unset_uid(schan);
sprd_dma_clear_int(schan);
schan->cur_desc = NULL;

@ -423,9 +423,40 @@ static int axp288_extcon_probe(struct platform_device *pdev)
/* Start charger cable type detection */
axp288_extcon_enable(info);
device_init_wakeup(dev, true);
platform_set_drvdata(pdev, info);
return 0;
}
static int __maybe_unused axp288_extcon_suspend(struct device *dev)
{
struct axp288_extcon_info *info = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
enable_irq_wake(info->irq[VBUS_RISING_IRQ]);
return 0;
}
static int __maybe_unused axp288_extcon_resume(struct device *dev)
{
struct axp288_extcon_info *info = dev_get_drvdata(dev);
/*
* Wakeup when a charger is connected to do charger-type
detection and generate an extcon event which makes the
* axp288 charger driver set the input current limit.
*/
if (device_may_wakeup(dev))
disable_irq_wake(info->irq[VBUS_RISING_IRQ]);
return 0;
}
static SIMPLE_DEV_PM_OPS(axp288_extcon_pm_ops, axp288_extcon_suspend,
axp288_extcon_resume);
static const struct platform_device_id axp288_extcon_table[] = {
{ .name = "axp288_extcon" },
{},
@ -437,6 +468,7 @@ static struct platform_driver axp288_extcon_driver = {
.id_table = axp288_extcon_table,
.driver = {
.name = "axp288_extcon",
.pm = &axp288_extcon_pm_ops,
},
};

@ -569,6 +569,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
const struct sprd_eic_variant_data *pdata;
struct gpio_irq_chip *irq;
struct sprd_eic *sprd_eic;
struct resource *res;
int ret, i;
pdata = of_device_get_match_data(&pdev->dev);
@ -595,9 +596,13 @@ static int sprd_eic_probe(struct platform_device *pdev)
* have one bank EIC, thus base[1] and base[2] can be
* optional.
*/
sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(sprd_eic->base[i]))
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
continue;
sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sprd_eic->base[i]))
return PTR_ERR(sprd_eic->base[i]);
}
sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type];

@ -1375,7 +1375,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
if (vcn_v1_0_is_idle(handle))
if (!vcn_v1_0_is_idle(handle))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {

@ -2879,6 +2879,17 @@ static bool retrieve_link_cap(struct dc_link *link)
sink_id.ieee_device_id,
sizeof(sink_id.ieee_device_id));
/* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
{
uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
!memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
sizeof(str_mbp_2017))) {
link->reported_link_cap.link_rate = 0x0c;
}
}
core_link_read_dpcd(
link,
DP_SINK_HW_REVISION_START,

@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
size = min(size, mem);
}
if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
DRM_ERROR("Cannot request framebuffer\n");
return -EBUSY;
}
if (pci_request_region(pdev, 0, "bochs-drm") != 0)
DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
bochs->fb_map = ioremap(addr, size);
if (bochs->fb_map == NULL) {

@ -121,7 +121,7 @@ static inline void debug_active_assert(struct i915_active *ref) { }
#endif
static void
__active_retire(struct i915_active *ref)
__active_retire(struct i915_active *ref, bool lock)
{
struct active_node *it, *n;
struct rb_root root;
@ -138,7 +138,8 @@ __active_retire(struct i915_active *ref)
retire = true;
}
mutex_unlock(&ref->mutex);
if (likely(lock))
mutex_unlock(&ref->mutex);
if (!retire)
return;
@ -153,21 +154,28 @@ __active_retire(struct i915_active *ref)
}
static void
active_retire(struct i915_active *ref)
active_retire(struct i915_active *ref, bool lock)
{
GEM_BUG_ON(!atomic_read(&ref->count));
if (atomic_add_unless(&ref->count, -1, 1))
return;
/* One active may be flushed from inside the acquire of another */
mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
__active_retire(ref);
if (likely(lock))
mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
__active_retire(ref, lock);
}
static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
active_retire(node_from_active(base)->ref);
active_retire(node_from_active(base)->ref, true);
}
static void
node_retire_nolock(struct i915_active_request *base, struct i915_request *rq)
{
active_retire(node_from_active(base)->ref, false);
}
static struct i915_active_request *
@ -364,7 +372,7 @@ int i915_active_acquire(struct i915_active *ref)
void i915_active_release(struct i915_active *ref)
{
debug_active_assert(ref);
active_retire(ref);
active_retire(ref, true);
}
static void __active_ungrab(struct i915_active *ref)
@ -391,7 +399,7 @@ void i915_active_ungrab(struct i915_active *ref)
{
GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
active_retire(ref);
active_retire(ref, true);
__active_ungrab(ref);
}
@ -421,12 +429,13 @@ int i915_active_wait(struct i915_active *ref)
break;
}
err = i915_active_request_retire(&it->base, BKL(ref));
err = i915_active_request_retire(&it->base, BKL(ref),
node_retire_nolock);
if (err)
break;
}
__active_retire(ref);
__active_retire(ref, true);
if (err)
return err;

@ -309,7 +309,7 @@ i915_active_request_isset(const struct i915_active_request *active)
*/
static inline int __must_check
i915_active_request_retire(struct i915_active_request *active,
struct mutex *mutex)
struct mutex *mutex, i915_active_retire_fn retire)
{
struct i915_request *request;
long ret;
@ -327,7 +327,7 @@ i915_active_request_retire(struct i915_active_request *active,
list_del_init(&active->link);
RCU_INIT_POINTER(active->request, NULL);
active->retire(active, request);
retire(active, request);
return 0;
}

@ -129,11 +129,6 @@
#define TCOBASE 0x050
#define TCOCTL 0x054
#define ACPIBASE 0x040
#define ACPIBASE_SMI_OFF 0x030
#define ACPICTRL 0x044
#define ACPICTRL_EN 0x080
#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
#define SBREG_SMBCTRL_DNV 0xcf000c
@ -1544,7 +1539,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
spin_unlock(&p2sb_spinlock);
res = &tco_res[ICH_RES_MEM_OFF];
res = &tco_res[1];
if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
else
@ -1554,7 +1549,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
res->flags = IORESOURCE_MEM;
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
tco_res, 3, &spt_tco_platform_data,
tco_res, 2, &spt_tco_platform_data,
sizeof(spt_tco_platform_data));
}
@ -1567,17 +1562,16 @@ static struct platform_device *
i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
struct resource *tco_res)
{
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
tco_res, 2, &cnl_tco_platform_data,
sizeof(cnl_tco_platform_data));
return platform_device_register_resndata(&pci_dev->dev,
"iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
sizeof(cnl_tco_platform_data));
}
static void i801_add_tco(struct i801_priv *priv)
{
u32 base_addr, tco_base, tco_ctl, ctrl_val;
struct pci_dev *pci_dev = priv->pci_dev;
struct resource tco_res[3], *res;
unsigned int devfn;
struct resource tco_res[2], *res;
u32 tco_base, tco_ctl;
/* If we have ACPI based watchdog use that instead */
if (acpi_has_watchdog())
@ -1592,30 +1586,15 @@ static void i801_add_tco(struct i801_priv *priv)
return;
memset(tco_res, 0, sizeof(tco_res));
res = &tco_res[ICH_RES_IO_TCO];
/*
* Always populate the main iTCO IO resource here. The second entry
* for NO_REBOOT MMIO is filled by the SPT specific function.
*/
res = &tco_res[0];
res->start = tco_base & ~1;
res->end = res->start + 32 - 1;
res->flags = IORESOURCE_IO;
/*
* Power Management registers.
*/
devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
res = &tco_res[ICH_RES_IO_SMI];
res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
res->end = res->start + 3;
res->flags = IORESOURCE_IO;
/*
* Enable the ACPI I/O space.
*/
pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
ctrl_val |= ACPICTRL_EN;
pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
if (priv->features & FEATURE_TCO_CNL)
priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
else

@ -2911,6 +2911,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
route->num_paths = 0;
err1:
kfree(work);
return ret;
@ -4719,6 +4720,19 @@ static int __init cma_init(void)
{
int ret;
/*
* There is a rare lock ordering dependency in cma_netdev_callback()
* that only happens when bonding is enabled. Teach lockdep that rtnl
* must never be nested under lock so it can find these without having
* to test with bonding.
*/
if (IS_ENABLED(CONFIG_LOCKDEP)) {
rtnl_lock();
mutex_lock(&lock);
mutex_unlock(&lock);
rtnl_unlock();
}
cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;

@ -91,6 +91,7 @@ struct ucma_context {
struct ucma_file *file;
struct rdma_cm_id *cm_id;
struct mutex mutex;
u64 uid;
struct list_head list;
@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
mutex_init(&ctx->mutex);
if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
goto error;
@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
}
events_reported = ctx->events_reported;
mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
}
@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
memset(&resp, 0, sizeof resp);
addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
ucma_copy_iw_route(&resp, &ctx->cm_id->route);
out:
mutex_unlock(&ctx->mutex);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
switch (cmd.option) {
case RDMA_USER_CM_QUERY_ADDR:
ret = ucma_query_addr(ctx, response, out_len);
@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
ret = -ENOSYS;
break;
}
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&ctx->mutex);
ret = rdma_connect(ctx->cm_id, &conn_param);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
cmd.backlog : max_backlog;
mutex_lock(&ctx->mutex);
ret = rdma_listen(ctx->cm_id, ctx->backlog);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
} else
} else {
mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, NULL, NULL);
mutex_unlock(&ctx->mutex);
}
ucma_put_ctx(ctx);
return ret;
}
@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
ret = rdma_disconnect(ctx->cm_id);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
mutex_lock(&ctx->mutex);
ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
mutex_unlock(&ctx->mutex);
if (ret)
goto out;
@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &opa);
mutex_unlock(&ctx->mutex);
} else {
mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
mutex_unlock(&ctx->mutex);
}
if (ret)
return ret;
@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
switch (level) {
case RDMA_OPTION_ID:
mutex_lock(&ctx->mutex);
ret = ucma_set_option_id(ctx, optname, optval, optlen);
mutex_unlock(&ctx->mutex);
break;
case RDMA_OPTION_IB:
ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
if (ctx->cm_id->device)
ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
mutex_unlock(&ctx->mutex);
if (ret)
goto err2;
@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
goto out;
}
mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
mutex_lock(&mc->ctx->file->mut);
ucma_cleanup_mc_events(mc);
list_del(&mc->list);


@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sc2vl sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail;
/*
* Based on the documentation for kobject_init_and_add(), the
* caller should call kobject_put even if this call fails.
*/
goto bail_sc2vl;
}
kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sl2sc sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail_sc2vl;
goto bail_sl2sc;
}
kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping vl2mtu sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail_sl2sc;
goto bail_vl2mtu;
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
goto bail_vl2mtu;
goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@ -742,7 +746,6 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
kobject_put(&ppd->sc2vl_kobj);
bail:
return ret;
}
@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
return 0;
bail:
for (i = 0; i < dd->num_sdma; i++)
kobject_del(&dd->per_sdma[i].kobj);
/*
* The function kobject_put() will call kobject_del() if the kobject
* has been added successfully. The sysfs files created under the
* kobject directory will also be removed during the process.
*/
for (; i >= 0; i--)
kobject_put(&dd->per_sdma[i].kobj);
return ret;
}
@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;
/* Unwind operations in hfi1_verbs_register_sysfs() */
for (i = 0; i < dd->num_sdma; i++)
kobject_put(&dd->per_sdma[i].kobj);
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];


@ -141,6 +141,7 @@ static int defer_packet_queue(
*/
xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
if (list_empty(&pq->busy.list)) {
pq->busy.lock = &sde->waitlock;
iowait_get_priority(&pq->busy);
iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
}
@ -155,6 +156,7 @@ static void activate_packet_queue(struct iowait *wait, int reason)
{
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
pq->busy.lock = NULL;
xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
wake_up(&wait->wait_dma);
};
@ -256,6 +258,21 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
return ret;
}
static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
{
unsigned long flags;
seqlock_t *lock = pq->busy.lock;
if (!lock)
return;
write_seqlock_irqsave(lock, flags);
if (!list_empty(&pq->busy.list)) {
list_del_init(&pq->busy.list);
pq->busy.lock = NULL;
}
write_sequnlock_irqrestore(lock, flags);
}
int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt)
{
@ -281,6 +298,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
flush_pq_iowait(pq);
kfree(pq);
} else {
spin_unlock(&fd->pq_rcu_lock);
@ -587,11 +605,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
if (ret < 0) {
if (ret != -EBUSY)
goto free_req;
wait_event_interruptible_timeout(
if (wait_event_interruptible_timeout(
pq->busy.wait_dma,
(pq->state == SDMA_PKT_Q_ACTIVE),
pq->state == SDMA_PKT_Q_ACTIVE,
msecs_to_jiffies(
SDMA_IOWAIT_TIMEOUT));
SDMA_IOWAIT_TIMEOUT)) <= 0)
flush_pq_iowait(pq);
}
}
*count += idx;


@ -1181,12 +1181,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_CW_MPLS_GRE)
if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_CW_MPLS_UDP)
if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}


@ -1783,14 +1783,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
return 0;
}
static int siw_listen_address(struct iw_cm_id *id, int backlog,
struct sockaddr *laddr, int addr_family)
/*
* siw_create_listen - Create resources for a listener's IWCM ID @id
*
* Starts listen on the socket address id->local_addr.
*
*/
int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
int rv = 0, s_val;
if (addr_family != AF_INET && addr_family != AF_INET6)
return -EAFNOSUPPORT;
rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
return rv;
@ -1805,9 +1814,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
sizeof(struct sockaddr_in) :
sizeof(struct sockaddr_in6));
if (addr_family == AF_INET) {
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
/* For wildcard addr, limit binding to current device only */
if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in));
} else {
struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
/* For wildcard addr, limit binding to current device only */
if (ipv6_addr_any(&laddr->sin6_addr))
s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in6));
}
if (rv) {
siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
@ -1866,7 +1891,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
return 0;
@ -1924,114 +1949,6 @@ static void siw_drop_listeners(struct iw_cm_id *id)
}
}
/*
* siw_create_listen - Create resources for a listener's IWCM ID @id
*
* Listens on the socket addresses id->local_addr and id->remote_addr.
*
* If the listener's @id provides a specific local IP address, at most one
* listening socket is created and associated with @id.
*
* If the listener's @id provides the wildcard (zero) local IP address,
* a separate listen is performed for each local IP address of the device
* by creating a listening socket and binding to that local IP address.
*
*/
int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct net_device *dev = to_siw_dev(id->device)->netdev;
int rv = 0, listeners = 0;
siw_dbg(id->device, "backlog %d\n", backlog);
/*
* For each attached address of the interface, create a
* listening socket, if id->local_addr is the wildcard
* IP address or matches the IP address.
*/
if (id->local_addr.ss_family == AF_INET) {
struct in_device *in_dev = in_dev_get(dev);
struct sockaddr_in s_laddr, *s_raddr;
const struct in_ifaddr *ifa;
if (!in_dev) {
rv = -ENODEV;
goto out;
}
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
s_raddr = (struct sockaddr_in *)&id->remote_addr;
siw_dbg(id->device,
"laddr %pI4:%d, raddr %pI4:%d\n",
&s_laddr.sin_addr, ntohs(s_laddr.sin_port),
&s_raddr->sin_addr, ntohs(s_raddr->sin_port));
rtnl_lock();
in_dev_for_each_ifa_rtnl(ifa, in_dev) {
if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
s_laddr.sin_addr.s_addr == ifa->ifa_address) {
s_laddr.sin_addr.s_addr = ifa->ifa_address;
rv = siw_listen_address(id, backlog,
(struct sockaddr *)&s_laddr,
AF_INET);
if (!rv)
listeners++;
}
}
rtnl_unlock();
in_dev_put(in_dev);
} else if (id->local_addr.ss_family == AF_INET6) {
struct inet6_dev *in6_dev = in6_dev_get(dev);
struct inet6_ifaddr *ifp;
struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
*s_raddr = &to_sockaddr_in6(id->remote_addr);
if (!in6_dev) {
rv = -ENODEV;
goto out;
}
siw_dbg(id->device,
"laddr %pI6:%d, raddr %pI6:%d\n",
&s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
&s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
continue;
if (ipv6_addr_any(&s_laddr->sin6_addr) ||
ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
struct sockaddr_in6 bind_addr = {
.sin6_family = AF_INET6,
.sin6_port = s_laddr->sin6_port,
.sin6_flowinfo = 0,
.sin6_addr = ifp->addr,
.sin6_scope_id = dev->ifindex };
rv = siw_listen_address(id, backlog,
(struct sockaddr *)&bind_addr,
AF_INET6);
if (!rv)
listeners++;
}
}
rtnl_unlock();
in6_dev_put(in6_dev);
} else {
rv = -EAFNOSUPPORT;
}
out:
if (listeners)
rv = 0;
else if (!rv)
rv = -EINVAL;
siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
return rv;
}
int siw_destroy_listen(struct iw_cm_id *id)
{
if (!id->provider_data) {


@ -2762,10 +2762,8 @@ static int __init si_domain_init(int hw)
}
/*
* Normally we use DMA domains for devices which have RMRRs. But we
* loose this requirement for graphic and usb devices. Identity map
* the RMRRs for graphic and USB devices so that they could use the
* si_domain.
* Identity map the RMRRs so that devices with RMRRs could also use
* the si_domain.
*/
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@ -2773,9 +2771,6 @@ static int __init si_domain_init(int hw)
unsigned long long start = rmrr->base_address;
unsigned long long end = rmrr->end_address;
if (device_is_rmrr_locked(dev))
continue;
if (WARN_ON(end < start ||
end >> agaw_to_width(si_domain->agaw)))
continue;
@ -2914,9 +2909,6 @@ static int device_def_domain_type(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if (device_is_rmrr_locked(dev))
return IOMMU_DOMAIN_DMA;
/*
* Prevent any device marked as untrusted from getting
* placed into the statically identity mapping domain.
@ -2954,9 +2946,6 @@ static int device_def_domain_type(struct device *dev)
return IOMMU_DOMAIN_DMA;
} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
return IOMMU_DOMAIN_DMA;
} else {
if (device_has_rmrr(dev))
return IOMMU_DOMAIN_DMA;
}
return (iommu_identity_mapping & IDENTMAP_ALL) ?


@ -19,6 +19,8 @@
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/msm_rtb.h>
#include <linux/wakeup_reason.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
@ -715,6 +717,9 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
log_abnormal_wakeup_reason(
"unexpected HW IRQ %u", irqnr);
gic_deactivate_unhandled(irqnr);
}
return;


@ -239,6 +239,7 @@ static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
err = blk_crypto_start_using_mode(cipher->mode_num, dkc->sector_size,
dkc->is_hw_wrapped,
dkc->dev->bdev->bd_queue);
if (err) {
ti->error = "Error starting to use blk-crypto";


@ -1741,8 +1741,9 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
* won't be imposed.
*/
if (current->bio_list) {
blk_queue_split(md->queue, &bio);
if (!is_abnormal_io(bio))
if (is_abnormal_io(bio))
blk_queue_split(md->queue, &bio);
else
dm_queue_split(md, ti, &bio);
}
@ -2370,16 +2371,21 @@ static struct keyslot_mgmt_ll_ops dm_ksm_ll_ops = {
static int dm_init_inline_encryption(struct mapped_device *md)
{
unsigned int features;
unsigned int mode_masks[BLK_ENCRYPTION_MODE_MAX];
/*
* Start out with all crypto mode support bits set. Any unsupported
* bits will be cleared later when calculating the device restrictions.
* Initially declare support for all crypto settings. Anything
* unsupported by a child device will be removed later when calculating
* the device restrictions.
*/
features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
memset(mode_masks, 0xFF, sizeof(mode_masks));
md->queue->ksm = keyslot_manager_create_passthrough(NULL,
&dm_ksm_ll_ops,
features,
mode_masks, md);
if (!md->queue->ksm)
return -ENOMEM;


@ -10,6 +10,7 @@
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <uapi/linux/usb/charger.h>
#define SPRD_PMIC_INT_MASK_STATUS 0x0
#define SPRD_PMIC_INT_RAW_STATUS 0x4
@ -17,6 +18,16 @@
#define SPRD_SC2731_IRQ_BASE 0x140
#define SPRD_SC2731_IRQ_NUMS 16
#define SPRD_SC2731_CHG_DET 0xedc
/* PMIC charger detection definition */
#define SPRD_PMIC_CHG_DET_DELAY_US 200000
#define SPRD_PMIC_CHG_DET_TIMEOUT 2000000
#define SPRD_PMIC_CHG_DET_DONE BIT(11)
#define SPRD_PMIC_SDP_TYPE BIT(7)
#define SPRD_PMIC_DCP_TYPE BIT(6)
#define SPRD_PMIC_CDP_TYPE BIT(5)
#define SPRD_PMIC_CHG_TYPE_MASK GENMASK(7, 5)
struct sprd_pmic {
struct regmap *regmap;
@ -24,12 +35,14 @@ struct sprd_pmic {
struct regmap_irq *irqs;
struct regmap_irq_chip irq_chip;
struct regmap_irq_chip_data *irq_data;
const struct sprd_pmic_data *pdata;
int irq;
};
struct sprd_pmic_data {
u32 irq_base;
u32 num_irqs;
u32 charger_det;
};
/*
@ -40,8 +53,46 @@ struct sprd_pmic_data {
static const struct sprd_pmic_data sc2731_data = {
.irq_base = SPRD_SC2731_IRQ_BASE,
.num_irqs = SPRD_SC2731_IRQ_NUMS,
.charger_det = SPRD_SC2731_CHG_DET,
};
enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
struct sprd_pmic *ddata = spi_get_drvdata(spi);
const struct sprd_pmic_data *pdata = ddata->pdata;
enum usb_charger_type type;
u32 val;
int ret;
ret = regmap_read_poll_timeout(ddata->regmap, pdata->charger_det, val,
(val & SPRD_PMIC_CHG_DET_DONE),
SPRD_PMIC_CHG_DET_DELAY_US,
SPRD_PMIC_CHG_DET_TIMEOUT);
if (ret) {
dev_err(&spi->dev, "failed to detect charger type\n");
return UNKNOWN_TYPE;
}
switch (val & SPRD_PMIC_CHG_TYPE_MASK) {
case SPRD_PMIC_CDP_TYPE:
type = CDP_TYPE;
break;
case SPRD_PMIC_DCP_TYPE:
type = DCP_TYPE;
break;
case SPRD_PMIC_SDP_TYPE:
type = SDP_TYPE;
break;
default:
type = UNKNOWN_TYPE;
break;
}
return type;
}
EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type);
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
@ -181,6 +232,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
spi_set_drvdata(spi, ddata);
ddata->dev = &spi->dev;
ddata->irq = spi->irq;
ddata->pdata = pdata;
ddata->irq_chip.name = dev_name(&spi->dev);
ddata->irq_chip.status_base =


@ -394,6 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
void rts522a_init_params(struct rtsx_pcr *pcr)
{
rts5227_init_params(pcr);
pcr->ops = &rts522a_pcr_ops;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;


@ -87,6 +87,8 @@
#define MEI_DEV_ID_CMP_H 0x06e0 /* Comet Lake H */
#define MEI_DEV_ID_CMP_H_3 0x06e4 /* Comet Lake H 3 (iTouch) */
#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */


@ -109,6 +109,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CDF, MEI_ME_PCH8_CFG)},
/* required last entry */
{0, }
};


@ -98,6 +98,7 @@ struct pci_endpoint_test {
struct completion irq_raised;
int last_irq;
int num_irqs;
int irq_type;
/* mutex to protect the ioctls */
struct mutex mutex;
struct miscdevice miscdev;
@ -157,6 +158,7 @@ static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
struct pci_dev *pdev = test->pdev;
pci_free_irq_vectors(pdev);
test->irq_type = IRQ_TYPE_UNDEFINED;
}
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
@ -191,6 +193,8 @@ static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
irq = 0;
res = false;
}
test->irq_type = type;
test->num_irqs = irq;
return res;
@ -330,6 +334,7 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_dst_phys_addr;
size_t offset;
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
@ -426,6 +431,7 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 crc32;
if (size > SIZE_MAX - alignment)
@ -494,6 +500,7 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 crc32;
if (size > SIZE_MAX - alignment)
@ -555,7 +562,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
return false;
}
if (irq_type == req_irq_type)
if (test->irq_type == req_irq_type)
return true;
pci_endpoint_test_release_irq(test);
@ -567,12 +574,10 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
if (!pci_endpoint_test_request_irq(test))
goto err;
irq_type = req_irq_type;
return true;
err:
pci_endpoint_test_free_irq_vectors(test);
irq_type = IRQ_TYPE_UNDEFINED;
return false;
}
@ -633,7 +638,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
{
int err;
int id;
char name[20];
char name[24];
enum pci_barno bar;
void __iomem *base;
struct device *dev = &pdev->dev;
@ -652,6 +657,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->test_reg_bar = 0;
test->alignment = 0;
test->pdev = pdev;
test->irq_type = IRQ_TYPE_UNDEFINED;
if (no_msi)
irq_type = IRQ_TYPE_LEGACY;


@ -168,6 +168,11 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@ -1534,9 +1539,30 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
return mmc_blk_cqe_start_req(mq->card->host, mrq);
}
static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_host *host = mq->card->host;
int err;
mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
mmc_pre_req(host, &mqrq->brq.mrq);
err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
if (err)
mmc_post_req(host, &mqrq->brq.mrq, err);
return err;
}
static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_host *host = mq->card->host;
if (host->hsq_enabled)
return mmc_blk_hsq_issue_rw_rq(mq, req);
mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
@ -1922,6 +1948,41 @@ static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
mmc_run_bkops(mq->card);
}
static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
{
struct mmc_queue_req *mqrq =
container_of(mrq, struct mmc_queue_req, brq.mrq);
struct request *req = mmc_queue_req_to_req(mqrq);
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_host *host = mq->card->host;
unsigned long flags;
if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
spin_lock_irqsave(&mq->lock, flags);
mq->recovery_needed = true;
mq->recovery_req = req;
spin_unlock_irqrestore(&mq->lock, flags);
host->cqe_ops->cqe_recovery_start(host);
schedule_work(&mq->recovery_work);
return;
}
mmc_blk_rw_reset_success(mq, req);
/*
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
if (mq->in_recovery)
mmc_blk_cqe_complete_rq(mq, req);
else
blk_mq_complete_request(req);
}
void mmc_blk_mq_complete(struct request *req)
{
struct mmc_queue *mq = req->q->queuedata;


@ -1852,15 +1852,19 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (err) {
pr_err("%s: Failed to enable CQE, error %d\n",
mmc_hostname(host), err);
} else {
if (!err) {
host->cqe_enabled = true;
pr_info("%s: Command Queue Engine enabled\n",
mmc_hostname(host));
if (card->ext_csd.cmdq_en) {
pr_info("%s: Command Queue Engine enabled\n",
mmc_hostname(host));
} else {
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
}
}
}


@ -62,7 +62,7 @@ enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
struct mmc_host *host = mq->card->host;
if (mq->use_cqe)
if (mq->use_cqe && !host->hsq_enabled)
return mmc_cqe_issue_type(host, req);
if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
@ -124,12 +124,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
{
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
unsigned long flags;
int ret;
spin_lock_irqsave(&mq->lock, flags);
if (mq->recovery_needed || !mq->use_cqe)
if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
ret = BLK_EH_RESET_TIMER;
else
ret = mmc_cqe_timed_out(req);
@ -144,12 +146,13 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
struct mmc_queue *mq = container_of(work, struct mmc_queue,
recovery_work);
struct request_queue *q = mq->queue;
struct mmc_host *host = mq->card->host;
mmc_get_card(mq->card, &mq->ctx);
mq->in_recovery = true;
if (mq->use_cqe)
if (mq->use_cqe && !host->hsq_enabled)
mmc_blk_cqe_recovery(mq);
else
mmc_blk_mq_recovery(mq);
@ -160,6 +163,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
mq->recovery_needed = false;
spin_unlock_irq(&mq->lock);
if (host->hsq_enabled)
host->cqe_ops->cqe_recovery_finish(host);
mmc_put_card(mq->card, &mq->ctx);
blk_mq_run_hw_queues(q, true);
@ -279,6 +285,14 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
}
break;
case MMC_ISSUE_ASYNC:
/*
* For MMC host software queue, we only allow 2 requests in
* flight to avoid a long latency.
*/
if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
break;
default:
/*
@ -436,7 +450,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
* The queue depth for CQE must match the hardware because the request
* tag is used to index the hardware queue.
*/
if (mq->use_cqe)
if (mq->use_cqe && !host->hsq_enabled)
mq->tag_set.queue_depth =
min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
else


@ -1147,6 +1147,16 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
}
}
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
host->cqe_enabled = true;
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
}
}
if (host->caps2 & MMC_CAP2_AVOID_3_3V &&
host->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
pr_err("%s: Host failed to negotiate down from 3.3V\n",


@ -633,6 +633,7 @@ config MMC_SDHCI_SPRD
depends on ARCH_SPRD
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
select MMC_HSQ
help
This selects the SDIO Host Controller in Spreadtrum
SoCs, this driver supports R11(IP version: R11P0).
@ -937,6 +938,17 @@ config MMC_CQHCI
If unsure, say N.
config MMC_HSQ
tristate "MMC Host Software Queue support"
help
This selects the MMC Host Software Queue support. This may increase
performance if the host controller and its driver support it.
If you have a controller/driver supporting this interface, say Y or M
here.
If unsure, say N.
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI


@ -98,6 +98,7 @@ obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG


@ -321,14 +321,20 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
struct cqhci_host *cq_host = mmc->cqe_private;
int err;
if (!card->ext_csd.cmdq_en)
return -EINVAL;
if (cq_host->enabled)
return 0;
cq_host->rca = card->rca;
err = cqhci_host_alloc_tdl(cq_host);
if (err)
if (err) {
pr_err("%s: Failed to enable CQE, error %d\n",
mmc_hostname(mmc), err);
return err;
}
__cqhci_enable(cq_host);

new file: drivers/mmc/host/mmc_hsq.c (348 lines)

@ -0,0 +1,348 @@
// SPDX-License-Identifier: GPL-2.0
/*
*
* MMC software queue support based on command queue interfaces
*
* Copyright (C) 2019 Linaro, Inc.
* Author: Baolin Wang <baolin.wang@linaro.org>
*/
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include "mmc_hsq.h"
#define HSQ_NUM_SLOTS 64
#define HSQ_INVALID_TAG HSQ_NUM_SLOTS
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
struct mmc_host *mmc = hsq->mmc;
struct hsq_slot *slot;
unsigned long flags;
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
if (hsq->mrq) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
/* Make sure there are remaining requests that need to be pumped */
if (!hsq->qcnt || !hsq->enabled) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
slot = &hsq->slot[hsq->next_tag];
hsq->mrq = slot->mrq;
hsq->qcnt--;
spin_unlock_irqrestore(&hsq->lock, flags);
mmc->ops->request(mmc, hsq->mrq);
}
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
struct hsq_slot *slot;
int tag;
/*
* If there are no remaining requests in the software queue, then set an
* invalid tag.
*/
if (!remains) {
hsq->next_tag = HSQ_INVALID_TAG;
return;
}
/*
* Increasing the next tag and check if the corresponding request is
* available, if yes, then we found a candidate request.
*/
if (++hsq->next_tag != HSQ_INVALID_TAG) {
slot = &hsq->slot[hsq->next_tag];
if (slot->mrq)
return;
}
/* Otherwise we should iterate all slots to find an available tag. */
for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
slot = &hsq->slot[tag];
if (slot->mrq)
break;
}
if (tag == HSQ_NUM_SLOTS)
tag = HSQ_INVALID_TAG;
hsq->next_tag = tag;
}
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
unsigned long flags;
int remains;
spin_lock_irqsave(&hsq->lock, flags);
remains = hsq->qcnt;
hsq->mrq = NULL;
/* Update the next available tag to be queued. */
mmc_hsq_update_next_tag(hsq, remains);
if (hsq->waiting_for_idle && !remains) {
hsq->waiting_for_idle = false;
wake_up(&hsq->wait_queue);
}
/* Do not pump new request in recovery mode. */
if (hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
spin_unlock_irqrestore(&hsq->lock, flags);
/*
* Try to pump a new request to the host controller as fast as possible
* after completing the previous request.
*/
if (remains > 0)
mmc_hsq_pump_requests(hsq);
}
/**
* mmc_hsq_finalize_request - finalize one request if the request is done
* @mmc: the host controller
* @mrq: the request need to be finalized
*
* Return true if we finalized the corresponding request in software queue,
* otherwise return false.
*/
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_hsq *hsq = mmc->cqe_private;
unsigned long flags;
spin_lock_irqsave(&hsq->lock, flags);
if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
spin_unlock_irqrestore(&hsq->lock, flags);
return false;
}
/*
* Clear the completed slot's request to make room for a new request.
*/
hsq->slot[hsq->next_tag].mrq = NULL;
spin_unlock_irqrestore(&hsq->lock, flags);
mmc_cqe_request_done(mmc, hsq->mrq);
mmc_hsq_post_request(hsq);
return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
unsigned long flags;
spin_lock_irqsave(&hsq->lock, flags);
hsq->recovery_halt = true;
spin_unlock_irqrestore(&hsq->lock, flags);
}
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
int remains;
spin_lock_irq(&hsq->lock);
hsq->recovery_halt = false;
remains = hsq->qcnt;
spin_unlock_irq(&hsq->lock);
/*
* Try to pump a new request if there are requests pending in the software
* queue after finishing recovery.
*/
if (remains > 0)
mmc_hsq_pump_requests(hsq);
}
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_hsq *hsq = mmc->cqe_private;
int tag = mrq->tag;
spin_lock_irq(&hsq->lock);
if (!hsq->enabled) {
spin_unlock_irq(&hsq->lock);
return -ESHUTDOWN;
}
/* Do not queue any new requests in recovery mode. */
if (hsq->recovery_halt) {
spin_unlock_irq(&hsq->lock);
return -EBUSY;
}
hsq->slot[tag].mrq = mrq;
/*
* Set the next tag as current request tag if no available
* next tag.
*/
if (hsq->next_tag == HSQ_INVALID_TAG)
hsq->next_tag = tag;
hsq->qcnt++;
spin_unlock_irq(&hsq->lock);
mmc_hsq_pump_requests(hsq);
return 0;
}
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
if (mmc->ops->post_req)
mmc->ops->post_req(mmc, mrq, 0);
}
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
bool is_idle;
spin_lock_irq(&hsq->lock);
is_idle = (!hsq->mrq && !hsq->qcnt) ||
hsq->recovery_halt;
*ret = hsq->recovery_halt ? -EBUSY : 0;
hsq->waiting_for_idle = !is_idle;
spin_unlock_irq(&hsq->lock);
return is_idle;
}
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
int ret;
wait_event(hsq->wait_queue,
mmc_hsq_queue_is_idle(hsq, &ret));
return ret;
}
static void mmc_hsq_disable(struct mmc_host *mmc)
{
struct mmc_hsq *hsq = mmc->cqe_private;
u32 timeout = 500;
int ret;
spin_lock_irq(&hsq->lock);
if (!hsq->enabled) {
spin_unlock_irq(&hsq->lock);
return;
}
spin_unlock_irq(&hsq->lock);
ret = wait_event_timeout(hsq->wait_queue,
mmc_hsq_queue_is_idle(hsq, &ret),
msecs_to_jiffies(timeout));
if (ret == 0) {
pr_warn("could not stop mmc software queue\n");
return;
}
spin_lock_irq(&hsq->lock);
hsq->enabled = false;
spin_unlock_irq(&hsq->lock);
}
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
struct mmc_hsq *hsq = mmc->cqe_private;
spin_lock_irq(&hsq->lock);
if (hsq->enabled) {
spin_unlock_irq(&hsq->lock);
return -EBUSY;
}
hsq->enabled = true;
spin_unlock_irq(&hsq->lock);
return 0;
}
static const struct mmc_cqe_ops mmc_hsq_ops = {
.cqe_enable = mmc_hsq_enable,
.cqe_disable = mmc_hsq_disable,
.cqe_request = mmc_hsq_request,
.cqe_post_req = mmc_hsq_post_req,
.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
.cqe_recovery_start = mmc_hsq_recovery_start,
.cqe_recovery_finish = mmc_hsq_recovery_finish,
};
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
hsq->num_slots = HSQ_NUM_SLOTS;
hsq->next_tag = HSQ_INVALID_TAG;
hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
sizeof(struct hsq_slot), GFP_KERNEL);
if (!hsq->slot)
return -ENOMEM;
hsq->mmc = mmc;
hsq->mmc->cqe_private = hsq;
mmc->cqe_ops = &mmc_hsq_ops;
spin_lock_init(&hsq->lock);
init_waitqueue_head(&hsq->wait_queue);
return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
void mmc_hsq_suspend(struct mmc_host *mmc)
{
mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
int mmc_hsq_resume(struct mmc_host *mmc)
{
return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");


new file: drivers/mmc/host/mmc_hsq.h (30 lines)
@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MMC_HSQ_H
#define LINUX_MMC_HSQ_H
struct hsq_slot {
struct mmc_request *mrq;
};
struct mmc_hsq {
struct mmc_host *mmc;
struct mmc_request *mrq;
wait_queue_head_t wait_queue;
struct hsq_slot *slot;
spinlock_t lock;
int next_tag;
int num_slots;
int qcnt;
bool enabled;
bool waiting_for_idle;
bool recovery_halt;
};
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc);
void mmc_hsq_suspend(struct mmc_host *mmc);
int mmc_hsq_resume(struct mmc_host *mmc);
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq);
#endif
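
The header above is all a host driver needs in order to opt in: allocate a struct mmc_hsq, hand it to mmc_hsq_init() at probe time, let mmc_hsq_finalize_request() claim completed requests from the driver's request-done path, and bracket runtime PM with mmc_hsq_suspend()/mmc_hsq_resume(). A rough sketch with hypothetical foo_* names, mirroring what the sdhci-sprd hunks below do for real:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include "mmc_hsq.h"

/* Hypothetical completion hook of a host driver "foo". */
static void foo_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
{
	/* Give the software queue first claim on the finished request. */
	if (mmc_hsq_finalize_request(mmc, mrq))
		return;

	/* Otherwise complete it through the normal path. */
	mmc_request_done(mmc, mrq);
}

/* Hypothetical probe-time setup; dev is the host's struct device. */
static int foo_setup_hsq(struct device *dev, struct mmc_host *mmc)
{
	struct mmc_hsq *hsq;

	hsq = devm_kzalloc(dev, sizeof(*hsq), GFP_KERNEL);
	if (!hsq)
		return -ENOMEM;

	/* Installs the hsq cqe_ops and stores hsq in mmc->cqe_private. */
	return mmc_hsq_init(hsq, mmc);
}

Runtime suspend/resume would then call mmc_hsq_suspend(mmc) before quiescing the controller and mmc_hsq_resume(mmc) once it is clocked again, as the sdhci-sprd runtime PM hooks below do.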


@ -19,6 +19,7 @@
#include <linux/slab.h>
#include "sdhci-pltfm.h"
#include "mmc_hsq.h"
/* SDHCI_ARGUMENT2 register high 16bit */
#define SDHCI_SPRD_ARG2_STUFF GENMASK(31, 16)
@ -379,6 +380,16 @@ static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
return 0;
}
static void sdhci_sprd_request_done(struct sdhci_host *host,
struct mmc_request *mrq)
{
/* First check whether the request came from the software queue. */
if (mmc_hsq_finalize_request(host->mmc, mrq))
return;
mmc_request_done(host->mmc, mrq);
}
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
@ -392,6 +403,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
.hw_reset = sdhci_sprd_hw_reset,
.get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
.get_ro = sdhci_sprd_get_ro,
.request_done = sdhci_sprd_request_done,
};
static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@ -521,6 +533,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_sprd_host *sprd_host;
struct mmc_hsq *hsq;
struct clk *clk;
int ret = 0;
@ -543,7 +556,7 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
sdhci_sprd_voltage_switch;
host->mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_ERASE | MMC_CAP_CMD23;
MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
ret = mmc_of_parse(host->mmc);
if (ret)
goto pltfm_free;
@ -631,6 +644,18 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
sprd_host->flags = host->flags;
hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
if (!hsq) {
ret = -ENOMEM;
goto err_cleanup_host;
}
ret = mmc_hsq_init(hsq, host->mmc);
if (ret)
goto err_cleanup_host;
host->always_defer_done = true;
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@ -689,6 +714,7 @@ static int sdhci_sprd_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
mmc_hsq_suspend(host->mmc);
sdhci_runtime_suspend_host(host);
clk_disable_unprepare(sprd_host->clk_sdio);
@ -717,6 +743,8 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
goto clk_disable;
sdhci_runtime_resume_host(host, 1);
mmc_hsq_resume(host->mmc);
return 0;
clk_disable:


@ -2727,7 +2727,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
if (host->ops->request_done)
host->ops->request_done(host, mrq);
else
mmc_request_done(host->mmc, mrq);
return false;
}
@ -3030,7 +3033,7 @@ static inline bool sdhci_defer_done(struct sdhci_host *host,
{
struct mmc_data *data = mrq->data;
return host->pending_reset ||
return host->pending_reset || host->always_defer_done ||
((host->flags & SDHCI_REQ_USE_DMA) && data &&
data->host_cookie == COOKIE_MAPPED);
}
@ -3155,7 +3158,12 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
/* Process mrqs ready for immediate completion */
for (i = 0; i < SDHCI_MAX_MRQS; i++) {
if (mrqs_done[i])
if (!mrqs_done[i])
continue;
if (host->ops->request_done)
host->ops->request_done(host, mrqs_done[i]);
else
mmc_request_done(host->mmc, mrqs_done[i]);
}


@ -535,6 +535,7 @@ struct sdhci_host {
bool pending_reset; /* Cmd/data reset is pending */
bool irq_wake_enabled; /* IRQ wakeup is enabled */
bool v4_mode; /* Host Version 4 Enable */
bool always_defer_done; /* Always defer to complete requests */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@ -649,6 +650,8 @@ struct sdhci_ops {
void (*voltage_switch)(struct sdhci_host *host);
void (*adma_write_desc)(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);
void (*request_done)(struct sdhci_host *host,
struct mmc_request *mrq);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS


@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
cf.can_id = 0;
memset(&cf, 0, sizeof(cf));
switch (*cmd) {
case 'r':
@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
else
return;
*(u64 *) (&cf.data) = 0; /* clear payload */
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {


@ -459,7 +459,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
err = of_mdiobus_register(priv->slave_mii_bus, dn);
err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);
@ -1053,6 +1053,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@ -1115,7 +1116,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
bcm_sf2_identify_ports(priv, dn->child);
ports = of_find_node_by_name(dn, "ports");
if (ports) {
bcm_sf2_identify_ports(priv, ports);
of_node_put(ports);
}
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);


@ -1353,6 +1353,9 @@ mt7530_setup(struct dsa_switch *ds)
continue;
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
if (!phy_node)
continue;
if (phy_node->parent == priv->dev->of_node->parent) {
interface = of_get_phy_mode(mac_np);
id = of_mdio_parse_addr(ds->dev, phy_node);


@ -3032,7 +3032,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return ret;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
pi->xact_addr_filt = ret;
return 0;
}


@ -38,8 +38,8 @@ enum {
enum {
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 2,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1,
MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
};
struct mlx5e_ktls_offload_context_tx {


@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
* this packet was already acknowledged and its record info
* was released.
*/
ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));
if (unlikely(tls_record_is_start_marker(record))) {
ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;


@ -123,9 +123,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
u8 prio = act->vlan.prio;
u16 vid = act->vlan.vid;
return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
act->id, vid,
proto, prio, extack);
err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
act->id, vid,
proto, prio, extack);
if (err)
return err;
break;
}
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");


@ -7167,12 +7167,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@ -7190,25 +7188,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
dev->hw_features |= NETIF_F_IPV6_CSUM;
dev->features |= NETIF_F_IPV6_CSUM;
}
/* There has been a number of reports that using SG/TSO results in
* tx timeouts. However for a lot of people SG/TSO works fine.
* Therefore disable both features by default, but allow users to
* enable them. Use at own risk!
*/
if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
/* RTL8168e-vl and one RTL8168c variant are known to have a
* HW issue with TSO.
*/
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_22) {
dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
}
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;


@ -209,7 +209,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
reg++;
}
while (reg <= perfect_addr_number) {
while (reg < perfect_addr_number) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
reg++;


@ -25,6 +25,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/delay.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@ -902,6 +903,12 @@ static int kszphy_resume(struct phy_device *phydev)
genphy_resume(phydev);
/* After switching from power-down to normal mode, an internal global
* reset is automatically generated. Wait a minimum of 1 ms before
* read/write access to the PHY registers.
*/
usleep_range(1000, 2000);
ret = kszphy_config_reset(phydev);
if (ret)
return ret;


@ -456,6 +456,15 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_MODEL(0x001cc880),
.name = "RTL8208 Fast Ethernet",
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",


@ -1715,8 +1715,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
}
err = tun_xdp_act(tun, xdp_prog, &xdp, act);
if (err < 0)
goto err_xdp;
if (err < 0) {
if (act == XDP_REDIRECT || act == XDP_TX)
put_page(alloc_frag->page);
goto out;
}
if (err == XDP_REDIRECT)
xdp_do_flush_map();
if (err != XDP_PASS)
@ -1730,8 +1734,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
err_xdp:
put_page(alloc_frag->page);
out:
rcu_read_unlock();
local_bh_enable();


@ -1934,6 +1934,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}


@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
* Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1373,11 +1373,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
goto out;
}
/*
* region register have absolute value so apply rxf offset after
* reading the registers
*/
offs += rxf_data.offset;
offs = rxf_data.offset;
/* Lock fence */
iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
@ -2315,10 +2311,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
goto out;
}
if (iwl_fw_dbg_stop_restart_recording(fwrt, &params, true)) {
IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n");
goto out;
}
iwl_fw_dbg_stop_restart_recording(fwrt, &params, true);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
@ -2484,14 +2477,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
return 0;
}
int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop)
void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop)
{
int ret = 0;
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
return 0;
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP))
@ -2508,7 +2501,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
iwl_fw_set_dbg_rec_on(fwrt);
}
#endif
return ret;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);


@ -263,9 +263,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop);
void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params,
bool stop);
#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)


@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
/* consider our LDPC support in case of HE */
/* consider LDPC support in case of HE */
if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))


@ -850,9 +850,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (new)
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ctrl->async_event_sqe.data = NULL;
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ctrl->async_event_sqe.data = NULL;
}
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;


@ -56,6 +56,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_read)
return -EPERM;
rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
if (rc)
@ -90,6 +93,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
count = round_down(count, nvmem->word_size);
if (!nvmem->reg_write)
return -EPERM;
rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
if (rc)


@ -217,12 +217,14 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
* Enable the auto-check function to validate if the programming is
* successful.
*/
sprd_efuse_set_auto_check(efuse, true);
if (lock)
sprd_efuse_set_auto_check(efuse, true);
writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
/* Disable auto-check and data double after programming */
sprd_efuse_set_auto_check(efuse, false);
if (lock)
sprd_efuse_set_auto_check(efuse, false);
sprd_efuse_set_data_double(efuse, false);
/*
@ -237,9 +239,9 @@ static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
writel(SPRD_EFUSE_ERR_CLR_MASK,
efuse->base + SPRD_EFUSE_ERR_CLR);
ret = -EBUSY;
} else {
} else if (lock) {
sprd_efuse_set_prog_lock(efuse, lock);
writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
writel(0, efuse->base + SPRD_EFUSE_MEM(blk));
sprd_efuse_set_prog_lock(efuse, false);
}
@ -322,6 +324,8 @@ static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
{
struct sprd_efuse *efuse = context;
bool blk_double = efuse->data->blk_double;
bool lock;
int ret;
ret = sprd_efuse_lock(efuse);
@ -332,7 +336,20 @@ static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
if (ret)
goto unlock;
ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
/*
* If the number of bytes written equals the block width, the whole block
* will be programmed. In that case, lock the block so that it cannot be
* programmed again.
*
* If the block was only partially programmed, it should remain
* programmable.
*/
if (bytes < SPRD_EFUSE_BLOCK_WIDTH)
lock = false;
else
lock = true;
ret = sprd_efuse_raw_prog(efuse, offset, blk_double, lock, val);
clk_disable_unprepare(efuse->clk);


@ -464,7 +464,8 @@ static ssize_t dev_rescan_store(struct device *dev,
}
return count;
}
static DEVICE_ATTR_WO(dev_rescan);
static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL,
dev_rescan_store);
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@ -501,7 +502,8 @@ static ssize_t bus_rescan_store(struct device *dev,
}
return count;
}
static DEVICE_ATTR_WO(bus_rescan);
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
bus_rescan_store);
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,


@ -464,9 +464,15 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
case PIN_CONFIG_INPUT_ENABLE:
arg = (reg >> SLEEP_INPUT_SHIFT) & SLEEP_INPUT_MASK;
break;
case PIN_CONFIG_OUTPUT:
case PIN_CONFIG_OUTPUT_ENABLE:
arg = reg & SLEEP_OUTPUT_MASK;
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
if ((reg & SLEEP_OUTPUT) || (reg & SLEEP_INPUT))
return -EINVAL;
arg = 1;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = (reg >> DRIVE_STRENGTH_SHIFT) &
DRIVE_STRENGTH_MASK;
@ -635,13 +641,23 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = SLEEP_INPUT_SHIFT;
}
break;
case PIN_CONFIG_OUTPUT:
case PIN_CONFIG_OUTPUT_ENABLE:
if (is_sleep_config == true) {
val |= SLEEP_OUTPUT;
if (arg > 0)
val |= SLEEP_OUTPUT;
else
val &= ~SLEEP_OUTPUT;
mask = SLEEP_OUTPUT_MASK;
shift = SLEEP_OUTPUT_SHIFT;
}
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
if (is_sleep_config == true) {
val = shift = 0;
mask = SLEEP_OUTPUT | SLEEP_INPUT;
}
break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (arg < 2 || arg > 60)
return -EINVAL;
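The two hunks above map the generic PIN_CONFIG_OUTPUT_ENABLE and PIN_CONFIG_BIAS_HIGH_IMPEDANCE parameters onto the sleep-mode bits: output-enable sets or clears SLEEP_OUTPUT depending on the argument, and high impedance is reported only when neither SLEEP_OUTPUT nor SLEEP_INPUT is set. A small userspace sketch of that logic follows; the FAKE_* bit positions are made up and only approximate the real register layout.

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit layout; the real driver uses SLEEP_OUTPUT/SLEEP_INPUT masks. */
#define FAKE_SLEEP_OUTPUT (1u << 0)
#define FAKE_SLEEP_INPUT  (1u << 1)

/* Mirror of the pinconf-get logic: report high impedance only when the pin
 * is neither a sleep-mode output nor a sleep-mode input. */
static int fake_get_high_impedance(uint32_t reg, uint32_t *arg)
{
	if ((reg & FAKE_SLEEP_OUTPUT) || (reg & FAKE_SLEEP_INPUT))
		return -EINVAL;
	*arg = 1;
	return 0;
}

/* Mirror of the pinconf-set logic for output-enable: arg > 0 enables the
 * sleep-mode output, arg == 0 disables it. */
static uint32_t fake_set_output_enable(uint32_t reg, uint32_t arg)
{
	if (arg > 0)
		reg |= FAKE_SLEEP_OUTPUT;
	else
		reg &= ~FAKE_SLEEP_OUTPUT;
	return reg;
}

int main(void)
{
	uint32_t reg = 0, arg = 0;

	reg = fake_set_output_enable(reg, 1);
	printf("high-Z query on output pin: %d\n", fake_get_high_impedance(reg, &arg));
	reg = fake_set_output_enable(reg, 0);
	printf("high-Z query on idle pin: %d (arg=%u)\n",
	       fake_get_high_impedance(reg, &arg), arg);
	return 0;
}
```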


@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data)
return IRQ_HANDLED;
}
static bool int0002_check_wake(void *data)
{
u32 gpe_sts_reg;
gpe_sts_reg = inl(GPE0A_STS_PORT);
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
static struct irq_chip int0002_byt_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
@@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev)
gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);
acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
return 0;
}
@@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev)
static int int0002_remove(struct platform_device *pdev)
{
device_init_wakeup(&pdev->dev, false);
acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
return 0;
}
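The hunks above register int0002_check_wake() through the paired acpi_register_wakeup_handler()/acpi_unregister_wakeup_handler() calls, so a wakeup can be attributed to this device by reading the PME_B0 bit in the GPE0A status port. Below is a userspace sketch of that "poll a status register to decide whether we caused the wake" check; the FAKE_* names and the simulated register read are assumptions replacing inl(GPE0A_STS_PORT).

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed status bit; the real driver tests GPE0A_PME_B0_STS_BIT. */
#define FAKE_PME_B0_STS_BIT (1u << 13)

/* Stand-in for the I/O port read done with inl(GPE0A_STS_PORT). */
static uint32_t fake_read_gpe_status(uint32_t simulated_hw_value)
{
	return simulated_hw_value;
}

/* Mirror of int0002_check_wake(): report a wake only if our status bit is set. */
static bool fake_check_wake(uint32_t simulated_hw_value)
{
	uint32_t gpe_sts_reg = fake_read_gpe_status(simulated_hw_value);

	return gpe_sts_reg & FAKE_PME_B0_STS_BIT;
}

int main(void)
{
	printf("bit clear -> wake? %d\n", fake_check_wake(0));
	printf("bit set   -> wake? %d\n", fake_check_wake(FAKE_PME_B0_STS_BIT));
	return 0;
}
```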


@@ -21,6 +21,7 @@
#include <linux/property.h>
#include <linux/mfd/axp20x.h>
#include <linux/extcon.h>
#include <linux/dmi.h>
#define PS_STAT_VBUS_TRIGGER BIT(0)
#define PS_STAT_BAT_CHRG_DIR BIT(2)
@@ -545,6 +546,49 @@ static irqreturn_t axp288_charger_irq_thread_handler(int irq, void *dev)
return IRQ_HANDLED;
}
/*
* The HP Pavilion x2 10 series comes in a number of variants:
* Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
* Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
* Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
*
* The variants with the AXP288 PMIC are all kinds of special:
*
* 1. All variants use a Type-C connector, which the AXP288 does not support,
* so a Type-C charger is not recognized. Unlike most AXP288 devices, this
* model actually has mostly working ACPI AC / Battery code; the ACPI code
* "solves" the problem by simply setting the input_current_limit to 3A.
* There are still some issues with the ACPI code, so we use this native
* driver, and to fix charging not working (500mA is not enough) we hardcode
* the 3A input_current_limit just like the ACPI code does.
*
* 2. If no charger is connected, the machine boots with the vbus-path
* disabled. Normally that is done only while a 5V boost converter is active,
* to keep the PMIC from charging off the boost converter's output: an OTG
* host cable pulls the ID pin on the micro-B receptacle low, and an ACPI
* event handler on the ID pin re-enables the vbus-path when the pin goes
* high again as the cable is removed. The Type-C connector has no ID pin,
* there is no ID pin handler, and there appears to be no 5V boost converter,
* so we end up not charging because the vbus-path stays disabled until the
* charger is unplugged (which automatically clears the vbus-path disable
* bit); only on the second plug-in does charging start. To make charging
* work on the first plug-in, we unconditionally enable the vbus-path at
* probe on this model, which is safe since there is no 5V boost converter.
*/
static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
{
/*
* Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
* Trail model has "HP", so we only match on product_name.
*/
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
},
},
{} /* Terminating entry */
};
static void axp288_charger_extcon_evt_worker(struct work_struct *work)
{
struct axp288_chrg_info *info =
@@ -568,7 +612,11 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
}
/* Determine cable/charger type */
if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
/* See comment above axp288_hp_x2_dmi_ids declaration */
dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 3A\n");
current_limit = 3000000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
@@ -685,6 +733,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return ret;
}
if (dmi_check_system(axp288_hp_x2_dmi_ids)) {
/* See comment above axp288_hp_x2_dmi_ids declaration */
ret = axp288_charger_vbus_path_select(info, true);
if (ret < 0)
return ret;
}
/* Read current charge voltage and current limit */
ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val);
if (ret < 0) {
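The hunks above key both the 3A input-current override and the vbus-path enable off a DMI product-name match. The sketch below shows the same quirk-table lookup in plain C; the fake_* table and helpers are hypothetical stand-ins for dmi_check_system() and the driver's worker, and the non-SDP fallback value is purely illustrative.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical quirk table keyed on the DMI product name, mirroring
 * axp288_hp_x2_dmi_ids; the real driver uses dmi_check_system(). */
static const char *const fake_hp_x2_products[] = {
	"HP Pavilion x2 Detachable",
	NULL,
};

static bool fake_dmi_match(const char *product_name)
{
	for (int i = 0; fake_hp_x2_products[i]; i++)
		if (strcmp(product_name, fake_hp_x2_products[i]) == 0)
			return true;
	return false;
}

/* Pick the input current limit in microamps, as the extcon worker does:
 * quirked machines get 3A regardless of the detected charger type. */
static int fake_pick_current_limit(const char *product_name, bool is_sdp)
{
	if (fake_dmi_match(product_name))
		return 3000000;
	return is_sdp ? 500000 : 1500000; /* 1.5A is an illustrative default */
}

int main(void)
{
	printf("HP x2: %d uA\n",
	       fake_pick_current_limit("HP Pavilion x2 Detachable", true));
	printf("other: %d uA\n", fake_pick_current_limit("SomeLaptop", true));
	return 0;
}
```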


@@ -227,7 +227,8 @@ static int ufshcd_hba_init_crypto_qti_spec(struct ufs_hba *hba,
}
hba->ksm = keyslot_manager_create(hba->dev, ufshcd_num_keyslots(hba),
ksm_ops, crypto_modes_supported, hba);
ksm_ops, BLK_CRYPTO_FEATURE_WRAPPED_KEYS,
crypto_modes_supported, hba);
if (!hba->ksm) {
err = -ENOMEM;


@@ -336,7 +336,9 @@ int ufshcd_hba_init_crypto_spec(struct ufs_hba *hba,
ufshcd_clear_all_keyslots(hba);
hba->ksm = keyslot_manager_create(hba->dev, ufshcd_num_keyslots(hba),
ksm_ops, crypto_modes_supported, hba);
ksm_ops,
BLK_CRYPTO_FEATURE_STANDARD_KEYS,
crypto_modes_supported, hba);
if (!hba->ksm) {
err = -ENOMEM;
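Both hunks above thread an extra capability argument through keyslot_manager_create(): the QTI ICE path advertises BLK_CRYPTO_FEATURE_WRAPPED_KEYS while the standard-compliant path advertises BLK_CRYPTO_FEATURE_STANDARD_KEYS. Below is a compile-and-run userspace sketch of passing such a feature bitmask into a constructor; every name here is hypothetical, since the real keyslot-manager API is specific to the Android common kernel.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical capability bits standing in for BLK_CRYPTO_FEATURE_*. */
#define FAKE_FEATURE_STANDARD_KEYS (1u << 0)
#define FAKE_FEATURE_WRAPPED_KEYS  (1u << 1)

struct fake_ksm {
	unsigned int num_slots;
	unsigned int features;   /* which key formats the hardware accepts */
};

/* Hypothetical stand-in for a constructor that records the feature mask. */
static struct fake_ksm *fake_ksm_create(unsigned int num_slots,
					unsigned int features)
{
	struct fake_ksm *ksm = calloc(1, sizeof(*ksm));

	if (!ksm)
		return NULL;
	ksm->num_slots = num_slots;
	ksm->features = features;
	return ksm;
}

int main(void)
{
	/* Spec-compliant controller: raw (standard) keys only. */
	struct fake_ksm *std = fake_ksm_create(32, FAKE_FEATURE_STANDARD_KEYS);
	/* Vendor controller with a hardware key manager: wrapped keys. */
	struct fake_ksm *qti = fake_ksm_create(32, FAKE_FEATURE_WRAPPED_KEYS);

	printf("std features=%#x, qti features=%#x\n",
	       std ? std->features : 0, qti ? qti->features : 0);
	free(std);
	free(qti);
	return 0;
}
```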


@@ -38,6 +38,7 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
client->pkt_cnt = 0;
client->client.dev = dev;
client->client.tx_block = false;
client->client.knows_txdone = true;
client->chan = mbox_request_channel(&client->client, index);
if (IS_ERR(client->chan)) {


@@ -90,7 +90,7 @@ static void free_duped_table(struct sg_table *table)
kfree(table);
}
struct ion_dma_buf_attachment {
struct msm_ion_dma_buf_attachment {
struct device *dev;
struct sg_table *table;
struct list_head list;
@@ -100,7 +100,7 @@ struct ion_dma_buf_attachment {
static int msm_ion_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct ion_dma_buf_attachment *a;
struct msm_ion_dma_buf_attachment *a;
struct sg_table *table;
struct ion_buffer *buffer = dmabuf->priv;
@@ -131,7 +131,7 @@ static int msm_ion_dma_buf_attach(struct dma_buf *dmabuf,
static void msm_ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct ion_dma_buf_attachment *a = attachment->priv;
struct msm_ion_dma_buf_attachment *a = attachment->priv;
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
@@ -151,7 +151,7 @@ static struct sg_table
*msm_ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct ion_dma_buf_attachment *a = attachment->priv;
struct msm_ion_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
int count, map_attrs;
struct ion_buffer *buffer = attachment->dmabuf->priv;
@@ -241,7 +241,7 @@ static void msm_ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
int map_attrs;
struct ion_buffer *buffer = attachment->dmabuf->priv;
struct ion_dma_buf_attachment *a = attachment->priv;
struct msm_ion_dma_buf_attachment *a = attachment->priv;
unsigned long ino = file_inode(attachment->dmabuf->file)->i_ino;
mutex_lock(&buffer->lock);
@@ -545,7 +545,7 @@ static int msm_ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_dma_buf_attachment *a;
struct msm_ion_dma_buf_attachment *a;
unsigned long ino = file_inode(dmabuf->file)->i_ino;
int ret = 0;
@@ -600,7 +600,7 @@ static int msm_ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_dma_buf_attachment *a;
struct msm_ion_dma_buf_attachment *a;
unsigned long ino = file_inode(dmabuf->file)->i_ino;
int ret = 0;
@@ -658,7 +658,7 @@ static int msm_ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
unsigned int len)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_dma_buf_attachment *a;
struct msm_ion_dma_buf_attachment *a;
unsigned long ino = file_inode(dmabuf->file)->i_ino;
int ret = 0;
@@ -728,7 +728,7 @@ static int msm_ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
unsigned int len)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_dma_buf_attachment *a;
struct msm_ion_dma_buf_attachment *a;
unsigned long ino = file_inode(dmabuf->file)->i_ino;
int ret = 0;


@@ -45,12 +45,6 @@ static void free_duped_table(struct sg_table *table)
kfree(table);
}
struct ion_dma_buf_attachment {
struct device *dev;
struct sg_table *table;
struct list_head list;
};
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
@@ -293,11 +287,16 @@ static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
struct ion_heap *heap = buffer->heap;
void *vaddr;
if (!heap->buf_ops.vmap)
return ERR_PTR(-EOPNOTSUPP);
if (heap->buf_ops.vmap)
return heap->buf_ops.vmap(dmabuf);
return heap->buf_ops.vmap(dmabuf);
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
return vaddr;
}
static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
@@ -305,10 +304,14 @@ static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
struct ion_buffer *buffer = dmabuf->priv;
struct ion_heap *heap = buffer->heap;
if (!heap->buf_ops.vunmap)
if (heap->buf_ops.vunmap) {
heap->buf_ops.vunmap(dmabuf, vaddr);
return;
}
return heap->buf_ops.vunmap(dmabuf, vaddr);
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
}
static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
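The two hunks above change ion_dma_buf_vmap()/vunmap() so that a heap without its own vmap/vunmap op no longer fails with -EOPNOTSUPP; instead the code falls back to the generic ion_buffer_kmap_get()/put() helpers under the buffer lock. The userspace sketch below shows the same "use the heap's op if present, otherwise fall back" shape, with hypothetical fake_* helpers and the locking omitted.

```c
#include <stdio.h>
#include <stdlib.h>

struct fake_buffer {
	void *kmap;      /* cached fallback mapping */
	int kmap_cnt;    /* reference count on that mapping */
};

struct fake_heap {
	/* Optional heap-specific vmap op; NULL for built-in heaps. */
	void *(*vmap)(struct fake_buffer *buf);
};

/* Stand-in for ion_buffer_kmap_get(): create/refcount a default mapping. */
static void *fake_kmap_get(struct fake_buffer *buf)
{
	if (!buf->kmap)
		buf->kmap = malloc(4096);
	if (buf->kmap)
		buf->kmap_cnt++;
	return buf->kmap;
}

static void *fake_vmap(struct fake_heap *heap, struct fake_buffer *buf)
{
	/* Prefer the heap's own implementation when it provides one... */
	if (heap->vmap)
		return heap->vmap(buf);

	/* ...otherwise fall back to the generic mapping helper. */
	return fake_kmap_get(buf);
}

int main(void)
{
	struct fake_heap builtin = { .vmap = NULL };
	struct fake_buffer buf = { 0 };
	void *vaddr = fake_vmap(&builtin, &buf);

	printf("fallback mapping %p, refs %d\n", vaddr, buf.kmap_cnt);
	free(buf.kmap);
	return 0;
}
```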


@@ -1276,6 +1276,9 @@ static void fbcon_deinit(struct vc_data *vc)
if (!con_is_bound(&fb_con))
fbcon_exit();
if (vc->vc_num == logo_shown)
logo_shown = FBCON_LOGO_CANSHOW;
return;
}


@@ -1,10 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* iTCO Vendor Specific Support hooks */
#ifdef CONFIG_ITCO_VENDOR_SUPPORT
extern int iTCO_vendorsupport;
extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
extern void iTCO_vendor_pre_stop(struct resource *);
extern int iTCO_vendor_check_noreboot_on(void);
#else
#define iTCO_vendorsupport 0
#define iTCO_vendor_pre_start(acpibase, heartbeat) {}
#define iTCO_vendor_pre_stop(acpibase) {}
#define iTCO_vendor_check_noreboot_on() 1


@@ -39,8 +39,10 @@
/* Broken BIOS */
#define BROKEN_BIOS 911
static int vendorsupport;
module_param(vendorsupport, int, 0);
int iTCO_vendorsupport;
EXPORT_SYMBOL(iTCO_vendorsupport);
module_param_named(vendorsupport, iTCO_vendorsupport, int, 0);
MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
"0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS");
@@ -152,7 +154,7 @@ static void broken_bios_stop(struct resource *smires)
void iTCO_vendor_pre_start(struct resource *smires,
unsigned int heartbeat)
{
switch (vendorsupport) {
switch (iTCO_vendorsupport) {
case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_start(smires);
break;
@@ -165,7 +167,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_start);
void iTCO_vendor_pre_stop(struct resource *smires)
{
switch (vendorsupport) {
switch (iTCO_vendorsupport) {
case SUPERMICRO_OLD_BOARD:
supermicro_old_pre_stop(smires);
break;
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
int iTCO_vendor_check_noreboot_on(void)
{
switch (vendorsupport) {
switch (iTCO_vendorsupport) {
case SUPERMICRO_OLD_BOARD:
return 0;
default:
@@ -189,13 +191,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
static int __init iTCO_vendor_init_module(void)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD) {
if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) {
pr_warn("Option vendorsupport=%d is no longer supported, "
"please use the w83627hf_wdt driver instead\n",
SUPERMICRO_NEW_BOARD);
return -EINVAL;
}
pr_info("vendor-support=%d\n", vendorsupport);
pr_info("vendor-support=%d\n", iTCO_vendorsupport);
return 0;
}


@@ -459,13 +459,25 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!p->tco_res)
return -ENODEV;
p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
if (!p->smi_res)
return -ENODEV;
p->iTCO_version = pdata->version;
p->pci_dev = to_pci_dev(dev->parent);
p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
if (p->smi_res) {
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
if (!devm_request_region(dev, p->smi_res->start,
resource_size(p->smi_res),
pdev->name)) {
pr_err("I/O address 0x%04llx already in use, device disabled\n",
(u64)SMI_EN(p));
return -EBUSY;
}
} else if (iTCO_vendorsupport ||
turn_SMI_watchdog_clear_off >= p->iTCO_version) {
pr_err("SMI I/O resource is missing\n");
return -ENODEV;
}
iTCO_wdt_no_reboot_bit_setup(p, pdata);
/*
@@ -492,14 +504,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
p->update_no_reboot_bit(p->no_reboot_priv, true);
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
if (!devm_request_region(dev, p->smi_res->start,
resource_size(p->smi_res),
pdev->name)) {
pr_err("I/O address 0x%04llx already in use, device disabled\n",
(u64)SMI_EN(p));
return -EBUSY;
}
if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
/*
* Bit 13: TCO_EN -> 0
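The probe hunks above stop treating the SMI I/O resource as mandatory: the region is requested only when the platform provides it, and its absence is a hard failure only when iTCO_vendorsupport is set or turn_SMI_watchdog_clear_off applies to this TCO version. Below is a short userspace sketch of that "optional resource, conditional hard failure" flow; the fake_* names are assumptions, not the driver's real structures.

```c
#include <errno.h>
#include <stdio.h>

struct fake_resource { unsigned long start, size; };

/* Mirrors the reworked probe logic: a missing SMI resource is only an error
 * when something later in probe would actually need to touch SMI_EN. */
static int fake_probe_smi(const struct fake_resource *smi_res,
			  int vendorsupport, int turn_smi_clear_off,
			  int tco_version)
{
	if (smi_res) {
		printf("requesting SMI region at %#lx\n", smi_res->start);
		return 0;
	}

	if (vendorsupport || turn_smi_clear_off >= tco_version) {
		fprintf(stderr, "SMI I/O resource is missing\n");
		return -ENODEV;
	}

	/* No SMI resource, but nobody needs it: keep probing. */
	return 0;
}

int main(void)
{
	struct fake_resource smi = { .start = 0x430, .size = 4 };

	printf("with resource: %d\n", fake_probe_smi(&smi, 0, 1, 2));
	printf("missing, not needed: %d\n", fake_probe_smi(NULL, 0, 1, 2));
	printf("missing, needed: %d\n", fake_probe_smi(NULL, 1, 1, 2));
	return 0;
}
```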

Some files were not shown because too many files have changed in this diff.