android_kernel_asus_sm8350/drivers/virtio/virtio_ring.c
Greg Kroah-Hartman 2b44f56202 This is the 5.4.257 stable release
-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmUOqSMACgkQONu9yGCS
 aT6xIg//SVVT7zeyVcdNSchMLT6N1sJKtnplNnhyM6oFPlnyRJbgm608p394osx9
 bMkz8QNPugdJz075nFt1blC2qqh2GqNkgaAM1bSKrVmUhBR3ouaO2vKfTamd1qkQ
 uHjE2+4NSlJu0zeqF+D+xmYYo3W32XXfDjn64p3dYiEVFtM4J0r633OpkNTZL3KR
 b8Ooj0sE6WtG5Lt4I64z74/p8QjK8ESW7N7hYUjADadoycn7ms5wwED6KbXwO+Ed
 3piSteS8bddtx+s6pblRwHvRcOMU3NX0rVG8x3lBtdnjAk32/HEsUm7mAycqJdsJ
 TQ67UJ4gyqzrCtDfrbhZ9hKpaEHGuy6nnjKfXtnlSKZ+8h4uuxK0rIwFlZuS+sjH
 Xm99yiA6KK+CbdR9/ltgQyr5kaTcIqauA6VTjbqqJ3Fuj4OWEz3N2ALUpWeLPNpe
 Enl7b5/eQ4B0sDOYDVG4HsjRTt7ZgNVGFxRRp8ZulDKgX9G4M0K2khq/b3PM9aEQ
 gkgWDxLt3H0EO+6mRgCA0J3a/TSC6gPgV8t8iNcg5rzlXngJzAajdgi7HBMnhPdl
 8y8JCfojtA+RuHWHOEmPXJG1AmwQ4df7szVxbv8WDuidIqv2tb09POo38s/UWHeN
 NGM5nh1WSCs4hQBfkx4wk58xSZ/jAh4/Uq6g3GasmqlknhA8TjQ=
 =dWOv
 -----END PGP SIGNATURE-----

Merge 5.4.257 into android11-5.4-lts

Changes in 5.4.257
	erofs: ensure that the post-EOF tails are all zeroed
	ARM: pxa: remove use of symbol_get()
	mmc: au1xmmc: force non-modular build and remove symbol_get usage
	net: enetc: use EXPORT_SYMBOL_GPL for enetc_phc_index
	rtc: ds1685: use EXPORT_SYMBOL_GPL for ds1685_rtc_poweroff
	modules: only allow symbol_get of EXPORT_SYMBOL_GPL modules
	USB: serial: option: add Quectel EM05G variant (0x030e)
	USB: serial: option: add FOXCONN T99W368/T99W373 product
	HID: wacom: remove the battery when the EKR is off
	staging: rtl8712: fix race condition
	Bluetooth: btsdio: fix use after free bug in btsdio_remove due to race condition
	serial: sc16is7xx: fix bug when first setting GPIO direction
	firmware: stratix10-svc: Fix an NULL vs IS_ERR() bug in probe
	fsi: master-ast-cf: Add MODULE_FIRMWARE macro
	nilfs2: fix general protection fault in nilfs_lookup_dirty_data_buffers()
	nilfs2: fix WARNING in mark_buffer_dirty due to discarded buffer reuse
	pinctrl: amd: Don't show `Invalid config param` errors
	9p: virtio: make sure 'offs' is initialized in zc_request
	ASoC: da7219: Flush pending AAD IRQ when suspending
	ASoC: da7219: Check for failure reading AAD IRQ events
	ethernet: atheros: fix return value check in atl1c_tso_csum()
	vxlan: generalize vxlan_parse_gpe_hdr and remove unused args
	m68k: Fix invalid .section syntax
	s390/dasd: use correct number of retries for ERP requests
	s390/dasd: fix hanging device after request requeue
	fs/nls: make load_nls() take a const parameter
	ASoc: codecs: ES8316: Fix DMIC config
	ASoC: atmel: Fix the 8K sample parameter in I2SC master
	platform/x86: intel: hid: Always call BTNL ACPI method
	platform/x86: huawei-wmi: Silence ambient light sensor
	security: keys: perform capable check only on privileged operations
	clk: fixed-mmio: make COMMON_CLK_FIXED_MMIO depend on HAS_IOMEM
	net: usb: qmi_wwan: add Quectel EM05GV2
	idmaengine: make FSL_EDMA and INTEL_IDMA64 depends on HAS_IOMEM
	scsi: qedi: Fix potential deadlock on &qedi_percpu->p_work_lock
	netlabel: fix shift wrapping bug in netlbl_catmap_setlong()
	bnx2x: fix page fault following EEH recovery
	sctp: handle invalid error codes without calling BUG()
	cifs: add a warning when the in-flight count goes negative
	scsi: storvsc: Always set no_report_opcodes
	ALSA: seq: oss: Fix racy open/close of MIDI devices
	platform/mellanox: Fix mlxbf-tmfifo not handling all virtio CONSOLE notifications
	net: Avoid address overwrite in kernel_connect
	powerpc/32s: Fix assembler warning about r0
	udf: Check consistency of Space Bitmap Descriptor
	udf: Handle error when adding extent to a file
	Revert "net: macsec: preserve ingress frame ordering"
	reiserfs: Check the return value from __getblk()
	eventfd: Export eventfd_ctx_do_read()
	eventfd: prevent underflow for eventfd semaphores
	new helper: lookup_positive_unlocked()
	fs: Fix error checking for d_hash_and_lookup()
	tmpfs: verify {g,u}id mount options correctly
	OPP: Fix passing 0 to PTR_ERR in _opp_attach_genpd()
	x86/asm: Make more symbols local
	x86/boot: Annotate local functions
	x86/decompressor: Don't rely on upper 32 bits of GPRs being preserved
	perf/imx_ddr: don't enable counter0 if none of 4 counters are used
	cpufreq: powernow-k8: Use related_cpus instead of cpus in driver.exit()
	bpf: Clear the probe_addr for uprobe
	tcp: tcp_enter_quickack_mode() should be static
	regmap: rbtree: Use alloc_flags for memory allocations
	spi: tegra20-sflash: fix to check return value of platform_get_irq() in tegra_sflash_probe()
	can: gs_usb: gs_usb_receive_bulk_callback(): count RX overflow errors also in case of OOM
	wifi: mwifiex: Fix OOB and integer underflow when rx packets
	mwifiex: switch from 'pci_' to 'dma_' API
	wifi: mwifiex: fix error recovery in PCIE buffer descriptor management
	crypto: stm32 - Properly handle pm_runtime_get failing
	Bluetooth: nokia: fix value check in nokia_bluetooth_serdev_probe()
	crypto: caam - fix unchecked return value error
	hwrng: iproc-rng200 - use semicolons rather than commas to separate statements
	hwrng: iproc-rng200 - Implement suspend and resume calls
	lwt: Fix return values of BPF xmit ops
	lwt: Check LWTUNNEL_XMIT_CONTINUE strictly
	fs: ocfs2: namei: check return value of ocfs2_add_entry()
	wifi: mwifiex: fix memory leak in mwifiex_histogram_read()
	wifi: mwifiex: Fix missed return in oob checks failed path
	wifi: ath9k: fix races between ath9k_wmi_cmd and ath9k_wmi_ctrl_rx
	wifi: ath9k: protect WMI command response buffer replacement with a lock
	wifi: mwifiex: avoid possible NULL skb pointer dereference
	wifi: ath9k: use IS_ERR() with debugfs_create_dir()
	net: arcnet: Do not call kfree_skb() under local_irq_disable()
	mlxsw: i2c: Fix chunk size setting in output mailbox buffer
	mlxsw: i2c: Limit single transaction buffer size
	net/sched: sch_hfsc: Ensure inner classes have fsc curve
	netrom: Deny concurrent connect().
	drm/bridge: tc358764: Fix debug print parameter order
	quota: avoid increasing DQST_LOOKUPS when iterating over dirty/inuse list
	quota: factor out dquot_write_dquot()
	quota: rename dquot_active() to inode_quota_active()
	quota: add new helper dquot_active()
	quota: fix dqput() to follow the guarantees dquot_srcu should provide
	drm/amdgpu: avoid integer overflow warning in amdgpu_device_resize_fb_bar()
	ARM: dts: BCM53573: Drop nonexistent "default-off" LED trigger
	ARM: dts: BCM53573: Add cells sizes to PCIe node
	ARM: dts: BCM53573: Use updated "spi-gpio" binding properties
	drm/etnaviv: fix dumping of active MMU context
	ARM: dts: s3c6410: move fixed clocks under root node in Mini6410
	ARM: dts: s3c6410: align node SROM bus node name with dtschema in Mini6410
	ARM: dts: s3c64xx: align pinctrl with dtschema
	ARM: dts: samsung: s3c6410-mini6410: correct ethernet reg addresses (split)
	ARM: dts: s5pv210: add RTC 32 KHz clock in SMDKV210
	ARM: dts: s5pv210: use defines for IRQ flags in SMDKV210
	ARM: dts: s5pv210: correct ethernet unit address in SMDKV210
	ARM: dts: s5pv210: add dummy 5V regulator for backlight on SMDKv210
	ARM: dts: samsung: s5pv210-smdkv210: correct ethernet reg addresses (split)
	drm: adv7511: Fix low refresh rate register for ADV7533/5
	ARM: dts: BCM53573: Fix Ethernet info for Luxul devices
	arm64: dts: qcom: sdm845: Add missing RPMh power domain to GCC
	drm/amdgpu: Update min() to min_t() in 'amdgpu_info_ioctl'
	md/bitmap: don't set max_write_behind if there is no write mostly device
	md/md-bitmap: hold 'reconfig_mutex' in backlog_store()
	drm/tegra: Remove superfluous error messages around platform_get_irq()
	drm/tegra: dpaux: Fix incorrect return value of platform_get_irq
	of: unittest: fix null pointer dereferencing in of_unittest_find_node_by_name()
	drm/armada: Fix off-by-one error in armada_overlay_get_property()
	drm/panel: simple: Add missing connector type and pixel format for AUO T215HVN01
	ima: Remove deprecated IMA_TRUSTED_KEYRING Kconfig
	drm/msm/mdp5: Don't leak some plane state
	smackfs: Prevent underflow in smk_set_cipso()
	audit: fix possible soft lockup in __audit_inode_child()
	drm/mediatek: Fix potential memory leak if vmap() fail
	of: unittest: Fix overlay type in apply/revert check
	ALSA: ac97: Fix possible error value of *rac97
	ipmi:ssif: Add check for kstrdup
	ipmi:ssif: Fix a memory leak when scanning for an adapter
	drivers: clk: keystone: Fix parameter judgment in _of_pll_clk_init()
	clk: sunxi-ng: Modify mismatched function name
	PCI: Mark NVIDIA T4 GPUs to avoid bus reset
	PCI: pciehp: Use RMW accessors for changing LNKCTL
	PCI/ASPM: Use RMW accessors for changing LNKCTL
	clk: imx: composite-8m: fix clock pauses when set_rate would be a no-op
	powerpc/fadump: reset dump area size if fadump memory reserve fails
	PCI: Add #defines for Enter Compliance, Transmit Margin
	drm/amdgpu: Correct Transmit Margin masks
	drm/amdgpu: Replace numbers with PCI_EXP_LNKCTL2 definitions
	drm/amdgpu: Prefer pcie_capability_read_word()
	drm/amdgpu: Use RMW accessors for changing LNKCTL
	drm/radeon: Correct Transmit Margin masks
	drm/radeon: Replace numbers with PCI_EXP_LNKCTL2 definitions
	drm/radeon: Prefer pcie_capability_read_word()
	drm/radeon: Use RMW accessors for changing LNKCTL
	wifi: ath10k: Use RMW accessors for changing LNKCTL
	nfs/blocklayout: Use the passed in gfp flags
	powerpc/iommu: Fix notifiers being shared by PCI and VIO buses
	jfs: validate max amount of blocks before allocation.
	fs: lockd: avoid possible wrong NULL parameter
	NFSD: da_addr_body field missing in some GETDEVICEINFO replies
	NFS: Guard against READDIR loop when entry names exceed MAXNAMELEN
	media: v4l2-fwnode: fix v4l2_fwnode_parse_link handling
	media: v4l2-fwnode: simplify v4l2_fwnode_parse_link
	media: v4l2-core: Fix a potential resource leak in v4l2_fwnode_parse_link()
	drivers: usb: smsusb: fix error handling code in smsusb_init_device
	media: dib7000p: Fix potential division by zero
	media: dvb-usb: m920x: Fix a potential memory leak in m920x_i2c_xfer()
	media: cx24120: Add retval check for cx24120_message_send()
	media: mediatek: vcodec: Return NULL if no vdec_fb is found
	usb: phy: mxs: fix getting wrong state with mxs_phy_is_otg_host()
	scsi: iscsi: Add strlen() check in iscsi_if_set{_host}_param()
	scsi: be2iscsi: Add length check when parsing nlattrs
	scsi: qla4xxx: Add length check when parsing nlattrs
	serial: sprd: getting port index via serial aliases only
	serial: sprd: remove redundant sprd_port cleanup
	serial: sprd: Assign sprd_port after initialized to avoid wrong access
	serial: sprd: Fix DMA buffer leak issue
	x86/APM: drop the duplicate APM_MINOR_DEV macro
	scsi: qedf: Do not touch __user pointer in qedf_dbg_stop_io_on_error_cmd_read() directly
	scsi: qedf: Do not touch __user pointer in qedf_dbg_debug_cmd_read() directly
	scsi: qedf: Do not touch __user pointer in qedf_dbg_fp_int_cmd_read() directly
	coresight: tmc: Explicit type conversions to prevent integer overflow
	dma-buf/sync_file: Fix docs syntax
	driver core: test_async: fix an error code
	IB/uverbs: Fix an potential error pointer dereference
	iommu/vt-d: Fix to flush cache of PASID directory table
	media: go7007: Remove redundant if statement
	USB: gadget: f_mass_storage: Fix unused variable warning
	media: i2c: ov5640: Configure HVP lines in s_power callback
	media: ov5640: Enable MIPI interface in ov5640_set_power_mipi()
	media: i2c: ov2680: Set V4L2_CTRL_FLAG_MODIFY_LAYOUT on flips
	media: ov2680: Remove auto-gain and auto-exposure controls
	media: ov2680: Fix ov2680_bayer_order()
	media: ov2680: Fix vflip / hflip set functions
	media: ov2680: Fix regulators being left enabled on ov2680_power_on() errors
	scsi: core: Use 32-bit hostnum in scsi_host_lookup()
	scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock
	serial: tegra: handle clk prepare error in tegra_uart_hw_init()
	amba: bus: fix refcount leak
	Revert "IB/isert: Fix incorrect release of isert connection"
	RDMA/siw: Balance the reference of cep->kref in the error path
	RDMA/siw: Correct wrong debug message
	HID: logitech-dj: Fix error handling in logi_dj_recv_switch_to_dj_mode()
	HID: multitouch: Correct devm device reference for hidinput input_dev name
	x86/speculation: Mark all Skylake CPUs as vulnerable to GDS
	tracing: Fix race issue between cpu buffer write and swap
	phy/rockchip: inno-hdmi: use correct vco_div_5 macro on rk3328
	phy/rockchip: inno-hdmi: round fractal pixclock in rk3328 recalc_rate
	phy/rockchip: inno-hdmi: do not power on rk3328 post pll on reg write
	rpmsg: glink: Add check for kstrdup
	mtd: rawnand: fsmc: handle clk prepare error in fsmc_nand_resume()
	um: Fix hostaudio build errors
	dmaengine: ste_dma40: Add missing IRQ check in d40_probe
	cpufreq: Fix the race condition while updating the transition_task of policy
	virtio_ring: fix avail_wrap_counter in virtqueue_add_packed
	igmp: limit igmpv3_newpack() packet size to IP_MAX_MTU
	netfilter: ipset: add the missing IP_SET_HASH_WITH_NET0 macro for ip_set_hash_netportnet.c
	netfilter: xt_u32: validate user space input
	netfilter: xt_sctp: validate the flag_info count
	skbuff: skb_segment, Call zero copy functions before using skbuff frags
	igb: set max size RX buffer when store bad packet is enabled
	PM / devfreq: Fix leak in devfreq_dev_release()
	ALSA: pcm: Fix missing fixup call in compat hw_refine ioctl
	ipmi_si: fix a memleak in try_smi_init()
	ARM: OMAP2+: Fix -Warray-bounds warning in _pwrdm_state_switch()
	backlight/gpio_backlight: Compare against struct fb_info.device
	backlight/bd6107: Compare against struct fb_info.device
	backlight/lv5207lp: Compare against struct fb_info.device
	xtensa: PMU: fix base address for the newer hardware
	media: dvb: symbol fixup for dvb_attach()
	ntb: Drop packets when qp link is down
	ntb: Clean up tx tail index on link down
	ntb: Fix calculation ntb_transport_tx_free_entry()
	Revert "PCI: Mark NVIDIA T4 GPUs to avoid bus reset"
	procfs: block chmod on /proc/thread-self/comm
	parisc: Fix /proc/cpuinfo output for lscpu
	dlm: fix plock lookup when using multiple lockspaces
	dccp: Fix out of bounds access in DCCP error handler
	X.509: if signature is unsupported skip validation
	net: handle ARPHRD_PPP in dev_is_mac_header_xmit()
	fsverity: skip PKCS#7 parser when keyring is empty
	pstore/ram: Check start of empty przs during init
	s390/ipl: add missing secure/has_secure file to ipl type 'unknown'
	crypto: stm32 - fix loop iterating through scatterlist for DMA
	cpufreq: brcmstb-avs-cpufreq: Fix -Warray-bounds bug
	sc16is7xx: Set iobase to device index
	serial: sc16is7xx: fix broken port 0 uart init
	usb: typec: tcpci: clear the fault status bit
	udf: initialize newblock to 0
	drm: fix double free for gbo in drm_gem_vram_init and drm_gem_vram_create
	net/ipv6: SKB symmetric hash should incorporate transport ports
	scsi: qla2xxx: fix inconsistent TMF timeout
	scsi: qla2xxx: Fix erroneous link up failure
	scsi: qla2xxx: Turn off noisy message log
	scsi: qla2xxx: Remove unsupported ql2xenabledif option
	fbdev/ep93xx-fb: Do not assign to struct fb_info.dev
	drm/ast: Fix DRAM init on AST2200
	lib/test_meminit: allocate pages up to order MAX_ORDER
	parisc: led: Fix LAN receive and transmit LEDs
	parisc: led: Reduce CPU overhead for disk & lan LED computation
	clk: qcom: gcc-mdm9615: use proper parent for pll0_vote clock
	soc: qcom: qmi_encdec: Restrict string length in decode
	NFSv4/pnfs: minor fix for cleanup path in nfs4_get_device_info
	kconfig: fix possible buffer overflow
	perf annotate bpf: Don't enclose non-debug code with an assert()
	x86/virt: Drop unnecessary check on extended CPUID level in cpu_has_svm()
	perf top: Don't pass an ERR_PTR() directly to perf_session__delete()
	watchdog: intel-mid_wdt: add MODULE_ALIAS() to allow auto-load
	pwm: lpc32xx: Remove handling of PWM channels
	sctp: annotate data-races around sk->sk_wmem_queued
	ipv4: annotate data-races around fi->fib_dead
	net: read sk->sk_family once in sk_mc_loop()
	igb: disable virtualization features on 82580
	veth: Fixing transmit return status for dropped packets
	net: ipv6/addrconf: avoid integer underflow in ipv6_create_tempaddr
	af_unix: Fix data-races around user->unix_inflight.
	af_unix: Fix data-race around unix_tot_inflight.
	af_unix: Fix data-races around sk->sk_shutdown.
	af_unix: Fix data race around sk->sk_err.
	net: sched: sch_qfq: Fix UAF in qfq_dequeue()
	kcm: Destroy mutex in kcm_exit_net()
	igc: Change IGC_MIN to allow set rx/tx value between 64 and 80
	igbvf: Change IGBVF_MIN to allow set rx/tx value between 64 and 80
	igb: Change IGB_MIN to allow set rx/tx value between 64 and 80
	s390/zcrypt: don't leak memory if dev_set_name() fails
	idr: fix param name in idr_alloc_cyclic() doc
	ip_tunnels: use DEV_STATS_INC()
	netfilter: nfnetlink_osf: avoid OOB read
	net: hns3: fix the port information display when sfp is absent
	sh: boards: Fix CEU buffer size passed to dma_declare_coherent_memory()
	ata: sata_gemini: Add missing MODULE_DESCRIPTION
	ata: pata_ftide010: Add missing MODULE_DESCRIPTION
	fuse: nlookup missing decrement in fuse_direntplus_link
	btrfs: don't start transaction when joining with TRANS_JOIN_NOSTART
	btrfs: use the correct superblock to compare fsid in btrfs_validate_super
	mtd: rawnand: brcmnand: Fix crash during the panic_write
	mtd: rawnand: brcmnand: Fix potential out-of-bounds access in oob write
	mtd: rawnand: brcmnand: Fix potential false time out warning
	perf hists browser: Fix hierarchy mode header
	perf tools: Handle old data in PERF_RECORD_ATTR
	usb: typec: tcpm: Refactor tcpm_handle_vdm_request payload handling
	usb: typec: tcpm: Refactor tcpm_handle_vdm_request
	usb: typec: bus: verify partner exists in typec_altmode_attention
	ARM: dts: BCM5301X: Extend RAM to full 256MB for Linksys EA6500 V2
	clk: imx8mm: Move 1443X/1416X PLL clock structure to common place
	net: ipv4: fix one memleak in __inet_del_ifa()
	net: ethernet: mvpp2_main: fix possible OOB write in mvpp2_ethtool_get_rxnfc()
	net: ethernet: mtk_eth_soc: fix possible NULL pointer dereference in mtk_hwlro_get_fdir_all()
	r8152: check budget for r8152_poll()
	kcm: Fix memory leak in error path of kcm_sendmsg()
	platform/mellanox: mlxbf-tmfifo: Drop the Rx packet if no more descriptors
	mlxbf-tmfifo: sparse tags for config access
	platform/mellanox: mlxbf-tmfifo: Drop jumbo frames
	net/tls: do not free tls_rec on async operation in bpf_exec_tx_verdict()
	ixgbe: fix timestamp configuration code
	kcm: Fix error handling for SOCK_DGRAM in kcm_sendmsg().
	drm/amd/display: Fix a bug when searching for insert_above_mpcc
	parisc: Drop loops_per_jiffy from per_cpu struct
	autofs: fix memory leak of waitqueues in autofs_catatonic_mode
	btrfs: output extra debug info if we failed to find an inline backref
	locks: fix KASAN: use-after-free in trace_event_raw_event_filelock_lock
	ACPICA: Add AML_NO_OPERAND_RESOLVE flag to Timer
	kernel/fork: beware of __put_task_struct() calling context
	ACPI: video: Add backlight=native DMI quirk for Lenovo Ideapad Z470
	perf/smmuv3: Enable HiSilicon Erratum 162001900 quirk for HIP08/09
	hw_breakpoint: fix single-stepping when using bpf_overflow_handler
	devlink: remove reload failed checks in params get/set callbacks
	wifi: ath9k: fix printk specifier
	wifi: mwifiex: fix fortify warning
	crypto: lib/mpi - avoid null pointer deref in mpi_cmp_ui()
	tpm_tis: Resend command to recover from data transfer errors
	mmc: sdhci-esdhc-imx: improve ESDHC_FLAG_ERR010450
	alx: fix OOB-read compiler warning
	wifi: mac80211_hwsim: drop short frames
	drm/exynos: fix a possible null-pointer dereference due to data race in exynos_drm_crtc_atomic_disable()
	bus: ti-sysc: Configure uart quirks for k3 SoC
	md: raid1: fix potential OOB in raid1_remove_disk()
	ext2: fix datatype of block number in ext2_xattr_set2()
	fs/jfs: prevent double-free in dbUnmount() after failed jfs_remount()
	jfs: fix invalid free of JFS_IP(ipimap)->i_imap in diUnmount
	powerpc/pseries: fix possible memory leak in ibmebus_bus_init()
	media: dvb-usb-v2: af9035: Fix null-ptr-deref in af9035_i2c_master_xfer
	media: dw2102: Fix null-ptr-deref in dw2102_i2c_transfer()
	media: af9005: Fix null-ptr-deref in af9005_i2c_xfer
	media: anysee: fix null-ptr-deref in anysee_master_xfer
	media: az6007: Fix null-ptr-deref in az6007_i2c_xfer()
	media: tuners: qt1010: replace BUG_ON with a regular error
	media: pci: cx23885: replace BUG with error return
	usb: gadget: fsl_qe_udc: validate endpoint index for ch9 udc
	scsi: target: iscsi: Fix buffer overflow in lio_target_nacl_info_show()
	serial: cpm_uart: Avoid suspicious locking
	media: pci: ipu3-cio2: Initialise timing struct to avoid a compiler warning
	kobject: Add sanity check for kset->kobj.ktype in kset_register()
	tools features: Add feature test to check if libbfd has buildid support
	perf jevents: Make build dependency on test JSONs
	perf tools: Add an option to build without libbfd
	btrfs: move btrfs_pinned_by_swapfile prototype into volumes.h
	btrfs: add a helper to read the superblock metadata_uuid
	btrfs: compare the correct fsid/metadata_uuid in btrfs_validate_super
	selftests: tracing: Fix to unmount tracefs for recovering environment
	md/raid1: fix error: ISO C90 forbids mixed declarations
	attr: block mode changes of symlinks
	btrfs: fix lockdep splat and potential deadlock after failure running delayed items
	tracing: Have current_trace inc the trace array ref count
	tracing: Have option files inc the trace array ref count
	nfsd: fix change_info in NFSv4 RENAME replies
	tracefs: Add missing lockdown check to tracefs_create_dir()
	i2c: aspeed: Reset the i2c controller when timeout occurs
	scsi: megaraid_sas: Fix deadlock on firmware crashdump
	ext4: fix rec_len verify error
	mtd: rawnand: brcmnand: Fix ECC level field setting for v7.2 controller
	drm/amdgpu: fix amdgpu_cs_p1_user_fence
	net/sched: Retire rsvp classifier
	Linux 5.4.257

Change-Id: I99f6978fc0d802b5803005fe903a90aed315d88d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2023-09-30 11:33:27 +00:00

// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
*
* Copyright 2007 Rusty Russell IBM Corporation
*/
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
do { \
dev_err(&(_vq)->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
BUG(); \
} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq) \
do { \
if ((_vq)->in_use) \
panic("%s:in_use = %i\n", \
(_vq)->vq.name, (_vq)->in_use); \
(_vq)->in_use = __LINE__; \
} while (0)
#define END_USE(_vq) \
do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq) \
do { \
ktime_t now = ktime_get(); \
\
/* No kick or get, with .1 second between? Warn. */ \
if ((_vq)->last_add_time_valid) \
WARN_ON(ktime_to_ms(ktime_sub(now, \
(_vq)->last_add_time)) > 100); \
(_vq)->last_add_time = now; \
(_vq)->last_add_time_valid = true; \
} while (0)
#define LAST_ADD_TIME_CHECK(_vq) \
do { \
if ((_vq)->last_add_time_valid) { \
WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
(_vq)->last_add_time)) > 100); \
} \
} while (0)
#define LAST_ADD_TIME_INVALID(_vq) \
((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...) \
do { \
dev_err(&_vq->vq.vdev->dev, \
"%s:"fmt, (_vq)->vq.name, ##args); \
(_vq)->broken = true; \
} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
struct vring_desc_state_split {
void *data; /* Data for callback. */
struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
};
struct vring_desc_state_packed {
void *data; /* Data for callback. */
struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
u16 num; /* Descriptor list length. */
u16 next; /* The next desc state in a list. */
u16 last; /* The last desc state in a list. */
};
struct vring_desc_extra_packed {
dma_addr_t addr; /* Buffer DMA addr. */
u32 len; /* Buffer length. */
u16 flags; /* Descriptor flags. */
};
struct vring_virtqueue {
struct virtqueue vq;
/* Is this a packed ring? */
bool packed_ring;
/* Is DMA API used? */
bool use_dma_api;
/* Can we use weak barriers? */
bool weak_barriers;
/* Other side has made a mess, don't try any more. */
bool broken;
/* Host supports indirect buffers */
bool indirect;
/* Host publishes avail event idx */
bool event;
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
unsigned int num_added;
/* Last used index we've seen. */
u16 last_used_idx;
union {
/* Available for split ring */
struct {
/* Actual memory layout for this queue. */
struct vring vring;
/* Last written value to avail->flags */
u16 avail_flags_shadow;
/*
* Last written value to avail->idx in
* guest byte order.
*/
u16 avail_idx_shadow;
/* Per-descriptor state. */
struct vring_desc_state_split *desc_state;
/* DMA address and size information */
dma_addr_t queue_dma_addr;
size_t queue_size_in_bytes;
} split;
/* Available for packed ring */
struct {
/* Actual memory layout for this queue. */
struct {
unsigned int num;
struct vring_packed_desc *desc;
struct vring_packed_desc_event *driver;
struct vring_packed_desc_event *device;
} vring;
/* Driver ring wrap counter. */
bool avail_wrap_counter;
/* Device ring wrap counter. */
bool used_wrap_counter;
/* Avail used flags. */
u16 avail_used_flags;
/* Index of the next avail descriptor. */
u16 next_avail_idx;
/*
* Last written value to driver->flags in
* guest byte order.
*/
u16 event_flags_shadow;
/* Per-descriptor state. */
struct vring_desc_state_packed *desc_state;
struct vring_desc_extra_packed *desc_extra;
/* DMA address and size information */
dma_addr_t ring_dma_addr;
dma_addr_t driver_event_dma_addr;
dma_addr_t device_event_dma_addr;
size_t ring_size_in_bytes;
size_t event_size_in_bytes;
} packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
/* DMA, allocation, and size information */
bool we_own_ring;
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
/* Figure out if their kicks are too delayed. */
bool last_add_time_valid;
ktime_t last_add_time;
#endif
};
/*
* Helpers.
*/
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
unsigned int total_sg)
{
struct vring_virtqueue *vq = to_vvq(_vq);
/*
* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold
*/
return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
/*
* Modern virtio devices have feature bits to specify whether they need a
* quirk and bypass the IOMMU. If not there, just use the DMA API.
*
* If there, the interaction between virtio and DMA API is messy.
*
* On most systems with virtio, physical addresses match bus addresses,
* and it doesn't particularly matter whether we use the DMA API.
*
* On some systems, including Xen and any system with a physical device
* that speaks virtio behind a physical IOMMU, we must use the DMA API
* for virtio DMA to work at all.
*
* On other systems, including SPARC and PPC64, virtio-pci devices are
* enumerated as though they are behind an IOMMU, but the virtio host
* ignores the IOMMU, so we must either pretend that the IOMMU isn't
* there or somehow map everything as the identity.
*
* For the time being, we preserve historic behavior and bypass the DMA
* API.
*
* TODO: install a per-device DMA ops structure that does the right thing
* taking into account all the above quirks, and use the DMA API
* unconditionally on data path.
*/
static bool vring_use_dma_api(struct virtio_device *vdev)
{
if (!virtio_has_iommu_quirk(vdev))
return true;
/* Otherwise, we are left to guess. */
/*
* In theory, it's possible to have a buggy QEMU-supplied
* emulated Q35 IOMMU and Xen enabled at the same time. On
* such a configuration, virtio has never worked and will
* not work without an even larger kludge. Instead, enable
* the DMA API if we're a Xen guest, which at least allows
* all of the sensible Xen configurations to work correctly.
*/
if (xen_domain())
return true;
return false;
}
size_t virtio_max_dma_size(struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
if (vring_use_dma_api(vdev))
max_segment_size = dma_max_mapping_size(vdev->dev.parent);
return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
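/*
 * Example (illustrative sketch only, not an API contract): a driver can use
 * virtio_max_dma_size() to clamp how large a single buffer it hands to the
 * ring may be, so the eventual DMA mapping never exceeds what the DMA layer
 * supports. The example_clamp_seg_size() name is hypothetical.
 */
static inline size_t example_clamp_seg_size(struct virtio_device *vdev,
					    size_t wanted)
{
	size_t max_seg = virtio_max_dma_size(vdev);

	return wanted < max_seg ? wanted : max_seg;
}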
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
if (vring_use_dma_api(vdev)) {
return dma_alloc_coherent(vdev->dev.parent, size,
dma_handle, flag);
} else {
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
if (queue) {
phys_addr_t phys_addr = virt_to_phys(queue);
*dma_handle = (dma_addr_t)phys_addr;
/*
* Sanity check: make sure we didn't truncate
* the address. The only arches I can find that
* have 64-bit phys_addr_t but 32-bit dma_addr_t
* are certain non-highmem MIPS and x86
* configurations, but these configurations
* should never allocate physical pages above 32
* bits, so this is fine. Just in case, throw a
* warning and abort if we end up with an
* unrepresentable address.
*/
if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
free_pages_exact(queue, PAGE_ALIGN(size));
return NULL;
}
}
return queue;
}
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
void *queue, dma_addr_t dma_handle)
{
if (vring_use_dma_api(vdev))
dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
else
free_pages_exact(queue, PAGE_ALIGN(size));
}
/*
* The DMA ops on various arches are rather gnarly right now, and
* making all of the arch DMA ops work on the vring device itself
* is a mess. For now, we use the parent device for DMA ops.
*/
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->vq.vdev->dev.parent;
}
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
struct scatterlist *sg,
enum dma_data_direction direction)
{
if (!vq->use_dma_api)
return (dma_addr_t)sg_phys(sg);
/*
* We can't use dma_map_sg, because we don't use scatterlists in
* the way it expects (we don't guarantee that the scatterlist
* will exist for the lifetime of the mapping).
*/
return dma_map_page(vring_dma_dev(vq),
sg_page(sg), sg->offset, sg->length,
direction);
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
if (!vq->use_dma_api)
return (dma_addr_t)virt_to_phys(cpu_addr);
return dma_map_single(vring_dma_dev(vq),
cpu_addr, size, direction);
}
static int vring_mapping_error(const struct vring_virtqueue *vq,
dma_addr_t addr)
{
if (!vq->use_dma_api)
return 0;
return dma_mapping_error(vring_dma_dev(vq), addr);
}
/*
* Split ring specific functions - *_split().
*/
static void vring_unmap_one_split(const struct vring_virtqueue *vq,
struct vring_desc *desc)
{
u16 flags;
if (!vq->use_dma_api)
return;
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
if (flags & VRING_DESC_F_INDIRECT) {
dma_unmap_single(vring_dma_dev(vq),
virtio64_to_cpu(vq->vq.vdev, desc->addr),
virtio32_to_cpu(vq->vq.vdev, desc->len),
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
dma_unmap_page(vring_dma_dev(vq),
virtio64_to_cpu(vq->vq.vdev, desc->addr),
virtio32_to_cpu(vq->vq.vdev, desc->len),
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
unsigned int total_sg,
gfp_t gfp)
{
struct vring_desc *desc;
unsigned int i;
/*
* We require lowmem mappings for the descriptors because
* otherwise virt_to_phys will give us bogus addresses in the
* virtqueue.
*/
gfp &= ~__GFP_HIGHMEM;
desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
if (!desc)
return NULL;
for (i = 0; i < total_sg; i++)
desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
return desc;
}
static inline int virtqueue_add_split(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
void *ctx,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
struct scatterlist *sg;
struct vring_desc *desc;
unsigned int i, n, avail, descs_used, prev, err_idx;
int head;
bool indirect;
START_USE(vq);
BUG_ON(data == NULL);
BUG_ON(ctx && vq->indirect);
if (unlikely(vq->broken)) {
END_USE(vq);
return -EIO;
}
LAST_ADD_TIME_UPDATE(vq);
BUG_ON(total_sg == 0);
head = vq->free_head;
if (virtqueue_use_indirect(_vq, total_sg))
desc = alloc_indirect_split(_vq, total_sg, gfp);
else {
desc = NULL;
WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
}
if (desc) {
/* Use a single buffer which doesn't continue */
indirect = true;
/* Set up rest to use this indirect table. */
i = 0;
descs_used = 1;
} else {
indirect = false;
desc = vq->split.vring.desc;
i = head;
descs_used = total_sg;
}
if (vq->vq.num_free < descs_used) {
pr_debug("Can't add buf len %i - avail = %i\n",
descs_used, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
if (out_sgs)
vq->notify(&vq->vq);
if (indirect)
kfree(desc);
END_USE(vq);
return -ENOSPC;
}
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
if (vring_mapping_error(vq, addr))
goto unmap_release;
desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
if (vring_mapping_error(vq, addr))
goto unmap_release;
desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
if (indirect) {
/* Now that the indirect table is filled in, map it. */
dma_addr_t addr = vring_map_single(
vq, desc, total_sg * sizeof(struct vring_desc),
DMA_TO_DEVICE);
if (vring_mapping_error(vq, addr))
goto unmap_release;
vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
VRING_DESC_F_INDIRECT);
vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
addr);
vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
total_sg * sizeof(struct vring_desc));
}
/* We're using some buffers from the free list. */
vq->vq.num_free -= descs_used;
/* Update free pointer */
if (indirect)
vq->free_head = virtio16_to_cpu(_vq->vdev,
vq->split.vring.desc[head].next);
else
vq->free_head = i;
/* Store token and indirect buffer state. */
vq->split.desc_state[head].data = data;
if (indirect)
vq->split.desc_state[head].indir_desc = desc;
else
vq->split.desc_state[head].indir_desc = ctx;
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb(vq->weak_barriers);
vq->split.avail_idx_shadow++;
vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
vq->split.avail_idx_shadow);
vq->num_added++;
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
/* This is very unlikely, but theoretically possible. Kick
* just in case. */
if (unlikely(vq->num_added == (1 << 16) - 1))
virtqueue_kick(_vq);
return 0;
unmap_release:
err_idx = i;
if (indirect)
i = 0;
else
i = head;
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
vring_unmap_one_split(vq, &desc[i]);
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
}
if (indirect)
kfree(desc);
END_USE(vq);
return -ENOMEM;
}
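/*
 * Example (illustrative sketch only): how a driver typically reaches the
 * split add path above. It wraps a buffer in a scatterlist, queues it with
 * one of the virtqueue_add_*() wrappers from <linux/virtio.h>, and kicks the
 * device. The example_send() name and the GFP_ATOMIC choice are assumptions.
 */
static inline int example_send(struct virtqueue *vq, void *buf,
			       unsigned int len)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, buf, len);

	/* "buf" doubles as the token later returned by virtqueue_get_buf(). */
	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		return err;

	virtqueue_kick(vq);
	return 0;
}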
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
bool needs_kick;
START_USE(vq);
/* We need to expose available array entries before checking avail
* event. */
virtio_mb(vq->weak_barriers);
old = vq->split.avail_idx_shadow - vq->num_added;
new = vq->split.avail_idx_shadow;
vq->num_added = 0;
LAST_ADD_TIME_CHECK(vq);
LAST_ADD_TIME_INVALID(vq);
if (vq->event) {
needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
vring_avail_event(&vq->split.vring)),
new, old);
} else {
needs_kick = !(vq->split.vring.used->flags &
cpu_to_virtio16(_vq->vdev,
VRING_USED_F_NO_NOTIFY));
}
END_USE(vq);
return needs_kick;
}
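/*
 * Example (illustrative sketch only): virtqueue_kick() is simply
 * virtqueue_kick_prepare() followed by virtqueue_notify(). Drivers that
 * batch several additions under their own lock can split the two so the
 * potentially expensive notification happens with the lock dropped. The
 * example_* names and the caller-provided lock are assumptions.
 */
static inline void example_add_batch_and_kick(struct virtqueue *vq,
					      spinlock_t *lock)
{
	bool kick;

	spin_lock(lock);
	/* ... several virtqueue_add_*() calls would go here ... */
	kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (kick)
		virtqueue_notify(vq);
}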
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
void **ctx)
{
unsigned int i, j;
__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
vq->split.desc_state[head].data = NULL;
/* Put back on free list: unmap first-level descriptors and find end */
i = head;
while (vq->split.vring.desc[i].flags & nextflag) {
vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
vq->vq.num_free++;
}
vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
vq->free_head);
vq->free_head = head;
/* Plus final descriptor */
vq->vq.num_free++;
if (vq->indirect) {
struct vring_desc *indir_desc =
vq->split.desc_state[head].indir_desc;
u32 len;
/* Free the indirect table, if any, now that it's unmapped. */
if (!indir_desc)
return;
len = virtio32_to_cpu(vq->vq.vdev,
vq->split.vring.desc[head].len);
BUG_ON(!(vq->split.vring.desc[head].flags &
cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
for (j = 0; j < len / sizeof(struct vring_desc); j++)
vring_unmap_one_split(vq, &indir_desc[j]);
kfree(indir_desc);
vq->split.desc_state[head].indir_desc = NULL;
} else if (ctx) {
*ctx = vq->split.desc_state[head].indir_desc;
}
}
static inline bool more_used_split(const struct vring_virtqueue *vq)
{
return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
vq->split.vring.used->idx);
}
static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
unsigned int *len,
void **ctx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
unsigned int i;
u16 last_used;
START_USE(vq);
if (unlikely(vq->broken)) {
END_USE(vq);
return NULL;
}
if (!more_used_split(vq)) {
pr_debug("No more buffers in queue\n");
END_USE(vq);
return NULL;
}
/* Only get used array entries after they have been exposed by host. */
virtio_rmb(vq->weak_barriers);
last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
i = virtio32_to_cpu(_vq->vdev,
vq->split.vring.used->ring[last_used].id);
*len = virtio32_to_cpu(_vq->vdev,
vq->split.vring.used->ring[last_used].len);
if (unlikely(i >= vq->split.vring.num)) {
BAD_RING(vq, "id %u out of range\n", i);
return NULL;
}
if (unlikely(!vq->split.desc_state[i].data)) {
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
/* detach_buf_split clears data, so grab it now. */
ret = vq->split.desc_state[i].data;
detach_buf_split(vq, i, ctx);
vq->last_used_idx++;
/* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
* the read in the next get_buf call. */
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
virtio_store_mb(vq->weak_barriers,
&vring_used_event(&vq->split.vring),
cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
LAST_ADD_TIME_INVALID(vq);
END_USE(vq);
return ret;
}
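/*
 * Example (illustrative sketch only): the usual consumer of the get_buf path
 * above is a loop in the driver's virtqueue callback that drains every
 * completed buffer, using the returned token to locate its own bookkeeping.
 * example_handle_used() is hypothetical.
 */
static inline void example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL) {
		/* "len" is the number of bytes the device wrote, if any. */
		/* example_handle_used(token, len); */
	}
}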
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
vq->split.vring.avail->flags =
cpu_to_virtio16(_vq->vdev,
vq->split.avail_flags_shadow);
}
}
static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used_idx;
START_USE(vq);
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always do both to keep code simple. */
if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
vq->split.vring.avail->flags =
cpu_to_virtio16(_vq->vdev,
vq->split.avail_flags_shadow);
}
vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
last_used_idx = vq->last_used_idx);
END_USE(vq);
return last_used_idx;
}
static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
vq->split.vring.used->idx);
}
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 bufs;
START_USE(vq);
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
* either clear the flags bit or point the event index at the next
* entry. Always update the event index to keep code simple. */
if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
vq->split.vring.avail->flags =
cpu_to_virtio16(_vq->vdev,
vq->split.avail_flags_shadow);
}
/* TODO: tune this threshold */
bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
virtio_store_mb(vq->weak_barriers,
&vring_used_event(&vq->split.vring),
cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
- vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
}
END_USE(vq);
return true;
}
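/*
 * Example (illustrative sketch only): a common pattern built on the
 * enable/disable callback paths above is to suppress further notifications
 * while draining the ring, then re-enable them and re-check, closing the
 * race with buffers used in the meantime. example_vq_callback() is
 * hypothetical.
 */
static inline void example_vq_callback(struct virtqueue *vq)
{
	unsigned int len;

	do {
		virtqueue_disable_cb(vq);
		while (virtqueue_get_buf(vq, &len))
			; /* process each completed buffer here */
	} while (!virtqueue_enable_cb(vq));
}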
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
void *buf;
START_USE(vq);
for (i = 0; i < vq->split.vring.num; i++) {
if (!vq->split.desc_state[i].data)
continue;
/* detach_buf_split clears data, so grab it now. */
buf = vq->split.desc_state[i].data;
detach_buf_split(vq, i, NULL);
vq->split.avail_idx_shadow--;
vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
vq->split.avail_idx_shadow);
END_USE(vq);
return buf;
}
/* That should have freed everything. */
BUG_ON(vq->vq.num_free != vq->split.vring.num);
END_USE(vq);
return NULL;
}
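/*
 * Example (illustrative sketch only): drivers call
 * virtqueue_detach_unused_buf() at teardown time, after resetting the device
 * so the ring is quiescent, to reclaim buffers that were queued but never
 * used. example_free_token() is hypothetical.
 */
static inline void example_reclaim_on_remove(struct virtqueue *vq)
{
	void *token;

	while ((token = virtqueue_detach_unused_buf(vq)) != NULL) {
		/* example_free_token(token); */
	}
}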
static struct virtqueue *vring_create_virtqueue_split(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
size_t queue_size_in_bytes;
struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
return NULL;
}
/* TODO: allocate each queue chunk individually */
for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
if (!may_reduce_num)
return NULL;
}
if (!num)
return NULL;
if (!queue) {
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
return NULL;
queue_size_in_bytes = vring_size(num, vring_align);
vring_init(&vring, num, queue, vring_align);
vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
notify, callback, name);
if (!vq) {
vring_free_queue(vdev, queue_size_in_bytes, queue,
dma_addr);
return NULL;
}
to_vvq(vq)->split.queue_dma_addr = dma_addr;
to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
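/*
 * Worked example (assuming the standard layouts: 16-byte descriptors,
 * 8-byte used elements, 2-byte ring indices): for num = 256 and
 * vring_align = 4096, vring_size() comes to
 *   descriptors:  256 * 16        = 4096 bytes
 *   avail ring:   2 * (3 + 256)   =  518 bytes  -> 4614, aligned up to 8192
 *   used ring:    2 * 3 + 256 * 8 = 2054 bytes
 * i.e. 10246 bytes in total. The loop above retries with num halved whenever
 * an allocation larger than a page fails and may_reduce_num allows it.
 */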
/*
* Packed ring specific functions - *_packed().
*/
static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
struct vring_desc_extra_packed *state)
{
u16 flags;
if (!vq->use_dma_api)
return;
flags = state->flags;
if (flags & VRING_DESC_F_INDIRECT) {
dma_unmap_single(vring_dma_dev(vq),
state->addr, state->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
dma_unmap_page(vring_dma_dev(vq),
state->addr, state->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
struct vring_packed_desc *desc)
{
u16 flags;
if (!vq->use_dma_api)
return;
flags = le16_to_cpu(desc->flags);
if (flags & VRING_DESC_F_INDIRECT) {
dma_unmap_single(vring_dma_dev(vq),
le64_to_cpu(desc->addr),
le32_to_cpu(desc->len),
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
dma_unmap_page(vring_dma_dev(vq),
le64_to_cpu(desc->addr),
le32_to_cpu(desc->len),
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
gfp_t gfp)
{
struct vring_packed_desc *desc;
/*
* We require lowmem mappings for the descriptors because
* otherwise virt_to_phys will give us bogus addresses in the
* virtqueue.
*/
gfp &= ~__GFP_HIGHMEM;
desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
return desc;
}
static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
gfp_t gfp)
{
struct vring_packed_desc *desc;
struct scatterlist *sg;
unsigned int i, n, err_idx;
u16 head, id;
dma_addr_t addr;
head = vq->packed.next_avail_idx;
desc = alloc_indirect_packed(total_sg, gfp);
if (!desc) {
/* Indirect table allocation failed; nothing is queued yet, so bail out. */
END_USE(vq);
return -ENOMEM;
}
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
kfree(desc);
END_USE(vq);
return -ENOSPC;
}
i = 0;
id = vq->free_head;
BUG_ON(id == vq->packed.vring.num);
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
addr = vring_map_one_sg(vq, sg, n < out_sgs ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (vring_mapping_error(vq, addr))
goto unmap_release;
desc[i].flags = cpu_to_le16(n < out_sgs ?
0 : VRING_DESC_F_WRITE);
desc[i].addr = cpu_to_le64(addr);
desc[i].len = cpu_to_le32(sg->length);
i++;
}
}
/* Now that the indirect table is filled in, map it. */
addr = vring_map_single(vq, desc,
total_sg * sizeof(struct vring_packed_desc),
DMA_TO_DEVICE);
if (vring_mapping_error(vq, addr))
goto unmap_release;
vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
if (vq->use_dma_api) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
vq->packed.avail_used_flags;
}
/*
* A driver MUST NOT make the first descriptor in the list
* available before all subsequent descriptors comprising
* the list are made available.
*/
virtio_wmb(vq->weak_barriers);
vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
vq->packed.avail_used_flags);
/* We're using some buffers from the free list. */
vq->vq.num_free -= 1;
/* Update free pointer */
n = head + 1;
if (n >= vq->packed.vring.num) {
n = 0;
vq->packed.avail_wrap_counter ^= 1;
vq->packed.avail_used_flags ^=
1 << VRING_PACKED_DESC_F_AVAIL |
1 << VRING_PACKED_DESC_F_USED;
}
vq->packed.next_avail_idx = n;
vq->free_head = vq->packed.desc_state[id].next;
/* Store token and indirect buffer state. */
vq->packed.desc_state[id].num = 1;
vq->packed.desc_state[id].data = data;
vq->packed.desc_state[id].indir_desc = desc;
vq->packed.desc_state[id].last = id;
vq->num_added += 1;
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
return 0;
unmap_release:
err_idx = i;
for (i = 0; i < err_idx; i++)
vring_unmap_desc_packed(vq, &desc[i]);
kfree(desc);
END_USE(vq);
return -ENOMEM;
}
static inline int virtqueue_add_packed(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
void *ctx,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
struct vring_packed_desc *desc;
struct scatterlist *sg;
unsigned int i, n, c, descs_used, err_idx;
__le16 head_flags, flags;
u16 head, id, prev, curr, avail_used_flags;
START_USE(vq);
BUG_ON(data == NULL);
BUG_ON(ctx && vq->indirect);
if (unlikely(vq->broken)) {
END_USE(vq);
return -EIO;
}
LAST_ADD_TIME_UPDATE(vq);
BUG_ON(total_sg == 0);
if (virtqueue_use_indirect(_vq, total_sg))
return virtqueue_add_indirect_packed(vq, sgs, total_sg,
out_sgs, in_sgs, data, gfp);
head = vq->packed.next_avail_idx;
avail_used_flags = vq->packed.avail_used_flags;
WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
desc = vq->packed.vring.desc;
i = head;
descs_used = total_sg;
if (unlikely(vq->vq.num_free < descs_used)) {
pr_debug("Can't add buf len %i - avail = %i\n",
descs_used, vq->vq.num_free);
END_USE(vq);
return -ENOSPC;
}
id = vq->free_head;
BUG_ON(id == vq->packed.vring.num);
curr = id;
c = 0;
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (vring_mapping_error(vq, addr))
goto unmap_release;
flags = cpu_to_le16(vq->packed.avail_used_flags |
(++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
(n < out_sgs ? 0 : VRING_DESC_F_WRITE));
if (i == head)
head_flags = flags;
else
desc[i].flags = flags;
desc[i].addr = cpu_to_le64(addr);
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
if (unlikely(vq->use_dma_api)) {
vq->packed.desc_extra[curr].addr = addr;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
le16_to_cpu(flags);
}
prev = curr;
curr = vq->packed.desc_state[curr].next;
if ((unlikely(++i >= vq->packed.vring.num))) {
i = 0;
vq->packed.avail_used_flags ^=
1 << VRING_PACKED_DESC_F_AVAIL |
1 << VRING_PACKED_DESC_F_USED;
}
}
}
if (i <= head)
vq->packed.avail_wrap_counter ^= 1;
/* We're using some buffers from the free list. */
vq->vq.num_free -= descs_used;
/* Update free pointer */
vq->packed.next_avail_idx = i;
vq->free_head = curr;
/* Store token. */
vq->packed.desc_state[id].num = descs_used;
vq->packed.desc_state[id].data = data;
vq->packed.desc_state[id].indir_desc = ctx;
vq->packed.desc_state[id].last = prev;
/*
* A driver MUST NOT make the first descriptor in the list
* available before all subsequent descriptors comprising
* the list are made available.
*/
virtio_wmb(vq->weak_barriers);
vq->packed.vring.desc[head].flags = head_flags;
vq->num_added += descs_used;
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
return 0;
unmap_release:
err_idx = i;
i = head;
vq->packed.avail_used_flags = avail_used_flags;
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
vring_unmap_desc_packed(vq, &desc[i]);
i++;
if (i >= vq->packed.vring.num)
i = 0;
}
END_USE(vq);
return -EIO;
}
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old, off_wrap, flags, wrap_counter, event_idx;
bool needs_kick;
union {
struct {
__le16 off_wrap;
__le16 flags;
};
u32 u32;
} snapshot;
START_USE(vq);
/*
* We need to expose the new flags value before checking notification
* suppressions.
*/
virtio_mb(vq->weak_barriers);
old = vq->packed.next_avail_idx - vq->num_added;
new = vq->packed.next_avail_idx;
vq->num_added = 0;
snapshot.u32 = *(u32 *)vq->packed.vring.device;
flags = le16_to_cpu(snapshot.flags);
LAST_ADD_TIME_CHECK(vq);
LAST_ADD_TIME_INVALID(vq);
if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
goto out;
}
off_wrap = le16_to_cpu(snapshot.off_wrap);
wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
if (wrap_counter != vq->packed.avail_wrap_counter)
event_idx -= vq->packed.vring.num;
needs_kick = vring_need_event(event_idx, new, old);
out:
END_USE(vq);
return needs_kick;
}
static void detach_buf_packed(struct vring_virtqueue *vq,
unsigned int id, void **ctx)
{
struct vring_desc_state_packed *state = NULL;
struct vring_packed_desc *desc;
unsigned int i, curr;
state = &vq->packed.desc_state[id];
/* Clear data ptr. */
state->data = NULL;
vq->packed.desc_state[state->last].next = vq->free_head;
vq->free_head = id;
vq->vq.num_free += state->num;
if (unlikely(vq->use_dma_api)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_state_packed(vq,
&vq->packed.desc_extra[curr]);
curr = vq->packed.desc_state[curr].next;
}
}
if (vq->indirect) {
u32 len;
/* Free the indirect table, if any, now that it's unmapped. */
desc = state->indir_desc;
if (!desc)
return;
if (vq->use_dma_api) {
len = vq->packed.desc_extra[id].len;
for (i = 0; i < len / sizeof(struct vring_packed_desc);
i++)
vring_unmap_desc_packed(vq, &desc[i]);
}
kfree(desc);
state->indir_desc = NULL;
} else if (ctx) {
*ctx = state->indir_desc;
}
}
static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
u16 idx, bool used_wrap_counter)
{
bool avail, used;
u16 flags;
flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
return avail == used && used == used_wrap_counter;
}
static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
return is_used_desc_packed(vq, vq->last_used_idx,
vq->packed.used_wrap_counter);
}
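/*
 * Worked example of the wrap-counter check above: with used_wrap_counter
 * still 1, a descriptor whose flags carry AVAIL = 1 and USED = 1 is "used"
 * (avail == used == wrap counter), while AVAIL = 1, USED = 0 means the
 * driver made it available but the device has not consumed it yet. Once
 * last_used_idx wraps past the ring size, used_wrap_counter flips to 0 and
 * the matching "used" state becomes AVAIL = 0, USED = 0.
 */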
static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
unsigned int *len,
void **ctx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used, id;
void *ret;
START_USE(vq);
if (unlikely(vq->broken)) {
END_USE(vq);
return NULL;
}
if (!more_used_packed(vq)) {
pr_debug("No more buffers in queue\n");
END_USE(vq);
return NULL;
}
/* Only get used elements after they have been exposed by host. */
virtio_rmb(vq->weak_barriers);
last_used = vq->last_used_idx;
id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
if (unlikely(id >= vq->packed.vring.num)) {
BAD_RING(vq, "id %u out of range\n", id);
return NULL;
}
if (unlikely(!vq->packed.desc_state[id].data)) {
BAD_RING(vq, "id %u is not a head!\n", id);
return NULL;
}
/* detach_buf_packed clears data, so grab it now. */
ret = vq->packed.desc_state[id].data;
detach_buf_packed(vq, id, ctx);
vq->last_used_idx += vq->packed.desc_state[id].num;
if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
vq->last_used_idx -= vq->packed.vring.num;
vq->packed.used_wrap_counter ^= 1;
}
/*
* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
* the read in the next get_buf call.
*/
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
virtio_store_mb(vq->weak_barriers,
&vq->packed.vring.driver->off_wrap,
cpu_to_le16(vq->last_used_idx |
(vq->packed.used_wrap_counter <<
VRING_PACKED_EVENT_F_WRAP_CTR)));
LAST_ADD_TIME_INVALID(vq);
END_USE(vq);
return ret;
}
static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
}
static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
START_USE(vq);
/*
* We optimistically turn back on interrupts, then check if there was
* more to do.
*/
if (vq->event) {
vq->packed.vring.driver->off_wrap =
cpu_to_le16(vq->last_used_idx |
(vq->packed.used_wrap_counter <<
VRING_PACKED_EVENT_F_WRAP_CTR));
/*
* We need to update event offset and event wrap
* counter first before updating event flags.
*/
virtio_wmb(vq->weak_barriers);
}
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = vq->event ?
VRING_PACKED_EVENT_FLAG_DESC :
VRING_PACKED_EVENT_FLAG_ENABLE;
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
END_USE(vq);
return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
VRING_PACKED_EVENT_F_WRAP_CTR);
}
static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
struct vring_virtqueue *vq = to_vvq(_vq);
bool wrap_counter;
u16 used_idx;
wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
return is_used_desc_packed(vq, used_idx, wrap_counter);
}
static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 used_idx, wrap_counter;
u16 bufs;
START_USE(vq);
/*
* We optimistically turn back on interrupts, then check if there was
* more to do.
*/
if (vq->event) {
/* TODO: tune this threshold */
bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
wrap_counter = vq->packed.used_wrap_counter;
used_idx = vq->last_used_idx + bufs;
if (used_idx >= vq->packed.vring.num) {
used_idx -= vq->packed.vring.num;
wrap_counter ^= 1;
}
vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
/*
* We need to update event offset and event wrap
* counter first before updating event flags.
*/
virtio_wmb(vq->weak_barriers);
}
if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
vq->packed.event_flags_shadow = vq->event ?
VRING_PACKED_EVENT_FLAG_DESC :
VRING_PACKED_EVENT_FLAG_ENABLE;
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
/*
* We need to update event suppression structure first
* before re-checking for more used buffers.
*/
virtio_mb(vq->weak_barriers);
	if (is_used_desc_packed(vq,
				vq->last_used_idx,
				vq->packed.used_wrap_counter)) {
		END_USE(vq);
		return false;
	}
END_USE(vq);
return true;
}
static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
void *buf;
START_USE(vq);
for (i = 0; i < vq->packed.vring.num; i++) {
if (!vq->packed.desc_state[i].data)
continue;
/* detach_buf clears data, so grab it now. */
buf = vq->packed.desc_state[i].data;
detach_buf_packed(vq, i, NULL);
END_USE(vq);
return buf;
}
/* That should have freed everything. */
BUG_ON(vq->vq.num_free != vq->packed.vring.num);
END_USE(vq);
return NULL;
}
static struct virtqueue *vring_create_virtqueue_packed(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
size_t ring_size_in_bytes, event_size_in_bytes;
unsigned int i;
ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
goto err_ring;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
goto err_driver;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
goto err_device;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
goto err_vq;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->vq.num_free = num;
vq->vq.index = index;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
vq->packed.ring_dma_addr = ring_dma_addr;
vq->packed.driver_event_dma_addr = driver_event_dma_addr;
vq->packed.device_event_dma_addr = device_event_dma_addr;
vq->packed.ring_size_in_bytes = ring_size_in_bytes;
vq->packed.event_size_in_bytes = event_size_in_bytes;
vq->packed.vring.num = num;
vq->packed.vring.desc = ring;
vq->packed.vring.driver = driver;
vq->packed.vring.device = device;
vq->packed.next_avail_idx = 0;
vq->packed.avail_wrap_counter = 1;
vq->packed.used_wrap_counter = 1;
vq->packed.event_flags_shadow = 0;
vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
vq->packed.desc_state = kmalloc_array(num,
sizeof(struct vring_desc_state_packed),
GFP_KERNEL);
if (!vq->packed.desc_state)
goto err_desc_state;
memset(vq->packed.desc_state, 0,
num * sizeof(struct vring_desc_state_packed));
/* Put everything in free lists. */
vq->free_head = 0;
for (i = 0; i < num-1; i++)
vq->packed.desc_state[i].next = i + 1;
vq->packed.desc_extra = kmalloc_array(num,
sizeof(struct vring_desc_extra_packed),
GFP_KERNEL);
if (!vq->packed.desc_extra)
goto err_desc_extra;
memset(vq->packed.desc_extra, 0,
num * sizeof(struct vring_desc_extra_packed));
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
vq->packed.vring.driver->flags =
cpu_to_le16(vq->packed.event_flags_shadow);
}
list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
err_desc_extra:
kfree(vq->packed.desc_state);
err_desc_state:
kfree(vq);
err_vq:
vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
return NULL;
}
/*
* Generic functions and exported symbols.
*/
static inline int virtqueue_add(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int total_sg,
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
void *ctx,
gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
out_sgs, in_sgs, data, ctx, gfp) :
virtqueue_add_split(_vq, sgs, total_sg,
out_sgs, in_sgs, data, ctx, gfp);
}
/**
* virtqueue_add_sgs - expose buffers to other end
* @_vq: the struct virtqueue we're talking about.
* @sgs: array of terminated scatterlists.
* @out_sgs: the number of scatterlists readable by other side
* @in_sgs: the number of scatterlists which are writable (after readable ones)
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
* Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_sgs(struct virtqueue *_vq,
struct scatterlist *sgs[],
unsigned int out_sgs,
unsigned int in_sgs,
void *data,
gfp_t gfp)
{
unsigned int i, total_sg = 0;
/* Count them first. */
for (i = 0; i < out_sgs + in_sgs; i++) {
struct scatterlist *sg;
for (sg = sgs[i]; sg; sg = sg_next(sg))
total_sg++;
}
return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
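/*
 * Illustrative sketch (not part of this file's logic): a driver queuing a
 * request that the device reads a header from and writes a status byte to.
 * The names my_vq, hdr, status and my_token are hypothetical; in a real
 * driver the buffers must be DMA-capable (e.g. heap-allocated), not stack
 * variables.
 *
 *	struct scatterlist hdr_sg, status_sg, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr_sg, hdr, sizeof(*hdr));	   device-readable
 *	sg_init_one(&status_sg, status, sizeof(*status));  device-writable
 *	sgs[0] = &hdr_sg;
 *	sgs[1] = &status_sg;
 *	err = virtqueue_add_sgs(my_vq, sgs, 1, 1, my_token, GFP_ATOMIC);
 *	if (err)
 *		return err;	e.g. -ENOSPC when the ring is full
 *	virtqueue_kick(my_vq);
 */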
/**
* virtqueue_add_outbuf - expose output buffers to other end
* @vq: the struct virtqueue we're talking about.
* @sg: scatterlist (must be well-formed and terminated!)
* @num: the number of entries in @sg readable by other side
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
* Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist *sg, unsigned int num,
void *data,
gfp_t gfp)
{
return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
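/*
 * Illustrative sketch, assuming a hypothetical heap-allocated buffer `msg`
 * of length `len` used as its own token: exposing one device-readable
 * buffer is just sg_init_one() plus virtqueue_add_outbuf().
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, msg, len);
 *	if (virtqueue_add_outbuf(my_vq, &sg, 1, msg, GFP_KERNEL) == 0)
 *		virtqueue_kick(my_vq);
 */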
/**
* virtqueue_add_inbuf - expose input buffers to other end
* @vq: the struct virtqueue we're talking about.
* @sg: scatterlist (must be well-formed and terminated!)
* @num: the number of entries in @sg writable by other side
* @data: the token identifying the buffer.
* @gfp: how to do memory allocations (if necessary).
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
* Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_inbuf(struct virtqueue *vq,
struct scatterlist *sg, unsigned int num,
void *data,
gfp_t gfp)
{
return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
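/*
 * Illustrative sketch (hypothetical names rx_vq, RX_BUF_LEN): keeping a
 * receive queue topped up by posting device-writable buffers until the
 * ring is full, then kicking once.
 *
 *	struct scatterlist sg;
 *	void *buf;
 *	int err;
 *
 *	for (;;) {
 *		buf = kmalloc(RX_BUF_LEN, GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		sg_init_one(&sg, buf, RX_BUF_LEN);
 *		err = virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_KERNEL);
 *		if (err) {
 *			kfree(buf);	typically -ENOSPC: ring is full
 *			break;
 *		}
 *	}
 *	virtqueue_kick(rx_vq);
 */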
/**
* virtqueue_add_inbuf_ctx - expose input buffers to other end
* @vq: the struct virtqueue we're talking about.
* @sg: scatterlist (must be well-formed and terminated!)
* @num: the number of entries in @sg writable by other side
* @data: the token identifying the buffer.
* @ctx: extra context for the token
* @gfp: how to do memory allocations (if necessary).
*
* Caller must ensure we don't call this with other virtqueue operations
* at the same time (except where noted).
*
* Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
*/
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
struct scatterlist *sg, unsigned int num,
void *data,
void *ctx,
gfp_t gfp)
{
return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
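/*
 * Illustrative sketch (hypothetical names): @ctx lets a driver attach a
 * second pointer to a buffer, e.g. per-buffer metadata, which is handed
 * back by virtqueue_get_buf_ctx() alongside @data. virtio-net uses @ctx
 * for its mergeable receive buffers; `meta` below is purely hypothetical.
 *
 *	sg_init_one(&sg, buf, RX_BUF_LEN);
 *	err = virtqueue_add_inbuf_ctx(rx_vq, &sg, 1, buf, meta, GFP_ATOMIC);
 */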
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @_vq: the struct virtqueue
*
* Instead of virtqueue_kick(), you can do:
* if (virtqueue_kick_prepare(vq))
* virtqueue_notify(vq);
*
* This is sometimes useful because virtqueue_kick_prepare() needs
* to be serialized, but the actual virtqueue_notify() call does not.
*/
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
* virtqueue_notify - second half of split virtqueue_kick call.
* @_vq: the struct virtqueue
*
* This does not need to be serialized.
*
* Returns false if host notify failed or queue is broken, otherwise true.
*/
bool virtqueue_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (unlikely(vq->broken))
return false;
/* Prod other side to tell it about changes. */
if (!vq->notify(_vq)) {
vq->broken = true;
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
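/*
 * Illustrative sketch (hypothetical lock and vq names): the prepare/notify
 * split lets a driver decide whether a notification is needed while still
 * holding its queue lock, but issue the potentially slow notification
 * after dropping it.
 *
 *	unsigned long flags;
 *	bool kick;
 *	int err;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_outbuf(my_vq, &sg, 1, my_token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(my_vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *	if (kick)
 *		virtqueue_notify(my_vq);
 */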
/**
* virtqueue_kick - update after add_buf
* @vq: the struct virtqueue
*
* After one or more virtqueue_add_* calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*
* Returns false if kick failed, otherwise true.
*/
bool virtqueue_kick(struct virtqueue *vq)
{
if (virtqueue_kick_prepare(vq))
return virtqueue_notify(vq);
return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
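/*
 * Illustrative sketch (hypothetical names): several buffers can be added
 * back to back and the device kicked once afterwards, which keeps the
 * number of doorbell writes (and VM exits) down.
 *
 *	for (i = 0; i < n; i++)
 *		if (virtqueue_add_outbuf(my_vq, &sg[i], 1, req[i], GFP_ATOMIC))
 *			break;
 *	virtqueue_kick(my_vq);
 */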
/**
* virtqueue_get_buf - get the next used buffer
* @_vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
* @ctx: extra context for the token
*
* If the device wrote data into the buffer, @len will be set to the
* amount written. This means you don't need to clear the buffer
* beforehand to ensure there's no data leakage in the case of short
* writes.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
* handed to virtqueue_add_*().
*/
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
void **ctx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
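/*
 * Illustrative sketch (hypothetical names): completions are typically
 * drained in the virtqueue callback by looping until virtqueue_get_buf()
 * returns NULL; @len is only meaningful for device-written buffers.
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *req;
 *
 *		while ((req = virtqueue_get_buf(vq, &len)) != NULL)
 *			my_complete_request(req, len);
 *	}
 */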
/**
* virtqueue_disable_cb - disable callbacks
* @_vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
* useful as an optimization.
*
* Unlike other operations, this need not be serialized.
*/
void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->packed_ring)
virtqueue_disable_cb_packed(_vq);
else
virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
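/*
 * Illustrative sketch (hypothetical names): a NAPI-style driver disables
 * callbacks in its virtqueue callback and switches to polling, re-enabling
 * them once the queue runs dry. Since disabling is only a hint, the poll
 * path must still tolerate spurious callbacks.
 *
 *	static void my_rx_done(struct virtqueue *vq)
 *	{
 *		virtqueue_disable_cb(vq);
 *		my_schedule_poll();	hypothetical deferred-work hook
 *	}
 */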
/**
* virtqueue_enable_cb_prepare - restart callbacks after disable_cb
* @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns the current queue state
* in an opaque unsigned value. This value should be later tested by
* virtqueue_poll, to detect a possible race between the driver checking for
* more work, and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
* virtqueue_poll - query pending used buffers
* @_vq: the struct virtqueue we're talking about.
* @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
*
* Returns "true" if there are pending used buffers in the queue.
*
* This does not need to be serialized.
*/
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (unlikely(vq->broken))
return false;
virtio_mb(vq->weak_barriers);
return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
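/*
 * Illustrative sketch (hypothetical names): the prepare/poll pair closes
 * the race between "no more work" and "re-enable interrupts"; a NAPI-style
 * poll loop can use it like this.
 *
 *	unsigned opaque;
 *
 *	opaque = virtqueue_enable_cb_prepare(my_vq);
 *	if (virtqueue_poll(my_vq, opaque)) {
 *		a buffer was used after the last check: keep polling
 *		virtqueue_disable_cb(my_vq);
 *		my_schedule_poll();
 *	}
 */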
/**
* virtqueue_enable_cb - restart callbacks after disable_cb.
* @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
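/*
 * Illustrative sketch (hypothetical names): the usual drain loop re-checks
 * after re-enabling callbacks, since a buffer may have been used between
 * the last virtqueue_get_buf() and virtqueue_enable_cb().
 *
 *	again:
 *	while ((req = virtqueue_get_buf(my_vq, &len)))
 *		my_complete_request(req, len);
 *	if (!virtqueue_enable_cb(my_vq)) {
 *		virtqueue_disable_cb(my_vq);
 *		goto again;
 *	}
 */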
/**
* virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
* @_vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
* interrupts until most of the available buffers have been processed;
* it returns "false" if there are many pending buffers in the queue,
* to detect a possible race between the driver checking for more work,
* and enabling callbacks.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
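/*
 * Illustrative sketch (hypothetical names): transmit-completion cleanup
 * often uses the delayed variant so the device only interrupts after a
 * large share of the in-flight buffers has been used, rather than once
 * per buffer.
 *
 *	my_free_old_tx_buffers(my_vq);
 *	if (!virtqueue_enable_cb_delayed(my_vq)) {
 *		many completions already pending: handle them now
 *		virtqueue_disable_cb(my_vq);
 *		my_free_old_tx_buffers(my_vq);
 *	}
 */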
/**
* virtqueue_detach_unused_buf - detach first unused buffer
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
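/*
 * Illustrative sketch (hypothetical names): during device removal, once
 * the device has been reset so the queue can no longer change, buffers
 * that were never used are reclaimed one by one.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(my_vq)) != NULL)
 *		kfree(buf);
 */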
static inline bool more_used(const struct vring_virtqueue *vq)
{
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (!more_used(vq)) {
pr_debug("virtqueue interrupt with no work for %p\n", vq);
return IRQ_NONE;
}
if (unlikely(vq->broken))
return IRQ_HANDLED;
pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
if (vq->vq.callback)
vq->vq.callback(&vq->vq);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring vring,
struct virtio_device *vdev,
bool weak_barriers,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
unsigned int i;
struct vring_virtqueue *vq;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
return NULL;
vq->packed_ring = false;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->vq.num_free = vring.num;
vq->vq.index = index;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
vq->split.queue_dma_addr = 0;
vq->split.queue_size_in_bytes = 0;
vq->split.vring = vring;
vq->split.avail_flags_shadow = 0;
vq->split.avail_idx_shadow = 0;
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
vq->split.avail_flags_shadow);
}
vq->split.desc_state = kmalloc_array(vring.num,
sizeof(struct vring_desc_state_split), GFP_KERNEL);
if (!vq->split.desc_state) {
kfree(vq);
return NULL;
}
/* Put everything in free lists. */
vq->free_head = 0;
for (i = 0; i < vring.num-1; i++)
vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
bool context,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
context, notify, callback, name);
return vring_create_virtqueue_split(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
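/*
 * Illustrative sketch (hypothetical transport code): the split/packed
 * decision is made here based on VIRTIO_F_RING_PACKED, so a transport
 * simply calls vring_create_virtqueue(). my_notify, my_callback and the
 * SMP_CACHE_BYTES alignment are example choices, not requirements.
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true,	weak_barriers
 *				    true,	may_reduce_num
 *				    false,	context
 *				    my_notify, my_callback, "my-vq");
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */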
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
bool context,
void *pages,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name)
{
struct vring vring;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
vring_init(&vring, num, pages, vring_align);
return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
vq->packed.ring_size_in_bytes,
vq->packed.vring.desc,
vq->packed.ring_dma_addr);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.driver,
vq->packed.driver_event_dma_addr);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.device,
vq->packed.device_event_dma_addr);
kfree(vq->packed.desc_state);
kfree(vq->packed.desc_extra);
} else {
vring_free_queue(vq->vq.vdev,
vq->split.queue_size_in_bytes,
vq->split.vring.desc,
vq->split.queue_dma_addr);
}
}
if (!vq->packed_ring)
kfree(vq->split.desc_state);
list_del(&_vq->list);
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
unsigned int i;
for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
switch (i) {
case VIRTIO_RING_F_INDIRECT_DESC:
break;
case VIRTIO_RING_F_EVENT_IDX:
break;
case VIRTIO_F_VERSION_1:
break;
case VIRTIO_F_IOMMU_PLATFORM:
break;
case VIRTIO_F_RING_PACKED:
break;
case VIRTIO_F_ORDER_PLATFORM:
break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
}
}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
* virtqueue_get_vring_size - return the size of the virtqueue's vring
* @_vq: the struct virtqueue containing the vring of interest.
*
* Returns the size of the vring. This is mainly used for boasting to
* userspace. Unlike other operations, this need not be serialized.
*/
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
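/*
 * Illustrative sketch (hypothetical names): drivers commonly size their
 * own per-request bookkeeping from the negotiated ring size.
 *
 *	nr = virtqueue_get_vring_size(my_vq);
 *	reqs = kcalloc(nr, sizeof(*reqs), GFP_KERNEL);
 */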
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
* This should prevent the device from being used, allowing drivers to
* recover. You may need to grab appropriate locks to flush.
*/
void virtio_break_device(struct virtio_device *dev)
{
struct virtqueue *_vq;
list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq);
/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
WRITE_ONCE(vq->broken, true);
}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
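/*
 * Illustrative sketch (hypothetical names): a driver that detects a fatal
 * device error can mark all rings broken so further I/O fails fast, then
 * take its normal reset/remove path. Afterwards virtqueue_is_broken()
 * returns true and vring_interrupt() no longer invokes callbacks.
 *
 *	if (fatal_error)
 *		virtio_break_device(my_vdev);
 */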
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
if (vq->packed_ring)
return vq->packed.ring_dma_addr;
return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
if (vq->packed_ring)
return vq->packed.driver_event_dma_addr;
return vq->split.queue_dma_addr +
((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
BUG_ON(!vq->we_own_ring);
if (vq->packed_ring)
return vq->packed.device_event_dma_addr;
return vq->split.queue_dma_addr +
((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
MODULE_LICENSE("GPL");