android_kernel_xiaomi_sm8450/fs/btrfs/raid56.c
Greg Kroah-Hartman fbe6a13851 This is the 5.10.137 stable release
-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmMCMDQACgkQONu9yGCS
 aT6TwRAAvj1dnV1nLVVNET3jcelTO65SVUUpQjiyGD1npZQaQdH5PoGR0VhMWk7y
 mLUIwJyp/rR7+OLD3BMFwxDimDWHviFGdbmm/8fsyDrARuOeRd/M1fvtHXjIRQdb
 nOvfo1yTQWp4xA1k/JwJZslkvRFDsofXWHCRf+ffEryTRanFAVc7u5aFIg92W0b/
 JWYWEFe99C4TJ7LACpDoGaP9gE6WXsupaxSZBIu+Wxa+PfDmIeRRTkQn+j4Khn0h
 I6w+LkLd6ZP3l7sbe9KfS9ZGo1wWLgSng4zz742Z9IaFgxyj2ArS9tNsYCLkkhAM
 gLSXXkiPBAxUvAtDxR1tc0YROHc1bjAttSoxNXcaaacspSo/Vi0VAtp7t6boK0bI
 /8P3dh+Hq9u/Q1ClhZtVoFpp+GVj0fDbDd56qVcr2Cp6IokpqRJog1Jhgj0CVCoG
 iElr3n0+y7/IZfmE6/U1cK00SNcW86e2YduuIy4ifCawRT574zkRiSYZalpaO3qM
 z1lF9p+zUNq3v2q0wxXuBDLi/yPoJzbJgmCGScj4ryjjr6TOvR1udSVWkJ02dR4H
 s9km3lNLgoUPCYCLBMlZl7em4T49E09/+4YCrnj/Ezp+YdImf2+QzZyd/gG3ITl2
 fW7lpbK1dx3d/19JFP6Xkj9PaIlMl9e8Ne04G+Dabv67uN+0U+g=
 =Z4rz
 -----END PGP SIGNATURE-----

Merge 5.10.137 into android12-5.10-lts

Changes in 5.10.137
	Makefile: link with -z noexecstack --no-warn-rwx-segments
	x86: link vdso and boot with -z noexecstack --no-warn-rwx-segments
	Revert "pNFS: nfs3_set_ds_client should set NFS_CS_NOPING"
	scsi: Revert "scsi: qla2xxx: Fix disk failure to rediscover"
	ALSA: bcd2000: Fix a UAF bug on the error path of probing
	ALSA: hda/realtek: Add quirk for Clevo NV45PZ
	ALSA: hda/realtek: Add quirk for HP Spectre x360 15-eb0xxx
	wifi: mac80211_hwsim: fix race condition in pending packet
	wifi: mac80211_hwsim: add back erroneously removed cast
	wifi: mac80211_hwsim: use 32-bit skb cookie
	add barriers to buffer_uptodate and set_buffer_uptodate
	HID: wacom: Only report rotation for art pen
	HID: wacom: Don't register pad_input for touch switch
	KVM: nVMX: Snapshot pre-VM-Enter BNDCFGS for !nested_run_pending case
	KVM: nVMX: Snapshot pre-VM-Enter DEBUGCTL for !nested_run_pending case
	KVM: SVM: Don't BUG if userspace injects an interrupt with GIF=0
	KVM: s390: pv: don't present the ecall interrupt twice
	KVM: nVMX: Let userspace set nVMX MSR to any _host_ supported value
	KVM: x86: Mark TSS busy during LTR emulation _after_ all fault checks
	KVM: x86: Set error code to segment selector on LLDT/LTR non-canonical #GP
	KVM: x86: Tag kvm_mmu_x86_module_init() with __init
	riscv: set default pm_power_off to NULL
	mm: Add kvrealloc()
	xfs: only set IOMAP_F_SHARED when providing a srcmap to a write
	xfs: fix I_DONTCACHE
	mm/mremap: hold the rmap lock in write mode when moving page table entries.
	ALSA: hda/conexant: Add quirk for LENOVO 20149 Notebook model
	ALSA: hda/cirrus - support for iMac 12,1 model
	ALSA: hda/realtek: Add quirk for another Asus K42JZ model
	ALSA: hda/realtek: Add a quirk for HP OMEN 15 (8786) mute LED
	tty: vt: initialize unicode screen buffer
	vfs: Check the truncate maximum size in inode_newsize_ok()
	fs: Add missing umask strip in vfs_tmpfile
	thermal: sysfs: Fix cooling_device_stats_setup() error code path
	fbcon: Fix boundary checks for fbcon=vc:n1-n2 parameters
	fbcon: Fix accelerated fbdev scrolling while logo is still shown
	usbnet: Fix linkwatch use-after-free on disconnect
	ovl: drop WARN_ON() dentry is NULL in ovl_encode_fh()
	parisc: Fix device names in /proc/iomem
	parisc: Check the return value of ioremap() in lba_driver_probe()
	parisc: io_pgetevents_time64() needs compat syscall in 32-bit compat mode
	drm/gem: Properly annotate WW context on drm_gem_lock_reservations() error
	drm/vc4: hdmi: Disable audio if dmas property is present but empty
	drm/nouveau: fix another off-by-one in nvbios_addr
	drm/nouveau: Don't pm_runtime_put_sync(), only pm_runtime_put_autosuspend()
	drm/nouveau/acpi: Don't print error when we get -EINPROGRESS from pm_runtime
	drm/amdgpu: Check BO's requested pinning domains against its preferred_domains
	mtd: rawnand: arasan: Update NAND bus clock instead of system clock
	iio: light: isl29028: Fix the warning in isl29028_remove()
	scsi: sg: Allow waiting for commands to complete on removed device
	scsi: qla2xxx: Fix incorrect display of max frame size
	scsi: qla2xxx: Zero undefined mailbox IN registers
	fuse: limit nsec
	serial: mvebu-uart: uart2 error bits clearing
	md-raid: destroy the bitmap after destroying the thread
	md-raid10: fix KASAN warning
	media: [PATCH] pci: atomisp_cmd: fix three missing checks on list iterator
	ia64, processor: fix -Wincompatible-pointer-types in ia64_get_irr()
	PCI: Add defines for normal and subtractive PCI bridges
	powerpc/fsl-pci: Fix Class Code of PCIe Root Port
	powerpc/ptdump: Fix display of RW pages on FSL_BOOK3E
	powerpc/powernv: Avoid crashing if rng is NULL
	MIPS: cpuinfo: Fix a warning for CONFIG_CPUMASK_OFFSTACK
	coresight: Clear the connection field properly
	usb: typec: ucsi: Acknowledge the GET_ERROR_STATUS command completion
	USB: HCD: Fix URB giveback issue in tasklet function
	ARM: dts: uniphier: Fix USB interrupts for PXs2 SoC
	arm64: dts: uniphier: Fix USB interrupts for PXs3 SoC
	usb: dwc3: gadget: refactor dwc3_repare_one_trb
	usb: dwc3: gadget: fix high speed multiplier setting
	lockdep: Allow tuning tracing capacity constants.
	netfilter: nf_tables: do not allow SET_ID to refer to another table
	netfilter: nf_tables: do not allow CHAIN_ID to refer to another table
	netfilter: nf_tables: do not allow RULE_ID to refer to another chain
	netfilter: nf_tables: fix null deref due to zeroed list head
	epoll: autoremove wakers even more aggressively
	x86: Handle idle=nomwait cmdline properly for x86_idle
	arm64: Do not forget syscall when starting a new thread.
	arm64: fix oops in concurrently setting insn_emulation sysctls
	ext2: Add more validity checks for inode counts
	genirq: Don't return error on missing optional irq_request_resources()
	irqchip/mips-gic: Only register IPI domain when SMP is enabled
	genirq: GENERIC_IRQ_IPI depends on SMP
	irqchip/mips-gic: Check the return value of ioremap() in gic_of_init()
	wait: Fix __wait_event_hrtimeout for RT/DL tasks
	ARM: dts: imx6ul: add missing properties for sram
	ARM: dts: imx6ul: change operating-points to uint32-matrix
	ARM: dts: imx6ul: fix keypad compatible
	ARM: dts: imx6ul: fix csi node compatible
	ARM: dts: imx6ul: fix lcdif node compatible
	ARM: dts: imx6ul: fix qspi node compatible
	ARM: dts: BCM5301X: Add DT for Meraki MR26
	spi: synquacer: Add missing clk_disable_unprepare()
	ARM: OMAP2+: display: Fix refcount leak bug
	ACPI: EC: Remove duplicate ThinkPad X1 Carbon 6th entry from DMI quirks
	ACPI: EC: Drop the EC_FLAGS_IGNORE_DSDT_GPE quirk
	ACPI: PM: save NVS memory for Lenovo G40-45
	ACPI: LPSS: Fix missing check in register_device_clock()
	arm64: dts: qcom: ipq8074: fix NAND node name
	arm64: dts: allwinner: a64: orangepi-win: Fix LED node name
	ARM: shmobile: rcar-gen2: Increase refcount for new reference
	firmware: tegra: Fix error check return value of debugfs_create_file()
	PM: hibernate: defer device probing when resuming from hibernation
	selinux: Add boundary check in put_entry()
	powerpc/64s: Disable stack variable initialisation for prom_init
	spi: spi-rspi: Fix PIO fallback on RZ platforms
	ARM: findbit: fix overflowing offset
	meson-mx-socinfo: Fix refcount leak in meson_mx_socinfo_init
	arm64: dts: renesas: beacon: Fix regulator node names
	ARM: bcm: Fix refcount leak in bcm_kona_smc_init
	ACPI: processor/idle: Annotate more functions to live in cpuidle section
	ARM: dts: imx7d-colibri-emmc: add cpu1 supply
	Input: atmel_mxt_ts - fix up inverted RESET handler
	soc: renesas: r8a779a0-sysc: Fix A2DP1 and A2CV[2357] PDR values
	soc: amlogic: Fix refcount leak in meson-secure-pwrc.c
	arm64: dts: renesas: Fix thermal-sensors on single-zone sensors
	x86/pmem: Fix platform-device leak in error path
	ARM: dts: ast2500-evb: fix board compatible
	ARM: dts: ast2600-evb: fix board compatible
	hexagon: select ARCH_WANT_LD_ORPHAN_WARN
	arm64: cpufeature: Allow different PMU versions in ID_DFR0_EL1
	locking/lockdep: Fix lockdep_init_map_*() confusion
	soc: fsl: guts: machine variable might be unset
	block: fix infinite loop for invalid zone append
	ARM: dts: qcom: mdm9615: add missing PMIC GPIO reg
	ARM: OMAP2+: Fix refcount leak in omapdss_init_of
	ARM: OMAP2+: Fix refcount leak in omap3xxx_prm_late_init
	cpufreq: zynq: Fix refcount leak in zynq_get_revision
	regulator: qcom_smd: Fix pm8916_pldo range
	ACPI: APEI: Fix _EINJ vs EFI_MEMORY_SP
	soc: qcom: ocmem: Fix refcount leak in of_get_ocmem
	soc: qcom: aoss: Fix refcount leak in qmp_cooling_devices_register
	ARM: dts: qcom: pm8841: add required thermal-sensor-cells
	bus: hisi_lpc: fix missing platform_device_put() in hisi_lpc_acpi_probe()
	arm64: dts: mt7622: fix BPI-R64 WPS button
	arm64: tegra: Fix SDMMC1 CD on P2888
	erofs: avoid consecutive detection for Highmem memory
	blk-mq: don't create hctx debugfs dir until q->debugfs_dir is created
	hwmon: (drivetemp) Add module alias
	block: remove the request_queue to argument request based tracepoints
	blktrace: Trace remapped requests correctly
	regulator: of: Fix refcount leak bug in of_get_regulation_constraints()
	soc: qcom: Make QCOM_RPMPD depend on PM
	arm64: dts: qcom: qcs404: Fix incorrect USB2 PHYs assignment
	drivers/perf: arm_spe: Fix consistency of SYS_PMSCR_EL1.CX
	nohz/full, sched/rt: Fix missed tick-reenabling bug in dequeue_task_rt()
	selftests/seccomp: Fix compile warning when CC=clang
	thermal/tools/tmon: Include pthread and time headers in tmon.h
	dm: return early from dm_pr_call() if DM device is suspended
	pwm: sifive: Don't check the return code of pwmchip_remove()
	pwm: sifive: Simplify offset calculation for PWMCMP registers
	pwm: sifive: Ensure the clk is enabled exactly once per running PWM
	pwm: sifive: Shut down hardware only after pwmchip_remove() completed
	pwm: lpc18xx-sct: Convert to devm_platform_ioremap_resource()
	drm/bridge: tc358767: Move (e)DP bridge endpoint parsing into dedicated function
	drm/bridge: tc358767: Make sure Refclk clock are enabled
	ath10k: do not enforce interrupt trigger type
	drm/st7735r: Fix module autoloading for Okaya RH128128T
	wifi: rtlwifi: fix error codes in rtl_debugfs_set_write_h2c()
	ath11k: fix netdev open race
	drm/mipi-dbi: align max_chunk to 2 in spi_transfer
	ath11k: Fix incorrect debug_mask mappings
	drm/radeon: fix potential buffer overflow in ni_set_mc_special_registers()
	drm/mediatek: Modify dsi funcs to atomic operations
	drm/mediatek: Separate poweron/poweroff from enable/disable and define new funcs
	drm/mediatek: Add pull-down MIPI operation in mtk_dsi_poweroff function
	i2c: npcm: Remove own slave addresses 2:10
	i2c: npcm: Correct slave role behavior
	virtio-gpu: fix a missing check to avoid NULL dereference
	drm: adv7511: override i2c address of cec before accessing it
	crypto: sun8i-ss - do not allocate memory when handling hash requests
	crypto: sun8i-ss - fix error codes in allocate_flows()
	net: fix sk_wmem_schedule() and sk_rmem_schedule() errors
	i2c: Fix a potential use after free
	crypto: sun8i-ss - fix infinite loop in sun8i_ss_setup_ivs()
	media: tw686x: Register the irq at the end of probe
	ath9k: fix use-after-free in ath9k_hif_usb_rx_cb
	wifi: iwlegacy: 4965: fix potential off-by-one overflow in il4965_rs_fill_link_cmd()
	drm/radeon: fix incorrrect SPDX-License-Identifiers
	test_bpf: fix incorrect netdev features
	crypto: ccp - During shutdown, check SEV data pointer before using
	drm: bridge: adv7511: Add check for mipi_dsi_driver_register
	drm/mcde: Fix refcount leak in mcde_dsi_bind
	media: hdpvr: fix error value returns in hdpvr_read
	media: v4l2-mem2mem: prevent pollerr when last_buffer_dequeued is set
	media: tw686x: Fix memory leak in tw686x_video_init
	drm/vc4: plane: Remove subpixel positioning check
	drm/vc4: plane: Fix margin calculations for the right/bottom edges
	drm/vc4: dsi: Correct DSI divider calculations
	drm/vc4: dsi: Correct pixel order for DSI0
	drm/vc4: drv: Remove the DSI pointer in vc4_drv
	drm/vc4: dsi: Use snprintf for the PHY clocks instead of an array
	drm/vc4: dsi: Introduce a variant structure
	drm/vc4: dsi: Register dsi0 as the correct vc4 encoder type
	drm/vc4: dsi: Fix dsi0 interrupt support
	drm/vc4: dsi: Add correct stop condition to vc4_dsi_encoder_disable iteration
	drm/vc4: hdmi: Remove firmware logic for MAI threshold setting
	drm/vc4: hdmi: Avoid full hdmi audio fifo writes
	drm/vc4: hdmi: Don't access the connector state in reset if kmalloc fails
	drm/vc4: hdmi: Limit the BCM2711 to the max without scrambling
	drm/vc4: hdmi: Fix timings for interlaced modes
	drm/vc4: hdmi: Correct HDMI timing registers for interlaced modes
	crypto: arm64/gcm - Select AEAD for GHASH_ARM64_CE
	selftests/xsk: Destroy BPF resources only when ctx refcount drops to 0
	drm/rockchip: vop: Don't crash for invalid duplicate_state()
	drm/rockchip: Fix an error handling path rockchip_dp_probe()
	drm/mediatek: dpi: Remove output format of YUV
	drm/mediatek: dpi: Only enable dpi after the bridge is enabled
	drm: bridge: sii8620: fix possible off-by-one
	lib: bitmap: order includes alphabetically
	lib: bitmap: provide devm_bitmap_alloc() and devm_bitmap_zalloc()
	hinic: Use the bitmap API when applicable
	net: hinic: fix bug that ethtool get wrong stats
	net: hinic: avoid kernel hung in hinic_get_stats64()
	drm/msm/mdp5: Fix global state lock backoff
	crypto: hisilicon/sec - fixes some coding style
	crypto: hisilicon/sec - don't sleep when in softirq
	crypto: hisilicon - Kunpeng916 crypto driver don't sleep when in softirq
	media: platform: mtk-mdp: Fix mdp_ipi_comm structure alignment
	mt76: mt76x02u: fix possible memory leak in __mt76x02u_mcu_send_msg
	mediatek: mt76: mac80211: Fix missing of_node_put() in mt76_led_init()
	drm/exynos/exynos7_drm_decon: free resources when clk_set_parent() failed.
	tcp: make retransmitted SKB fit into the send window
	libbpf: Fix the name of a reused map
	selftests: timers: valid-adjtimex: build fix for newer toolchains
	selftests: timers: clocksource-switch: fix passing errors from child
	bpf: Fix subprog names in stack traces.
	fs: check FMODE_LSEEK to control internal pipe splicing
	wifi: wil6210: debugfs: fix info leak in wil_write_file_wmi()
	wifi: p54: Fix an error handling path in p54spi_probe()
	wifi: p54: add missing parentheses in p54_flush()
	selftests/bpf: fix a test for snprintf() overflow
	can: pch_can: do not report txerr and rxerr during bus-off
	can: rcar_can: do not report txerr and rxerr during bus-off
	can: sja1000: do not report txerr and rxerr during bus-off
	can: hi311x: do not report txerr and rxerr during bus-off
	can: sun4i_can: do not report txerr and rxerr during bus-off
	can: kvaser_usb_hydra: do not report txerr and rxerr during bus-off
	can: kvaser_usb_leaf: do not report txerr and rxerr during bus-off
	can: usb_8dev: do not report txerr and rxerr during bus-off
	can: error: specify the values of data[5..7] of CAN error frames
	can: pch_can: pch_can_error(): initialize errc before using it
	Bluetooth: hci_intel: Add check for platform_driver_register
	i2c: cadence: Support PEC for SMBus block read
	i2c: mux-gpmux: Add of_node_put() when breaking out of loop
	wifi: wil6210: debugfs: fix uninitialized variable use in `wil_write_file_wmi()`
	wifi: iwlwifi: mvm: fix double list_add at iwl_mvm_mac_wake_tx_queue
	wifi: libertas: Fix possible refcount leak in if_usb_probe()
	media: cedrus: hevc: Add check for invalid timestamp
	net/mlx5e: Remove WARN_ON when trying to offload an unsupported TLS cipher/version
	net/mlx5e: Fix the value of MLX5E_MAX_RQ_NUM_MTTS
	crypto: hisilicon/hpre - don't use GFP_KERNEL to alloc mem during softirq
	crypto: inside-secure - Add missing MODULE_DEVICE_TABLE for of
	crypto: hisilicon/sec - fix auth key size error
	inet: add READ_ONCE(sk->sk_bound_dev_if) in INET_MATCH()
	tcp: sk->sk_bound_dev_if once in inet_request_bound_dev_if()
	ipv6: add READ_ONCE(sk->sk_bound_dev_if) in INET6_MATCH()
	tcp: Fix data-races around sysctl_tcp_l3mdev_accept.
	net: allow unbound socket for packets in VRF when tcp_l3mdev_accept set
	iavf: Fix max_rate limiting
	netdevsim: Avoid allocation warnings triggered from user space
	net: rose: fix netdev reference changes
	net: ionic: fix error check for vlan flags in ionic_set_nic_features()
	dccp: put dccp_qpolicy_full() and dccp_qpolicy_push() in the same lock
	wireguard: ratelimiter: use hrtimer in selftest
	wireguard: allowedips: don't corrupt stack when detecting overflow
	clk: renesas: r9a06g032: Fix UART clkgrp bitsel
	mtd: maps: Fix refcount leak in of_flash_probe_versatile
	mtd: maps: Fix refcount leak in ap_flash_init
	mtd: rawnand: meson: Fix a potential double free issue
	PCI: tegra194: Fix PM error handling in tegra_pcie_config_ep()
	HID: cp2112: prevent a buffer overflow in cp2112_xfer()
	mtd: sm_ftl: Fix deadlock caused by cancel_work_sync in sm_release
	mtd: partitions: Fix refcount leak in parse_redboot_of
	mtd: st_spi_fsm: Add a clk_disable_unprepare() in .probe()'s error path
	fpga: altera-pr-ip: fix unsigned comparison with less than zero
	usb: host: Fix refcount leak in ehci_hcd_ppc_of_probe
	usb: ohci-nxp: Fix refcount leak in ohci_hcd_nxp_probe
	usb: gadget: tegra-xudc: Fix error check in tegra_xudc_powerdomain_init()
	usb: xhci: tegra: Fix error check
	netfilter: xtables: Bring SPDX identifier back
	iio: accel: bma400: Fix the scale min and max macro values
	platform/chrome: cros_ec: Always expose last resume result
	iio: accel: bma400: Reordering of header files
	clk: mediatek: reset: Fix written reset bit offset
	KVM: Don't set Accessed/Dirty bits for ZERO_PAGE
	mwifiex: Ignore BTCOEX events from the 88W8897 firmware
	mwifiex: fix sleep in atomic context bugs caused by dev_coredumpv
	dmaengine: dw-edma: Fix eDMA Rd/Wr-channels and DMA-direction semantics
	misc: rtsx: Fix an error handling path in rtsx_pci_probe()
	driver core: fix potential deadlock in __driver_attach
	clk: qcom: clk-krait: unlock spin after mux completion
	usb: host: xhci: use snprintf() in xhci_decode_trb()
	clk: qcom: ipq8074: fix NSS core PLL-s
	clk: qcom: ipq8074: SW workaround for UBI32 PLL lock
	clk: qcom: ipq8074: fix NSS port frequency tables
	clk: qcom: ipq8074: set BRANCH_HALT_DELAY flag for UBI clocks
	clk: qcom: camcc-sdm845: Fix topology around titan_top power domain
	PCI: dwc: Add unroll iATU space support to dw_pcie_disable_atu()
	PCI: dwc: Deallocate EPC memory on dw_pcie_ep_init() errors
	PCI: dwc: Always enable CDM check if "snps,enable-cdm-check" exists
	soundwire: bus_type: fix remove and shutdown support
	KVM: arm64: Don't return from void function
	dmaengine: sf-pdma: apply proper spinlock flags in sf_pdma_prep_dma_memcpy()
	dmaengine: sf-pdma: Add multithread support for a DMA channel
	PCI: endpoint: Don't stop controller when unbinding endpoint function
	intel_th: Fix a resource leak in an error handling path
	intel_th: msu-sink: Potential dereference of null pointer
	intel_th: msu: Fix vmalloced buffers
	staging: rtl8192u: Fix sleep in atomic context bug in dm_fsync_timer_callback
	mmc: sdhci-of-esdhc: Fix refcount leak in esdhc_signal_voltage_switch
	memstick/ms_block: Fix some incorrect memory allocation
	memstick/ms_block: Fix a memory leak
	mmc: sdhci-of-at91: fix set_uhs_signaling rewriting of MC1R
	mmc: block: Add single read for 4k sector cards
	KVM: s390: pv: leak the topmost page table when destroy fails
	PCI/portdrv: Don't disable AER reporting in get_port_device_capability()
	PCI: qcom: Set up rev 2.1.0 PARF_PHY before enabling clocks
	scsi: smartpqi: Fix DMA direction for RAID requests
	xtensa: iss/network: provide release() callback
	xtensa: iss: fix handling error cases in iss_net_configure()
	usb: gadget: udc: amd5536 depends on HAS_DMA
	usb: aspeed-vhub: Fix refcount leak bug in ast_vhub_init_desc()
	usb: dwc3: core: Deprecate GCTL.CORESOFTRESET
	usb: dwc3: core: Do not perform GCTL_CORE_SOFTRESET during bootup
	usb: dwc3: qcom: fix missing optional irq warnings
	eeprom: idt_89hpesx: uninitialized data in idt_dbgfs_csr_write()
	interconnect: imx: fix max_node_id
	um: random: Don't initialise hwrng struct with zero
	RDMA/rtrs: Define MIN_CHUNK_SIZE
	RDMA/rtrs: Avoid Wtautological-constant-out-of-range-compare
	RDMA/rtrs-srv: Fix modinfo output for stringify
	RDMA/qedr: Improve error logs for rdma_alloc_tid error return
	RDMA/qedr: Fix potential memory leak in __qedr_alloc_mr()
	RDMA/hns: Fix incorrect clearing of interrupt status register
	RDMA/siw: Fix duplicated reported IW_CM_EVENT_CONNECT_REPLY event
	RDMA/hfi1: fix potential memory leak in setup_base_ctxt()
	gpio: gpiolib-of: Fix refcount bugs in of_mm_gpiochip_add_data()
	HID: mcp2221: prevent a buffer overflow in mcp_smbus_write()
	mmc: cavium-octeon: Add of_node_put() when breaking out of loop
	mmc: cavium-thunderx: Add of_node_put() when breaking out of loop
	HID: alps: Declare U1_UNICORN_LEGACY support
	PCI: tegra194: Fix Root Port interrupt handling
	PCI: tegra194: Fix link up retry sequence
	USB: serial: fix tty-port initialized comments
	usb: cdns3: change place of 'priv_ep' assignment in cdns3_gadget_ep_dequeue(), cdns3_gadget_ep_enable()
	platform/olpc: Fix uninitialized data in debugfs write
	RDMA/srpt: Duplicate port name members
	RDMA/srpt: Introduce a reference count in struct srpt_device
	RDMA/srpt: Fix a use-after-free
	mm/mmap.c: fix missing call to vm_unacct_memory in mmap_region
	selftests: kvm: set rax before vmcall
	RDMA/mlx5: Add missing check for return value in get namespace flow
	RDMA/rxe: Fix error unwind in rxe_create_qp()
	null_blk: fix ida error handling in null_add_dev()
	nvme: use command_id instead of req->tag in trace_nvme_complete_rq()
	jbd2: fix outstanding credits assert in jbd2_journal_commit_transaction()
	ext4: recover csum seed of tmp_inode after migrating to extents
	jbd2: fix assertion 'jh->b_frozen_data == NULL' failure when journal aborted
	usb: cdns3: Don't use priv_dev uninitialized in cdns3_gadget_ep_enable()
	opp: Fix error check in dev_pm_opp_attach_genpd()
	ASoC: cros_ec_codec: Fix refcount leak in cros_ec_codec_platform_probe
	ASoC: samsung: Fix error handling in aries_audio_probe
	ASoC: mediatek: mt8173: Fix refcount leak in mt8173_rt5650_rt5676_dev_probe
	ASoC: mt6797-mt6351: Fix refcount leak in mt6797_mt6351_dev_probe
	ASoC: codecs: da7210: add check for i2c_add_driver
	ASoC: mediatek: mt8173-rt5650: Fix refcount leak in mt8173_rt5650_dev_probe
	serial: 8250: Export ICR access helpers for internal use
	serial: 8250_dw: Store LSR into lsr_saved_flags in dw8250_tx_wait_empty()
	ASoC: codecs: msm8916-wcd-digital: move gains from SX_TLV to S8_TLV
	ASoC: codecs: wcd9335: move gains from SX_TLV to S8_TLV
	rpmsg: mtk_rpmsg: Fix circular locking dependency
	remoteproc: k3-r5: Fix refcount leak in k3_r5_cluster_of_init
	selftests/livepatch: better synchronize test_klp_callbacks_busy
	profiling: fix shift too large makes kernel panic
	ASoC: samsung: h1940_uda1380: include proepr GPIO consumer header
	powerpc/perf: Optimize clearing the pending PMI and remove WARN_ON for PMI check in power_pmu_disable
	ASoC: samsung: change gpiod_speaker_power and rx1950_audio from global to static variables
	tty: n_gsm: Delete gsmtty open SABM frame when config requester
	tty: n_gsm: fix user open not possible at responder until initiator open
	tty: n_gsm: fix wrong queuing behavior in gsm_dlci_data_output()
	tty: n_gsm: fix non flow control frames during mux flow off
	tty: n_gsm: fix packet re-transmission without open control channel
	tty: n_gsm: fix race condition in gsmld_write()
	ASoC: qcom: Fix missing of_node_put() in asoc_qcom_lpass_cpu_platform_probe()
	remoteproc: qcom: wcnss: Fix handling of IRQs
	vfio: Remove extra put/gets around vfio_device->group
	vfio: Simplify the lifetime logic for vfio_device
	vfio: Split creation of a vfio_device into init and register ops
	vfio/mdev: Make to_mdev_device() into a static inline
	vfio/ccw: Do not change FSM state in subchannel event
	tty: n_gsm: fix wrong T1 retry count handling
	tty: n_gsm: fix DM command
	tty: n_gsm: fix missing corner cases in gsmld_poll()
	iommu/exynos: Handle failed IOMMU device registration properly
	rpmsg: qcom_smd: Fix refcount leak in qcom_smd_parse_edge
	kfifo: fix kfifo_to_user() return type
	lib/smp_processor_id: fix imbalanced instrumentation_end() call
	remoteproc: sysmon: Wait for SSCTL service to come up
	mfd: t7l66xb: Drop platform disable callback
	mfd: max77620: Fix refcount leak in max77620_initialise_fps
	iommu/arm-smmu: qcom_iommu: Add of_node_put() when breaking out of loop
	perf tools: Fix dso_id inode generation comparison
	s390/dump: fix old lowcore virtual vs physical address confusion
	s390/zcore: fix race when reading from hardware system area
	ASoC: fsl_easrc: use snd_pcm_format_t type for sample_format
	ASoC: qcom: q6dsp: Fix an off-by-one in q6adm_alloc_copp()
	fuse: Remove the control interface for virtio-fs
	ASoC: audio-graph-card: Add of_node_put() in fail path
	watchdog: armada_37xx_wdt: check the return value of devm_ioremap() in armada_37xx_wdt_probe()
	video: fbdev: amba-clcd: Fix refcount leak bugs
	video: fbdev: sis: fix typos in SiS_GetModeID()
	ASoC: mchp-spdifrx: disable end of block interrupt on failures
	powerpc/32: Do not allow selection of e5500 or e6500 CPUs on PPC32
	powerpc/pci: Prefer PCI domain assignment via DT 'linux,pci-domain' and alias
	f2fs: don't set GC_FAILURE_PIN for background GC
	f2fs: write checkpoint during FG_GC
	f2fs: fix to remove F2FS_COMPR_FL and tag F2FS_NOCOMP_FL at the same time
	powerpc/spufs: Fix refcount leak in spufs_init_isolated_loader
	powerpc/xive: Fix refcount leak in xive_get_max_prio
	powerpc/cell/axon_msi: Fix refcount leak in setup_msi_msg_address
	perf symbol: Fail to read phdr workaround
	kprobes: Forbid probing on trampoline and BPF code areas
	powerpc/pci: Fix PHB numbering when using opal-phbid
	genelf: Use HAVE_LIBCRYPTO_SUPPORT, not the never defined HAVE_LIBCRYPTO
	scripts/faddr2line: Fix vmlinux detection on arm64
	sched/deadline: Merge dl_task_can_attach() and dl_cpu_busy()
	sched, cpuset: Fix dl_cpu_busy() panic due to empty cs->cpus_allowed
	x86/numa: Use cpumask_available instead of hardcoded NULL check
	video: fbdev: arkfb: Fix a divide-by-zero bug in ark_set_pixclock()
	tools/thermal: Fix possible path truncations
	sched: Fix the check of nr_running at queue wakelist
	x86/entry: Build thunk_$(BITS) only if CONFIG_PREEMPTION=y
	video: fbdev: vt8623fb: Check the size of screen before memset_io()
	video: fbdev: arkfb: Check the size of screen before memset_io()
	video: fbdev: s3fb: Check the size of screen before memset_io()
	scsi: zfcp: Fix missing auto port scan and thus missing target ports
	scsi: qla2xxx: Fix discovery issues in FC-AL topology
	scsi: qla2xxx: Turn off multi-queue for 8G adapters
	scsi: qla2xxx: Fix erroneous mailbox timeout after PCI error injection
	scsi: qla2xxx: Fix losing FCP-2 targets on long port disable with I/Os
	scsi: qla2xxx: Fix losing FCP-2 targets during port perturbation tests
	x86/bugs: Enable STIBP for IBPB mitigated RETBleed
	ftrace/x86: Add back ftrace_expected assignment
	x86/olpc: fix 'logical not is only applied to the left hand side'
	posix-cpu-timers: Cleanup CPU timers before freeing them during exec
	Input: gscps2 - check return value of ioremap() in gscps2_probe()
	__follow_mount_rcu(): verify that mount_lock remains unchanged
	spmi: trace: fix stack-out-of-bound access in SPMI tracing functions
	drm/i915/dg1: Update DMC_DEBUG3 register
	drm/mediatek: Allow commands to be sent during video mode
	drm/mediatek: Keep dsi as LP00 before dcs cmds transfer
	HID: Ignore battery for Elan touchscreen on HP Spectre X360 15-df0xxx
	HID: hid-input: add Surface Go battery quirk
	drm/vc4: drv: Adopt the dma configuration from the HVS or V3D component
	mtd: rawnand: Add a helper to clarify the interface configuration
	mtd: rawnand: arasan: Check the proposed data interface is supported
	mtd: rawnand: Add NV-DDR timings
	mtd: rawnand: arasan: Fix a macro parameter
	mtd: rawnand: arasan: Support NV-DDR interface
	mtd: rawnand: arasan: Fix clock rate in NV-DDR
	usbnet: smsc95xx: Don't clear read-only PHY interrupt
	usbnet: smsc95xx: Avoid link settings race on interrupt reception
	firmware: arm_scpi: Ensure scpi_info is not assigned if the probe fails
	intel_th: pci: Add Meteor Lake-P support
	intel_th: pci: Add Raptor Lake-S PCH support
	intel_th: pci: Add Raptor Lake-S CPU support
	KVM: set_msr_mce: Permit guests to ignore single-bit ECC errors
	KVM: x86: Signal #GP, not -EPERM, on bad WRMSR(MCi_CTL/STATUS)
	iommu/vt-d: avoid invalid memory access via node_online(NUMA_NO_NODE)
	PCI/AER: Write AER Capability only when we control it
	PCI/ERR: Bind RCEC devices to the Root Port driver
	PCI/ERR: Rename reset_link() to reset_subordinates()
	PCI/ERR: Simplify by using pci_upstream_bridge()
	PCI/ERR: Simplify by computing pci_pcie_type() once
	PCI/ERR: Use "bridge" for clarity in pcie_do_recovery()
	PCI/ERR: Avoid negated conditional for clarity
	PCI/ERR: Add pci_walk_bridge() to pcie_do_recovery()
	PCI/ERR: Recover from RCEC AER errors
	PCI/AER: Iterate over error counters instead of error strings
	serial: 8250: Dissociate 4MHz Titan ports from Oxford ports
	serial: 8250: Correct the clock for OxSemi PCIe devices
	serial: 8250_pci: Refactor the loop in pci_ite887x_init()
	serial: 8250_pci: Replace dev_*() by pci_*() macros
	serial: 8250: Fold EndRun device support into OxSemi Tornado code
	dm writecache: set a default MAX_WRITEBACK_JOBS
	kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification
	dm thin: fix use-after-free crash in dm_sm_register_threshold_callback
	timekeeping: contribute wall clock to rng on time change
	um: Allow PM with suspend-to-idle
	btrfs: reject log replay if there is unsupported RO compat flag
	btrfs: reset block group chunk force if we have to wait
	ACPI: CPPC: Do not prevent CPPC from working in the future
	KVM: VMX: Drop guest CPUID check for VMXE in vmx_set_cr4()
	KVM: VMX: Drop explicit 'nested' check from vmx_set_cr4()
	KVM: SVM: Drop VMXE check from svm_set_cr4()
	KVM: x86: Move vendor CR4 validity check to dedicated kvm_x86_ops hook
	KVM: nVMX: Inject #UD if VMXON is attempted with incompatible CR0/CR4
	KVM: x86/pmu: preserve IA32_PERF_CAPABILITIES across CPUID refresh
	KVM: x86/pmu: Use binary search to check filtered events
	KVM: x86/pmu: Use different raw event masks for AMD and Intel
	KVM: x86/pmu: Introduce the ctrl_mask value for fixed counter
	KVM: VMX: Mark all PERF_GLOBAL_(OVF)_CTRL bits reserved if there's no vPMU
	KVM: x86/pmu: Ignore pmu->global_ctrl check if vPMU doesn't support global_ctrl
	xen-blkback: fix persistent grants negotiation
	xen-blkback: Apply 'feature_persistent' parameter when connect
	xen-blkfront: Apply 'feature_persistent' parameter when connect
	KEYS: asymmetric: enforce SM2 signature use pkey algo
	tpm: eventlog: Fix section mismatch for DEBUG_SECTION_MISMATCH
	tracing: Use a struct alignof to determine trace event field alignment
	ext4: check if directory block is within i_size
	ext4: add EXT4_INODE_HAS_XATTR_SPACE macro in xattr.h
	ext4: fix warning in ext4_iomap_begin as race between bmap and write
	ext4: make sure ext4_append() always allocates new block
	ext4: fix use-after-free in ext4_xattr_set_entry
	ext4: update s_overhead_clusters in the superblock during an on-line resize
	ext4: fix extent status tree race in writeback error recovery path
	ext4: correct max_inline_xattr_value_size computing
	ext4: correct the misjudgment in ext4_iget_extra_inode
	dm raid: fix address sanitizer warning in raid_resume
	dm raid: fix address sanitizer warning in raid_status
	net_sched: cls_route: remove from list when handle is 0
	KVM: Add infrastructure and macro to mark VM as bugged
	KVM: x86: Check lapic_in_kernel() before attempting to set a SynIC irq
	KVM: x86: Avoid theoretical NULL pointer dereference in kvm_irq_delivery_to_apic_fast()
	mac80211: fix a memory leak where sta_info is not freed
	tcp: fix over estimation in sk_forced_mem_schedule()
	Revert "mwifiex: fix sleep in atomic context bugs caused by dev_coredumpv"
	drm/bridge: tc358767: Fix (e)DP bridge endpoint parsing in dedicated function
	drm/vc4: change vc4_dma_range_matches from a global to static
	Revert "net: usb: ax88179_178a needs FLAG_SEND_ZLP"
	Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm regression
	mtd: rawnand: arasan: Prevent an unsupported configuration
	kvm: x86/pmu: Fix the compare function used by the pmu event filter
	tee: add overflow check in register_shm_helper()
	net/9p: Initialize the iounit field during fid creation
	net_sched: cls_route: disallow handle of 0
	sched/fair: Fix fault in reweight_entity
	btrfs: only write the sectors in the vertical stripe which has data stripes
	btrfs: raid56: don't trust any cached sector in __raid56_parity_recover()
	Linux 5.10.137

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5775ddfad6460c5a737b1ad3f8e0b8f798338786
2022-08-29 16:53:14 +02:00

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Fusion-io All rights reserved.
* Copyright (C) 2012 Intel Corp. All rights reserved.
*/
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT 1
/*
* set when this rbio is sitting in the hash, but it is just a cache
* of past RMW
*/
#define RBIO_CACHE_BIT 2
/*
* set when it is safe to trust the stripe_pages for caching
*/
#define RBIO_CACHE_READY_BIT 3
#define RBIO_CACHE_SIZE 1024
#define BTRFS_STRIPE_HASH_TABLE_BITS 11
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
struct list_head hash_list;
spinlock_t lock;
};
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
struct list_head stripe_cache;
spinlock_t cache_lock;
int cache_size;
struct btrfs_stripe_hash table[];
};
enum btrfs_rbio_ops {
BTRFS_RBIO_WRITE,
BTRFS_RBIO_READ_REBUILD,
BTRFS_RBIO_PARITY_SCRUB,
BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
struct btrfs_fs_info *fs_info;
struct btrfs_bio *bbio;
/* while we're doing rmw on a stripe
* we put it into a hash table so we can
* lock the stripe and merge more rbios
* into it.
*/
struct list_head hash_list;
/*
* LRU list for the stripe cache
*/
struct list_head stripe_cache;
/*
* for scheduling work in the helper threads
*/
struct btrfs_work work;
/*
* bio list and bio_list_lock are used
* to add more bios into the stripe
* in hopes of avoiding the full rmw
*/
struct bio_list bio_list;
spinlock_t bio_list_lock;
/* also protected by the bio_list_lock, the
* plug list is used by the plugging code
* to collect partial bios while plugged. The
* stripe locking code also uses it to hand off
* the stripe lock to the next pending IO
*/
struct list_head plug_list;
/*
* flags that tell us if it is safe to
* merge with this bio
*/
unsigned long flags;
/* size of each individual stripe on disk */
int stripe_len;
/* number of data stripes (no p/q) */
int nr_data;
int real_stripes;
int stripe_npages;
/*
* set if we're doing a parity rebuild
* for a read from higher up, which is handled
* differently from a parity rebuild as part of
* rmw
*/
enum btrfs_rbio_ops operation;
/* first bad stripe */
int faila;
/* second bad stripe (for raid6 use) */
int failb;
int scrubp;
/*
* number of pages needed to represent the full
* stripe
*/
int nr_pages;
/*
* size of all the bios in the bio_list. This
* helps us decide if the rbio maps to a full
* stripe or not
*/
int bio_list_bytes;
int generic_bio_cnt;
refcount_t refs;
atomic_t stripes_pending;
atomic_t error;
/*
* these are two arrays of pointers. We allocate the
* rbio big enough to hold them both and setup their
* locations when the rbio is allocated
*/
/* pointers to pages that we allocated for
* reading/writing stripes directly from the disk (including P/Q)
*/
struct page **stripe_pages;
/*
* pointers to the pages in the bio_list. Stored
* here for faster lookup
*/
struct page **bio_pages;
/*
* bitmap to record which horizontal stripe has data
*/
unsigned long *dbitmap;
/* allocated with real_stripes-many pointers for finish_*() calls */
void **finish_pointers;
/* allocated with stripe_npages-many bits for finish_*() calls */
unsigned long *finish_pbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check);
static void scrub_parity_work(struct btrfs_work *work);
static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
btrfs_init_work(&rbio->work, work_func, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
* the stripe hash table is used for locking, and to collect
* bios in hopes of making a full stripe
*/
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
struct btrfs_stripe_hash_table *table;
struct btrfs_stripe_hash_table *x;
struct btrfs_stripe_hash *cur;
struct btrfs_stripe_hash *h;
int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
int i;
if (info->stripe_hash_table)
return 0;
/*
* The table is large, starting with order 4 and can go as high as
* order 7 in case lock debugging is turned on.
*
* Try harder to allocate and fallback to vmalloc to lower the chance
* of a failing mount.
*/
table = kvzalloc(struct_size(table, table, num_entries), GFP_KERNEL);
if (!table)
return -ENOMEM;
spin_lock_init(&table->cache_lock);
INIT_LIST_HEAD(&table->stripe_cache);
h = table->table;
for (i = 0; i < num_entries; i++) {
cur = h + i;
INIT_LIST_HEAD(&cur->hash_list);
spin_lock_init(&cur->lock);
}
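/*
 * Publish the table with cmpxchg(): if another thread raced in and
 * already installed its own table, keep theirs and free ours below.
 */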
x = cmpxchg(&info->stripe_hash_table, NULL, table);
if (x)
kvfree(x);
return 0;
}
/*
* caching an rbio means to copy anything from the
* bio_pages array into the stripe_pages array. We
* use the page uptodate bit in the stripe cache array
* to indicate if it has valid data
*
* once the caching is done, we set the cache ready
* bit.
*/
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
int i;
char *s;
char *d;
int ret;
ret = alloc_rbio_pages(rbio);
if (ret)
return;
for (i = 0; i < rbio->nr_pages; i++) {
if (!rbio->bio_pages[i])
continue;
s = kmap(rbio->bio_pages[i]);
d = kmap(rbio->stripe_pages[i]);
copy_page(d, s);
kunmap(rbio->bio_pages[i]);
kunmap(rbio->stripe_pages[i]);
SetPageUptodate(rbio->stripe_pages[i]);
}
set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
* we hash on the first logical address of the stripe
*/
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
u64 num = rbio->bbio->raid_map[0];
/*
* we shift down quite a bit. We're using byte
* addressing, and most of the lower bits are zeros.
* This tends to upset hash_64, and it consistently
* returns just one or two different values.
*
* shifting off the lower bits fixes things.
*/
return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
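/*
 * For example, a full stripe that starts at, say, logical byte address
 * 0x1F400000 hashes on 0x1F400000 >> 16 == 0x1F40, which hash_64()
 * then spreads over the 1 << BTRFS_STRIPE_HASH_TABLE_BITS (2048)
 * buckets; feeding the raw byte address in would let the mostly-zero
 * low bits dominate and cluster everything into a handful of buckets.
 */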
/*
* stealing an rbio means taking all the uptodate pages from the stripe
* array in the source rbio and putting them into the destination rbio
*/
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
int i;
struct page *s;
struct page *d;
if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
return;
for (i = 0; i < dest->nr_pages; i++) {
s = src->stripe_pages[i];
if (!s || !PageUptodate(s)) {
continue;
}
d = dest->stripe_pages[i];
if (d)
__free_page(d);
dest->stripe_pages[i] = s;
src->stripe_pages[i] = NULL;
}
}
/*
* merging means we take the bio_list from the victim and
* splice it into the destination. The victim should
* be discarded afterwards.
*
* must be called with dest->bio_list_lock held
*/
static void merge_rbio(struct btrfs_raid_bio *dest,
struct btrfs_raid_bio *victim)
{
bio_list_merge(&dest->bio_list, &victim->bio_list);
dest->bio_list_bytes += victim->bio_list_bytes;
/* Also inherit the bitmaps from @victim. */
bitmap_or(dest->dbitmap, victim->dbitmap, dest->dbitmap,
dest->stripe_npages);
dest->generic_bio_cnt += victim->generic_bio_cnt;
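/*
 * The bios now belong to @dest; reset the victim's list so it no
 * longer references them.
 */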
bio_list_init(&victim->bio_list);
}
/*
* used to prune items that are in the cache. The caller
* must hold the hash table lock.
*/
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
int bucket = rbio_bucket(rbio);
struct btrfs_stripe_hash_table *table;
struct btrfs_stripe_hash *h;
int freeit = 0;
/*
* check the bit again under the hash table lock.
*/
if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
return;
table = rbio->fs_info->stripe_hash_table;
h = table->table + bucket;
/* hold the lock for the bucket because we may be
* removing it from the hash table
*/
spin_lock(&h->lock);
/*
* hold the lock for the bio list because we need
* to make sure the bio list is empty
*/
spin_lock(&rbio->bio_list_lock);
if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
list_del_init(&rbio->stripe_cache);
table->cache_size -= 1;
freeit = 1;
/* if the bio list isn't empty, this rbio is
* still involved in an IO. We take it out
* of the cache list, and drop the ref that
* was held for the list.
*
* If the bio_list was empty, we also remove
* the rbio from the hash_table, and drop
* the corresponding ref
*/
if (bio_list_empty(&rbio->bio_list)) {
if (!list_empty(&rbio->hash_list)) {
list_del_init(&rbio->hash_list);
refcount_dec(&rbio->refs);
BUG_ON(!list_empty(&rbio->plug_list));
}
}
}
spin_unlock(&rbio->bio_list_lock);
spin_unlock(&h->lock);
if (freeit)
__free_raid_bio(rbio);
}
/*
* prune a given rbio from the cache
*/
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
struct btrfs_stripe_hash_table *table;
unsigned long flags;
if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
return;
table = rbio->fs_info->stripe_hash_table;
spin_lock_irqsave(&table->cache_lock, flags);
__remove_rbio_from_cache(rbio);
spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
* remove everything in the cache
*/
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
struct btrfs_stripe_hash_table *table;
unsigned long flags;
struct btrfs_raid_bio *rbio;
table = info->stripe_hash_table;
spin_lock_irqsave(&table->cache_lock, flags);
while (!list_empty(&table->stripe_cache)) {
rbio = list_entry(table->stripe_cache.next,
struct btrfs_raid_bio,
stripe_cache);
__remove_rbio_from_cache(rbio);
}
spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
* remove all cached entries and free the hash table
* used by unmount
*/
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
if (!info->stripe_hash_table)
return;
btrfs_clear_rbio_cache(info);
kvfree(info->stripe_hash_table);
info->stripe_hash_table = NULL;
}
/*
* insert an rbio into the stripe cache. It
* must have already been prepared by calling
* cache_rbio_pages
*
* If this rbio was already cached, it gets
* moved to the front of the lru.
*
* If the size of the rbio cache is too big, we
* prune an item.
*/
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
struct btrfs_stripe_hash_table *table;
unsigned long flags;
if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
return;
table = rbio->fs_info->stripe_hash_table;
spin_lock_irqsave(&table->cache_lock, flags);
spin_lock(&rbio->bio_list_lock);
/* bump our ref if we were not in the list before */
if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
refcount_inc(&rbio->refs);
if (!list_empty(&rbio->stripe_cache)) {
list_move(&rbio->stripe_cache, &table->stripe_cache);
} else {
list_add(&rbio->stripe_cache, &table->stripe_cache);
table->cache_size += 1;
}
spin_unlock(&rbio->bio_list_lock);
if (table->cache_size > RBIO_CACHE_SIZE) {
struct btrfs_raid_bio *found;
found = list_entry(table->stripe_cache.prev,
struct btrfs_raid_bio,
stripe_cache);
if (found != rbio)
__remove_rbio_from_cache(found);
}
spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
* helper function to run the xor_blocks api. It is only
* able to do MAX_XOR_BLOCKS at a time, so we need to
* loop through.
*/
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
int src_off = 0;
int xor_src_cnt = 0;
void *dest = pages[src_cnt];
while (src_cnt > 0) {
xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
xor_blocks(xor_src_cnt, len, dest, pages + src_off);
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;
}
}
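/*
 * If MAX_XOR_BLOCKS is 4, for example, an 8-source xor into
 * dest = pages[8] is issued as two calls,
 * xor_blocks(4, len, dest, pages + 0) for sources 0-3 and
 * xor_blocks(4, len, dest, pages + 4) for sources 4-7, each xoring its
 * sources into the same destination buffer.
 */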
/*
* Returns true if the bio list inside this rbio covers an entire stripe (no
* rmw required).
*/
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
unsigned long flags;
unsigned long size = rbio->bio_list_bytes;
int ret = 1;
spin_lock_irqsave(&rbio->bio_list_lock, flags);
if (size != rbio->nr_data * rbio->stripe_len)
ret = 0;
BUG_ON(size > rbio->nr_data * rbio->stripe_len);
spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
return ret;
}
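/*
 * For example, on a RAID5 chunk with three data stripes and a 64KiB
 * stripe_len, the rbio is full once the queued bios cover all
 * 3 * 64KiB == 192KiB of data, and the parity can then be computed
 * without reading anything back from disk first.
 */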
/*
* returns 1 if it is safe to merge two rbios together.
* The merging is safe if the two rbios correspond to
* the same stripe and if they are both going in the same
* direction (read vs write), and if neither one is
* locked for final IO
*
* The caller is responsible for locking such that
* rmw_locked is safe to test
*/
static int rbio_can_merge(struct btrfs_raid_bio *last,
struct btrfs_raid_bio *cur)
{
if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
return 0;
/*
* we can't merge with cached rbios, since the
* idea is that when we merge the destination
* rbio is going to run our IO for us. We can
* steal from cached rbios though, other functions
* handle that.
*/
if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
test_bit(RBIO_CACHE_BIT, &cur->flags))
return 0;
if (last->bbio->raid_map[0] !=
cur->bbio->raid_map[0])
return 0;
/* we can't merge with different operations */
if (last->operation != cur->operation)
return 0;
/*
* We need to read the full stripe from the drive, then
* check and repair the parity and write the new results.
*
* We're not allowed to add any new bios to the
* bio list here, anyone else that wants to
* change this stripe needs to do their own rmw.
*/
if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
return 0;
if (last->operation == BTRFS_RBIO_READ_REBUILD) {
int fa = last->faila;
int fb = last->failb;
int cur_fa = cur->faila;
int cur_fb = cur->failb;
if (last->faila >= last->failb) {
fa = last->failb;
fb = last->faila;
}
if (cur->faila >= cur->failb) {
cur_fa = cur->failb;
cur_fb = cur->faila;
}
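/*
 * E.g. a read-rebuild rbio with faila == 2, failb == 0 and another
 * with faila == 0, failb == 2 both normalize to (0, 2) here and are
 * allowed to merge.
 */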
if (fa != cur_fa || fb != cur_fb)
return 0;
}
return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
int index)
{
return stripe * rbio->stripe_npages + index;
}
/*
* these are just the pages from the rbio array, not from anything
* the FS sent down to us
*/
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
int index)
{
return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}
/*
* helper to index into the pstripe
*/
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
return rbio_stripe_page(rbio, rbio->nr_data, index);
}
/*
* helper to index into the qstripe, returns null
* if there is no qstripe
*/
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
if (rbio->nr_data + 1 == rbio->real_stripes)
return NULL;
return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
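/*
 * For example, a RAID6 rbio with real_stripes == 5 (three data plus P
 * and Q) and stripe_npages == 16 lays stripe_pages[] out as data0
 * (0-15), data1 (16-31), data2 (32-47), P (48-63) and Q (64-79), so
 * rbio_pstripe_page(rbio, 5) is stripe_pages[3 * 16 + 5].  On RAID5
 * nr_data + 1 == real_stripes and rbio_qstripe_page() returns NULL.
 */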
/*
* The first stripe in the table for a logical address
* has the lock. rbios are added in one of three ways:
*
* 1) Nobody has the stripe locked yet. The rbio is given
* the lock and 0 is returned. The caller must start the IO
* themselves.
*
* 2) Someone has the stripe locked, but we're able to merge
* with the lock owner. The rbio is freed and the IO will
* start automatically along with the existing rbio. 1 is returned.
*
* 3) Someone has the stripe locked, but we're not able to merge.
* The rbio is added to the lock owner's plug list, or merged into
* an rbio already on the plug list. When the lock owner unlocks,
* the next rbio on the list is run and the IO is started automatically.
* 1 is returned
*
* If we return 0, the caller still owns the rbio and must continue with
* IO submission. If we return 1, the caller must assume the rbio has
* already been freed.
*/
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
struct btrfs_stripe_hash *h;
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *pending;
unsigned long flags;
struct btrfs_raid_bio *freeit = NULL;
struct btrfs_raid_bio *cache_drop = NULL;
int ret = 0;
h = rbio->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
spin_lock_irqsave(&h->lock, flags);
list_for_each_entry(cur, &h->hash_list, hash_list) {
if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
continue;
spin_lock(&cur->bio_list_lock);
/* Can we steal this cached rbio's pages? */
if (bio_list_empty(&cur->bio_list) &&
list_empty(&cur->plug_list) &&
test_bit(RBIO_CACHE_BIT, &cur->flags) &&
!test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
list_del_init(&cur->hash_list);
refcount_dec(&cur->refs);
steal_rbio(cur, rbio);
cache_drop = cur;
spin_unlock(&cur->bio_list_lock);
goto lockit;
}
/* Can we merge into the lock owner? */
if (rbio_can_merge(cur, rbio)) {
merge_rbio(cur, rbio);
spin_unlock(&cur->bio_list_lock);
freeit = rbio;
ret = 1;
goto out;
}
/*
* We couldn't merge with the running rbio, see if we can merge
* with the pending ones. We don't have to check for rmw_locked
* because there is no way they are inside finish_rmw right now
*/
list_for_each_entry(pending, &cur->plug_list, plug_list) {
if (rbio_can_merge(pending, rbio)) {
merge_rbio(pending, rbio);
spin_unlock(&cur->bio_list_lock);
freeit = rbio;
ret = 1;
goto out;
}
}
/*
* No merging, put us on the tail of the plug list. Our rbio
* will be started when the currently running rbio unlocks
*/
list_add_tail(&rbio->plug_list, &cur->plug_list);
spin_unlock(&cur->bio_list_lock);
ret = 1;
goto out;
}
lockit:
refcount_inc(&rbio->refs);
list_add(&rbio->hash_list, &h->hash_list);
out:
spin_unlock_irqrestore(&h->lock, flags);
if (cache_drop)
remove_rbio_from_cache(cache_drop);
if (freeit)
__free_raid_bio(freeit);
return ret;
}
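/*
 * A submission path is therefore expected to look roughly like
 * (sketch only):
 *
 *	if (lock_stripe_add(rbio) == 0)
 *		kick off the IO, e.g. finish_rmw() or start_async_work();
 *	else
 *		do nothing - the rbio was merged or queued on a plug list
 *		and may already have been freed.
 */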
/*
* called as rmw or parity rebuild is completed. If the plug list has more
* rbios waiting for this stripe, the next one on the list will be started
*/
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
int bucket;
struct btrfs_stripe_hash *h;
unsigned long flags;
int keep_cache = 0;
bucket = rbio_bucket(rbio);
h = rbio->fs_info->stripe_hash_table->table + bucket;
if (list_empty(&rbio->plug_list))
cache_rbio(rbio);
spin_lock_irqsave(&h->lock, flags);
spin_lock(&rbio->bio_list_lock);
if (!list_empty(&rbio->hash_list)) {
/*
* if we're still cached and there is no other IO
* to perform, just leave this rbio here for others
* to steal from later
*/
if (list_empty(&rbio->plug_list) &&
test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
keep_cache = 1;
clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
BUG_ON(!bio_list_empty(&rbio->bio_list));
goto done;
}
list_del_init(&rbio->hash_list);
refcount_dec(&rbio->refs);
/*
* we use the plug list to hold all the rbios
* waiting for the chance to lock this stripe.
* hand the lock over to one of them.
*/
if (!list_empty(&rbio->plug_list)) {
struct btrfs_raid_bio *next;
struct list_head *head = rbio->plug_list.next;
next = list_entry(head, struct btrfs_raid_bio,
plug_list);
list_del_init(&rbio->plug_list);
list_add(&next->hash_list, &h->hash_list);
refcount_inc(&next->refs);
spin_unlock(&rbio->bio_list_lock);
spin_unlock_irqrestore(&h->lock, flags);
if (next->operation == BTRFS_RBIO_READ_REBUILD)
start_async_work(next, read_rebuild_work);
else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
steal_rbio(rbio, next);
start_async_work(next, read_rebuild_work);
} else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
start_async_work(next, rmw_work);
} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
steal_rbio(rbio, next);
start_async_work(next, scrub_parity_work);
}
goto done_nolock;
}
}
done:
spin_unlock(&rbio->bio_list_lock);
spin_unlock_irqrestore(&h->lock, flags);
done_nolock:
if (!keep_cache)
remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
int i;
if (!refcount_dec_and_test(&rbio->refs))
return;
WARN_ON(!list_empty(&rbio->stripe_cache));
WARN_ON(!list_empty(&rbio->hash_list));
WARN_ON(!bio_list_empty(&rbio->bio_list));
for (i = 0; i < rbio->nr_pages; i++) {
if (rbio->stripe_pages[i]) {
__free_page(rbio->stripe_pages[i]);
rbio->stripe_pages[i] = NULL;
}
}
btrfs_put_bbio(rbio->bbio);
kfree(rbio);
}
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
struct bio *next;
while (cur) {
next = cur->bi_next;
cur->bi_next = NULL;
cur->bi_status = err;
bio_endio(cur);
cur = next;
}
}
/*
* this frees the rbio and runs through all the bios in the
* bio_list and calls end_io on them
*/
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
struct bio *cur = bio_list_get(&rbio->bio_list);
struct bio *extra;
if (rbio->generic_bio_cnt)
btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
/*
* Clear the data bitmap, as the rbio may be cached for later usage.
* Do this before unlock_stripe() so there will be no new bios added
* for this rbio.
*/
bitmap_clear(rbio->dbitmap, 0, rbio->stripe_npages);
/*
* At this moment, rbio->bio_list is empty, however since rbio does not
* always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
* hash list, rbio may be merged with others so that rbio->bio_list
* becomes non-empty.
* Once unlock_stripe() is done, rbio->bio_list will not be updated any
* more and we can call bio_endio() on all queued bios.
*/
unlock_stripe(rbio);
extra = bio_list_get(&rbio->bio_list);
__free_raid_bio(rbio);
rbio_endio_bio_list(cur, err);
if (extra)
rbio_endio_bio_list(extra, err);
}
/*
* end io function used by finish_rmw. When we finally
* get here, we've written a full stripe
*/
static void raid_write_end_io(struct bio *bio)
{
struct btrfs_raid_bio *rbio = bio->bi_private;
blk_status_t err = bio->bi_status;
int max_errors;
if (err)
fail_bio_stripe(rbio, bio);
bio_put(bio);
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
err = BLK_STS_OK;
/* OK, we have read all the stripes we need to. */
max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
0 : rbio->bbio->max_errors;
if (atomic_read(&rbio->error) > max_errors)
err = BLK_STS_IOERR;
rbio_orig_end_io(rbio, err);
}
/*
* the read/modify/write code wants to use the original bio for
* any pages it included, and then use the rbio for everything
* else. This function decides if a given index (stripe number)
* and page number in that stripe fall inside the original bio
* or the rbio.
*
* if you set bio_list_only, you'll get a NULL back for any ranges
* that are outside the bio_list
*
* This doesn't take any refs on anything, you get a bare page pointer
* and the caller must bump refs as required.
*
* You must call index_rbio_pages once before you can trust
* the answers from this function.
*/
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
int index, int pagenr, int bio_list_only)
{
int chunk_page;
struct page *p = NULL;
chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
spin_lock_irq(&rbio->bio_list_lock);
p = rbio->bio_pages[chunk_page];
spin_unlock_irq(&rbio->bio_list_lock);
if (p || bio_list_only)
return p;
return rbio->stripe_pages[chunk_page];
}
/*
* number of pages we need for the entire stripe across all the
* drives
*/
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
/*
* allocation and initial setup for the btrfs_raid_bio. Note that
* this does not allocate any pages for rbio->stripe_pages.
*/
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
struct btrfs_bio *bbio,
u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
int num_pages = rbio_nr_pages(stripe_len, real_stripes);
int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
void *p;
rbio = kzalloc(sizeof(*rbio) +
sizeof(*rbio->stripe_pages) * num_pages +
sizeof(*rbio->bio_pages) * num_pages +
sizeof(*rbio->finish_pointers) * real_stripes +
sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
sizeof(*rbio->finish_pbitmap) *
BITS_TO_LONGS(stripe_npages),
GFP_NOFS);
if (!rbio)
return ERR_PTR(-ENOMEM);
bio_list_init(&rbio->bio_list);
INIT_LIST_HEAD(&rbio->plug_list);
spin_lock_init(&rbio->bio_list_lock);
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
rbio->fs_info = fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
rbio->real_stripes = real_stripes;
rbio->stripe_npages = stripe_npages;
rbio->faila = -1;
rbio->failb = -1;
refcount_set(&rbio->refs, 1);
atomic_set(&rbio->error, 0);
atomic_set(&rbio->stripes_pending, 0);
/*
* the stripe_pages, bio_pages, etc arrays point to the extra
* memory we allocated past the end of the rbio
*/
p = rbio + 1;
#define CONSUME_ALLOC(ptr, count) do { \
ptr = p; \
p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
} while (0)
CONSUME_ALLOC(rbio->stripe_pages, num_pages);
CONSUME_ALLOC(rbio->bio_pages, num_pages);
CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef CONSUME_ALLOC
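/*
 * The trailing memory is carved up in the same order it was sized in the
 * kzalloc() above: stripe_pages, bio_pages, finish_pointers, dbitmap and
 * finally finish_pbitmap, all living in the single allocation behind *rbio.
 */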
if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
nr_data = real_stripes - 1;
else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
nr_data = real_stripes - 2;
else
BUG();
rbio->nr_data = nr_data;
return rbio;
}
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
int i;
struct page *page;
for (i = 0; i < rbio->nr_pages; i++) {
if (rbio->stripe_pages[i])
continue;
page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!page)
return -ENOMEM;
rbio->stripe_pages[i] = page;
}
return 0;
}
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
int i;
struct page *page;
i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
for (; i < rbio->nr_pages; i++) {
if (rbio->stripe_pages[i])
continue;
page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!page)
return -ENOMEM;
rbio->stripe_pages[i] = page;
}
return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO.
 * This will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
struct bio_list *bio_list,
struct page *page,
int stripe_nr,
unsigned long page_index,
unsigned long bio_max_len)
{
struct bio *last = bio_list->tail;
int ret;
struct bio *bio;
struct btrfs_bio_stripe *stripe;
u64 disk_start;
stripe = &rbio->bbio->stripes[stripe_nr];
disk_start = stripe->physical + (page_index << PAGE_SHIFT);
/* if the device is missing, just fail this stripe */
if (!stripe->dev->bdev)
return fail_rbio_index(rbio, stripe_nr);
/* see if we can add this page onto our existing bio */
if (last) {
u64 last_end = (u64)last->bi_iter.bi_sector << 9;
last_end += last->bi_iter.bi_size;
/*
* we can't merge these if they are from different
* devices or if they are not contiguous
*/
if (last_end == disk_start && !last->bi_status &&
last->bi_disk == stripe->dev->bdev->bd_disk &&
last->bi_partno == stripe->dev->bdev->bd_partno) {
ret = bio_add_page(last, page, PAGE_SIZE, 0);
if (ret == PAGE_SIZE)
return 0;
}
}
/* put a new bio on the list */
bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
btrfs_io_bio(bio)->device = stripe->dev;
bio->bi_iter.bi_size = 0;
bio_set_dev(bio, stripe->dev->bdev);
bio->bi_iter.bi_sector = disk_start >> 9;
bio_add_page(bio, page, PAGE_SIZE, 0);
bio_list_add(bio_list, bio);
return 0;
}
/*
* while we're doing the read/modify/write cycle, we could
* have errors in reading pages off the disk. This checks
* for errors and if we're not able to read the page it'll
* trigger parity reconstruction. The rmw will be finished
* after we've reconstructed the failed stripes
*/
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
if (rbio->faila >= 0 || rbio->failb >= 0) {
BUG_ON(rbio->faila == rbio->real_stripes - 1);
__raid56_parity_recover(rbio);
} else {
finish_rmw(rbio);
}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we set up the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
struct bio *bio;
u64 start;
unsigned long stripe_offset;
unsigned long page_index;
spin_lock_irq(&rbio->bio_list_lock);
bio_list_for_each(bio, &rbio->bio_list) {
struct bio_vec bvec;
struct bvec_iter iter;
int i = 0;
start = (u64)bio->bi_iter.bi_sector << 9;
stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_SHIFT;
if (bio_flagged(bio, BIO_CLONED))
bio->bi_iter = btrfs_io_bio(bio)->iter;
bio_for_each_segment(bvec, bio, iter) {
rbio->bio_pages[page_index + i] = bvec.bv_page;
i++;
}
}
spin_unlock_irq(&rbio->bio_list_lock);
}
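/*
 * Illustrative example (assuming 4KiB pages): a bio whose first sector sits
 * 8KiB past bbio->raid_map[0] gets page_index = 2, so its pages land in
 * bio_pages[2], bio_pages[3] and so on.
 */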
/*
* this is called from one of two situations. We either
* have a full stripe from the higher layers, or we've read all
* the missing bits off disk.
*
* This will calculate the parity and then send down any
* changed blocks.
*/
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
struct btrfs_bio *bbio = rbio->bbio;
void **pointers = rbio->finish_pointers;
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
bool has_qstripe;
struct bio_list bio_list;
struct bio *bio;
int ret;
bio_list_init(&bio_list);
if (rbio->real_stripes - rbio->nr_data == 1)
has_qstripe = false;
else if (rbio->real_stripes - rbio->nr_data == 2)
has_qstripe = true;
else
BUG();
/* We should have at least one data sector. */
ASSERT(bitmap_weight(rbio->dbitmap, rbio->stripe_npages));
/* at this point we either have a full stripe,
* or we've read the full stripe from the drive.
* recalculate the parity and write the new results.
*
* We're not allowed to add any new bios to the
* bio list here, anyone else that wants to
* change this stripe needs to do their own rmw.
*/
spin_lock_irq(&rbio->bio_list_lock);
set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
spin_unlock_irq(&rbio->bio_list_lock);
atomic_set(&rbio->error, 0);
/*
* now that we've set rmw_locked, run through the
* bio list one last time and map the page pointers
*
* We don't cache full rbios because we're assuming
* the higher layers are unlikely to use this area of
* the disk again soon. If they do use it again,
* hopefully they will send another full bio.
*/
index_rbio_pages(rbio);
if (!rbio_is_full(rbio))
cache_rbio_pages(rbio);
else
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *p;
/* first collect one page from each data stripe */
for (stripe = 0; stripe < nr_data; stripe++) {
p = page_in_rbio(rbio, stripe, pagenr, 0);
pointers[stripe] = kmap(p);
}
/* then add the parity stripe */
p = rbio_pstripe_page(rbio, pagenr);
SetPageUptodate(p);
pointers[stripe++] = kmap(p);
if (has_qstripe) {
/*
* raid6, add the qstripe and call the
* library function to fill in our p/q
*/
p = rbio_qstripe_page(rbio, pagenr);
SetPageUptodate(p);
pointers[stripe++] = kmap(p);
raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers);
} else {
/* raid5 */
copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
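/*
 * At this point pointers[nr_data] holds the freshly computed P page (and,
 * for RAID6, pointers[nr_data + 1] holds Q). For RAID5 the P page is just
 * the XOR of all the data pages, e.g. data bytes 0x0f and 0x30 give a
 * parity byte of 0x3f.
 */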
for (stripe = 0; stripe < rbio->real_stripes; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
}
/*
* time to start writing. Make bios for everything from the
* higher layers (the bio_list in our rbio) and our p/q. Ignore
* everything else.
*/
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page;
/* This vertical stripe has no data, skip it. */
if (!test_bit(pagenr, rbio->dbitmap))
continue;
if (stripe < rbio->nr_data) {
page = page_in_rbio(rbio, stripe, pagenr, 1);
if (!page)
continue;
} else {
page = rbio_stripe_page(rbio, stripe, pagenr);
}
ret = rbio_add_io_page(rbio, &bio_list,
page, stripe, pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
}
}
if (likely(!bbio->num_tgtdevs))
goto write_data;
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
if (!bbio->tgtdev_map[stripe])
continue;
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page;
/* This vertical stripe has no data, skip it. */
if (!test_bit(pagenr, rbio->dbitmap))
continue;
if (stripe < rbio->nr_data) {
page = page_in_rbio(rbio, stripe, pagenr, 1);
if (!page)
continue;
} else {
page = rbio_stripe_page(rbio, stripe, pagenr);
}
ret = rbio_add_io_page(rbio, &bio_list, page,
rbio->bbio->tgtdev_map[stripe],
pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
}
}
write_data:
atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
while ((bio = bio_list_pop(&bio_list))) {
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
bio->bi_opf = REQ_OP_WRITE;
submit_bio(bio);
}
return;
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
while ((bio = bio_list_pop(&bio_list)))
bio_put(bio);
}
/*
* helper to find the stripe number for a given bio. Used to figure out which
* stripe has failed. This expects the bio to correspond to a physical disk,
* so it looks up based on physical sector numbers.
*/
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
u64 physical = bio->bi_iter.bi_sector;
int i;
struct btrfs_bio_stripe *stripe;
physical <<= 9;
for (i = 0; i < rbio->bbio->num_stripes; i++) {
stripe = &rbio->bbio->stripes[i];
if (in_range(physical, stripe->physical, rbio->stripe_len) &&
stripe->dev->bdev &&
bio->bi_disk == stripe->dev->bdev->bd_disk &&
bio->bi_partno == stripe->dev->bdev->bd_partno) {
return i;
}
}
return -1;
}
/*
* helper to find the stripe number for a given
* bio (before mapping). Used to figure out which stripe has
* failed. This looks up based on logical block numbers.
*/
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
u64 logical = (u64)bio->bi_iter.bi_sector << 9;
int i;
for (i = 0; i < rbio->nr_data; i++) {
u64 stripe_start = rbio->bbio->raid_map[i];
if (in_range(logical, stripe_start, rbio->stripe_len))
return i;
}
return -1;
}
/*
* returns -EIO if we had too many failures
*/
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&rbio->bio_list_lock, flags);
/* we already know this stripe is bad, move on */
if (rbio->faila == failed || rbio->failb == failed)
goto out;
if (rbio->faila == -1) {
/* first failure on this rbio */
rbio->faila = failed;
atomic_inc(&rbio->error);
} else if (rbio->failb == -1) {
/* second failure on this rbio */
rbio->failb = failed;
atomic_inc(&rbio->error);
} else {
ret = -EIO;
}
out:
spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
return ret;
}
/*
* helper to fail a stripe based on a physical disk
* bio.
*/
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
struct bio *bio)
{
int failed = find_bio_stripe(rbio, bio);
if (failed < 0)
return -EIO;
return fail_rbio_index(rbio, failed);
}
/*
* this sets each page in the bio uptodate. It should only be used on private
* rbio pages, nothing that comes in from the higher layers
*/
static void set_bio_pages_uptodate(struct bio *bio)
{
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
ASSERT(!bio_flagged(bio, BIO_CLONED));
bio_for_each_segment_all(bvec, bio, iter_all)
SetPageUptodate(bvec->bv_page);
}
/*
* end io for the read phase of the rmw cycle. All the bios here are physical
* stripe bios we've read from the disk so we can recalculate the parity of the
* stripe.
*
* This will usually kick off finish_rmw once all the bios are read in, but it
* may trigger parity reconstruction if we had any errors along the way
*/
static void raid_rmw_end_io(struct bio *bio)
{
struct btrfs_raid_bio *rbio = bio->bi_private;
if (bio->bi_status)
fail_bio_stripe(rbio, bio);
else
set_bio_pages_uptodate(bio);
bio_put(bio);
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
goto cleanup;
/*
* this will normally call finish_rmw to start our write
* but if there are any failed stripes we'll reconstruct
* from parity first
*/
validate_rbio_for_rmw(rbio);
return;
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
/*
* the stripe must be locked by the caller. It will
* unlock after all the writes are done
*/
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
int bios_to_read = 0;
struct bio_list bio_list;
int ret;
int pagenr;
int stripe;
struct bio *bio;
bio_list_init(&bio_list);
ret = alloc_rbio_pages(rbio);
if (ret)
goto cleanup;
index_rbio_pages(rbio);
atomic_set(&rbio->error, 0);
/*
* build a list of bios to read all the missing parts of this
* stripe
*/
for (stripe = 0; stripe < rbio->nr_data; stripe++) {
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
struct page *page;
/*
* we want to find all the pages missing from
* the rbio and read them from the disk. If
* page_in_rbio finds a page in the bio list
* we don't need to read it off the stripe.
*/
page = page_in_rbio(rbio, stripe, pagenr, 1);
if (page)
continue;
page = rbio_stripe_page(rbio, stripe, pagenr);
/*
* the bio cache may have handed us an uptodate
* page. If so, be happy and use it
*/
if (PageUptodate(page))
continue;
ret = rbio_add_io_page(rbio, &bio_list, page,
stripe, pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
}
}
bios_to_read = bio_list_size(&bio_list);
if (!bios_to_read) {
/*
* this can happen if others have merged with
* us, it means there is nothing left to read.
* But if there are missing devices it may not be
* safe to do the full stripe write yet.
*/
goto finish;
}
/*
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
atomic_set(&rbio->stripes_pending, bios_to_read);
while ((bio = bio_list_pop(&bio_list))) {
bio->bi_private = rbio;
bio->bi_end_io = raid_rmw_end_io;
bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
/* the actual write will happen once the reads are done */
return 0;
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
while ((bio = bio_list_pop(&bio_list)))
bio_put(bio);
return -EIO;
finish:
validate_rbio_for_rmw(rbio);
return 0;
}
/*
* if the upper layers pass in a full stripe, we thank them by only allocating
* enough pages to hold the parity, and sending it all down quickly.
*/
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
int ret;
ret = alloc_rbio_parity_pages(rbio);
if (ret) {
__free_raid_bio(rbio);
return ret;
}
ret = lock_stripe_add(rbio);
if (ret == 0)
finish_rmw(rbio);
return 0;
}
/*
* partial stripe writes get handed over to async helpers.
* We're really hoping to merge a few more writes into this
* rbio before calculating new parity
*/
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
int ret;
ret = lock_stripe_add(rbio);
if (ret == 0)
start_async_work(rbio, rmw_work);
return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe. So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
/* head off into rmw land if we don't have a full stripe */
if (!rbio_is_full(rbio))
return partial_stripe_write(rbio);
return full_stripe_write(rbio);
}
/*
* We use plugging call backs to collect full stripes.
* Any time we get a partial stripe write while plugged
* we collect it into a list. When the unplug comes down,
* we sort the list by logical block number and merge
* everything we can into the same rbios
*/
struct btrfs_plug_cb {
struct blk_plug_cb cb;
struct btrfs_fs_info *info;
struct list_head rbio_list;
struct btrfs_work work;
};
/*
* rbios on the plug list are sorted for easier merging.
*/
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
plug_list);
struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
plug_list);
u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
if (a_sector < b_sector)
return -1;
if (a_sector > b_sector)
return 1;
return 0;
}
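/*
 * list_sort() uses plug_cmp() to order the plugged rbios by the starting
 * sector of their first bio, so that run_plug() below can merge neighbouring
 * rbios into full stripes before submitting them.
 */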
static void run_plug(struct btrfs_plug_cb *plug)
{
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *last = NULL;
/*
* sort our plug list then try to merge
* everything we can in hopes of creating full
* stripes.
*/
list_sort(NULL, &plug->rbio_list, plug_cmp);
while (!list_empty(&plug->rbio_list)) {
cur = list_entry(plug->rbio_list.next,
struct btrfs_raid_bio, plug_list);
list_del_init(&cur->plug_list);
if (rbio_is_full(cur)) {
int ret;
/* we have a full stripe, send it down */
ret = full_stripe_write(cur);
BUG_ON(ret);
continue;
}
if (last) {
if (rbio_can_merge(last, cur)) {
merge_rbio(last, cur);
__free_raid_bio(cur);
continue;
}
__raid56_parity_write(last);
}
last = cur;
}
if (last) {
__raid56_parity_write(last);
}
kfree(plug);
}
/*
* if the unplug comes from schedule, we have to push the
* work off to a helper thread
*/
static void unplug_work(struct btrfs_work *work)
{
struct btrfs_plug_cb *plug;
plug = container_of(work, struct btrfs_plug_cb, work);
run_plug(plug);
}
static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
struct btrfs_plug_cb *plug;
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (from_schedule) {
btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
btrfs_queue_work(plug->info->rmw_workers,
&plug->work);
return;
}
run_plug(plug);
}
/* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
{
const struct btrfs_fs_info *fs_info = rbio->fs_info;
const u64 orig_logical = orig_bio->bi_iter.bi_sector << SECTOR_SHIFT;
const u64 full_stripe_start = rbio->bbio->raid_map[0];
const u32 orig_len = orig_bio->bi_iter.bi_size;
const u32 sectorsize = fs_info->sectorsize;
u64 cur_logical;
ASSERT(orig_logical >= full_stripe_start &&
orig_logical + orig_len <= full_stripe_start +
rbio->nr_data * rbio->stripe_len);
bio_list_add(&rbio->bio_list, orig_bio);
rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
/* Update the dbitmap. */
for (cur_logical = orig_logical; cur_logical < orig_logical + orig_len;
cur_logical += sectorsize) {
int bit = ((u32)(cur_logical - full_stripe_start) >>
PAGE_SHIFT) % rbio->stripe_npages;
set_bit(bit, rbio->dbitmap);
}
}
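/*
 * Illustrative dbitmap math (assuming a 64KiB stripe_len, i.e. 16 pages of
 * 4KiB each): a 4KiB write starting 68KiB into the full stripe sets bit
 * (68KiB >> PAGE_SHIFT) % 16 = 17 % 16 = 1, so each bit records a vertical
 * position within the stripe, regardless of which data stripe holds the page.
 */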
/*
* our main entry point for writes from the rest of the FS.
*/
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
int ret;
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
}
rbio->operation = BTRFS_RBIO_WRITE;
rbio_add_bio(rbio, bio);
btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
/*
* don't plug on full rbios, just get them out the door
* as quickly as we can
*/
if (rbio_is_full(rbio)) {
ret = full_stripe_write(rbio);
if (ret)
btrfs_bio_counter_dec(fs_info);
return ret;
}
cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
if (cb) {
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (!plug->info) {
plug->info = fs_info;
INIT_LIST_HEAD(&plug->rbio_list);
}
list_add_tail(&rbio->plug_list, &plug->rbio_list);
ret = 0;
} else {
ret = __raid56_parity_write(rbio);
if (ret)
btrfs_bio_counter_dec(fs_info);
}
return ret;
}
/*
* all parity reconstruction happens here. We've read in everything
* we can find from the drives and this does the heavy lifting of
* sorting the good from the bad.
*/
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
int pagenr, stripe;
void **pointers;
int faila = -1, failb = -1;
struct page *page;
blk_status_t err;
int i;
pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
if (!pointers) {
err = BLK_STS_RESOURCE;
goto cleanup_io;
}
faila = rbio->faila;
failb = rbio->failb;
if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
spin_lock_irq(&rbio->bio_list_lock);
set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
spin_unlock_irq(&rbio->bio_list_lock);
}
index_rbio_pages(rbio);
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
/*
* Now we just use bitmap to mark the horizontal stripes in
* which we have data when doing parity scrub.
*/
if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
!test_bit(pagenr, rbio->dbitmap))
continue;
/* setup our array of pointers with pages
* from each stripe
*/
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
/*
* if we're rebuilding a read, we have to use
* pages from the bio list
*/
if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
(stripe == faila || stripe == failb)) {
page = page_in_rbio(rbio, stripe, pagenr, 0);
} else {
page = rbio_stripe_page(rbio, stripe, pagenr);
}
pointers[stripe] = kmap(page);
}
/* all raid6 handling here */
if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
/*
* single failure, rebuild from parity raid5
* style
*/
if (failb < 0) {
if (faila == rbio->nr_data) {
/*
* Just the P stripe has failed, without
* a bad data or Q stripe.
* TODO, we should redo the xor here.
*/
err = BLK_STS_IOERR;
goto cleanup;
}
/*
* a single failure in raid6 is rebuilt
* in the pstripe code below
*/
goto pstripe;
}
/* make sure our ps and qs are in order */
if (faila > failb)
swap(faila, failb);
/*
 * If the q stripe has failed, do a pstripe reconstruction from
 * the xors.
 * If both the q stripe and the P stripe have failed, we're
 * here due to a crc mismatch and we can't give them the
 * data they want.
 */
if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
if (rbio->bbio->raid_map[faila] ==
RAID5_P_STRIPE) {
err = BLK_STS_IOERR;
goto cleanup;
}
/*
* otherwise we have one bad data stripe and
* a good P stripe. raid5!
*/
goto pstripe;
}
if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
raid6_datap_recov(rbio->real_stripes,
PAGE_SIZE, faila, pointers);
} else {
raid6_2data_recov(rbio->real_stripes,
PAGE_SIZE, faila, failb,
pointers);
}
} else {
void *p;
/* rebuild from P stripe here (raid5 or raid6) */
BUG_ON(failb != -1);
pstripe:
/* Copy parity block into failed block to start with */
copy_page(pointers[faila], pointers[rbio->nr_data]);
/* rearrange the pointer array */
p = pointers[faila];
for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
pointers[stripe] = pointers[stripe + 1];
pointers[rbio->nr_data - 1] = p;
/* xor in the rest */
run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
}
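/*
 * When the pstripe path above was taken, the page that was mapped for the
 * failed stripe now holds P XORed with every surviving data page, which is
 * exactly the missing data (e.g. with P = D0 ^ D1 ^ D2 and D1 lost,
 * P ^ D0 ^ D2 == D1).
 */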
/* if we're doing this rebuild as part of an rmw, go through
* and set all of our private rbio pages in the
* failed stripes as uptodate. This way finish_rmw will
* know they can be trusted. If this was a read reconstruction,
* other endio functions will fiddle the uptodate bits
*/
if (rbio->operation == BTRFS_RBIO_WRITE) {
for (i = 0; i < rbio->stripe_npages; i++) {
if (faila != -1) {
page = rbio_stripe_page(rbio, faila, i);
SetPageUptodate(page);
}
if (failb != -1) {
page = rbio_stripe_page(rbio, failb, i);
SetPageUptodate(page);
}
}
}
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
/*
* if we're rebuilding a read, we have to use
* pages from the bio list
*/
if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
(stripe == faila || stripe == failb)) {
page = page_in_rbio(rbio, stripe, pagenr, 0);
} else {
page = rbio_stripe_page(rbio, stripe, pagenr);
}
kunmap(page);
}
}
err = BLK_STS_OK;
cleanup:
kfree(pointers);
cleanup_io:
/*
* Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
* valid rbio which is consistent with ondisk content, thus such a
* valid rbio can be cached to avoid further disk reads.
*/
if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
/*
* - In case of two failures, where rbio->failb != -1:
*
* Do not cache this rbio since the above read reconstruction
* (raid6_datap_recov() or raid6_2data_recov()) may have
* changed some content of stripes which are not identical to
* on-disk content any more, otherwise, a later write/recover
* may steal stripe_pages from this rbio and end up with
* corruptions or rebuild failures.
*
* - In case of single failure, where rbio->failb == -1:
*
* Cache this rbio iff the above read reconstruction is
* executed without problems.
*/
if (err == BLK_STS_OK && rbio->failb < 0)
cache_rbio_pages(rbio);
else
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
rbio_orig_end_io(rbio, err);
} else if (err == BLK_STS_OK) {
rbio->faila = -1;
rbio->failb = -1;
if (rbio->operation == BTRFS_RBIO_WRITE)
finish_rmw(rbio);
else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
finish_parity_scrub(rbio, 0);
else
BUG();
} else {
rbio_orig_end_io(rbio, err);
}
}
/*
* This is called only for stripes we've read from disk to
* reconstruct the parity.
*/
static void raid_recover_end_io(struct bio *bio)
{
struct btrfs_raid_bio *rbio = bio->bi_private;
/*
* we only read stripe pages off the disk, set them
* up to date if there were no errors
*/
if (bio->bi_status)
fail_bio_stripe(rbio, bio);
else
set_bio_pages_uptodate(bio);
bio_put(bio);
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
rbio_orig_end_io(rbio, BLK_STS_IOERR);
else
__raid_recover_end_io(rbio);
}
/*
* reads everything we need off the disk to reconstruct
* the parity. endio handlers trigger final reconstruction
* when the IO is done.
*
* This is used both for reads from the higher layers and for
* parity construction required to finish a rmw cycle.
*/
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
int bios_to_read = 0;
struct bio_list bio_list;
int ret;
int pagenr;
int stripe;
struct bio *bio;
bio_list_init(&bio_list);
ret = alloc_rbio_pages(rbio);
if (ret)
goto cleanup;
atomic_set(&rbio->error, 0);
/*
 * Read everything that hasn't failed. However, this time we do not
 * trust any cached sector, as we may read out some stale data that
 * the higher layer is not reading.
 *
 * So here we always re-read everything in the recovery path.
 */
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
if (rbio->faila == stripe || rbio->failb == stripe) {
atomic_inc(&rbio->error);
continue;
}
for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
ret = rbio_add_io_page(rbio, &bio_list,
rbio_stripe_page(rbio, stripe, pagenr),
stripe, pagenr, rbio->stripe_len);
if (ret < 0)
goto cleanup;
}
}
bios_to_read = bio_list_size(&bio_list);
if (!bios_to_read) {
/*
* we might have no bios to read just because the pages
* were up to date, or we might have no bios to read because
* the devices were gone.
*/
if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
__raid_recover_end_io(rbio);
return 0;
} else {
goto cleanup;
}
}
/*
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
atomic_set(&rbio->stripes_pending, bios_to_read);
while ((bio = bio_list_pop(&bio_list))) {
bio->bi_private = rbio;
bio->bi_end_io = raid_recover_end_io;
bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
return 0;
cleanup:
if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
rbio_orig_end_io(rbio, BLK_STS_IOERR);
while ((bio = bio_list_pop(&bio_list)))
bio_put(bio);
return -EIO;
}
/*
* the main entry point for reads from the higher layers. This
* is really only called when the normal read path had a failure,
* so we assume the bio they send down corresponds to a failed part
* of the drive.
*/
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io)
{
struct btrfs_raid_bio *rbio;
int ret;
if (generic_io) {
ASSERT(bbio->mirror_num == mirror_num);
btrfs_io_bio(bio)->mirror_num = mirror_num;
}
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
}
rbio->operation = BTRFS_RBIO_READ_REBUILD;
rbio_add_bio(rbio, bio);
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
__func__, (u64)bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type);
if (generic_io)
btrfs_put_bbio(bbio);
kfree(rbio);
return -EIO;
}
if (generic_io) {
btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
} else {
btrfs_get_bbio(bbio);
}
/*
 * Loop retry:
 * for 'mirror_num == 2', reconstruct from all other stripes.
 * for 'mirror_num > 2', select a stripe to fail on every retry.
 */
if (mirror_num > 2) {
/*
 * 'mirror_num == 3' is to fail the p stripe and
 * reconstruct from the q stripe. 'mirror_num > 3' is to
 * fail a data stripe and reconstruct from the p+q stripes.
 */
rbio->failb = rbio->real_stripes - (mirror_num - 1);
ASSERT(rbio->failb > 0);
if (rbio->failb <= rbio->faila)
rbio->failb--;
}
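/*
 * Illustrative mapping (assuming RAID6 with two data stripes, so
 * real_stripes == 4): mirror_num == 3 gives failb == 2, the P stripe;
 * mirror_num == 4 gives failb == 1, a data stripe. failb is nudged down
 * when it does not land above faila, which among other things keeps it
 * from colliding with faila.
 */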
ret = lock_stripe_add(rbio);
/*
* __raid56_parity_recover will end the bio with
* any errors it hits. We don't want to return
* its error value up the stack because our caller
* will end up calling bio_endio with any nonzero
* return
*/
if (ret == 0)
__raid56_parity_recover(rbio);
/*
* our rbio has been added to the list of
* rbios that will be handled after the
* currently lock owner is done
*/
return 0;
}
static void rmw_work(struct btrfs_work *work)
{
struct btrfs_raid_bio *rbio;
rbio = container_of(work, struct btrfs_raid_bio, work);
raid56_rmw_stripe(rbio);
}
static void read_rebuild_work(struct btrfs_work *work)
{
struct btrfs_raid_bio *rbio;
rbio = container_of(work, struct btrfs_raid_bio, work);
__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure that all the pages added to the scrub/replace
 * raid bio are correct and are not changed during the scrub/replace; that
 * is, those pages only hold metadata or file data with checksums.
 */
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
{
struct btrfs_raid_bio *rbio;
int i;
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
/*
 * This is a special bio which is used to hold the completion handler
 * and make the scrub rbio similar to the other types.
 */
ASSERT(!bio->bi_iter.bi_size);
rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
/*
* After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
* to the end position, so this search can start from the first parity
* stripe.
*/
for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
if (bbio->stripes[i].dev == scrub_dev) {
rbio->scrubp = i;
break;
}
}
ASSERT(i < rbio->real_stripes);
/* Now we just support the sectorsize equals to page size */
ASSERT(fs_info->sectorsize == PAGE_SIZE);
ASSERT(rbio->stripe_npages == stripe_nsectors);
bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
/*
* We have already increased bio_counter when getting bbio, record it
* so we can free it at rbio_orig_end_io().
*/
rbio->generic_bio_cnt = 1;
return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
u64 logical)
{
int stripe_offset;
int index;
ASSERT(logical >= rbio->bbio->raid_map[0]);
ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
rbio->stripe_len * rbio->nr_data);
stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
index = stripe_offset >> PAGE_SHIFT;
rbio->bio_pages[index] = page;
}
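/*
 * This reuses the flat bio_pages indexing: the page's offset from the start
 * of the full stripe, in PAGE_SIZE units, selects its slot, matching the
 * chunk_page indexing used by page_in_rbio() for the data stripes.
 */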
/*
 * We only scrub the parity for the horizontal stripes where we have
 * correct data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
int i;
int bit;
int index;
struct page *page;
for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
for (i = 0; i < rbio->real_stripes; i++) {
index = i * rbio->stripe_npages + bit;
if (rbio->stripe_pages[index])
continue;
page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!page)
return -ENOMEM;
rbio->stripe_pages[index] = page;
}
}
return 0;
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int need_check)
{
struct btrfs_bio *bbio = rbio->bbio;
void **pointers = rbio->finish_pointers;
unsigned long *pbitmap = rbio->finish_pbitmap;
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
bool has_qstripe;
struct page *p_page = NULL;
struct page *q_page = NULL;
struct bio_list bio_list;
struct bio *bio;
int is_replace = 0;
int ret;
bio_list_init(&bio_list);
if (rbio->real_stripes - rbio->nr_data == 1)
has_qstripe = false;
else if (rbio->real_stripes - rbio->nr_data == 2)
has_qstripe = true;
else
BUG();
if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
is_replace = 1;
bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
}
/*
 * The higher layers (the scrubber) are unlikely to use this area of
 * the disk again soon, so don't cache it.
 */
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
if (!need_check)
goto writeback;
p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!p_page)
goto cleanup;
SetPageUptodate(p_page);
if (has_qstripe) {
/* RAID6, allocate and map temp space for the Q stripe */
q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!q_page) {
__free_page(p_page);
goto cleanup;
}
SetPageUptodate(q_page);
pointers[rbio->real_stripes - 1] = kmap(q_page);
}
atomic_set(&rbio->error, 0);
/* Map the parity stripe just once */
pointers[nr_data] = kmap(p_page);
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *p;
void *parity;
/* first collect one page from each data stripe */
for (stripe = 0; stripe < nr_data; stripe++) {
p = page_in_rbio(rbio, stripe, pagenr, 0);
pointers[stripe] = kmap(p);
}
if (has_qstripe) {
/* RAID6, call the library function to fill in our P/Q */
raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers);
} else {
/* raid5 */
copy_page(pointers[nr_data], pointers[0]);
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
/* Check scrubbing parity and repair it */
p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
parity = kmap(p);
if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
copy_page(parity, pointers[rbio->scrubp]);
else
/* Parity is right, needn't writeback */
bitmap_clear(rbio->dbitmap, pagenr, 1);
kunmap(p);
for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
}
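/*
 * dbitmap now only has bits set for the pages whose on-disk parity
 * mismatched the recomputed parity; the writeback below rewrites just those
 * pages on rbio->scrubp, while pbitmap (copied before the check) still
 * covers every originally requested page for the dev-replace target.
 */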
kunmap(p_page);
__free_page(p_page);
if (q_page) {
kunmap(q_page);
__free_page(q_page);
}
writeback:
/*
* time to start writing. Make bios for everything from the
* higher layers (the bio_list in our rbio) and our p/q. Ignore
* everything else.
*/
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *page;
page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
ret = rbio_add_io_page(rbio, &bio_list,
page, rbio->scrubp, pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
}
if (!is_replace)
goto submit_write;
for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
struct page *page;
page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
ret = rbio_add_io_page(rbio, &bio_list, page,
bbio->tgtdev_map[rbio->scrubp],
pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
}
submit_write:
nr_data = bio_list_size(&bio_list);
if (!nr_data) {
/* Every parity is right */
rbio_orig_end_io(rbio, BLK_STS_OK);
return;
}
atomic_set(&rbio->stripes_pending, nr_data);
while ((bio = bio_list_pop(&bio_list))) {
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
bio->bi_opf = REQ_OP_WRITE;
submit_bio(bio);
}
return;
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
while ((bio = bio_list_pop(&bio_list)))
bio_put(bio);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
if (stripe >= 0 && stripe < rbio->nr_data)
return 1;
return 0;
}
/*
* While we're doing the parity check and repair, we could have errors
* in reading pages off the disk. This checks for errors and if we're
* not able to read the page it'll trigger parity reconstruction. The
* parity scrub will be finished after we've reconstructed the failed
* stripes
*/
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
goto cleanup;
if (rbio->faila >= 0 || rbio->failb >= 0) {
int dfail = 0, failp = -1;
if (is_data_stripe(rbio, rbio->faila))
dfail++;
else if (is_parity_stripe(rbio->faila))
failp = rbio->faila;
if (is_data_stripe(rbio, rbio->failb))
dfail++;
else if (is_parity_stripe(rbio->failb))
failp = rbio->failb;
/*
 * Because we cannot use the parity stripe being scrubbed to repair
 * the data, our repair capability is reduced.
 * (In the case of RAID5, we cannot repair anything.)
 */
if (dfail > rbio->bbio->max_errors - 1)
goto cleanup;
/*
 * If all the data is good, then only the parity is wrong, so just
 * repair the parity.
 */
if (dfail == 0) {
finish_parity_scrub(rbio, 0);
return;
}
/*
 * Getting here means we have one corrupted data stripe and one
 * corrupted parity on RAID6. If the corrupted parity is the one
 * being scrubbed, we can luckily use the other parity to repair
 * the data; otherwise we cannot repair the data stripe.
 */
if (failp != rbio->scrubp)
goto cleanup;
__raid_recover_end_io(rbio);
} else {
finish_parity_scrub(rbio, 1);
}
return;
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
/*
 * end io for the read phase of the parity scrub. All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
struct btrfs_raid_bio *rbio = bio->bi_private;
if (bio->bi_status)
fail_bio_stripe(rbio, bio);
else
set_bio_pages_uptodate(bio);
bio_put(bio);
if (!atomic_dec_and_test(&rbio->stripes_pending))
return;
/*
* this will normally call finish_rmw to start our write
* but if there are any failed stripes we'll reconstruct
* from parity first
*/
validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
int bios_to_read = 0;
struct bio_list bio_list;
int ret;
int pagenr;
int stripe;
struct bio *bio;
bio_list_init(&bio_list);
ret = alloc_rbio_essential_pages(rbio);
if (ret)
goto cleanup;
atomic_set(&rbio->error, 0);
/*
* build a list of bios to read all the missing parts of this
* stripe
*/
for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *page;
/*
* we want to find all the pages missing from
* the rbio and read them from the disk. If
* page_in_rbio finds a page in the bio list
* we don't need to read it off the stripe.
*/
page = page_in_rbio(rbio, stripe, pagenr, 1);
if (page)
continue;
page = rbio_stripe_page(rbio, stripe, pagenr);
/*
* the bio cache may have handed us an uptodate
* page. If so, be happy and use it
*/
if (PageUptodate(page))
continue;
ret = rbio_add_io_page(rbio, &bio_list, page,
stripe, pagenr, rbio->stripe_len);
if (ret)
goto cleanup;
}
}
bios_to_read = bio_list_size(&bio_list);
if (!bios_to_read) {
/*
* this can happen if others have merged with
* us, it means there is nothing left to read.
* But if there are missing devices it may not be
* safe to do the full stripe write yet.
*/
goto finish;
}
/*
* the bbio may be freed once we submit the last bio. Make sure
* not to touch it after that
*/
atomic_set(&rbio->stripes_pending, bios_to_read);
while ((bio = bio_list_pop(&bio_list))) {
bio->bi_private = rbio;
bio->bi_end_io = raid56_parity_scrub_end_io;
bio->bi_opf = REQ_OP_READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
/* the actual write will happen once the reads are done */
return;
cleanup:
rbio_orig_end_io(rbio, BLK_STS_IOERR);
while ((bio = bio_list_pop(&bio_list)))
bio_put(bio);
return;
finish:
validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
struct btrfs_raid_bio *rbio;
rbio = container_of(work, struct btrfs_raid_bio, work);
raid56_parity_scrub_stripe(rbio);
}
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
start_async_work(rbio, scrub_parity_work);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length)
{
struct btrfs_raid_bio *rbio;
rbio = alloc_rbio(fs_info, bbio, length);
if (IS_ERR(rbio))
return NULL;
rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
bio_list_add(&rbio->bio_list, bio);
/*
 * This is a special bio which is used to hold the completion handler
 * and make this rbio similar to the other types.
 */
ASSERT(!bio->bi_iter.bi_size);
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
BUG();
kfree(rbio);
return NULL;
}
/*
* When we get bbio, we have already increased bio_counter, record it
* so we can free it at rbio_orig_end_io()
*/
rbio->generic_bio_cnt = 1;
return rbio;
}
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
if (!lock_stripe_add(rbio))
start_async_work(rbio, read_rebuild_work);
}