This is the 6.1.13 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmP2A8MACgkQONu9yGCS
 aT7GrhAAky2nTRG9J0oPxh5Eu7wNKmjqDWNj9c6it3iGHpb+tfOY+LfPXHmWz0kX
 NoaNYGZGD8SDbkmwrSOmFB1Q/0OZ4/aIwM7Kwcw72UJVvrlsKx1HwiJjXKk809ZL
 bVlLUQzFTwyVIYcvjXQ8CuBHwBinLc3qkcyYGgbS8bseR4pDuxwoToDwAxk1d/0j
 ozWuzUKhSdYHYIUrk3papUro2UpF+Kb7KFpNiVo2wMaZM7en2XK3khCt8TuojH6c
 DXL+KZ/HbB8Ig1PWLaw2/6o4ispNy6bz7CJx6oDiOILR+le8xZA5WTdkXT3ovjyr
 LxutmPTTw6PxextIyVRblJWzXNcjdlV552U4gnnngcWn6wg4D4otqYnYvTaAUc+u
 sQnwrlQFxB2KfFKLNepGAy7klQJsYP3eadjDgGXP9TSmuUvUYRaNr6h0XukbyYkc
 kx2+Tw51NMKEqhgnaiKDN8AZEDTuLu5F4+NrUertxlb3PWeRRMRYVGJ1uw0KJg6t
 d5eniCB00SaaqN6M68u/hRYRi3gnwIsU7DitEpqejqwzskMpgegMFvebmCwORiq3
 D+FD4EHOlztIToXhmEOXp0cz8fs27MuWmq4GkSwXvJuq+id5cQFdDN5GeLgNdAvH
 Kiu/Y+DY6ObW31tAQ1Jjp20L2RaWWvubrCBGeIqiDzUWmCohsks=
 =TXvc
 -----END PGP SIGNATURE-----

Merge 6.1.13 into android14-6.1

Changes in 6.1.13
	mptcp: sockopt: make 'tcp_fastopen_connect' generic
	mptcp: fix locking for setsockopt corner-case
	mptcp: deduplicate error paths on endpoint creation
	mptcp: fix locking for in-kernel listener creation
	btrfs: move the auto defrag code to defrag.c
	btrfs: lock the inode in shared mode before starting fiemap
	ASoC: amd: yc: Add DMI support for new acer/emdoor platforms
	ASoC: SOF: sof-audio: start with the right widget type
	ALSA: usb-audio: Add FIXED_RATE quirk for JBL Quantum610 Wireless
	ASoC: Intel: sof_rt5682: always set dpcm_capture for amplifiers
	ASoC: Intel: sof_cs42l42: always set dpcm_capture for amplifiers
	ASoC: Intel: sof_nau8825: always set dpcm_capture for amplifiers
	ASoC: Intel: sof_ssp_amp: always set dpcm_capture for amplifiers
	selftests/bpf: Verify copy_register_state() preserves parent/live fields
	ALSA: hda: Do not unset preset when cleaning up codec
	ASoC: amd: yc: Add Xiaomi Redmi Book Pro 15 2022 into DMI table
	bpf, sockmap: Don't let sock_map_{close,destroy,unhash} call itself
	ASoC: cs42l56: fix DT probe
	tools/virtio: fix the vringh test for virtio ring changes
	vdpa: ifcvf: Do proper cleanup if IFCVF init fails
	net/rose: Fix to not accept on connected socket
	selftest: net: Improve IPV6_TCLASS/IPV6_HOPLIMIT tests apparmor compatibility
	net: stmmac: do not stop RX_CLK in Rx LPI state for qcs404 SoC
	powerpc/64: Fix perf profiling asynchronous interrupt handlers
	fscache: Use clear_and_wake_up_bit() in fscache_create_volume_work()
	drm/nouveau/devinit/tu102-: wait for GFW_BOOT_PROGRESS == COMPLETED
	net: ethernet: mtk_eth_soc: Avoid truncating allocation
	net: sched: sch: Bounds check priority
	s390/decompressor: specify __decompress() buf len to avoid overflow
	nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association
	nvme: clear the request_queue pointers on failure in nvme_alloc_admin_tag_set
	nvme: clear the request_queue pointers on failure in nvme_alloc_io_tag_set
	drm/amd/display: Add missing brackets in calculation
	drm/amd/display: Adjust downscaling limits for dcn314
	drm/amd/display: Unassign does_plane_fit_in_mall function from dcn3.2
	drm/amd/display: Reset DMUB mailbox SW state after HW reset
	drm/amdgpu: enable HDP SD for gfx 11.0.3
	drm/amdgpu: Enable vclk dclk node for gc11.0.3
	drm/amd/display: Properly handle additional cases where DCN is not supported
	platform/x86: touchscreen_dmi: Add Chuwi Vi8 (CWI501) DMI match
	ceph: move mount state enum to super.h
	ceph: blocklist the kclient when receiving corrupted snap trace
	selftests: mptcp: userspace: fix v4-v6 test in v6.1
	of: reserved_mem: Have kmemleak ignore dynamically allocated reserved mem
	kasan: fix Oops due to missing calls to kasan_arch_is_ready()
	mm: shrinkers: fix deadlock in shrinker debugfs
	aio: fix mremap after fork null-deref
	vmxnet3: move rss code block under eop descriptor
	fbdev: Fix invalid page access after closing deferred I/O devices
	drm: Disable dynamic debug as broken
	drm/amd/amdgpu: fix warning during suspend
	drm/amd/display: Fail atomic_check early on normalize_zpos error
	drm/vmwgfx: Stop accessing buffer objects which failed init
	drm/vmwgfx: Do not drop the reference to the handle too soon
	mmc: jz4740: Work around bug on JZ4760(B)
	mmc: meson-gx: fix SDIO mode if cap_sdio_irq isn't set
	mmc: sdio: fix possible resource leaks in some error paths
	mmc: mmc_spi: fix error handling in mmc_spi_probe()
	ALSA: hda: Fix codec device field initialization
	ALSA: hda/conexant: add a new hda codec SN6180
	ALSA: hda/realtek - fixed wrong gpio assigned
	ALSA: hda/realtek: fix mute/micmute LEDs don't work for a HP platform.
	ALSA: hda/realtek: Enable mute/micmute LEDs and speaker support for HP Laptops
	ata: ahci: Add Tiger Lake UP{3,4} AHCI controller
	ata: libata-core: Disable READ LOG DMA EXT for Samsung MZ7LH
	sched/psi: Fix use-after-free in ep_remove_wait_queue()
	hugetlb: check for undefined shift on 32 bit architectures
	nilfs2: fix underflow in second superblock position calculations
	mm/MADV_COLLAPSE: set EAGAIN on unexpected page refcount
	mm/filemap: fix page end in filemap_get_read_batch
	mm/migrate: fix wrongly apply write bit after mkdirty on sparc64
	gpio: sim: fix a memory leak
	freezer,umh: Fix call_usermode_helper_exec() vs SIGKILL
	coredump: Move dump_emit_page() to kill unused warning
	Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."
	net: Fix unwanted sign extension in netdev_stats_to_stats64()
	revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
	drm/vc4: crtc: Increase setup cost in core clock calculation to handle extreme reduced blanking
	drm/vc4: Fix YUV plane handling when planes are in different buffers
	drm/i915/gen11: Wa_1408615072/Wa_1407596294 should be on GT list
	ice: fix lost multicast packets in promisc mode
	ixgbe: allow to increase MTU to 3K with XDP enabled
	i40e: add double of VLAN header when computing the max MTU
	net: bgmac: fix BCM5358 support by setting correct flags
	net: ethernet: ti: am65-cpsw: Add RX DMA Channel Teardown Quirk
	sctp: sctp_sock_filter(): avoid list_entry() on possibly empty list
	net/sched: tcindex: update imperfect hash filters respecting rcu
	ice: xsk: Fix cleaning of XDP_TX frames
	dccp/tcp: Avoid negative sk_forward_alloc by ipv6_pinfo.pktoptions.
	net/usb: kalmia: Don't pass act_len in usb_bulk_msg error path
	net/sched: act_ctinfo: use percpu stats
	net: openvswitch: fix possible memory leak in ovs_meter_cmd_set()
	net: stmmac: fix order of dwmac5 FlexPPS parametrization sequence
	bnxt_en: Fix mqprio and XDP ring checking logic
	tracing: Make trace_define_field_ext() static
	net: stmmac: Restrict warning on disabling DMA store and fwd mode
	net: use a bounce buffer for copying skb->mark
	tipc: fix kernel warning when sending SYN message
	net: mpls: fix stale pointer if allocation fails during device rename
	igb: conditionalize I2C bit banging on external thermal sensor support
	igb: Fix PPS input and output using 3rd and 4th SDP
	ixgbe: add double of VLAN header when computing the max MTU
	ipv6: Fix datagram socket connection with DSCP.
	ipv6: Fix tcp socket connection with DSCP.
	mm/gup: add folio to list when folio_isolate_lru() succeed
	mm: extend max struct page size for kmsan
	i40e: Add checking for null for nlmsg_find_attr()
	net/sched: tcindex: search key must be 16 bits
	nvme-tcp: stop auth work after tearing down queues in error recovery
	nvme-rdma: stop auth work after tearing down queues in error recovery
	KVM: x86/pmu: Disable vPMU support on hybrid CPUs (host PMUs)
	kvm: initialize all of the kvm_debugregs structure before sending it to userspace
	perf/x86: Refuse to export capabilities for hybrid PMUs
	alarmtimer: Prevent starvation by small intervals and SIG_IGN
	nvme-pci: refresh visible attrs for cmb attributes
	ASoC: SOF: Intel: hda-dai: fix possible stream_tag leak
	net: sched: sch: Fix off by one in htb_activate_prios()
	Linux 6.1.13

Change-Id: I8a1e4175939c14f726c545001061b95462566386
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-02-22 12:32:41 +00:00
commit dafc2fae4d
146 changed files with 1307 additions and 724 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 12
SUBLEVEL = 13
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
return flags;
}
static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
{
unsigned long flags = irq_soft_mask_return();
irq_soft_mask_set(flags & ~mask);
return flags;
}
static inline unsigned long arch_local_save_flags(void)
{
return irq_soft_mask_return();
@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
* is a different soft-masked interrupt pending that requires hard
* masking.
*/
static inline bool should_hard_irq_enable(void)
static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(mfmsr() & MSR_EE);
}
@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
*
* TODO: Add test for 64e
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
return false;
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
if (!power_pmu_wants_prompt_pmi())
return false;
/*
* If PMIs are disabled then IRQs should be disabled as well,
* so we shouldn't see this condition, check for it just in
* case because we are about to enable PMIs.
*/
if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
return false;
}
if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
return false;
@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
/*
* Do the hard enabling, only call this if should_hard_irq_enable is true.
* This allows PMI interrupts to profile irq handlers.
*/
static inline void do_hard_irq_enable(void)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
WARN_ON(mfmsr() & MSR_EE);
}
/*
* This allows PMI interrupts (and watchdog soft-NMIs) through.
* There is no other reason to enable this way.
* Asynch interrupts come in with IRQS_ALL_DISABLED,
* PACA_IRQ_HARD_DIS, and MSR[EE]=0.
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
__hard_irq_enable();
}
@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
static __always_inline bool should_hard_irq_enable(void)
static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
return false;
}


@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
ppc_msgsync();
if (should_hard_irq_enable())
if (should_hard_irq_enable(regs))
do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());


@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
if (should_hard_irq_enable())
if (should_hard_irq_enable(regs))
do_hard_irq_enable();
/* And finally process it */


@ -533,7 +533,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
}
/* Conditionally hard-enable interrupts. */
if (should_hard_irq_enable()) {
if (should_hard_irq_enable(regs)) {
/*
* Ensure a positive value is written to the decrementer, or
* else some CPUs will continue to take decrementer exceptions.


@ -80,6 +80,6 @@ void *decompress_kernel(void)
void *output = (void *)decompress_offset;
__decompress(_compressed_start, _compressed_end - _compressed_start,
NULL, NULL, output, 0, NULL, error);
NULL, NULL, output, vmlinux.image_size, NULL, error);
return output;
}


@ -2994,17 +2994,19 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
if (!x86_pmu_initialized()) {
/* This API doesn't currently support enumerating hybrid PMUs. */
if (WARN_ON_ONCE(cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) ||
!x86_pmu_initialized()) {
memset(cap, 0, sizeof(*cap));
return;
}
cap->version = x86_pmu.version;
/*
* KVM doesn't support the hybrid PMU yet.
* Return the common value in global x86_pmu,
* which available for all cores.
* Note, hybrid CPU models get tracked as having hybrid PMUs even when
* all E-cores are disabled via BIOS. When E-cores are disabled, the
* base PMU holds the correct number of counters for P-cores.
*/
cap->version = x86_pmu.version;
cap->num_counters_gp = x86_pmu.num_counters;
cap->num_counters_fixed = x86_pmu.num_counters_fixed;
cap->bit_width_gp = x86_pmu.cntval_bits;


@ -164,15 +164,27 @@ static inline void kvm_init_pmu_capability(void)
{
bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
perf_get_x86_pmu_capability(&kvm_pmu_cap);
/*
* For Intel, only support guest architectural pmu
* on a host with architectural pmu.
*/
if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
/*
* Hybrid PMUs don't play nice with virtualization without careful
* configuration by userspace, and KVM's APIs for reporting supported
* vPMU features do not account for hybrid PMUs. Disable vPMU support
* for hybrid PMUs until KVM gains a way to let userspace opt-in.
*/
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
enable_pmu = false;
if (enable_pmu) {
perf_get_x86_pmu_capability(&kvm_pmu_cap);
/*
* For Intel, only support guest architectural pmu
* on a host with architectural pmu.
*/
if ((is_intel && !kvm_pmu_cap.version) ||
!kvm_pmu_cap.num_counters_gp)
enable_pmu = false;
}
if (!enable_pmu) {
memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
return;


@ -5250,12 +5250,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
{
unsigned long val;
memset(dbgregs, 0, sizeof(*dbgregs));
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
kvm_get_dr(vcpu, 6, &val);
dbgregs->dr6 = val;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,


@ -422,6 +422,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
{ PCI_VDEVICE(INTEL, 0xa0d3), board_ahci_low_power }, /* Tiger Lake UP{3,4} AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,


@ -4044,6 +4044,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },


@ -732,7 +732,7 @@ static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
gpiod_remove_hogs(dev->hogs);
for (hog = dev->hogs; !hog->chip_label; hog++) {
for (hog = dev->hogs; hog->chip_label; hog++) {
kfree(hog->chip_label);
kfree(hog->line_name);
}


@ -53,7 +53,8 @@ config DRM_DEBUG_MM
config DRM_USE_DYNAMIC_DEBUG
bool "use dynamic debug to implement drm.debug"
default y
default n
depends on BROKEN
depends on DRM
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
depends on JUMP_LABEL


@ -4248,6 +4248,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
#endif
adev->in_suspend = false;
if (adev->enable_mes)
amdgpu_mes_self_test(adev);
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");


@ -1339,7 +1339,7 @@ static int mes_v11_0_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);


@ -643,7 +643,8 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_MGCG;
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_HDP_SD;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;


@ -4526,6 +4526,17 @@ DEVICE_ATTR_WO(s3_debug);
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
struct atom_context *ctx = mode_info->atom_context;
int index = GetIndexIntoMasterTable(DATA, Object_Header);
u16 data_offset;
/* if there is no object header, skip DM */
if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
dev_info(adev->dev, "No object header, skipping DM\n");
return -ENOENT;
}
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@ -9545,7 +9556,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
* atomic state, so call drm helper to normalize zpos.
*/
drm_atomic_normalize_zpos(dev, state);
ret = drm_atomic_normalize_zpos(dev, state);
if (ret) {
drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
goto fail;
}
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {


@ -871,8 +871,9 @@ static const struct dc_plane_cap plane_cap = {
},
// 6:1 downscaling ratio: 1000/6 = 166.666
// 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
.argb8888 = 167,
.argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@ -1755,7 +1756,7 @@ static bool dcn314_resource_construct(
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;


@ -94,7 +94,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
.does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
.does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,


@ -3184,7 +3184,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :


@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
/* reset the cache of the last wptr as well now that hw is reset */
dmub->inbox1_last_wptr = 0;
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
cw0.region.top = cw0.region.base + inst_fb->size - 1;
@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
/* mailboxes have been reset in hw, so reset the sw state as well */
dmub->inbox1_last_wptr = 0;
dmub->inbox1_rb.wrpt = 0;
dmub->inbox1_rb.rptr = 0;
dmub->outbox0_rb.wrpt = 0;
dmub->outbox0_rb.rptr = 0;
dmub->outbox1_rb.wrpt = 0;
dmub->outbox1_rb.rptr = 0;
dmub->hw_init = false;
return DMUB_STATUS_OK;


@ -2009,14 +2009,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2)))
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
gc_ver == IP_VERSION(11, 0, 2)))
gc_ver == IP_VERSION(11, 0, 2) ||
gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)


@ -1249,6 +1249,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
/*
* Wa_1408615072:icl,ehl (vsunit)
* Wa_1407596294:icl,ehl (hsunit)
*/
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
/* Wa_1407352427:icl,ehl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
PSDUNIT_CLKGATE_DIS);
@ -2368,13 +2375,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
GEN11_ENABLE_32_PLANE_MODE);
/*
* Wa_1408615072:icl,ehl (vsunit)
* Wa_1407596294:icl,ehl (hsunit)
*/
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
/*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]


@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
static int
tu102_devinit_wait(struct nvkm_device *device)
{
unsigned timeout = 50 + 2000;
do {
if (nvkm_rd32(device, 0x118128) & 0x00000001) {
if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
return 0;
}
usleep_range(1000, 2000);
} while (timeout--);
return -ETIMEDOUT;
}
int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
int ret;
ret = tu102_devinit_wait(init->base.subdev.device);
if (ret)
return ret;
gm200_devinit_preos(init, post);
return 0;
}


@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
mode->clock * 9 / 10) * 1000;
} else {
vc4_state->hvs_load = mode->clock * 1000;


@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret)
return ret;
for (i = 0; i < num_planes; i++)
for (i = 0; i < num_planes; i++) {
bo = drm_fb_dma_get_gem_obj(fb, i);
vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
}
/*
* We don't support subpixel source positioning for scaling,


@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
return -ENOMEM;
}
/*
* vmw_bo_init will delete the *p_bo object if it fails
*/
ret = vmw_bo_init(vmw, *p_bo, size,
placement, interruptible, pin,
bo_free);
@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
return ret;
out_error:
kfree(*p_bo);
*p_bo = NULL;
return ret;
}
@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
ttm_bo_put(&vmw_bo->base);
}
drm_gem_object_put(&vmw_bo->base.base);
return ret;
}
@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
drm_gem_object_put(&vbo->base.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* struct vmw_buffer_object should be placed.
* Return: Zero on success, Negative error code on error.
*
* The vmw buffer object pointer will be refcounted.
* The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
*out = gem_to_vmw_bo(gobj);
ttm_bo_get(&(*out)->base);
drm_gem_object_put(gobj);
return 0;
}
@ -777,7 +780,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
&vbo);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->base.base);
return ret;
}


@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
ttm_bo_put(&vmw_bo->base);
drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
ttm_bo_put(&vmw_bo->base);
drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;


@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
&vmw_sys_placement :
&vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo);
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
if (ret != 0)
goto out_no_bo;
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&(*p_vbo)->base.base);
out_no_bo:
return ret;
}
@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->base.base);
out_no_bo:
return ret;
}


@ -1669,8 +1669,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
if (bo) {
vmw_bo_unreference(&bo);
drm_gem_object_put(&bo->base.base);
}
if (surface)
vmw_surface_unreference(&surface);


@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
drm_gem_object_put(&buf->base.base);
out_unlock:
mutex_unlock(&overlay->mutex);


@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
drm_gem_object_put(&buffer->base.base);
return ret;
}


@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
if (base->shareable && res && res->backup)
if (res && res->backup)
drm_gem_object_put(&res->backup->base.base);
*p_base = NULL;
@ -860,7 +860,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
drm_gem_object_get(&res->backup->base.base);
/*
* We don't expose the handle to the userspace and surface
* already holds a gem reference
*/
drm_gem_handle_delete(file_priv, backup_handle);
}
tmp = vmw_resource_reference(&srf->res);
@ -1564,8 +1568,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
if (user_srf->prime.base.shareable)
drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;


@ -294,6 +294,12 @@ static void sdio_release_func(struct device *dev)
if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
sdio_free_func_cis(func);
/*
* We have now removed the link to the tuples in the
* card structure, so remove the reference.
*/
put_device(&func->card->dev);
kfree(func->info);
kfree(func->tmpbuf);
kfree(func);
@ -324,6 +330,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
device_initialize(&func->dev);
/*
* We may link to tuples in the card structure,
* we need make sure we have a reference to it.
*/
get_device(&func->card->dev);
func->dev.parent = &card->dev;
func->dev.bus = &sdio_bus_type;
func->dev.release = sdio_release_func;
@ -377,10 +389,9 @@ int sdio_add_func(struct sdio_func *func)
*/
void sdio_remove_func(struct sdio_func *func)
{
if (!sdio_func_present(func))
return;
if (sdio_func_present(func))
device_del(&func->dev);
device_del(&func->dev);
of_node_put(func->dev.of_node);
put_device(&func->dev);
}


@ -403,12 +403,6 @@ int sdio_read_func_cis(struct sdio_func *func)
if (ret)
return ret;
/*
* Since we've linked to tuples in the card structure,
* we must make sure we have a reference to it.
*/
get_device(&func->card->dev);
/*
* Vendor/device id is optional for function CIS, so
* copy it from the card structure as needed.
@ -434,11 +428,5 @@ void sdio_free_func_cis(struct sdio_func *func)
}
func->tuples = NULL;
/*
* We have now removed the link to the tuples in the
* card structure, so remove the reference.
*/
put_device(&func->card->dev);
}


@ -1053,6 +1053,16 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
mmc->ops = &jz4740_mmc_ops;
if (!mmc->f_max)
mmc->f_max = JZ_MMC_CLK_RATE;
/*
* There seems to be a problem with this driver on the JZ4760 and
* JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
* the communication fails with many SD cards.
* Until this bug is sorted out, limit the maximum rate to 24 MHz.
*/
if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
mmc->f_max = JZ_MMC_CLK_RATE;
mmc->f_min = mmc->f_max / 128;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;


@ -435,7 +435,8 @@ static int meson_mmc_clk_init(struct meson_host *host)
clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
/* get the mux parents */
@ -948,16 +949,18 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_command *cmd;
u32 status, raw_status;
u32 status, raw_status, irq_mask = IRQ_EN_MASK;
irqreturn_t ret = IRQ_NONE;
if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
irq_mask |= IRQ_SDIO;
raw_status = readl(host->regs + SD_EMMC_STATUS);
status = raw_status & (IRQ_EN_MASK | IRQ_SDIO);
status = raw_status & irq_mask;
if (!status) {
dev_dbg(host->dev,
"Unexpected IRQ! irq_en 0x%08lx - status 0x%08x\n",
IRQ_EN_MASK | IRQ_SDIO, raw_status);
"Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
irq_mask, raw_status);
return IRQ_NONE;
}
@ -1204,6 +1207,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
goto free_host;
}
mmc->caps |= MMC_CAP_CMD23;
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
host->data = (struct meson_mmc_data *)
of_device_get_match_data(&pdev->dev);
if (!host->data) {
@ -1277,11 +1285,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
mmc->caps |= MMC_CAP_CMD23;
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
if (host->dram_access_quirk) {
/* Limit segments to 1 due to low available sram memory */
mmc->max_segs = 1;


@ -1437,7 +1437,7 @@ static int mmc_spi_probe(struct spi_device *spi)
status = mmc_add_host(mmc);
if (status != 0)
goto fail_add_host;
goto fail_glue_init;
/*
* Index 0 is card detect
@ -1445,7 +1445,7 @@ static int mmc_spi_probe(struct spi_device *spi)
*/
status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
goto fail_add_host;
goto fail_gpiod_request;
if (!status) {
/*
* The platform has a CD GPIO signal that may support
@ -1460,7 +1460,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* Index 1 is write protect/read only */
status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
goto fail_add_host;
goto fail_gpiod_request;
if (!status)
has_ro = true;
@ -1474,7 +1474,7 @@ static int mmc_spi_probe(struct spi_device *spi)
? ", cd polling" : "");
return 0;
fail_add_host:
fail_gpiod_request:
mmc_remove_host(mmc);
fail_glue_init:
mmc_spi_dma_free(host);


@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
ci->pkg == BCMA_PKG_ID_BCM47186) {
if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
if (ci->pkg == BCMA_PKG_ID_BCM5358)
if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:


@ -9239,10 +9239,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;


@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_pf *pf = vsi->back;
if (i40e_enabled_xdp_vsi(vsi)) {
int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
if (frame_size > i40e_max_xdp_frame_size(vsi))
return -EINVAL;
@ -13140,6 +13140,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;


@ -270,6 +270,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (status && status != -EEXIST)
return status;
netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
vsi->vsi_num, promisc_m);
return 0;
}
@ -295,6 +297,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
promisc_m, 0);
}
netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
vsi->vsi_num, promisc_m);
return status;
}
@ -423,6 +427,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
err = 0;
vlan_ops->dis_rx_filtering(vsi);
/* promiscuous mode implies allmulticast so
* that VSIs that are in promiscuous mode are
* subscribed to multicast packets coming to
* the port
*/
err = ice_set_promisc(vsi,
ICE_MCAST_PROMISC_BITS);
if (err)
goto out_promisc;
}
} else {
/* Clear Rx filter to remove traffic from wire */
@ -439,6 +453,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_ops->ena_rx_filtering(vsi);
}
/* disable allmulti here, but only if allmulti is not
* still enabled for the netdev
*/
if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
err = ice_clear_promisc(vsi,
ICE_MCAST_PROMISC_BITS);
if (err) {
netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
err, vsi->vsi_num);
}
}
}
}
goto exit;


@ -789,6 +789,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
struct ice_tx_desc *tx_desc;
u16 cnt = xdp_ring->count;
struct ice_tx_buf *tx_buf;
u16 completed_frames = 0;
u16 xsk_frames = 0;
u16 last_rs;
int i;
@ -798,19 +799,21 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
if ((tx_desc->cmd_type_offset_bsz &
cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
if (last_rs >= ntc)
xsk_frames = last_rs - ntc + 1;
completed_frames = last_rs - ntc + 1;
else
xsk_frames = last_rs + cnt - ntc + 1;
completed_frames = last_rs + cnt - ntc + 1;
}
if (!xsk_frames)
if (!completed_frames)
return;
if (likely(!xdp_ring->xdp_tx_active))
if (likely(!xdp_ring->xdp_tx_active)) {
xsk_frames = completed_frames;
goto skip;
}
ntc = xdp_ring->next_to_clean;
for (i = 0; i < xsk_frames; i++) {
for (i = 0; i < completed_frames; i++) {
tx_buf = &xdp_ring->tx_buf[ntc];
if (tx_buf->raw_buf) {
@ -826,7 +829,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
}
skip:
tx_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_to_clean += xsk_frames;
xdp_ring->next_to_clean += completed_frames;
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)


@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
}
}
#ifdef CONFIG_IGB_HWMON
/**
* igb_set_i2c_bb - Init I2C interface
* @hw: pointer to hardware structure
**/
static void igb_set_i2c_bb(struct e1000_hw *hw)
{
u32 ctrl_ext;
s32 i2cctl;
ctrl_ext = rd32(E1000_CTRL_EXT);
ctrl_ext |= E1000_CTRL_I2C_ENA;
wr32(E1000_CTRL_EXT, ctrl_ext);
wrfl();
i2cctl = rd32(E1000_I2CPARAMS);
i2cctl |= E1000_I2CBB_EN
| E1000_I2C_CLK_OE_N
| E1000_I2C_DATA_OE_N;
wr32(E1000_I2CPARAMS, i2cctl);
wrfl();
}
#endif
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)
* interface.
*/
if (adapter->ets)
mac->ops.init_thermal_sensor_thresh(hw);
igb_set_i2c_bb(hw);
mac->ops.init_thermal_sensor_thresh(hw);
}
}
#endif
@ -3117,21 +3142,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
i2cctl = rd32(E1000_I2CPARAMS);
i2cctl |= E1000_I2CBB_EN
| E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
| E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
wr32(E1000_I2CPARAMS, i2cctl);
wrfl();
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@ -3521,6 +3537,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = true;
else
adapter->ets = false;
/* Only enable I2C bit banging if an external thermal
* sensor is supported.
*/
if (adapter->ets)
igb_set_i2c_bb(hw);
hw->mac.ops.init_thermal_sensor_thresh(hw);
if (igb_sysfs_init(adapter))
dev_err(&pdev->dev,
"failed to allocate sysfs resources\n");
@ -6794,7 +6816,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
struct timespec64 ts;
u32 tsauxc;
if (pin < 0 || pin >= IGB_N_PEROUT)
if (pin < 0 || pin >= IGB_N_SDP)
return;
spin_lock(&adapter->tmreg_lock);
@ -6802,7 +6824,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
u32 systiml, systimh, level_mask, level, rem;
u64 systim, now;
@ -6850,8 +6872,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
ts.tv_nsec = (u32)systim;
ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
} else {
ts = timespec64_add(adapter->perout[pin].start,
adapter->perout[pin].period);
ts = timespec64_add(adapter->perout[tsintr_tt].start,
adapter->perout[tsintr_tt].period);
}
/* u32 conversion of tv_sec is safe until y2106 */
@ -6860,7 +6882,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT0;
wr32(E1000_TSAUXC, tsauxc);
adapter->perout[pin].start = ts;
adapter->perout[tsintr_tt].start = ts;
spin_unlock(&adapter->tmreg_lock);
}
@ -6874,7 +6896,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct ptp_clock_event event;
struct timespec64 ts;
if (pin < 0 || pin >= IGB_N_EXTTS)
if (pin < 0 || pin >= IGB_N_SDP)
return;
if (hw->mac.type == e1000_82580 ||


@ -67,6 +67,8 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need


@ -6777,6 +6777,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
* ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
* @adapter: device handle, pointer to adapter
*/
static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
{
if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
return IXGBE_RXBUFFER_2K;
else
return IXGBE_RXBUFFER_3K;
}
/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (adapter->xdp_prog) {
int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN;
int i;
if (ixgbe_enabled_xdp_adapter(adapter)) {
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
if (new_frame_size > ixgbe_rx_bufsz(ring)) {
e_warn(probe, "Requested MTU size is not supported with XDP\n");
return -EINVAL;
}
if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
e_warn(probe, "Requested MTU size is not supported with XDP\n");
return -EINVAL;
}
}


@ -595,8 +595,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
GFP_ATOMIC);
flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info)
return;


@ -277,7 +277,6 @@ struct mtk_flow_entry {
struct {
struct mtk_flow_entry *base_flow;
struct hlist_node list;
struct {} end;
} l2_data;
};
struct rhash_head node;


@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
plat_dat->rx_clk_runs_in_lpi = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)


@ -541,9 +541,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
val |= PPSCMDx(index, 0x2);
val |= TRGTMODSELx(index, 0x2);
val |= PPSEN0;
writel(val, ioaddr + MAC_PPS_CONTROL);
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
@ -568,6 +568,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
/* Finally, activate it */
val |= PPSCMDx(index, 0x2);
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}


@ -1077,7 +1077,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
priv->eee_active = phy_init_eee(phy, 1) >= 0;
priv->eee_active =
phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);


@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
if (plat->force_thresh_dma_mode) {
if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
plat->force_sf_dma_mode = 0;
dev_warn(&pdev->dev,
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");


@ -500,7 +500,15 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
}
reinit_completion(&common->tdown_complete);
k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "rx teardown timeout\n");
}
napi_disable(&common->napi_rx);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
@ -704,6 +712,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
complete(&common->tdown_complete);
return 0;
}
@ -2634,7 +2644,7 @@ static const struct am65_cpsw_pdata j721e_pdata = {
};
static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
.quirks = 0,
.quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};


@ -86,6 +86,7 @@ struct am65_cpsw_rx_chn {
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
struct am65_cpsw_pdata {
u32 quirks;


@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0) {
netdev_err(dev->net,
"Error sending init packet. Status %i, length %i\n",
status, act_len);
"Error sending init packet. Status %i\n",
status);
return status;
}
else if (act_len != init_msg_len) {
@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
if (status != 0)
netdev_err(dev->net,
"Error receiving init result. Status %i, length %i\n",
status, act_len);
"Error receiving init result. Status %i\n",
status);
else if (act_len != expected_len)
netdev_err(dev->net, "Unexpected init result length: %i\n",
act_len);


@ -1546,31 +1546,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rxd->len = rbi->len;
}
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
(adapter->netdev->features & NETIF_F_RXHASH)) {
enum pkt_hash_types hash_type;
switch (rcd->rssType) {
case VMXNET3_RCD_RSS_TYPE_IPV4:
case VMXNET3_RCD_RSS_TYPE_IPV6:
hash_type = PKT_HASH_TYPE_L3;
break;
case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
hash_type = PKT_HASH_TYPE_L4;
break;
default:
hash_type = PKT_HASH_TYPE_L3;
break;
}
skb_set_hash(ctx->skb,
le32_to_cpu(rcd->rssHash),
hash_type);
}
#endif
skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
@ -1653,6 +1628,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
u32 mtu = adapter->netdev->mtu;
skb->len += skb->data_len;
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
(adapter->netdev->features & NETIF_F_RXHASH)) {
enum pkt_hash_types hash_type;
switch (rcd->rssType) {
case VMXNET3_RCD_RSS_TYPE_IPV4:
case VMXNET3_RCD_RSS_TYPE_IPV6:
hash_type = PKT_HASH_TYPE_L3;
break;
case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
hash_type = PKT_HASH_TYPE_L4;
break;
default:
hash_type = PKT_HASH_TYPE_L3;
break;
}
skb_set_hash(skb,
le32_to_cpu(rcd->rssHash),
hash_type);
}
#endif
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);


@ -4881,7 +4881,9 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
out_cleanup_admin_q:
blk_mq_destroy_queue(ctrl->admin_q);
out_free_tagset:
blk_mq_free_tag_set(ctrl->admin_tagset);
blk_mq_free_tag_set(set);
ctrl->admin_q = NULL;
ctrl->fabrics_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@ -4931,6 +4933,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
out_free_tag_set:
blk_mq_free_tag_set(set);
ctrl->connect_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);


@ -109,6 +109,7 @@ struct nvme_queue;
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
static void nvme_update_attrs(struct nvme_dev *dev);
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
@ -1967,6 +1968,8 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
pci_p2pmem_publish(pdev, true);
nvme_update_attrs(dev);
}
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
@ -2250,6 +2253,11 @@ static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
NULL,
};
static void nvme_update_attrs(struct nvme_dev *dev)
{
sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
}
/*
* nirqs is the number of interrupts available for write and read
* queues. The core already reserved an interrupt for the admin queue.


@ -1154,13 +1154,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
nvme_auth_stop(&ctrl->ctrl);
nvme_stop_keep_alive(&ctrl->ctrl);
flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
nvme_start_admin_queue(&ctrl->ctrl);
nvme_auth_stop(&ctrl->ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */


@ -2128,7 +2128,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_auth_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@ -2136,6 +2135,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
nvme_start_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
nvme_start_admin_queue(ctrl);
nvme_auth_stop(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */


@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
be16_to_cpu(rqst->assoc_cmd.sqsize));
if (!queue)
if (!queue) {
ret = VERR_QUEUE_ALLOC_FAIL;
nvmet_fc_tgt_a_put(iod->assoc);
}
}
}


@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_phys_free(base, size);
kmemleak_ignore_phys(base);
}
kmemleak_ignore_phys(base);
return err;
}


@ -1097,6 +1097,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
},
},
{
/* Chuwi Vi8 (CWI501) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
},
},
{
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,


@ -849,7 +849,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = ifcvf_init_hw(vf, pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
return ret;
goto err;
}
for (i = 0; i < vf->nr_vring; i++)

View File

@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info,
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
void fb_deferred_io_cleanup(struct fb_info *info)
void fb_deferred_io_release(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page = fb_deferred_io_page(info, i);
page->mapping = NULL;
}
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
fb_deferred_io_release(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);


@ -1453,6 +1453,10 @@ __releases(&info->lock)
struct fb_info * const info = file->private_data;
lock_fb_info(info);
#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
if (info->fbdefio)
fb_deferred_io_release(info);
#endif
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);


@ -361,6 +361,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
spin_lock(&mm->ioctx_lock);
rcu_read_lock();
table = rcu_dereference(mm->ioctx_table);
if (!table)
goto out_unlock;
for (i = 0; i < table->nr; i++) {
struct kioctx *ctx;
@ -374,6 +377,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
}
}
out_unlock:
rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
return res;


@ -3938,6 +3938,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
lockend = round_up(start + len, root->fs_info->sectorsize);
prev_extent_end = lockstart;
btrfs_inode_lock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
@ -4129,6 +4130,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
out_unlock:
unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
out:
kfree(backref_cache);
btrfs_free_path(path);


@ -31,329 +31,6 @@
#include "reflink.h"
#include "subpage.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
* when auto defrag is enabled we
* queue up these defrag structs to remember which
* inodes need defragging passes
*/
struct inode_defrag {
struct rb_node rb_node;
/* objectid */
u64 ino;
/*
* transid where the defrag was added, we search for
* extents newer than this
*/
u64 transid;
/* root objectid */
u64 root;
/*
* The extent size threshold for autodefrag.
*
* This value is different for compressed/non-compressed extents,
* thus needs to be passed from higher layer.
* (aka, inode_should_defrag())
*/
u32 extent_thresh;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
struct inode_defrag *defrag2)
{
if (defrag1->root > defrag2->root)
return 1;
else if (defrag1->root < defrag2->root)
return -1;
else if (defrag1->ino > defrag2->ino)
return 1;
else if (defrag1->ino < defrag2->ino)
return -1;
else
return 0;
}
/* pop a record for an inode into the defrag tree. The lock
* must be held already
*
* If you're inserting a record for an older transid than an
* existing record, the transid already in the tree is lowered
*
* If an existing record is found the defrag item you
* pass in is freed
*/
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
int ret;
p = &fs_info->defrag_inodes.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
ret = __compare_inode_defrag(defrag, entry);
if (ret < 0)
p = &parent->rb_left;
else if (ret > 0)
p = &parent->rb_right;
else {
/* if we're reinserting an entry for
* an old defrag run, make sure to
* lower the transid of our existing record
*/
if (defrag->transid < entry->transid)
entry->transid = defrag->transid;
entry->extent_thresh = min(defrag->extent_thresh,
entry->extent_thresh);
return -EEXIST;
}
}
set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0;
}
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
if (btrfs_fs_closing(fs_info))
return 0;
return 1;
}
/*
* insert a defrag record for this inode if auto defrag is
* enabled
*/
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u32 extent_thresh)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode_defrag *defrag;
u64 transid;
int ret;
if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
return 0;
if (trans)
transid = trans->transid;
else
transid = inode->root->last_trans;
defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
if (!defrag)
return -ENOMEM;
defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
defrag->root = root->root_key.objectid;
defrag->extent_thresh = extent_thresh;
spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
/*
* If we set IN_DEFRAG flag and evict the inode from memory,
* and then re-read this inode, this new inode doesn't have
* IN_DEFRAG flag. At the case, we may find the existed defrag.
*/
ret = __btrfs_add_inode_defrag(inode, defrag);
if (ret)
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
spin_unlock(&fs_info->defrag_inodes_lock);
return 0;
}
/*
* pick the defragable inode that we want, if it doesn't exist, we will get
* the next one.
*/
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
struct inode_defrag *entry = NULL;
struct inode_defrag tmp;
struct rb_node *p;
struct rb_node *parent = NULL;
int ret;
tmp.ino = ino;
tmp.root = root;
spin_lock(&fs_info->defrag_inodes_lock);
p = fs_info->defrag_inodes.rb_node;
while (p) {
parent = p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
ret = __compare_inode_defrag(&tmp, entry);
if (ret < 0)
p = parent->rb_left;
else if (ret > 0)
p = parent->rb_right;
else
goto out;
}
if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
parent = rb_next(parent);
if (parent)
entry = rb_entry(parent, struct inode_defrag, rb_node);
else
entry = NULL;
}
out:
if (entry)
rb_erase(parent, &fs_info->defrag_inodes);
spin_unlock(&fs_info->defrag_inodes_lock);
return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
struct inode_defrag *defrag;
struct rb_node *node;
spin_lock(&fs_info->defrag_inodes_lock);
node = rb_first(&fs_info->defrag_inodes);
while (node) {
rb_erase(node, &fs_info->defrag_inodes);
defrag = rb_entry(node, struct inode_defrag, rb_node);
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
cond_resched_lock(&fs_info->defrag_inodes_lock);
node = rb_first(&fs_info->defrag_inodes);
}
spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH 1024
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
struct inode_defrag *defrag)
{
struct btrfs_root *inode_root;
struct inode *inode;
struct btrfs_ioctl_defrag_range_args range;
int ret = 0;
u64 cur = 0;
again:
if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
goto cleanup;
if (!__need_auto_defrag(fs_info))
goto cleanup;
/* get the inode */
inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
if (IS_ERR(inode_root)) {
ret = PTR_ERR(inode_root);
goto cleanup;
}
inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
btrfs_put_root(inode_root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto cleanup;
}
if (cur >= i_size_read(inode)) {
iput(inode);
goto cleanup;
}
/* do a chunk of defrag */
clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
range.start = cur;
range.extent_thresh = defrag->extent_thresh;
sb_start_write(fs_info->sb);
ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
BTRFS_DEFRAG_BATCH);
sb_end_write(fs_info->sb);
iput(inode);
if (ret < 0)
goto cleanup;
cur = max(cur + fs_info->sectorsize, range.start);
goto again;
cleanup:
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
return ret;
}
/*
* run through the list of inodes in the FS that need
* defragging
*/
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
struct inode_defrag *defrag;
u64 first_ino = 0;
u64 root_objectid = 0;
atomic_inc(&fs_info->defrag_running);
while (1) {
/* Pause the auto defragger. */
if (test_bit(BTRFS_FS_STATE_REMOUNTING,
&fs_info->fs_state))
break;
if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
first_ino);
if (!defrag) {
if (root_objectid || first_ino) {
root_objectid = 0;
first_ino = 0;
continue;
} else {
break;
}
}
first_ino = defrag->ino + 1;
root_objectid = defrag->root;
__btrfs_run_defrag_inode(fs_info, defrag);
}
atomic_dec(&fs_info->defrag_running);
/*
* during unmount, we use the transaction_wait queue to
* wait for the defragger to stop
*/
wake_up(&fs_info->transaction_wait);
return 0;
}
/* simple helper to fault in pages and copy. This should go away
* and be replaced with calls into generic code.
*/
@@ -4130,23 +3807,6 @@ const struct file_operations btrfs_file_operations = {
.remap_file_range = btrfs_remap_file_range,
};
void __cold btrfs_auto_defrag_exit(void)
{
kmem_cache_destroy(btrfs_inode_defrag_cachep);
}
int __init btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
SLAB_MEM_SPREAD,
NULL);
if (!btrfs_inode_defrag_cachep)
return -ENOMEM;
return 0;
}
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
{
int ret;


@@ -10,6 +10,326 @@
#include "transaction.h"
#include "locking.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
* When auto defrag is enabled we queue up these defrag structs to remember
* which inodes need defragging passes.
*/
struct inode_defrag {
struct rb_node rb_node;
/* Inode number */
u64 ino;
/*
* Transid where the defrag was added, we search for extents newer than
* this.
*/
u64 transid;
/* Root objectid */
u64 root;
/*
* The extent size threshold for autodefrag.
*
* This value is different for compressed/non-compressed extents, thus
* needs to be passed from higher layer.
* (aka, inode_should_defrag())
*/
u32 extent_thresh;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
struct inode_defrag *defrag2)
{
if (defrag1->root > defrag2->root)
return 1;
else if (defrag1->root < defrag2->root)
return -1;
else if (defrag1->ino > defrag2->ino)
return 1;
else if (defrag1->ino < defrag2->ino)
return -1;
else
return 0;
}
/*
* Pop a record for an inode into the defrag tree. The lock must be held
* already.
*
* If you're inserting a record for an older transid than an existing record,
* the transid already in the tree is lowered.
*
* If an existing record is found the defrag item you pass in is freed.
*/
static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
int ret;
p = &fs_info->defrag_inodes.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
ret = __compare_inode_defrag(defrag, entry);
if (ret < 0)
p = &parent->rb_left;
else if (ret > 0)
p = &parent->rb_right;
else {
/*
* If we're reinserting an entry for an old defrag run,
* make sure to lower the transid of our existing
* record.
*/
if (defrag->transid < entry->transid)
entry->transid = defrag->transid;
entry->extent_thresh = min(defrag->extent_thresh,
entry->extent_thresh);
return -EEXIST;
}
}
set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0;
}
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
if (btrfs_fs_closing(fs_info))
return 0;
return 1;
}
/*
* Insert a defrag record for this inode if auto defrag is enabled.
*/
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u32 extent_thresh)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode_defrag *defrag;
u64 transid;
int ret;
if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
return 0;
if (trans)
transid = trans->transid;
else
transid = inode->root->last_trans;
defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
if (!defrag)
return -ENOMEM;
defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
defrag->root = root->root_key.objectid;
defrag->extent_thresh = extent_thresh;
spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
/*
* If we set IN_DEFRAG flag and evict the inode from memory,
* and then re-read this inode, this new inode doesn't have
* IN_DEFRAG flag. At the case, we may find the existed defrag.
*/
ret = __btrfs_add_inode_defrag(inode, defrag);
if (ret)
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
spin_unlock(&fs_info->defrag_inodes_lock);
return 0;
}
/*
* Pick the defragable inode that we want, if it doesn't exist, we will get the
* next one.
*/
static struct inode_defrag *btrfs_pick_defrag_inode(
struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
struct inode_defrag *entry = NULL;
struct inode_defrag tmp;
struct rb_node *p;
struct rb_node *parent = NULL;
int ret;
tmp.ino = ino;
tmp.root = root;
spin_lock(&fs_info->defrag_inodes_lock);
p = fs_info->defrag_inodes.rb_node;
while (p) {
parent = p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
ret = __compare_inode_defrag(&tmp, entry);
if (ret < 0)
p = parent->rb_left;
else if (ret > 0)
p = parent->rb_right;
else
goto out;
}
if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
parent = rb_next(parent);
if (parent)
entry = rb_entry(parent, struct inode_defrag, rb_node);
else
entry = NULL;
}
out:
if (entry)
rb_erase(parent, &fs_info->defrag_inodes);
spin_unlock(&fs_info->defrag_inodes_lock);
return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
struct inode_defrag *defrag;
struct rb_node *node;
spin_lock(&fs_info->defrag_inodes_lock);
node = rb_first(&fs_info->defrag_inodes);
while (node) {
rb_erase(node, &fs_info->defrag_inodes);
defrag = rb_entry(node, struct inode_defrag, rb_node);
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
cond_resched_lock(&fs_info->defrag_inodes_lock);
node = rb_first(&fs_info->defrag_inodes);
}
spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH 1024
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
struct inode_defrag *defrag)
{
struct btrfs_root *inode_root;
struct inode *inode;
struct btrfs_ioctl_defrag_range_args range;
int ret = 0;
u64 cur = 0;
again:
if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
goto cleanup;
if (!__need_auto_defrag(fs_info))
goto cleanup;
/* Get the inode */
inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
if (IS_ERR(inode_root)) {
ret = PTR_ERR(inode_root);
goto cleanup;
}
inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
btrfs_put_root(inode_root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto cleanup;
}
if (cur >= i_size_read(inode)) {
iput(inode);
goto cleanup;
}
/* Do a chunk of defrag */
clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
range.start = cur;
range.extent_thresh = defrag->extent_thresh;
sb_start_write(fs_info->sb);
ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
BTRFS_DEFRAG_BATCH);
sb_end_write(fs_info->sb);
iput(inode);
if (ret < 0)
goto cleanup;
cur = max(cur + fs_info->sectorsize, range.start);
goto again;
cleanup:
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
return ret;
}
/*
* Run through the list of inodes in the FS that need defragging.
*/
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
struct inode_defrag *defrag;
u64 first_ino = 0;
u64 root_objectid = 0;
atomic_inc(&fs_info->defrag_running);
while (1) {
/* Pause the auto defragger. */
if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
break;
if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
defrag = btrfs_pick_defrag_inode(fs_info, root_objectid, first_ino);
if (!defrag) {
if (root_objectid || first_ino) {
root_objectid = 0;
first_ino = 0;
continue;
} else {
break;
}
}
first_ino = defrag->ino + 1;
root_objectid = defrag->root;
__btrfs_run_defrag_inode(fs_info, defrag);
}
atomic_dec(&fs_info->defrag_running);
/*
* During unmount, we use the transaction_wait queue to wait for the
* defragger to stop.
*/
wake_up(&fs_info->transaction_wait);
return 0;
}
/*
* Defrag all the leaves in a given btree.
* Read all the leaves and try to get key order to
@@ -132,3 +452,20 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
return ret;
}
void __cold btrfs_auto_defrag_exit(void)
{
kmem_cache_destroy(btrfs_inode_defrag_cachep);
}
int __init btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
SLAB_MEM_SPREAD,
NULL);
if (!btrfs_inode_defrag_cachep)
return -ENOMEM;
return 0;
}


@@ -305,7 +305,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_request *req;
struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
struct iov_iter iter;
struct page **pages;
@@ -313,6 +313,11 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
int err = 0;
u64 len = subreq->len;
if (ceph_inode_is_shutdown(inode)) {
err = -EIO;
goto out;
}
if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
return;
@@ -563,6 +568,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %p idx %lu\n", page, page->index);
if (ceph_inode_is_shutdown(inode))
return -EIO;
/* verify this is a writeable snap context */
snapc = page_snap_context(page);
if (!snapc) {
@@ -1643,7 +1651,7 @@ int ceph_uninline_data(struct file *file)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_request *req = NULL;
struct ceph_cap_flush *prealloc_cf;
struct ceph_cap_flush *prealloc_cf = NULL;
struct folio *folio = NULL;
u64 inline_version = CEPH_INLINE_NONE;
struct page *pages[1];
@@ -1657,6 +1665,11 @@ int ceph_uninline_data(struct file *file)
dout("uninline_data %p %llx.%llx inline_version %llu\n",
inode, ceph_vinop(inode), inline_version);
if (ceph_inode_is_shutdown(inode)) {
err = -EIO;
goto out;
}
if (inline_version == CEPH_INLINE_NONE)
return 0;


@@ -4081,6 +4081,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
void *p, *end;
struct cap_extra_info extra_info = {};
bool queue_trunc;
bool close_sessions = false;
dout("handle_caps from mds%d\n", session->s_mds);
@@ -4218,9 +4219,13 @@ void ceph_handle_caps(struct ceph_mds_session *session,
realm = NULL;
if (snaptrace_len) {
down_write(&mdsc->snap_rwsem);
ceph_update_snap_trace(mdsc, snaptrace,
snaptrace + snaptrace_len,
false, &realm);
if (ceph_update_snap_trace(mdsc, snaptrace,
snaptrace + snaptrace_len,
false, &realm)) {
up_write(&mdsc->snap_rwsem);
close_sessions = true;
goto done;
}
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
@@ -4280,6 +4285,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
iput(inode);
out:
ceph_put_string(extra_info.pool_ns);
/* Defer closing the sessions after s_mutex lock being released */
if (close_sessions)
ceph_mdsc_close_sessions(mdsc);
return;
flush_cap_releases:


@@ -2004,6 +2004,9 @@ static int ceph_zero_partial_object(struct inode *inode,
loff_t zero = 0;
int op;
if (ceph_inode_is_shutdown(inode))
return -EIO;
if (!length) {
op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
length = &zero;


@@ -806,6 +806,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
{
struct ceph_mds_session *s;
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
return ERR_PTR(-EIO);
if (mds >= mdsc->mdsmap->possible_max_rank)
return ERR_PTR(-EINVAL);
@@ -1478,6 +1481,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
int mstate;
int mds = session->s_mds;
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
return -EIO;
/* wait for mds to go active? */
mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
dout("open_session to mds%d (%s)\n", mds,
@@ -2860,6 +2866,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
return;
}
if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
dout("do_request metadata corrupted\n");
err = -EIO;
goto finish;
}
if (req->r_timeout &&
time_after_eq(jiffies, req->r_started + req->r_timeout)) {
dout("do_request timed out\n");
@@ -3245,6 +3256,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
u64 tid;
int err, result;
int mds = session->s_mds;
bool close_sessions = false;
if (msg->front.iov_len < sizeof(*head)) {
pr_err("mdsc_handle_reply got corrupt (short) reply\n");
@@ -3351,10 +3363,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
realm = NULL;
if (rinfo->snapblob_len) {
down_write(&mdsc->snap_rwsem);
ceph_update_snap_trace(mdsc, rinfo->snapblob,
err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
rinfo->snapblob + rinfo->snapblob_len,
le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
&realm);
if (err) {
up_write(&mdsc->snap_rwsem);
close_sessions = true;
if (err == -EIO)
ceph_msg_dump(msg);
goto out_err;
}
downgrade_write(&mdsc->snap_rwsem);
} else {
down_read(&mdsc->snap_rwsem);
@@ -3412,6 +3431,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
req->r_end_latency, err);
out:
ceph_mdsc_put_request(req);
/* Defer closing the sessions after s_mutex lock being released */
if (close_sessions)
ceph_mdsc_close_sessions(mdsc);
return;
}
@@ -5017,7 +5040,7 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
}
/*
* called after sb is ro.
* called after sb is ro or when metadata corrupted.
*/
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
@@ -5307,7 +5330,8 @@ static void mds_peer_reset(struct ceph_connection *con)
struct ceph_mds_client *mdsc = s->s_mdsc;
pr_warn("mds%d closed our session\n", s->s_mds);
send_mds_reconnect(mdsc, s);
if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
send_mds_reconnect(mdsc, s);
}
static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/fs.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/iversion.h>
@@ -766,8 +767,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm;
struct ceph_snap_realm *first_realm = NULL;
struct ceph_snap_realm *realm_to_rebuild = NULL;
struct ceph_client *client = mdsc->fsc->client;
int rebuild_snapcs;
int err = -ENOMEM;
int ret;
LIST_HEAD(dirty_realms);
lockdep_assert_held_write(&mdsc->snap_rwsem);
@@ -884,6 +887,27 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
if (first_realm)
ceph_put_snap_realm(mdsc, first_realm);
pr_err("%s error %d\n", __func__, err);
/*
* When receiving a corrupted snap trace we don't know what
* exactly has happened in MDS side. And we shouldn't continue
* writing to OSD, which may corrupt the snapshot contents.
*
* Just try to blocklist this kclient and then this kclient
* must be remounted to continue after the corrupted metadata
* fixed in the MDS side.
*/
WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
if (ret)
pr_err("%s failed to blocklist %s: %d\n", __func__,
ceph_pr_addr(&client->msgr.inst.addr), ret);
WARN(1, "%s: %s%sdo remount to continue%s",
__func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
ret ? "" : " was blocklisted, ",
err == -EIO ? " after corrupted snaptrace is fixed" : "");
return err;
}
@@ -984,6 +1008,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
__le64 *split_inos = NULL, *split_realms = NULL;
int i;
int locked_rwsem = 0;
bool close_sessions = false;
/* decode */
if (msg->front.iov_len < sizeof(*h))
@@ -1092,8 +1117,12 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
* update using the provided snap trace. if we are deleting a
* snap, we can avoid queueing cap_snaps.
*/
ceph_update_snap_trace(mdsc, p, e,
op == CEPH_SNAP_OP_DESTROY, NULL);
if (ceph_update_snap_trace(mdsc, p, e,
op == CEPH_SNAP_OP_DESTROY,
NULL)) {
close_sessions = true;
goto bad;
}
if (op == CEPH_SNAP_OP_SPLIT)
/* we took a reference when we created the realm, above */
@@ -1112,6 +1141,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
out:
if (locked_rwsem)
up_write(&mdsc->snap_rwsem);
if (close_sessions)
ceph_mdsc_close_sessions(mdsc);
return;
}


@@ -100,6 +100,17 @@ struct ceph_mount_options {
char *mon_addr;
};
/* mount state */
enum {
CEPH_MOUNT_MOUNTING,
CEPH_MOUNT_MOUNTED,
CEPH_MOUNT_UNMOUNTING,
CEPH_MOUNT_UNMOUNTED,
CEPH_MOUNT_SHUTDOWN,
CEPH_MOUNT_RECOVER,
CEPH_MOUNT_FENCE_IO,
};
#define CEPH_ASYNC_CREATE_CONFLICT_BITS 8
struct ceph_fs_client {


@@ -831,6 +831,30 @@ static int __dump_skip(struct coredump_params *cprm, size_t nr)
}
}
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
if (cprm->to_skip) {
if (!__dump_skip(cprm, cprm->to_skip))
return 0;
cprm->to_skip = 0;
}
return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);
void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);
void dump_skip(struct coredump_params *cprm, size_t nr)
{
cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);
#ifdef CONFIG_ELF_CORE
static int dump_emit_page(struct coredump_params *cprm, struct page *page)
{
struct bio_vec bvec = {
@@ -864,30 +888,6 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
return 1;
}
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
if (cprm->to_skip) {
if (!__dump_skip(cprm, cprm->to_skip))
return 0;
cprm->to_skip = 0;
}
return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);
void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);
void dump_skip(struct coredump_params *cprm, size_t nr)
{
cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);
#ifdef CONFIG_ELF_CORE
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len)
{


@@ -280,8 +280,7 @@ static void fscache_create_volume_work(struct work_struct *work)
fscache_end_cache_access(volume->cache,
fscache_access_acquire_volume_end);
clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags);
fscache_put_volume(volume, fscache_volume_put_create_work);
}


@@ -1114,7 +1114,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
minseg = range[0] + segbytes - 1;
do_div(minseg, segbytes);
if (range[1] < 4096)
goto out;
maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
if (maxseg < segbytes)
goto out;
do_div(maxseg, segbytes);
maxseg--;


@@ -408,6 +408,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
if (newsize > devsize)
goto out;
/*
* Prevent underflow in second superblock position calculation.
* The exact minimum size check is done in nilfs_sufile_resize().
*/
if (newsize < 4096) {
ret = -ENOSPC;
goto out;
}
/*
* Write lock is required to protect some functions depending
* on the number of segments, the number of reserved segments,


@@ -544,9 +544,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
{
struct nilfs_super_block **sbp = nilfs->ns_sbp;
struct buffer_head **sbh = nilfs->ns_sbh;
u64 sb2off = NILFS_SB2_OFFSET_BYTES(bdev_nr_bytes(nilfs->ns_bdev));
u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
int valid[2], swp = 0;
if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
nilfs_err(sb, "device size too small");
return -EINVAL;
}
sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
&sbh[0]);
sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);


@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
/* Sanity check values */
/* there is always at least one xattr id */
if (*xattr_ids <= 0)
if (*xattr_ids == 0)
return ERR_PTR(-EINVAL);
len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);


@@ -99,16 +99,6 @@ struct ceph_options {
#define CEPH_AUTH_NAME_DEFAULT "guest"
/* mount state */
enum {
CEPH_MOUNT_MOUNTING,
CEPH_MOUNT_MOUNTED,
CEPH_MOUNT_UNMOUNTING,
CEPH_MOUNT_UNMOUNTED,
CEPH_MOUNT_SHUTDOWN,
CEPH_MOUNT_RECOVER,
};
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
return timeout ?: MAX_SCHEDULE_TIMEOUT;


@@ -662,6 +662,7 @@ extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
extern void fb_deferred_io_release(struct fb_info *info);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);


@@ -753,7 +753,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log)
return &default_hstate;
return size_to_hstate(1UL << page_size_log);
if (page_size_log < BITS_PER_LONG)
return size_to_hstate(1UL << page_size_log);
return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)


@@ -136,7 +136,7 @@ extern int mmap_rnd_compat_bits __read_mostly;
* define their own version of this macro in <asm/pgtable.h>
*/
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 80
/* This function must be updated when the size of struct page grows above 96
* or reduces below 56. The idea that compiler optimizes out switch()
* statement, and only leaves move/store instructions. Also the compiler can
* combine write statements if they are both assignments and can be reordered,
@@ -147,12 +147,18 @@ static inline void __mm_zero_struct_page(struct page *page)
{
unsigned long *_pp = (void *)page;
/* Check that struct page is either 56, 64, 72, or 80 bytes */
/* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
BUILD_BUG_ON(sizeof(struct page) & 7);
BUILD_BUG_ON(sizeof(struct page) < 56);
BUILD_BUG_ON(sizeof(struct page) > 80);
BUILD_BUG_ON(sizeof(struct page) > 96);
switch (sizeof(struct page)) {
case 96:
_pp[11] = 0;
fallthrough;
case 88:
_pp[10] = 0;
fallthrough;
case 80:
_pp[9] = 0;
fallthrough;


@@ -104,7 +104,7 @@ extern void synchronize_shrinkers(void);
#ifdef CONFIG_SHRINKER_DEBUG
extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern void shrinker_debugfs_remove(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
const char *fmt, ...);
#else /* CONFIG_SHRINKER_DEBUG */
@@ -112,8 +112,9 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
return 0;
}
static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
{
return NULL;
}
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)


@@ -252,6 +252,7 @@ struct plat_stmmacenet_data {
int rss_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
bool rx_clk_runs_in_lpi;
int has_xgmac;
bool vlan_fail_q_en;
u8 vlan_fail_q;


@@ -2430,6 +2430,19 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc
return false;
}
static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk)
{
skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
if (skb) {
if (sk_rmem_schedule(sk, skb, skb->truesize)) {
skb_set_owner_r(skb, sk);
return skb;
}
__kfree_skb(skb);
}
return NULL;
}
static inline void skb_prepare_for_gro(struct sk_buff *skb)
{
if (skb->destructor != sock_wfree) {


@@ -1278,10 +1278,11 @@ void psi_trigger_destroy(struct psi_trigger *t)
group = t->group;
/*
* Wakeup waiters to stop polling. Can happen if cgroup is deleted
* from under a polling process.
* Wakeup waiters to stop polling and clear the queue to prevent it from
* being accessed later. Can happen if cgroup is deleted from under a
* polling process.
*/
wake_up_interruptible(&t->event_wait);
wake_up_pollfree(&t->event_wait);
mutex_lock(&group->trigger_lock);


@@ -470,11 +470,35 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(alarm_forward);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
static u64 __alarm_forward_now(struct alarm *alarm, ktime_t interval, bool throttle)
{
struct alarm_base *base = &alarm_bases[alarm->type];
ktime_t now = base->get_ktime();
return alarm_forward(alarm, base->get_ktime(), interval);
if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && throttle) {
/*
* Same issue as with posix_timer_fn(). Timers which are
* periodic but the signal is ignored can starve the system
* with a very small interval. The real fix which was
* promised in the context of posix_timer_fn() never
* materialized, but someone should really work on it.
*
* To prevent DOS fake @now to be 1 jiffie out which keeps
* the overrun accounting correct but creates an
* inconsistency vs. timer_gettime(2).
*/
ktime_t kj = NSEC_PER_SEC / HZ;
if (interval < kj)
now = ktime_add(now, kj);
}
return alarm_forward(alarm, now, interval);
}
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
{
return __alarm_forward_now(alarm, interval, false);
}
EXPORT_SYMBOL_GPL(alarm_forward_now);
@@ -551,9 +575,10 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
if (posix_timer_event(ptr, si_private) && ptr->it_interval) {
/*
* Handle ignored signals and rearm the timer. This will go
* away once we handle ignored signals proper.
* away once we handle ignored signals proper. Ensure that
* small intervals cannot starve the system.
*/
ptr->it_overrun += alarm_forward_now(alarm, ptr->it_interval);
ptr->it_overrun += __alarm_forward_now(alarm, ptr->it_interval, true);
++ptr->it_requeue_pending;
ptr->it_active = 1;
result = ALARMTIMER_RESTART;


@@ -155,7 +155,7 @@ int trace_define_field(struct trace_event_call *call, const char *type,
}
EXPORT_SYMBOL_GPL(trace_define_field);
int trace_define_field_ext(struct trace_event_call *call, const char *type,
static int trace_define_field_ext(struct trace_event_call *call, const char *type,
const char *name, int offset, int size, int is_signed,
int filter_type, int len)
{


@@ -438,21 +438,27 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
if (wait == UMH_NO_WAIT) /* task has freed sub_info */
goto unlock;
if (wait & UMH_KILLABLE)
state |= TASK_KILLABLE;
if (wait & UMH_FREEZABLE)
state |= TASK_FREEZABLE;
retval = wait_for_completion_state(&done, state);
if (!retval)
goto wait_done;
if (wait & UMH_KILLABLE) {
retval = wait_for_completion_state(&done, state | TASK_KILLABLE);
if (!retval)
goto wait_done;
/* umh_complete() will see NULL and free sub_info */
if (xchg(&sub_info->complete, NULL))
goto unlock;
/*
* fallthrough; in case of -ERESTARTSYS now do uninterruptible
* wait_for_completion_state(). Since umh_complete() shall call
* complete() in a moment if xchg() above returned NULL, this
* uninterruptible wait_for_completion_state() will not block
* SIGKILL'ed processes for long.
*/
}
wait_for_completion_state(&done, state);
wait_done:
retval = sub_info->retval;


@@ -2569,18 +2569,19 @@ static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio;
int err = 0;
/* "last_index" is the index of the page beyond the end of the read */
last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
retry:
if (fatal_signal_pending(current))
return -EINTR;
filemap_get_read_batch(mapping, index, last_index, fbatch);
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & IOCB_NOIO)
return -EAGAIN;
page_cache_sync_readahead(mapping, ra, filp, index,
last_index - index);
filemap_get_read_batch(mapping, index, last_index, fbatch);
filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
}
if (!folio_batch_count(fbatch)) {
if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))


@@ -1978,7 +1978,7 @@ static unsigned long collect_longterm_unpinnable_pages(
drain_allow = false;
}
if (!folio_isolate_lru(folio))
if (folio_isolate_lru(folio))
continue;
list_add_tail(&folio->lru, movable_page_list);


@@ -3253,8 +3253,6 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
if (pmd_swp_uffd_wp(*pvmw->pmd))
pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
if (!is_migration_entry_young(entry))
@@ -3262,6 +3260,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
/* NOTE: this may contain setting soft-dirty on some archs */
if (PageDirty(new) && is_migration_entry_dirty(entry))
pmde = pmd_mkdirty(pmde);
if (is_writable_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
else
pmde = pmd_wrprotect(pmde);
if (PageAnon(new)) {
rmap_t rmap_flags = RMAP_COMPOUND;


@@ -251,6 +251,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
if (!kasan_arch_is_ready())
return false;
if (ptr != page_address(virt_to_head_page(ptr))) {
kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
return true;


@@ -191,7 +191,12 @@ bool kasan_check_range(unsigned long addr, size_t size, bool write,
bool kasan_byte_accessible(const void *addr)
{
s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
s8 shadow_byte;
if (!kasan_arch_is_ready())
return true;
shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}


@@ -291,6 +291,9 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
unsigned long shadow_start, shadow_end;
int ret;
if (!kasan_arch_is_ready())
return 0;
if (!is_vmalloc_or_module_addr((void *)addr))
return 0;
@@ -459,6 +462,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long region_start, region_end;
unsigned long size;
if (!kasan_arch_is_ready())
return;
region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
@@ -502,6 +508,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
* with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
*/
if (!kasan_arch_is_ready())
return (void *)start;
if (!is_vmalloc_or_module_addr(start))
return (void *)start;
@@ -524,6 +533,9 @@
*/
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
if (!kasan_arch_is_ready())
return;
if (!is_vmalloc_or_module_addr(start))
return;


@@ -2608,6 +2608,7 @@ static int madvise_collapse_errno(enum scan_result r)
case SCAN_CGROUP_CHARGE_FAIL:
return -EBUSY;
/* Resource temporary unavailable - trying again might succeed */
case SCAN_PAGE_COUNT:
case SCAN_PAGE_LOCK:
case SCAN_PAGE_LRU:
case SCAN_DEL_PAGE_LRU:
