This is the 5.10.56 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEKcCUACgkQONu9yGCS
aT7sMw/7BNJDmX9w+p1lgTIJJzSuz8C/eNgbeZgK7CE4DovO+WL9oEm53vqYcDDo
j5REnrRhxcBYxwG/GXl1Oniv1wHqw0SplV+5G2NH1RMy23eSFGCw+8G+YOEJnU3P
94hJuEs/43Py7eZV/VtyO2UMdDRnGI6MlNvu18YjnRJcdqIIl2gln1G8wbyySYVb
wR1rudvtiEdrmTQr7qGxeIrZNKGwFl0KxEl8j9X/aqxvfe8PRVYKlmtwblf5rybe
TElQxz2XGRgk8g2yWQmnNoU6rfFHdZ4lTnCwfpFA1XE6/HBA64/1p22QTJUZvyOU
pbQc1MRaoUncGV9UFAMY1j38JFsVar7YHHOcpp9YIJOjoyiAw4aatGDcntdWDCiG
X1mCSLs10/xGRPaJJXulp786MH4aTR5qIeoNg8mu3Z3In4ElbBW5xr0wa3N8gs3O
lEnK/gT2MHiQ1boa+Qy3W+XZmOjWtL69JgbOyRcOYS6lkHL4DFlGL2Nn5u8qGfL4
hzohJzH36W5SUHDQiYTt1wLNu4iHpAECjxcnk9fCvlcHA5Yu1bqgyQ62i3C9RA6a
/aO0B0yraHmvCAboemDsESwylxmpiRB3caqKtzlaZjoiOfPydcBwJM46ZfbzLNPh
l+/YKK2tLOXWyRIhEv8183tVeu7mZ02xjsetPtLltZPJqR+SJKE=
=8nLw
-----END PGP SIGNATURE-----

Merge 5.10.56 into android12-5.10-lts

Changes in 5.10.56
	selftest: fix build error in tools/testing/selftests/vm/userfaultfd.c
	io_uring: fix null-ptr-deref in io_sq_offload_start()
	x86/asm: Ensure asm/proto.h can be included stand-alone
	pipe: make pipe writes always wake up readers
	btrfs: fix rw device counting in __btrfs_free_extra_devids
	btrfs: mark compressed range uptodate only if all bio succeed
	Revert "ACPI: resources: Add checks for ACPI IRQ override"
	ACPI: DPTF: Fix reading of attributes
	x86/kvm: fix vcpu-id indexed array sizes
	KVM: add missing compat KVM_CLEAR_DIRTY_LOG
	ocfs2: fix zero out valid data
	ocfs2: issue zeroout to EOF blocks
	can: j1939: j1939_xtp_rx_dat_one(): fix rxtimer value between consecutive TP.DT to 750ms
	can: raw: raw_setsockopt(): fix raw_rcv panic for sock UAF
	can: peak_usb: pcan_usb_handle_bus_evt(): fix reading rxerr/txerr values
	can: mcba_usb_start(): add missing urb->transfer_dma initialization
	can: usb_8dev: fix memory leak
	can: ems_usb: fix memory leak
	can: esd_usb2: fix memory leak
	alpha: register early reserved memory in memblock
	HID: wacom: Re-enable touch by default for Cintiq 24HDT / 27QHDT
	NIU: fix incorrect error return, missed in previous revert
	drm/amd/display: ensure dentist display clock update finished in DCN20
	drm/amdgpu: Avoid printing of stack contents on firmware load error
	drm/amdgpu: Fix resource leak on probe error path
	blk-iocost: fix operation ordering in iocg_wake_fn()
	nfc: nfcsim: fix use after free during module unload
	cfg80211: Fix possible memory leak in function cfg80211_bss_update
	RDMA/bnxt_re: Fix stats counters
	bpf: Fix OOB read when printing XDP link fdinfo
	mac80211: fix enabling 4-address mode on a sta vif after assoc
	netfilter: conntrack: adjust stop timestamp to real expiry value
	netfilter: nft_nat: allow to specify layer 4 protocol NAT only
	i40e: Fix logic of disabling queues
	i40e: Fix firmware LLDP agent related warning
	i40e: Fix queue-to-TC mapping on Tx
	i40e: Fix log TC creation failure when max num of queues is exceeded
	tipc: fix implicit-connect for SYN+
	tipc: fix sleeping in tipc accept routine
	net: Set true network header for ECN decapsulation
	net: qrtr: fix memory leaks
	ionic: remove intr coalesce update from napi
	ionic: fix up dim accounting for tx and rx
	ionic: count csum_none when offload enabled
	tipc: do not write skb_shinfo frags when doing decrytion
	octeontx2-pf: Fix interface down flag on error
	mlx4: Fix missing error code in mlx4_load_one()
	KVM: x86: Check the right feature bit for MSR_KVM_ASYNC_PF_ACK access
	net: llc: fix skb_over_panic
	drm/msm/dpu: Fix sm8250_mdp register length
	drm/msm/dp: Initialize the INTF_CONFIG register
	skmsg: Make sk_psock_destroy() static
	net/mlx5: Fix flow table chaining
	net/mlx5e: Fix nullptr in mlx5e_hairpin_get_mdev()
	sctp: fix return value check in __sctp_rcv_asconf_lookup
	tulip: windbond-840: Fix missing pci_disable_device() in probe and remove
	sis900: Fix missing pci_disable_device() in probe and remove
	can: hi311x: fix a signedness bug in hi3110_cmd()
	bpf: Introduce BPF nospec instruction for mitigating Spectre v4
	bpf: Fix leakage due to insufficient speculative store bypass mitigation
	bpf: Remove superfluous aux sanitation on subprog rejection
	bpf: verifier: Allocate idmap scratch in verifier env
	bpf: Fix pointer arithmetic mask tightening under state pruning
	SMB3: fix readpage for large swap cache
	powerpc/pseries: Fix regression while building external modules
	Revert "perf map: Fix dso->nsinfo refcounting"
	i40e: Add additional info to PHY type error
	can: j1939: j1939_session_deactivate(): clarify lifetime of session object
	Linux 5.10.56

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib3c9244afb7ee5d6ee8d3235efe8956898f486c4
commit 8b444656fa
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 55
+SUBLEVEL = 56
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -325,18 +325,19 @@ setup_memory(void *kernel_end)
 		       i, cluster->usage, cluster->start_pfn,
 		       cluster->start_pfn + cluster->numpages);
 
-		/* Bit 0 is console/PALcode reserved.  Bit 1 is
-		   non-volatile memory -- we might want to mark
-		   this for later.  */
-		if (cluster->usage & 3)
-			continue;
-
 		end = cluster->start_pfn + cluster->numpages;
 		if (end > max_low_pfn)
 			max_low_pfn = end;
 
 		memblock_add(PFN_PHYS(cluster->start_pfn),
			     cluster->numpages << PAGE_SHIFT);
+
+		/* Bit 0 is console/PALcode reserved.  Bit 1 is
+		   non-volatile memory -- we might want to mark
+		   this for later.  */
+		if (cluster->usage & 3)
+			memblock_reserve(PFN_PHYS(cluster->start_pfn),
+					 cluster->numpages << PAGE_SHIFT);
 	}
 
 	/*
@@ -1602,6 +1602,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
 		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
 		break;
+	/* speculation barrier */
+	case BPF_ST | BPF_NOSPEC:
+		break;
 	/* ST: *(size *)(dst + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_W:
 	case BPF_ST | BPF_MEM | BPF_H:
@@ -830,6 +830,19 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			return ret;
 		break;
 
+	/* speculation barrier */
+	case BPF_ST | BPF_NOSPEC:
+		/*
+		 * Nothing required here.
+		 *
+		 * In case of arm64, we rely on the firmware mitigation of
+		 * Speculative Store Bypass as controlled via the ssbd kernel
+		 * parameter. Whenever the mitigation is enabled, it works
+		 * for all of the kernel code with no need to provide any
+		 * additional instructions.
+		 */
+		break;
+
 	/* ST: *(size *)(dst + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_W:
 	case BPF_ST | BPF_MEM | BPF_H:
@@ -1355,6 +1355,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 		}
 		break;
 
+	case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+		break;
+
 	case BPF_ST | BPF_B | BPF_MEM:
 	case BPF_ST | BPF_H | BPF_MEM:
 	case BPF_ST | BPF_W | BPF_MEM:
@@ -646,6 +646,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			}
 			break;
 
+		/*
+		 * BPF_ST NOSPEC (speculation barrier)
+		 */
+		case BPF_ST | BPF_NOSPEC:
+			break;
+
 		/*
 		 * BPF_ST(X)
 		 */
@@ -76,7 +76,7 @@
 #include "../../../../drivers/pci/pci.h"
 
 DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
@@ -1251,6 +1251,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 			return -1;
 		break;
 
+	/* speculation barrier */
+	case BPF_ST | BPF_NOSPEC:
+		break;
+
 	case BPF_ST | BPF_MEM | BPF_B:
 	case BPF_ST | BPF_MEM | BPF_H:
 	case BPF_ST | BPF_MEM | BPF_W:
@@ -939,6 +939,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		emit_ld(rd, 0, RV_REG_T1, ctx);
 		break;
 
+	/* speculation barrier */
+	case BPF_ST | BPF_NOSPEC:
+		break;
+
 	/* ST: *(size *)(dst + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_B:
 		emit_imm(RV_REG_T1, imm, ctx);
@@ -1153,6 +1153,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 			break;
 		}
 		break;
+	/*
+	 * BPF_NOSPEC (speculation barrier)
+	 */
+	case BPF_ST | BPF_NOSPEC:
+		break;
 	/*
 	 * BPF_ST(X)
 	 */
@@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 			return 1;
 		break;
 	}
+	/* speculation barrier */
+	case BPF_ST | BPF_NOSPEC:
+		break;
 	/* ST: *(size *)(dst + off) = imm */
 	case BPF_ST | BPF_MEM | BPF_W:
 	case BPF_ST | BPF_MEM | BPF_H:
@@ -4,6 +4,8 @@
 
 #include <asm/ldt.h>
 
+struct task_struct;
+
 /* misc architecture specific prototypes */
 
 void syscall_init(void);
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
 	ioapic->rtc_status.pending_eoi = 0;
-	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }
 
 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
@@ -43,13 +43,13 @@ struct kvm_vcpu;
 
 struct dest_map {
 	/* vcpu bitmap where IRQ has been sent */
-	DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+	DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
 
 	/*
 	 * Vector sent to a given vcpu, only valid when
 	 * the vcpu's bit in map is set
 	 */
-	u8 vectors[KVM_MAX_VCPU_ID];
+	u8 vectors[KVM_MAX_VCPU_ID + 1];
 };
 
 
@@ -3205,7 +3205,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		break;
 	case MSR_KVM_ASYNC_PF_ACK:
-		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
 			return 1;
 		if (data & 0x1) {
 			vcpu->arch.apf.pageready_pending = false;
@@ -3534,7 +3534,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vcpu->arch.apf.msr_int_val;
 		break;
 	case MSR_KVM_ASYNC_PF_ACK:
-		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+		if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
 			return 1;
 
 		msr_info->data = 0;
@@ -1138,6 +1138,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			}
 			break;
 
+			/* speculation barrier */
+		case BPF_ST | BPF_NOSPEC:
+			if (boot_cpu_has(X86_FEATURE_XMM2))
+				/* Emit 'lfence' */
+				EMIT3(0x0F, 0xAE, 0xE8);
+			break;
+
 			/* ST: *(u8*)(dst_reg + off) = imm */
 		case BPF_ST | BPF_MEM | BPF_B:
 			if (is_ereg(dst_reg))
@@ -1705,6 +1705,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			i++;
 			break;
 		}
+		/* speculation barrier */
+		case BPF_ST | BPF_NOSPEC:
+			if (boot_cpu_has(X86_FEATURE_XMM2))
+				/* Emit 'lfence' */
+				EMIT3(0x0F, 0xAE, 0xE8);
+			break;
 		/* ST: *(u8*)(dst_reg + off) = imm */
 		case BPF_ST | BPF_MEM | BPF_H:
 		case BPF_ST | BPF_MEM | BPF_B:
@@ -1394,16 +1394,17 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
 		return -1;
 
 	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
+	wait->committed = true;
 
 	/*
 	 * autoremove_wake_function() removes the wait entry only when it
-	 * actually changed the task state. We want the wait always
-	 * removed. Remove explicitly and use default_wake_function().
+	 * actually changed the task state. We want the wait always removed.
+	 * Remove explicitly and use default_wake_function(). Note that the
+	 * order of operations is important as finish_wait() tests whether
+	 * @wq_entry is removed without grabbing the lock.
 	 */
-	list_del_init(&wq_entry->entry);
-	wait->committed = true;
-
 	default_wake_function(wq_entry, mode, flags, key);
+	list_del_init_careful(&wq_entry->entry);
 	return 0;
 }
 
@@ -9,6 +9,42 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
+struct pch_fivr_resp {
+	u64 status;
+	u64 result;
+};
+
+static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp *fivr_resp)
+{
+	struct acpi_buffer resp = { sizeof(struct pch_fivr_resp), fivr_resp};
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer format = { sizeof("NN"), "NN" };
+	union acpi_object *obj;
+	acpi_status status;
+	int ret = -EFAULT;
+
+	status = acpi_evaluate_object(handle, method, NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return ret;
+
+	obj = buffer.pointer;
+	if (!obj || obj->type != ACPI_TYPE_PACKAGE)
+		goto release_buffer;
+
+	status = acpi_extract_package(obj, &format, &resp);
+	if (ACPI_FAILURE(status))
+		goto release_buffer;
+
+	if (fivr_resp->status)
+		goto release_buffer;
+
+	ret = 0;
+
+release_buffer:
+	kfree(buffer.pointer);
+	return ret;
+}
+
 /*
  * Presentation of attributes which are defined for INT1045
  * They are:
@@ -23,15 +59,14 @@ static ssize_t name##_show(struct device *dev,\
 			   char *buf)\
 {\
 	struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
-	unsigned long long val;\
-	acpi_status status;\
+	struct pch_fivr_resp fivr_resp;\
+	int status;\
 \
-	status = acpi_evaluate_integer(acpi_dev->handle, #method,\
-				       NULL, &val);\
-	if (ACPI_SUCCESS(status))\
-		return sprintf(buf, "%d\n", (int)val);\
-	else\
-		return -EINVAL;\
+	status = pch_fivr_read(acpi_dev->handle, #method, &fivr_resp);\
+	if (status)\
+		return status;\
+\
+	return sprintf(buf, "%llu\n", fivr_resp.result);\
 }
 
 #define PCH_FIVR_STORE(name, method) \
@@ -430,13 +430,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
 	}
 }
 
-static bool irq_is_legacy(struct acpi_resource_irq *irq)
-{
-	return irq->triggering == ACPI_EDGE_SENSITIVE &&
-		irq->polarity == ACPI_ACTIVE_HIGH &&
-		irq->shareable == ACPI_EXCLUSIVE;
-}
-
 /**
  * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
  * @ares: Input ACPI resource object.
@@ -475,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
 		}
 		acpi_dev_get_irqresource(res, irq->interrupts[index],
 					 irq->triggering, irq->polarity,
-					 irq->shareable, irq_is_legacy(irq));
+					 irq->shareable, true);
 		break;
 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
 		ext_irq = &ares->data.extended_irq;
@@ -3322,13 +3322,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_device_get_job_timeout_settings(adev);
 	if (r) {
 		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
-		goto failed_unmap;
+		return r;
 	}
 
 	/* early init functions */
 	r = amdgpu_device_ip_early_init(adev);
 	if (r)
-		goto failed_unmap;
+		return r;
 
 	/* doorbell bar mapping and doorbell index init*/
 	amdgpu_device_doorbell_init(adev);
@@ -3532,10 +3532,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (boco)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 
-failed_unmap:
-	iounmap(adev->rmmio);
-	adev->rmmio = NULL;
-
 	return r;
 }
 
@@ -67,7 +67,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
 	err = psp_init_asd_microcode(psp, chip_name);
 	if (err)
-		goto out;
+		return err;
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
 	err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
@@ -80,7 +80,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 	} else {
 		err = amdgpu_ucode_validate(adev->psp.ta_fw);
 		if (err)
-			goto out2;
+			goto out;
 
 		ta_hdr = (const struct ta_firmware_header_v1_0 *)
 			 adev->psp.ta_fw->data;
@@ -105,10 +105,9 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
 	return 0;
 
-out2:
+out:
 	release_firmware(adev->psp.ta_fw);
 	adev->psp.ta_fw = NULL;
-out:
 	if (err) {
 		dev_err(adev->dev,
 			"psp v12.0: Failed to load firmware \"%s\"\n",
@@ -135,7 +135,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
 
 	REG_UPDATE(DENTIST_DISPCLK_CNTL,
 			DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
-//	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
 	REG_UPDATE(DENTIST_DISPCLK_CNTL,
 			DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
 	REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
@@ -168,7 +168,7 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
 static const struct dpu_mdp_cfg sm8250_mdp[] = {
 	{
 	.name = "top_0", .id = MDP_TOP,
-	.base = 0x0, .len = 0x45C,
+	.base = 0x0, .len = 0x494,
 	.features = 0,
 	.highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
 	.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
@@ -740,6 +740,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
 	dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
 				dp_catalog->width_blanking);
 	dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+	dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
 	return 0;
 }
 
@@ -3829,7 +3829,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
 		    wacom_wac->shared->touch->product == 0xF6) {
 			input_dev->evbit[0] |= BIT_MASK(EV_SW);
 			__set_bit(SW_MUTE_DEVICE, input_dev->swbit);
-			wacom_wac->shared->has_mute_touch_switch = true;
+			wacom_wac->has_mute_touch_switch = true;
 		}
 		fallthrough;
 
@@ -119,6 +119,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
 	if (!chip_ctx)
 		return -ENOMEM;
 	chip_ctx->chip_num = bp->chip_num;
+	chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
 
 	rdev->chip_ctx = chip_ctx;
 	/* rest members to follow eventually */
@@ -507,6 +508,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 				       dma_addr_t dma_map,
 				       u32 *fw_stats_ctx_id)
 {
+	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
 	struct hwrm_stat_ctx_alloc_output resp = {0};
 	struct hwrm_stat_ctx_alloc_input req = {0};
 	struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -523,7 +525,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
-	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
 	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -56,6 +56,7 @@
 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 				      struct bnxt_qplib_stats *stats);
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+				      struct bnxt_qplib_chip_ctx *cctx,
 				      struct bnxt_qplib_stats *stats);
 
 /* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
 		goto fail;
 stats_alloc:
 	/* Stats */
-	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
 	if (rc)
 		goto fail;
 
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 }
 
 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+				      struct bnxt_qplib_chip_ctx *cctx,
 				      struct bnxt_qplib_stats *stats)
 {
 	memset(stats, 0, sizeof(*stats));
 	stats->fw_id = -1;
-	/* 128 byte aligned context memory is required only for 57500.
-	 * However making this unconditional, it does not harm previous
-	 * generation.
-	 */
-	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+	stats->size = cctx->hw_stats_size;
 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
 					&stats->dma_map, GFP_KERNEL);
 	if (!stats->dma) {
@@ -60,6 +60,7 @@ struct bnxt_qplib_chip_ctx {
 	u16	chip_num;
 	u8	chip_rev;
 	u8	chip_metal;
+	u16	hw_stats_size;
 	struct bnxt_qplib_drv_modes modes;
 };
 
@@ -218,7 +218,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
 	return ret;
 }
 
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
 {
 	struct hi3110_priv *priv = spi_get_drvdata(spi);
 
@@ -255,6 +255,8 @@ struct ems_usb {
 	unsigned int free_slots; /* remember number of available slots */
 
 	struct ems_cpc_msg active_params; /* active controller parameters */
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -587,6 +589,7 @@ static int ems_usb_start(struct ems_usb *dev)
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
 		u8 *buf = NULL;
+		dma_addr_t buf_dma;
 
 		/* create a URB, and a buffer for it */
 		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -596,7 +599,7 @@ static int ems_usb_start(struct ems_usb *dev)
 		}
 
 		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-					 &urb->transfer_dma);
+					 &buf_dma);
 		if (!buf) {
 			netdev_err(netdev, "No memory left for USB buffer\n");
 			usb_free_urb(urb);
@@ -604,6 +607,8 @@ static int ems_usb_start(struct ems_usb *dev)
 			break;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
 				  buf, RX_BUFFER_SIZE,
 				  ems_usb_read_bulk_callback, dev);
@@ -619,6 +624,9 @@ static int ems_usb_start(struct ems_usb *dev)
 			break;
 		}
 
+		dev->rxbuf[i] = buf;
+		dev->rxbuf_dma[i] = buf_dma;
+
 		/* Drop reference, USB core will take care of freeing it */
 		usb_free_urb(urb);
 	}
@@ -684,6 +692,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
 
+	for (i = 0; i < MAX_RX_URBS; ++i)
+		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+				  dev->rxbuf[i], dev->rxbuf_dma[i]);
+
 	usb_kill_anchored_urbs(&dev->tx_submitted);
 	atomic_set(&dev->active_tx_urbs, 0);
 
@@ -195,6 +195,8 @@ struct esd_usb2 {
 	int net_count;
 	u32 version;
 	int rxinitdone;
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 struct esd_usb2_net_priv {
@@ -544,6 +546,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
 		u8 *buf = NULL;
+		dma_addr_t buf_dma;
 
 		/* create a URB, and a buffer for it */
 		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -553,7 +556,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 		}
 
 		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-					 &urb->transfer_dma);
+					 &buf_dma);
 		if (!buf) {
 			dev_warn(dev->udev->dev.parent,
 				 "No memory left for USB buffer\n");
@@ -561,6 +564,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 			goto freeurb;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, dev->udev,
 				  usb_rcvbulkpipe(dev->udev, 1),
 				  buf, RX_BUFFER_SIZE,
@@ -573,8 +578,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 			usb_unanchor_urb(urb);
 			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
 					  urb->transfer_dma);
+			goto freeurb;
 		}
 
+		dev->rxbuf[i] = buf;
+		dev->rxbuf_dma[i] = buf_dma;
+
 freeurb:
 		/* Drop reference, USB core will take care of freeing it */
 		usb_free_urb(urb);
@@ -662,6 +671,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
 	int i, j;
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
+
+	for (i = 0; i < MAX_RX_URBS; ++i)
+		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+				  dev->rxbuf[i], dev->rxbuf_dma[i]);
+
 	for (i = 0; i < dev->net_count; i++) {
 		priv = dev->nets[i];
 		if (priv) {
@@ -653,6 +653,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
 			break;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, priv->udev,
 				  usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
 				  buf, MCBA_USB_RX_BUFF_SIZE,
@@ -117,7 +117,8 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
 #define PCAN_USB_BERR_MASK	(PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
 
 /* identify bus event packets with rx/tx error counters */
-#define PCAN_USB_ERR_CNT	0x80
+#define PCAN_USB_ERR_CNT_DEC	0x00	/* counters are decreasing */
+#define PCAN_USB_ERR_CNT_INC	0x80	/* counters are increasing */
 
 /* private to PCAN-USB adapter */
 struct pcan_usb {
@@ -611,11 +612,12 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
 
 	/* acccording to the content of the packet */
 	switch (ir) {
-	case PCAN_USB_ERR_CNT:
+	case PCAN_USB_ERR_CNT_DEC:
+	case PCAN_USB_ERR_CNT_INC:
 
 		/* save rx/tx error counters from in the device context */
-		pdev->bec.rxerr = mc->ptr[0];
-		pdev->bec.txerr = mc->ptr[1];
+		pdev->bec.rxerr = mc->ptr[1];
+		pdev->bec.txerr = mc->ptr[2];
 		break;
 
 	default:
@@ -137,7 +137,8 @@ struct usb_8dev_priv {
 	u8 *cmd_msg_buffer;
 
 	struct mutex usb_8dev_cmd_lock;
-
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 /* tx frame */
@@ -733,6 +734,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
 		u8 *buf;
+		dma_addr_t buf_dma;
 
 		/* create a URB, and a buffer for it */
 		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -742,7 +744,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 		}
 
 		buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-					 &urb->transfer_dma);
+					 &buf_dma);
 		if (!buf) {
 			netdev_err(netdev, "No memory left for USB buffer\n");
 			usb_free_urb(urb);
@@ -750,6 +752,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 			break;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, priv->udev,
 				  usb_rcvbulkpipe(priv->udev,
 						  USB_8DEV_ENDP_DATA_RX),
@@ -767,6 +771,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 			break;
 		}
 
+		priv->rxbuf[i] = buf;
+		priv->rxbuf_dma[i] = buf_dma;
+
 		/* Drop reference, USB core will take care of freeing it */
 		usb_free_urb(urb);
 	}
@@ -836,6 +843,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
 
 	usb_kill_anchored_urbs(&priv->rx_submitted);
 
+	for (i = 0; i < MAX_RX_URBS; ++i)
+		usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+				  priv->rxbuf[i], priv->rxbuf_dma[i]);
+
 	usb_kill_anchored_urbs(&priv->tx_submitted);
 	atomic_set(&priv->active_tx_urbs, 0);
 
@@ -357,7 +357,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 	void __iomem *ioaddr;
 
-	i = pci_enable_device(pdev);
+	i = pcim_enable_device(pdev);
 	if (i) return i;
 
 	pci_set_master(pdev);
@@ -379,7 +379,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
 	if (!ioaddr)
-		goto err_out_free_res;
+		goto err_out_netdev;
 
 	for (i = 0; i < 3; i++)
 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -458,8 +458,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 err_out_cleardev:
 	pci_iounmap(pdev, ioaddr);
-err_out_free_res:
-	pci_release_regions(pdev);
 err_out_netdev:
 	free_netdev (dev);
 	return -ENODEV;
@@ -1526,7 +1524,6 @@ static void w840_remove1(struct pci_dev *pdev)
 	if (dev) {
 		struct netdev_private *np = netdev_priv(dev);
 		unregister_netdev(dev);
-		pci_release_regions(pdev);
 		pci_iounmap(pdev, np->base_addr);
 		free_netdev(dev);
 	}
@@ -980,7 +980,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 	default:
 		/* if we got here and link is up something bad is afoot */
 		netdev_info(netdev,
-			    "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+			    "WARNING: Link is up but PHY type 0x%x is not recognized, or incorrect cable is in use\n",
 			    hw_link_info->phy_type);
 	}
 
@@ -5106,6 +5106,10 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
 			dev_warn(&pf->pdev->dev,
 				 "Device configuration forbids SW from starting the LLDP agent.\n");
 			return -EINVAL;
+		case I40E_AQ_RC_EAGAIN:
+			dev_warn(&pf->pdev->dev,
+				 "Stop FW LLDP agent command is still being processed, please try again in a second.\n");
+			return -EBUSY;
 		default:
 			dev_warn(&pf->pdev->dev,
 				 "Starting FW LLDP agent failed: error: %s, %s\n",
@@ -4425,11 +4425,10 @@ int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
 }
 
 /**
- * i40e_vsi_control_tx - Start or stop a VSI's rings
+ * i40e_vsi_enable_tx - Start a VSI's rings
  * @vsi: the VSI being configured
- * @enable: start or stop the rings
 **/
-static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
 	int i, pf_q, ret = 0;
@@ -4438,7 +4437,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
 					     pf_q,
-					     false /*is xdp*/, enable);
+					     false /*is xdp*/, true);
 		if (ret)
 			break;
 
@@ -4447,7 +4446,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 
 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
 					     pf_q + vsi->alloc_queue_pairs,
-					     true /*is xdp*/, enable);
+					     true /*is xdp*/, true);
 		if (ret)
 			break;
 	}
@@ -4545,32 +4544,25 @@ int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
 }
 
 /**
- * i40e_vsi_control_rx - Start or stop a VSI's rings
+ * i40e_vsi_enable_rx - Start a VSI's rings
  * @vsi: the VSI being configured
- * @enable: start or stop the rings
 **/
-static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
 	int i, pf_q, ret = 0;
 
 	pf_q = vsi->base_queue;
 	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		ret = i40e_control_wait_rx_q(pf, pf_q, enable);
+		ret = i40e_control_wait_rx_q(pf, pf_q, true);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "VSI seid %d Rx ring %d %sable timeout\n",
-				 vsi->seid, pf_q, (enable ? "en" : "dis"));
+				 "VSI seid %d Rx ring %d enable timeout\n",
+				 vsi->seid, pf_q);
 			break;
 		}
 	}
 
-	/* Due to HW errata, on Rx disable only, the register can indicate done
-	 * before it really is. Needs 50ms to be sure
-	 */
-	if (!enable)
-		mdelay(50);
-
 	return ret;
 }
 
@@ -4583,29 +4575,47 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
 	int ret = 0;
 
 	/* do rx first for enable and last for disable */
-	ret = i40e_vsi_control_rx(vsi, true);
+	ret = i40e_vsi_enable_rx(vsi);
 	if (ret)
 		return ret;
-	ret = i40e_vsi_control_tx(vsi, true);
+	ret = i40e_vsi_enable_tx(vsi);
 
 	return ret;
 }
 
+#define I40E_DISABLE_TX_GAP_MSEC	50
+
 /**
  * i40e_vsi_stop_rings - Stop a VSI's rings
  * @vsi: the VSI being configured
 **/
 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
 {
+	struct i40e_pf *pf = vsi->back;
+	int pf_q, err, q_end;
+
 	/* When port TX is suspended, don't wait */
 	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
 		return i40e_vsi_stop_rings_no_wait(vsi);
 
-	/* do rx first for enable and last for disable
-	 * Ignore return value, we need to shutdown whatever we can
-	 */
-	i40e_vsi_control_tx(vsi, false);
-	i40e_vsi_control_rx(vsi, false);
+	q_end = vsi->base_queue + vsi->num_queue_pairs;
+	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+		i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+
+	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
+		err = i40e_control_wait_rx_q(pf, pf_q, false);
+		if (err)
+			dev_info(&pf->pdev->dev,
+				 "VSI seid %d Rx ring %d dissable timeout\n",
+				 vsi->seid, pf_q);
+	}
+
+	msleep(I40E_DISABLE_TX_GAP_MSEC);
+	pf_q = vsi->base_queue;
+	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+		wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
+
+	i40e_vsi_wait_queues_disabled(vsi);
 }
 
 /**
@@ -6923,6 +6933,8 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
 		}
 		if (vsi->num_queue_pairs <
 		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+			dev_err(&vsi->back->pdev->dev,
+				"Failed to create traffic channel, insufficient number of queues.\n");
 			return -EINVAL;
 		}
 		if (sum_max_rate > i40e_get_link_speed(vsi)) {
@@ -12799,6 +12811,7 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_poll_controller	= i40e_netpoll,
 #endif
 	.ndo_setup_tc		= __i40e_setup_tc,
+	.ndo_select_queue	= i40e_lan_select_queue,
 	.ndo_set_features	= i40e_set_features,
 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
@@ -3524,6 +3524,56 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	return -1;
 }
 
+static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
+				  const struct sk_buff *skb,
+				  u16 num_tx_queues)
+{
+	u32 jhash_initval_salt = 0xd631614b;
+	u32 hash;
+
+	if (skb->sk && skb->sk->sk_hash)
+		hash = skb->sk->sk_hash;
+	else
+		hash = (__force u16)skb->protocol ^ skb->hash;
+
+	hash = jhash_1word(hash, jhash_initval_salt);
+
+	return (u16)(((u64)hash * num_tx_queues) >> 32);
+}
+
+u16 i40e_lan_select_queue(struct net_device *netdev,
+			  struct sk_buff *skb,
+			  struct net_device __always_unused *sb_dev)
+{
+	struct i40e_netdev_priv *np = netdev_priv(netdev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_hw *hw;
+	u16 qoffset;
+	u16 qcount;
+	u8 tclass;
+	u16 hash;
+	u8 prio;
+
+	/* is DCB enabled at all? */
+	if (vsi->tc_config.numtc == 1)
+		return i40e_swdcb_skb_tx_hash(netdev, skb,
+					      netdev->real_num_tx_queues);
+
+	prio = skb->priority;
+	hw = &vsi->back->hw;
+	tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
+	/* sanity check */
+	if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
+		tclass = 0;
+
+	/* select a queue assigned for the given TC */
+	qcount = vsi->tc_config.tc_info[tclass].qcount;
+	hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
+
+	qoffset = vsi->tc_config.tc_info[tclass].qoffset;
+	return qoffset + hash;
+}
+
 /**
  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
  * @xdpf: data to transmit
@@ -449,6 +449,8 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
 
 bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
+			  struct net_device *sb_dev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
 void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
@@ -230,15 +230,14 @@ static int otx2_set_channels(struct net_device *dev,
 	err = otx2_set_real_num_queues(dev, channel->tx_count,
 				       channel->rx_count);
 	if (err)
-		goto fail;
+		return err;
 
 	pfvf->hw.rx_queues = channel->rx_count;
 	pfvf->hw.tx_queues = channel->tx_count;
 	pfvf->qset.cq_cnt = pfvf->hw.tx_queues +  pfvf->hw.rx_queues;
 
-fail:
 	if (if_up)
-		dev->netdev_ops->ndo_open(dev);
+		err = dev->netdev_ops->ndo_open(dev);
 
 	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
 		    pfvf->hw.tx_queues, pfvf->hw.rx_queues);
@@ -342,7 +341,7 @@ static int otx2_set_ringparam(struct net_device *netdev,
 	qs->rqe_cnt = rx_count;
 
 	if (if_up)
-		netdev->netdev_ops->ndo_open(netdev);
+		return netdev->netdev_ops->ndo_open(netdev);
 
 	return 0;
 }
@@ -1592,6 +1592,7 @@ int otx2_open(struct net_device *netdev)
 err_tx_stop_queues:
 	netif_tx_stop_all_queues(netdev);
 	netif_carrier_off(netdev);
+	pf->flags |= OTX2_FLAG_INTF_DOWN;
 err_free_cints:
 	otx2_free_cints(pf, qidx);
 	vec = pci_irq_vector(pf->pdev,
@@ -1619,6 +1620,10 @@ int otx2_stop(struct net_device *netdev)
 	struct otx2_rss_info *rss;
 	int qidx, vec, wrk;
 
+	/* If the DOWN flag is set resources are already freed */
+	if (pf->flags & OTX2_FLAG_INTF_DOWN)
+		return 0;
+
 	netif_carrier_off(netdev);
 	netif_tx_stop_all_queues(netdev);
 
@@ -3535,6 +3535,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 
 		if (!SRIOV_VALID_STATE(dev->flags)) {
 			mlx4_err(dev, "Invalid SRIOV state\n");
+			err = -EINVAL;
 			goto err_close;
 		}
 	}
@@ -481,12 +481,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 static
 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
 {
+	struct mlx5_core_dev *mdev;
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
 
-	netdev = __dev_get_by_index(net, ifindex);
+	netdev = dev_get_by_index(net, ifindex);
+	if (!netdev)
+		return ERR_PTR(-ENODEV);
+
 	priv = netdev_priv(netdev);
-	return priv->mdev;
+	mdev = priv->mdev;
+	dev_put(netdev);
+
+	/* Mirred tc action holds a refcount on the ifindex net_device (see
+	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+	 * after dev_put(netdev), while we're in the context of adding a tc flow.
+	 *
+	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+	 * stored in a hairpin object, which exists until all flows, that refer to it, get
+	 * removed.
+	 *
+	 * On the other hand, after a hairpin object has been created, the peer net_device may
+	 * be removed/unbound while there are still some hairpin flows that are using it. This
+	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+	 * NETDEV_UNREGISTER event of the peer net_device.
+	 */
+	return mdev;
 }
 
 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -685,6 +705,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
 
 	func_mdev = priv->mdev;
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (IS_ERR(peer_mdev)) {
+		err = PTR_ERR(peer_mdev);
+		goto create_pair_err;
+	}
 
 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
 	if (IS_ERR(pair)) {
@@ -823,6 +847,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 	int err;
 
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (IS_ERR(peer_mdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+		return PTR_ERR(peer_mdev);
+	}
+
 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
 		return -EOPNOTSUPP;
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 			      struct fs_prio *prio)
 {
-	struct mlx5_flow_table *next_ft;
+	struct mlx5_flow_table *next_ft, *first_ft;
 	int err = 0;
 
 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
 
-	if (list_empty(&prio->node.children)) {
+	first_ft = list_first_entry_or_null(&prio->node.children,
+					    struct mlx5_flow_table, node.list);
+	if (!first_ft || first_ft->level > ft->level) {
 		err = connect_prev_fts(dev, ft, prio);
 		if (err)
 			return err;
 
-		next_ft = find_next_chained_ft(prio);
+		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
 		err = connect_fwd_rules(dev, ft, next_ft);
 		if (err)
 			return err;
@@ -2113,7 +2115,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
 				  node.list) == ft))
 		return 0;
 
-	next_ft = find_next_chained_ft(prio);
+	next_ft = find_next_ft(ft);
 	err = connect_fwd_rules(dev, next_ft, ft);
 	if (err)
 		return err;
|
||||
cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
|
||||
qcq = container_of(dim, struct ionic_qcq, dim);
|
||||
new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
|
||||
qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
|
||||
new_coal = new_coal ? new_coal : 1;
|
||||
|
||||
if (qcq->intr.dim_coal_hw != new_coal) {
|
||||
unsigned int qi = qcq->cq.bound_q->index;
|
||||
struct ionic_lif *lif = qcq->q.lif;
|
||||
|
||||
qcq->intr.dim_coal_hw = new_coal;
|
||||
|
||||
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
|
||||
lif->rxqcqs[qi]->intr.index,
|
||||
qcq->intr.dim_coal_hw);
|
||||
}
|
||||
|
||||
dim->state = DIM_START_MEASURE;
|
||||
}
|
||||
|
||||
|
@@ -197,12 +197,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
 		}
 	}
 
-	if (likely(netdev->features & NETIF_F_RXCSUM)) {
-		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
-			skb->ip_summed = CHECKSUM_COMPLETE;
-			skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
-			stats->csum_complete++;
-		}
+	if (likely(netdev->features & NETIF_F_RXCSUM) &&
+	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
+		stats->csum_complete++;
 	} else {
 		stats->csum_none++;
 	}
@@ -417,11 +416,12 @@ void ionic_rx_empty(struct ionic_queue *q)
 	}
 }
 
-static void ionic_dim_update(struct ionic_qcq *qcq)
+static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
 {
 	struct dim_sample dim_sample;
 	struct ionic_lif *lif;
 	unsigned int qi;
+	u64 pkts, bytes;
 
 	if (!qcq->intr.dim_coal_hw)
 		return;
@@ -429,14 +429,23 @@ static void ionic_dim_update(struct ionic_qcq *qcq)
 	lif = qcq->q.lif;
 	qi = qcq->cq.bound_q->index;
 
-	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
-			     lif->rxqcqs[qi]->intr.index,
-			     qcq->intr.dim_coal_hw);
+	switch (napi_mode) {
+	case IONIC_LIF_F_TX_DIM_INTR:
+		pkts = lif->txqstats[qi].pkts;
+		bytes = lif->txqstats[qi].bytes;
+		break;
+	case IONIC_LIF_F_RX_DIM_INTR:
+		pkts = lif->rxqstats[qi].pkts;
+		bytes = lif->rxqstats[qi].bytes;
+		break;
+	default:
+		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
+		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
+		break;
+	}
 
 	dim_update_sample(qcq->cq.bound_intr->rearm_count,
-			  lif->txqstats[qi].pkts,
-			  lif->txqstats[qi].bytes,
-			  &dim_sample);
+			  pkts, bytes, &dim_sample);
 
 	net_dim(&qcq->dim, dim_sample);
 }
@@ -457,7 +466,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
 				     ionic_tx_service, NULL, NULL);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
-		ionic_dim_update(qcq);
+		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
 		flags |= IONIC_INTR_CRED_UNMASK;
 		cq->bound_intr->rearm_count++;
 	}
@@ -493,7 +502,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
 	ionic_rx_fill(cq->bound_q);
 
 	if (work_done < budget && napi_complete_done(napi, work_done)) {
-		ionic_dim_update(qcq);
+		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
 		flags |= IONIC_INTR_CRED_UNMASK;
 		cq->bound_intr->rearm_count++;
 	}
@@ -535,7 +544,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
 		ionic_rx_fill_cb(rxcq->bound_q);
 
 	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
-		ionic_dim_update(qcq);
+		ionic_dim_update(qcq, 0);
 		flags |= IONIC_INTR_CRED_UNMASK;
 		rxcq->bound_intr->rearm_count++;
 	}
@@ -443,7 +443,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 #endif
 
 	/* setup various bits in PCI command register */
-	ret = pci_enable_device(pci_dev);
+	ret = pcim_enable_device(pci_dev);
 	if(ret) return ret;
 
 	i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
@@ -469,7 +469,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 	ioaddr = pci_iomap(pci_dev, 0, 0);
 	if (!ioaddr) {
 		ret = -ENOMEM;
-		goto err_out_cleardev;
+		goto err_out;
 	}
 
 	sis_priv = netdev_priv(net_dev);
@@ -581,8 +581,6 @@ static int sis900_probe(struct pci_dev *pci_dev,
 			  sis_priv->tx_ring_dma);
 err_out_unmap:
 	pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
-	pci_release_regions(pci_dev);
 err_out:
 	free_netdev(net_dev);
 	return ret;
@@ -2499,7 +2497,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
 			  sis_priv->tx_ring_dma);
 	pci_iounmap(pci_dev, sis_priv->ioaddr);
 	free_netdev(net_dev);
-	pci_release_regions(pci_dev);
 }
 
 static int __maybe_unused sis900_suspend(struct device *dev)
@@ -8191,8 +8191,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
 		err = niu_pci_vpd_scan_props(np, here, end);
 		if (err < 0)
 			return err;
+		/* ret == 1 is not an error */
 		if (err == 1)
-			return -EINVAL;
+			return 0;
 	}
 	return 0;
 }
@@ -192,8 +192,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
 
 		if (!IS_ERR(skb))
 			dev_kfree_skb(skb);
-
-		skb = ERR_PTR(-ENODEV);
+		return;
 	}
 
 	dev->cb(dev->nfc_digital_dev, dev->arg, skb);
@@ -340,7 +340,7 @@ static void end_compressed_bio_write(struct bio *bio)
 	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
 			cb->start, cb->start + cb->len - 1,
-			bio->bi_status == BLK_STS_OK);
+			!cb->errors);
 	cb->compressed_pages[0]->mapping = NULL;
 
 	end_compressed_writeback(inode, cb);
@@ -1077,6 +1077,7 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
 		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
 			list_del_init(&device->dev_alloc_list);
 			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+			fs_devices->rw_devices--;
 		}
 		list_del_init(&device->dev_list);
 		fs_devices->num_devices--;
@@ -4550,7 +4550,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
 
 static int cifs_readpage(struct file *file, struct page *page)
 {
-	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
+	loff_t offset = page_file_offset(page);
 	int rc = -EACCES;
 	unsigned int xid;
 
@@ -7997,7 +7997,7 @@ static void io_sq_offload_start(struct io_ring_ctx *ctx)
 	struct io_sq_data *sqd = ctx->sq_data;
 
 	ctx->flags &= ~IORING_SETUP_R_DISABLED;
-	if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd->thread)
+	if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd && sqd->thread)
 		wake_up_process(sqd->thread);
 }
 
fs/ocfs2/file.c
@@ -1529,6 +1529,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
 	}
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					 u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				&nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
 				       u64 start, u64 len)
 {
@@ -1538,6 +1577,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	unsigned int csize = osb->s_clustersize;
 	handle_t *handle;
+	loff_t isize = i_size_read(inode);
 
 	/*
 	 * The "start" and "end" values are NOT necessarily part of
@@ -1558,6 +1598,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
 		goto out;
 
+	/* No page cache for EOF blocks, issue zero out to disk. */
+	if (end > isize) {
+		/*
+		 * zeroout eof blocks in last cluster starting from
+		 * "isize" even "start" > "isize" because it is
+		 * complicated to zeroout just at "start" as "start"
+		 * may be not aligned with block size, buffer write
+		 * would be required to do that, but out of eof buffer
+		 * write is not supported.
+		 */
+		ret = ocfs2_zeroout_partial_cluster(inode, isize,
+					end - isize);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+		if (start >= isize)
+			goto out;
+		end = isize;
+	}
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
@@ -1855,45 +1915,6 @@ int ocfs2_remove_inode_range(struct inode *inode,
 	return ret;
 }
 
-/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- *      is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-					 u64 start, u64 len)
-{
-	int ret;
-	u64 start_block, end_block, nr_blocks;
-	u64 p_block, offset;
-	u32 cluster, p_cluster, nr_clusters;
-	struct super_block *sb = inode->i_sb;
-	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
-	if (start + len < end)
-		end = start + len;
-
-	start_block = ocfs2_blocks_for_bytes(sb, start);
-	end_block = ocfs2_blocks_for_bytes(sb, end);
-	nr_blocks = end_block - start_block;
-	if (!nr_blocks)
-		return 0;
-
-	cluster = ocfs2_bytes_to_clusters(sb, start);
-	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-				&nr_clusters, NULL);
-	if (ret)
-		return ret;
-	if (!p_cluster)
-		return 0;
-
-	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1935,7 +1956,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}
 
-	orig_isize = i_size_read(inode);
 	switch (sr->l_whence) {
 	case 0: /*SEEK_SET*/
 		break;
@@ -1943,7 +1963,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
-		sr->l_start += orig_isize;
+		sr->l_start += i_size_read(inode);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1998,6 +2018,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		ret = -EINVAL;
 	}
 
+	orig_isize = i_size_read(inode);
 	/* zeroout eof blocks in the cluster. */
 	if (!ret && change_size && orig_isize < size) {
 		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
@@ -133,4 +133,5 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
 BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
 #ifdef CONFIG_NET
 BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
 #endif

--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -205,6 +205,13 @@ struct bpf_idx_pair {
 	u32 idx;
 };
 
+struct bpf_id_pair {
+	u32 old;
+	u32 cur;
+};
+
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
 #define MAX_CALL_FRAMES 8
 struct bpf_verifier_state {
 	/* call stack tracking */
@@ -320,8 +327,8 @@ struct bpf_insn_aux_data {
 	};
 	u64 map_key_state; /* constant (32 bit) key tracking for maps */
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-	int sanitize_stack_off; /* stack slot to be cleared */
 	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
 	bool zext_dst; /* this insn zero extends dst reg */
 	u8 alu_state; /* used in combination with alu_limit */
 
@@ -393,6 +400,7 @@ struct bpf_verifier_env {
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
 	u32 id_gen;			/* used to generate unique reg IDs */
+	bool explore_alu_limits;
 	bool allow_ptr_leaks;
 	bool allow_uninit_stack;
 	bool allow_ptr_to_map_access;
@@ -404,6 +412,7 @@ struct bpf_verifier_env {
 	const struct bpf_line_info *prev_linfo;
 	struct bpf_verifier_log log;
 	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
 	struct {
 		int *insn_state;
 		int *insn_stack;

--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -72,6 +72,11 @@ struct ctl_table_header;
 /* unused opcode to mark call to interpreter with arguments */
 #define BPF_CALL_ARGS	0xe0
 
+/* unused opcode to mark speculation barrier for mitigating
+ * Speculative Store Bypass
+ */
+#define BPF_NOSPEC	0xc0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -372,6 +377,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
 		.off   = 0,					\
 		.imm   = 0 })
 
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC()						\
+	((struct bpf_insn) {					\
+		.code  = BPF_ST | BPF_NOSPEC,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
 /* Internal classic blocks for direct assignment */
 
 #define __BPF_STMT(CODE, K) \
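
BPF_ST_NOSPEC() is a compound-literal macro: it expands to a complete struct bpf_insn value with the barrier opcode and all operands zeroed. A standalone sketch of the same pattern, using a simplified stand-in for the UAPI struct (the two opcode constants match the patch; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

struct bpf_insn {			/* stand-in for the UAPI layout */
	uint8_t code;			/* opcode */
	uint8_t dst_reg:4;		/* dest register */
	uint8_t src_reg:4;		/* source register */
	int16_t off;			/* signed offset */
	int32_t imm;			/* signed immediate constant */
};

#define BPF_ST     0x02			/* instruction class */
#define BPF_NOSPEC 0xc0			/* mode bits reused for the barrier */

#define BPF_ST_NOSPEC()				\
	((struct bpf_insn) {			\
		.code  = BPF_ST | BPF_NOSPEC,	\
		.dst_reg = 0,			\
		.src_reg = 0,			\
		.off   = 0,			\
		.imm   = 0 })

int main(void)
{
	struct bpf_insn insn = BPF_ST_NOSPEC();

	/* class is the low 3 bits of the opcode, mode the high 3 bits */
	printf("code=0x%02x class=0x%02x mode=0x%02x\n",
	       insn.code, insn.code & 0x07, insn.code & 0xe0);
	return 0;
}
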

--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -395,7 +395,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk)
 }
 
 void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
-void sk_psock_destroy(struct rcu_head *rcu);
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
 
 static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)

--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -15,9 +15,11 @@
 #include <linux/if_ether.h>
 
 /* Lengths of frame formats */
-#define LLC_PDU_LEN_I	4       /* header and 2 control bytes */
-#define LLC_PDU_LEN_S	4
-#define LLC_PDU_LEN_U	3       /* header and 1 control byte */
+#define LLC_PDU_LEN_I		4       /* header and 2 control bytes */
+#define LLC_PDU_LEN_S		4
+#define LLC_PDU_LEN_U		3       /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID	(LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
 /* Known SAP addresses */
 #define LLC_GLOBAL_SAP	0xFF
 #define LLC_NULL_SAP	0x00	/* not network-layer visible */
@@ -50,9 +52,10 @@
 #define LLC_PDU_TYPE_U_MASK    0x03	/* 8-bit control field */
 #define LLC_PDU_TYPE_MASK      0x03
 
-#define LLC_PDU_TYPE_I	0	/* first bit */
-#define LLC_PDU_TYPE_S	1	/* first two bits */
-#define LLC_PDU_TYPE_U	3	/* first two bits */
+#define LLC_PDU_TYPE_I		0	/* first bit */
+#define LLC_PDU_TYPE_S		1	/* first two bits */
+#define LLC_PDU_TYPE_U		3	/* first two bits */
+#define LLC_PDU_TYPE_U_XID	4	/* private type for detecting XID commands */
 
 #define LLC_PDU_TYPE_IS_I(pdu) \
 	((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
 				       u8 ssap, u8 dsap, u8 cr)
 {
-	const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+	int hlen = 4; /* default value for I and S types */
 	struct llc_pdu_un *pdu;
 
+	switch (type) {
+	case LLC_PDU_TYPE_U:
+		hlen = 3;
+		break;
+	case LLC_PDU_TYPE_U_XID:
+		hlen = 6;
+		break;
+	}
+
 	skb_push(skb, hlen);
 	skb_reset_network_header(skb);
 	pdu = llc_pdu_un_hdr(skb);
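
With the private LLC_PDU_TYPE_U_XID type, header length selection becomes a switch rather than a U-versus-rest ternary, so XID frames reserve 6 bytes up front instead of 3. A standalone sketch of the selection logic (constants copied from the patched header, lengths hardcoded for illustration):

#include <stdio.h>

#define LLC_PDU_TYPE_I     0
#define LLC_PDU_TYPE_S     1
#define LLC_PDU_TYPE_U     3
#define LLC_PDU_TYPE_U_XID 4

static int llc_header_len(int type)
{
	int hlen = 4; /* default for I and S types */

	switch (type) {
	case LLC_PDU_TYPE_U:
		hlen = 3;
		break;
	case LLC_PDU_TYPE_U_XID:
		hlen = 6; /* 3-byte U header + 3-byte XID info block */
		break;
	}
	return hlen;
}

int main(void)
{
	printf("U=%d U_XID=%d I=%d\n",
	       llc_header_len(LLC_PDU_TYPE_U),
	       llc_header_len(LLC_PDU_TYPE_U_XID),
	       llc_header_len(LLC_PDU_TYPE_I)); /* prints U=3 U_XID=6 I=4 */
	return 0;
}
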
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
 	xid_info->fmt_id = LLC_XID_FMT_ID;	/* 0x81 */
 	xid_info->type	 = svcs_supported;
 	xid_info->rw	 = rx_window << 1;	/* size of receive window */
-	skb_put(skb, sizeof(struct llc_xid_info));
+
+	/* no need to push/put since llc_pdu_header_init() has already
+	 * pushed 3 + 3 bytes
+	 */
 }
 
 /**

--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -32,6 +32,8 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+
+#include <asm/barrier.h>
 #include <asm/unaligned.h>
 
 #include <trace/hooks/memory.h>
@@ -1384,6 +1386,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 		/* Non-UAPI available opcodes. */
 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
+		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1628,7 +1631,21 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 	COND_JMP(s, JSGE, >=)
 	COND_JMP(s, JSLE, <=)
 #undef COND_JMP
-	/* STX and ST and LDX*/
+	/* ST, STX and LDX*/
+	ST_NOSPEC:
+		/* Speculation barrier for mitigating Speculative Store Bypass.
+		 * In case of arm64, we rely on the firmware mitigation as
+		 * controlled via the ssbd kernel parameter. Whenever the
+		 * mitigation is enabled, it works for all of the kernel code
+		 * with no need to provide any additional instructions here.
+		 * In case of x86, we use 'lfence' insn for mitigation. We
+		 * reuse preexisting logic from Spectre v1 mitigation that
+		 * happens to produce the required code on x86 for v4 as well.
+		 */
+#ifdef CONFIG_X86
+		barrier_nospec();
+#endif
+		CONT;
 #define LDST(SIZEOP, SIZE)						\
 	STX_MEM_##SIZEOP:						\
 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
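
The interpreter dispatches with computed gotos: each opcode indexes a table of label addresses, so supporting the new barrier opcode is one table entry plus one labelled block. A toy sketch of that dispatch style under GCC/Clang (the opcodes and the no-op "nospec" here are illustrative, not the kernel's):

#include <stdio.h>

enum { OP_INC, OP_NOSPEC, OP_HALT };

static int run(const unsigned char *prog)
{
	/* labels-as-values: a GCC/Clang extension, as in ___bpf_prog_run() */
	static const void *jumptable[] = {
		[OP_INC]    = &&do_inc,
		[OP_NOSPEC] = &&do_nospec,
		[OP_HALT]   = &&do_halt,
	};
	int acc = 0;

#define CONT goto *jumptable[*prog++]
	CONT;
do_inc:
	acc++;
	CONT;
do_nospec:
	/* the kernel would emit a speculation barrier here (e.g. lfence) */
	CONT;
do_halt:
	return acc;
#undef CONT
}

int main(void)
{
	const unsigned char prog[] = { OP_INC, OP_NOSPEC, OP_INC, OP_HALT };

	printf("acc = %d\n", run(prog)); /* prints acc = 2 */
	return 0;
}
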

--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -162,15 +162,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
 		else
 			verbose(cbs->private_data, "BUG_%02x\n", insn->code);
 	} else if (class == BPF_ST) {
-		if (BPF_MODE(insn->code) != BPF_MEM) {
+		if (BPF_MODE(insn->code) == BPF_MEM) {
+			verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
+				insn->code,
+				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
+				insn->dst_reg,
+				insn->off, insn->imm);
+		} else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
+			verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
+		} else {
 			verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
-			return;
 		}
-		verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
-			insn->code,
-			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
-			insn->dst_reg,
-			insn->off, insn->imm);
 	} else if (class == BPF_LDX) {
 		if (BPF_MODE(insn->code) != BPF_MEM) {
 			verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);

--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2297,6 +2297,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 	cur = env->cur_state->frame[env->cur_state->curframe];
 	if (value_regno >= 0)
 		reg = &cur->regs[value_regno];
+	if (!env->bypass_spec_v4) {
+		bool sanitize = reg && is_spillable_regtype(reg->type);
+
+		for (i = 0; i < size; i++) {
+			if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+				sanitize = true;
+				break;
+			}
+		}
+
+		if (sanitize)
+			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+	}
 
 	if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
 	    !register_is_null(reg) && env->bpf_capable) {
@@ -2319,47 +2332,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 			verbose(env, "invalid size of register spill\n");
 			return -EACCES;
 		}
-
 		if (state != cur && reg->type == PTR_TO_STACK) {
 			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
 			return -EINVAL;
 		}
-
-		if (!env->bypass_spec_v4) {
-			bool sanitize = false;
-
-			if (state->stack[spi].slot_type[0] == STACK_SPILL &&
-			    register_is_const(&state->stack[spi].spilled_ptr))
-				sanitize = true;
-			for (i = 0; i < BPF_REG_SIZE; i++)
-				if (state->stack[spi].slot_type[i] == STACK_MISC) {
-					sanitize = true;
-					break;
-				}
-			if (sanitize) {
-				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
-				int soff = (-spi - 1) * BPF_REG_SIZE;
-
-				/* detected reuse of integer stack slot with a pointer
-				 * which means either llvm is reusing stack slot or
-				 * an attacker is trying to exploit CVE-2018-3639
-				 * (speculative store bypass)
-				 * Have to sanitize that slot with preemptive
-				 * store of zero.
-				 */
-				if (*poff && *poff != soff) {
-					/* disallow programs where single insn stores
-					 * into two different stack slots, since verifier
-					 * cannot sanitize them
-					 */
-					verbose(env,
-						"insn %d cannot access two stack slots fp%d and fp%d",
-						insn_idx, *poff, soff);
-					return -EINVAL;
-				}
-				*poff = soff;
-			}
-		}
 		save_register_state(state, spi, reg);
 	} else {
 		u8 type = STACK_MISC;
@@ -5816,6 +5792,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
 		alu_state |= ptr_is_dst_reg ?
 			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+		/* Limit pruning on unknown scalars to enable deep search for
+		 * potential masking differences from other program paths.
+		 */
+		if (!off_is_imm)
+			env->explore_alu_limits = true;
 	}
 
 	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -8986,13 +8968,6 @@ static bool range_within(struct bpf_reg_state *old,
 	       old->s32_max_value >= cur->s32_max_value;
 }
 
-/* Maximum number of register states that can exist at once */
-#define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
-struct idpair {
-	u32 old;
-	u32 cur;
-};
-
 /* If in the old state two registers had the same id, then they need to have
  * the same id in the new state as well.  But that id could be different from
  * the old state, so we need to track the mapping from old to new ids.
@@ -9003,11 +8978,11 @@ struct idpair {
  * So we look through our idmap to see if this old id has been seen before. If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
+static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
 {
 	unsigned int i;
 
-	for (i = 0; i < ID_MAP_SIZE; i++) {
+	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
 		if (!idmap[i].old) {
 			/* Reached an empty slot; haven't seen this id before */
 			idmap[i].old = old_id;
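
check_ids() enforces a one-to-one mapping from old register IDs to current ones, recording first sightings in empty slots. A userspace sketch of the same algorithm over a fixed-size scratch table (the size here is arbitrary; the kernel derives it from register and stack slot counts):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ID_MAP_SIZE 16 /* illustrative only */

struct id_pair { unsigned old, cur; };

static bool check_ids(unsigned old_id, unsigned cur_id, struct id_pair *idmap)
{
	unsigned i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* empty slot: first time we see old_id, record it */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id; /* must stay consistent */
	}
	return false; /* table full: conservatively report "not equal" */
}

int main(void)
{
	struct id_pair idmap[ID_MAP_SIZE];

	memset(idmap, 0, sizeof(idmap)); /* reset per comparison */
	printf("%d\n", check_ids(1, 7, idmap)); /* 1: new mapping 1->7 */
	printf("%d\n", check_ids(1, 7, idmap)); /* 1: consistent reuse */
	printf("%d\n", check_ids(1, 9, idmap)); /* 0: 1 already maps to 7 */
	return 0;
}
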
@@ -9119,8 +9094,8 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
 }
 
 /* Returns true if (rold safe implies rcur safe) */
-static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-		    struct idpair *idmap)
+static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
+		    struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
 {
 	bool equal;
 
@@ -9146,6 +9121,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 		return false;
 	switch (rold->type) {
 	case SCALAR_VALUE:
+		if (env->explore_alu_limits)
+			return false;
 		if (rcur->type == SCALAR_VALUE) {
 			if (!rold->precise && !rcur->precise)
 				return true;
@@ -9235,9 +9212,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 	return false;
 }
 
-static bool stacksafe(struct bpf_func_state *old,
-		      struct bpf_func_state *cur,
-		      struct idpair *idmap)
+static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+		      struct bpf_func_state *cur, struct bpf_id_pair *idmap)
 {
 	int i, spi;
 
@@ -9282,9 +9258,8 @@ static bool stacksafe(struct bpf_func_state *old,
 			continue;
 		if (old->stack[spi].slot_type[0] != STACK_SPILL)
 			continue;
-		if (!regsafe(&old->stack[spi].spilled_ptr,
-			     &cur->stack[spi].spilled_ptr,
-			     idmap))
+		if (!regsafe(env, &old->stack[spi].spilled_ptr,
+			     &cur->stack[spi].spilled_ptr, idmap))
 			/* when explored and current stack slot are both storing
 			 * spilled registers, check that stored pointers types
 			 * are the same as well.
@@ -9334,32 +9309,24 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
-static bool func_states_equal(struct bpf_func_state *old,
+static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			      struct bpf_func_state *cur)
 {
-	struct idpair *idmap;
-	bool ret = false;
 	int i;
 
-	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
-	/* If we failed to allocate the idmap, just say it's not safe */
-	if (!idmap)
+	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
+	for (i = 0; i < MAX_BPF_REG; i++)
+		if (!regsafe(env, &old->regs[i], &cur->regs[i],
+			     env->idmap_scratch))
+			return false;
+
+	if (!stacksafe(env, old, cur, env->idmap_scratch))
 		return false;
 
-	for (i = 0; i < MAX_BPF_REG; i++) {
-		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
-			goto out_free;
-	}
-
-	if (!stacksafe(old, cur, idmap))
-		goto out_free;
-
 	if (!refsafe(old, cur))
-		goto out_free;
-	ret = true;
-out_free:
-	kfree(idmap);
-	return ret;
+		return false;
+
+	return true;
 }
 
 static bool states_equal(struct bpf_verifier_env *env,
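
The rewritten func_states_equal() drops the per-comparison kcalloc()/kfree() pair in favor of a scratch array owned by the verifier env and zeroed before each use, so an allocation failure can no longer force a spurious "not equal". A compact sketch of that pattern (types and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define SCRATCH_SLOTS 16

struct id_pair { unsigned old, cur; };

struct env {
	struct id_pair idmap_scratch[SCRATCH_SLOTS]; /* preallocated, reused */
};

static int func_states_equal(struct env *env)
{
	/* reset instead of allocate: no failure path, nothing to free */
	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
	/* ... regsafe()/stacksafe() comparisons would use the scratch here ... */
	return 1;
}

int main(void)
{
	static struct env env;

	printf("equal=%d\n", func_states_equal(&env));
	return 0;
}
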
@@ -9386,7 +9353,7 @@ static bool states_equal(struct bpf_verifier_env *env,
 	for (i = 0; i <= old->curframe; i++) {
 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
 			return false;
-		if (!func_states_equal(old->frame[i], cur->frame[i]))
+		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
 			return false;
 	}
 	return true;
@@ -10947,35 +10914,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		bpf_convert_ctx_access_t convert_ctx_access;
+		bool ctx_access;
 
 		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
 		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
 		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
-		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
+		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
 			type = BPF_READ;
-		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
-			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
-			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
-			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
+			ctx_access = true;
+		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
+			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
+			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
+			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
+			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
 			type = BPF_WRITE;
-		else
+			ctx_access = BPF_CLASS(insn->code) == BPF_STX;
+		} else {
 			continue;
+		}
 
 		if (type == BPF_WRITE &&
-		    env->insn_aux_data[i + delta].sanitize_stack_off) {
+		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
 			struct bpf_insn patch[] = {
-				/* Sanitize suspicious stack slot with zero.
-				 * There are no memory dependencies for this store,
-				 * since it's only using frame pointer and immediate
-				 * constant of zero
-				 */
-				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
-					   env->insn_aux_data[i + delta].sanitize_stack_off,
-					   0),
-				/* the original STX instruction will immediately
-				 * overwrite the same stack slot with appropriate value
-				 */
 				*insn,
+				BPF_ST_NOSPEC(),
 			};
 
 			cnt = ARRAY_SIZE(patch);
@@ -10989,6 +10954,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			continue;
 		}
 
+		if (!ctx_access)
+			continue;
+
 		switch (env->insn_aux_data[i + delta].ptr_type) {
 		case PTR_TO_CTX:
 			if (!ops->convert_ctx_access)
@@ -11748,37 +11716,6 @@ static void free_states(struct bpf_verifier_env *env)
 	}
 }
 
-/* The verifier is using insn_aux_data[] to store temporary data during
- * verification and to store information for passes that run after the
- * verification like dead code sanitization. do_check_common() for subprogram N
- * may analyze many other subprograms. sanitize_insn_aux_data() clears all
- * temporary data after do_check_common() finds that subprogram N cannot be
- * verified independently. pass_cnt counts the number of times
- * do_check_common() was run and insn->aux->seen tells the pass number
- * insn_aux_data was touched. These variables are compared to clear temporary
- * data from failed pass. For testing and experiments do_check_common() can be
- * run multiple times even when prior attempt to verify is unsuccessful.
- *
- * Note that special handling is needed on !env->bypass_spec_v1 if this is
- * ever called outside of error path with subsequent program rejection.
- */
-static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
-{
-	struct bpf_insn *insn = env->prog->insnsi;
-	struct bpf_insn_aux_data *aux;
-	int i, class;
-
-	for (i = 0; i < env->prog->len; i++) {
-		class = BPF_CLASS(insn[i].code);
-		if (class != BPF_LDX && class != BPF_STX)
-			continue;
-		aux = &env->insn_aux_data[i];
-		if (aux->seen != env->pass_cnt)
-			continue;
-		memset(aux, 0, offsetof(typeof(*aux), orig_idx));
-	}
-}
-
 static int do_check_common(struct bpf_verifier_env *env, int subprog)
 {
 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -11848,9 +11785,6 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
 	if (!ret && pop_log)
 		bpf_vlog_reset(&env->log, 0);
 	free_states(env);
-	if (ret)
-		/* clean aux data in case subprog was rejected */
-		sanitize_insn_aux_data(env);
 	return ret;
 }
 

--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1075,11 +1075,16 @@ static bool j1939_session_deactivate_locked(struct j1939_session *session)
 
 static bool j1939_session_deactivate(struct j1939_session *session)
 {
+	struct j1939_priv *priv = session->priv;
 	bool active;
 
-	j1939_session_list_lock(session->priv);
+	j1939_session_list_lock(priv);
+	/* This function should be called with a session ref-count of at
+	 * least 2.
+	 */
+	WARN_ON_ONCE(kref_read(&session->kref) < 2);
 	active = j1939_session_deactivate_locked(session);
-	j1939_session_list_unlock(session->priv);
+	j1939_session_list_unlock(priv);
 
 	return active;
 }
@@ -1869,7 +1874,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
 		if (!session->transmission)
 			j1939_tp_schedule_txtimer(session, 0);
 	} else {
-		j1939_tp_set_rxtimeout(session, 250);
+		j1939_tp_set_rxtimeout(session, 750);
 	}
 	session->last_cmd = 0xff;
 	consume_skb(se_skb);

--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -546,10 +546,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 				return -EFAULT;
 		}
 
+		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex)
+		if (ro->bound && ro->ifindex) {
 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+			if (!dev) {
+				if (count > 1)
+					kfree(filter);
+				err = -ENODEV;
+				goto out_fil;
+			}
+		}
 
 		if (ro->bound) {
 			/* (try to) register the new filters */
@@ -588,6 +596,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 			dev_put(dev);
 
 		release_sock(sk);
+		rtnl_unlock();
 
 		break;
 
@@ -600,10 +609,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
 		err_mask &= CAN_ERR_MASK;
 
+		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex)
+		if (ro->bound && ro->ifindex) {
 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+			if (!dev) {
+				err = -ENODEV;
+				goto out_err;
+			}
+		}
 
 		/* remove current error mask */
 		if (ro->bound) {
@@ -627,6 +642,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 			dev_put(dev);
 
 		release_sock(sk);
+		rtnl_unlock();
 
 		break;
 
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -676,14 +676,13 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
 	kfree(psock);
 }
 
-void sk_psock_destroy(struct rcu_head *rcu)
+static void sk_psock_destroy(struct rcu_head *rcu)
 {
 	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);
 
 	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
 	schedule_work(&psock->gc);
 }
-EXPORT_SYMBOL_GPL(sk_psock_destroy);
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {

--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -390,7 +390,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
 	}
 
-	skb_reset_network_header(skb);
+	skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
 
 	err = IP_ECN_decapsulate(iph, skb);
 	if (unlikely(err)) {

--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
 {
 	u8 rc = LLC_PDU_LEN_U;
 
-	if (addr->sllc_test || addr->sllc_xid)
+	if (addr->sllc_test)
 		rc = LLC_PDU_LEN_U;
+	else if (addr->sllc_xid)
+		/* We need to expand header to sizeof(struct llc_xid_info)
+		 * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
+		 * as XID PDU. In llc_ui_sendmsg() we reserved header size and then
+		 * filled all other space with user data. If we won't reserve this
+		 * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
+		 */
+		rc = LLC_PDU_LEN_U_XID;
 	else if (sk->sk_type == SOCK_STREAM)
 		rc = LLC_PDU_LEN_I;
 	return rc;

--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
 	int rc;
 
-	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+	llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
 			    ev->daddr.lsap, LLC_PDU_CMD);
 	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
 	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);

--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -152,6 +152,8 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
 				  struct vif_params *params)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
 	int ret;
 
 	ret = ieee80211_if_change_type(sdata, type);
@@ -162,7 +164,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
 		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
 		ieee80211_check_fast_rx_iface(sdata);
 	} else if (type == NL80211_IFTYPE_STATION && params->use_4addr >= 0) {
+		struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+		if (params->use_4addr == ifmgd->use_4addr)
+			return 0;
+
 		sdata->u.mgd.use_4addr = params->use_4addr;
+		if (!ifmgd->associated)
+			return 0;
+
+		mutex_lock(&local->sta_mtx);
+		sta = sta_info_get(sdata, ifmgd->bssid);
+		if (sta)
+			drv_sta_set_4addr(local, sdata, &sta->sta,
+					  params->use_4addr);
+		mutex_unlock(&local->sta_mtx);
+
+		if (params->use_4addr)
+			ieee80211_send_4addr_nullfunc(local, sdata);
 	}
 
 	if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {

--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2051,6 +2051,8 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *sdata,
 			     bool powersave);
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+				   struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_hdr *hdr, bool ack, u16 tx_time);

--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1115,8 +1115,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
 	ieee80211_tx_skb(sdata, skb);
 }
 
-static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
-					  struct ieee80211_sub_if_data *sdata)
+void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+				   struct ieee80211_sub_if_data *sdata)
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr *nullfunc;

--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -662,8 +662,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 		return false;
 
 	tstamp = nf_conn_tstamp_find(ct);
-	if (tstamp && tstamp->stop == 0)
+	if (tstamp) {
+		s32 timeout = ct->timeout - nfct_time_stamp;
+
 		tstamp->stop = ktime_get_real_ns();
+		if (timeout < 0)
+			tstamp->stop -= jiffies_to_nsecs(-timeout);
+	}
 
 	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
 				      portid, report) < 0) {
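
Instead of stamping the stop time only when it was still zero, the fix backdates it by however long the entry had already been expired. A sketch of the arithmetic with made-up time values (the kernel converts a negative jiffies delta to nanoseconds):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t now_ns = 1000000000;	/* pretend "now" in ns */
	int32_t timeout = -3;		/* ct->timeout - now; negative = expired */
	int64_t ns_per_tick = 4000000;	/* assumed 4 ms tick, like HZ=250 */

	int64_t stop = now_ns;
	if (timeout < 0)		/* entry expired before teardown ran */
		stop -= (int64_t)(-timeout) * ns_per_tick;

	printf("stop=%lld (backdated by %lld ns)\n",
	       (long long)stop, (long long)(now_ns - stop));
	return 0;
}
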

--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -201,7 +201,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		alen = sizeof_field(struct nf_nat_range, min_addr.ip6);
 		break;
 	default:
-		return -EAFNOSUPPORT;
+		if (tb[NFTA_NAT_REG_ADDR_MIN])
+			return -EAFNOSUPPORT;
+		break;
 	}
 	priv->family = family;
 

--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -504,8 +504,10 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 		if (!ipc)
 			goto err;
 
-		if (sock_queue_rcv_skb(&ipc->sk, skb))
+		if (sock_queue_rcv_skb(&ipc->sk, skb)) {
+			qrtr_port_put(ipc);
 			goto err;
+		}
 
 		qrtr_port_put(ipc);
 	}
@@ -830,6 +832,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 
 	ipc = qrtr_port_lookup(to->sq_port);
 	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
+		if (ipc)
+			qrtr_port_put(ipc);
 		kfree_skb(skb);
 		return -ENODEV;
 	}
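
Both qrtr hunks plug the same class of leak: a lookup hands back a referenced object, and an early-exit path returned without dropping that reference. A minimal sketch of the pattern (plain counters, not the qrtr API):

#include <stdio.h>

struct obj { int refs; };

static void put(struct obj *o) { o->refs--; }

static int deliver(struct obj *o, int queue_fails)
{
	/* o arrives with one reference held by the lookup */
	if (queue_fails) {
		put(o);		/* the fix: drop the ref before bailing out */
		return -1;
	}
	put(o);			/* the normal path already dropped it */
	return 0;
}

int main(void)
{
	struct obj o = { .refs = 1 };

	deliver(&o, 1);
	printf("refs after error path: %d\n", o.refs); /* 0: no leak */
	return 0;
}
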

--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1175,7 +1175,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 	if (unlikely(!af))
 		return NULL;
 
-	if (af->from_addr_param(&paddr, param, peer_port, 0))
+	if (!af->from_addr_param(&paddr, param, peer_port, 0))
 		return NULL;
 
 	return __sctp_lookup_association(net, laddr, &paddr, transportp);

--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -891,16 +891,10 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
 	if (unlikely(!aead))
 		return -ENOKEY;
 
-	/* Cow skb data if needed */
-	if (likely(!skb_cloned(skb) &&
-		   (!skb_is_nonlinear(skb) || !skb_has_frag_list(skb)))) {
-		nsg = 1 + skb_shinfo(skb)->nr_frags;
-	} else {
-		nsg = skb_cow_data(skb, 0, &unused);
-		if (unlikely(nsg < 0)) {
-			pr_err("RX: skb_cow_data() returned %d\n", nsg);
-			return nsg;
-		}
+	nsg = skb_cow_data(skb, 0, &unused);
+	if (unlikely(nsg < 0)) {
+		pr_err("RX: skb_cow_data() returned %d\n", nsg);
+		return nsg;
 	}
 
 	/* Allocate memory for the AEAD operation */

--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -148,6 +148,7 @@ static void tipc_sk_remove(struct tipc_sock *tsk);
 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -1508,8 +1509,13 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 		rc = 0;
 	}
 
-	if (unlikely(syn && !rc))
+	if (unlikely(syn && !rc)) {
 		tipc_set_sk_state(sk, TIPC_CONNECTING);
+		if (timeout) {
+			timeout = msecs_to_jiffies(timeout);
+			tipc_wait_for_connect(sock, &timeout);
+		}
+	}
 
 	return rc ? rc : dlen;
 }
@@ -1557,7 +1563,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
 		return -EMSGSIZE;
 
 	/* Handle implicit connection setup */
-	if (unlikely(dest)) {
+	if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
 		rc = __tipc_sendmsg(sock, m, dlen);
 		if (dlen && dlen == rc) {
 			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
@@ -2644,7 +2650,7 @@ static int tipc_listen(struct socket *sock, int len)
 static int tipc_wait_for_accept(struct socket *sock, long timeo)
 {
 	struct sock *sk = sock->sk;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	int err;
 
 	/* True wake-one mechanism for incoming connections: only
@@ -2653,12 +2659,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 	 * anymore, the common case will execute the loop only once.
 	 */
 	for (;;) {
-		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-					  TASK_INTERRUPTIBLE);
 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+			add_wait_queue(sk_sleep(sk), &wait);
 			release_sock(sk);
-			timeo = schedule_timeout(timeo);
+			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 			lock_sock(sk);
+			remove_wait_queue(sk_sleep(sk), &wait);
 		}
 		err = 0;
 		if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2670,7 +2676,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 		if (signal_pending(current))
 			break;
 	}
-	finish_wait(sk_sleep(sk), &wait);
 	return err;
 }
 
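
The wait_woken() form registers on the wait queue, drops the socket lock while blocked, and re-checks the receive queue after retaking it, rather than setting the task state before calls that may themselves sleep. A userspace analogy using a pthread condition variable (kernel wait queues differ in detail, but the re-check loop is the same discipline):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int queue_len; /* stand-in for sk_receive_queue */

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	queue_len = 1; /* "connection arrives" */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	pthread_mutex_lock(&lock);
	while (queue_len == 0)			/* always re-check after waking */
		pthread_cond_wait(&cond, &lock); /* drops lock while blocked */
	printf("accepted, queue_len=%d\n", queue_len);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
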
@@ -2686,9 +2691,10 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
 		       bool kern)
 {
 	struct sock *new_sk, *sk = sock->sk;
-	struct sk_buff *buf;
 	struct tipc_sock *new_tsock;
+	struct msghdr m = {NULL,};
 	struct tipc_msg *msg;
+	struct sk_buff *buf;
 	long timeo;
 	int res;
 
@@ -2733,19 +2739,17 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
 	}
 
 	/*
-	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
-	 * Respond to 'SYN+' by queuing it on new socket.
+	 * Respond to 'SYN-' by discarding it & returning 'ACK'.
+	 * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
 	 */
 	if (!msg_data_sz(msg)) {
-		struct msghdr m = {NULL,};
-
 		tsk_advance_rx_queue(sk);
-		__tipc_sendstream(new_sock, &m, 0);
 	} else {
 		__skb_dequeue(&sk->sk_receive_queue);
 		__skb_queue_head(&new_sk->sk_receive_queue, buf);
 		skb_set_owner_r(buf, new_sk);
 	}
+	__tipc_sendstream(new_sock, &m, 0);
 	release_sock(new_sk);
 exit:
 	release_sock(sk);

--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1746,16 +1746,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 			 * be grouped with this beacon for updates ...
 			 */
 			if (!cfg80211_combine_bsses(rdev, new)) {
-				kfree(new);
+				bss_ref_put(rdev, new);
 				goto drop;
 			}
 		}
 
 		if (rdev->bss_entries >= bss_entries_limit &&
 		    !cfg80211_bss_expire_oldest(rdev)) {
-			if (!list_empty(&new->hidden_list))
-				list_del(&new->hidden_list);
-			kfree(new);
+			bss_ref_put(rdev, new);
 			goto drop;
 		}
 

--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -192,8 +192,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
 			if (!(prot & PROT_EXEC))
 				dso__set_loaded(dso);
 		}
-
-		nsinfo__put(dso->nsinfo);
 		dso->nsinfo = nsi;
 		dso__put(dso);
 	}

--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3896,6 +3896,16 @@ struct compat_kvm_dirty_log {
 	};
 };
 
+struct compat_kvm_clear_dirty_log {
+	__u32 slot;
+	__u32 num_pages;
+	__u64 first_page;
+	union {
+		compat_uptr_t dirty_bitmap; /* one bit per page */
+		__u64 padding2;
+	};
+};
+
 static long kvm_vm_compat_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -3905,6 +3915,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 	if (kvm->mm != current->mm)
 		return -EIO;
 	switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	case KVM_CLEAR_DIRTY_LOG: {
+		struct compat_kvm_clear_dirty_log compat_log;
+		struct kvm_clear_dirty_log log;
+
+		if (copy_from_user(&compat_log, (void __user *)arg,
+				   sizeof(compat_log)))
+			return -EFAULT;
+		log.slot	 = compat_log.slot;
+		log.num_pages	 = compat_log.num_pages;
+		log.first_page	 = compat_log.first_page;
+		log.padding2	 = compat_log.padding2;
+		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+		break;
+	}
+#endif
 	case KVM_GET_DIRTY_LOG: {
 		struct compat_kvm_dirty_log compat_log;
 		struct kvm_dirty_log log;
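
The compat path exists because 32-bit userspace lays the ioctl payload out with a 32-bit dirty_bitmap pointer; the kernel copies the compat struct and widens that pointer explicitly rather than reinterpreting the raw bytes. A sketch of the two layouts and the field-by-field conversion (struct names are illustrative; padding2 keeps both unions 8 bytes wide, so only the pointer representation differs):

#include <stdint.h>
#include <stdio.h>

struct clear_dirty_log_native {	/* 64-bit kernel view */
	uint32_t slot;
	uint32_t num_pages;
	uint64_t first_page;
	union {
		uint64_t dirty_bitmap;	/* user pointer, 8 bytes */
		uint64_t padding2;
	};
};

struct clear_dirty_log_compat {	/* what 32-bit userspace passes */
	uint32_t slot;
	uint32_t num_pages;
	uint64_t first_page;
	union {
		uint32_t dirty_bitmap;	/* compat_uptr_t analog, 4 bytes */
		uint64_t padding2;	/* keeps the union 8 bytes wide */
	};
};

int main(void)
{
	struct clear_dirty_log_compat c = {
		.slot = 1, .num_pages = 8, .first_page = 0,
	};
	c.dirty_bitmap = 0x12345678u;	/* a 32-bit pointer value */

	/* field-by-field conversion; compat_ptr() is the kernel analog
	 * of the explicit widening below
	 */
	struct clear_dirty_log_native n = {
		.slot = c.slot,
		.num_pages = c.num_pages,
		.first_page = c.first_page,
		.dirty_bitmap = (uint64_t)c.dirty_bitmap,
	};

	printf("sizes: native=%zu compat=%zu, bitmap=0x%llx\n",
	       sizeof(n), sizeof(c), (unsigned long long)n.dirty_bitmap);
	return 0;
}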