This is the 5.10.141 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmMVs1MACgkQONu9yGCS
aT4dIA/+JYX/VG4PxtN3ndJGacUGTTxXf0fKn5TAPkJVKJ5Jt5zDuWbAA4+oLWym
z8L7W2DQ8sdhWiKTSbQMUXWhzyMDyxmFED/J9sm9HXd4c1VbAaRroeViI26fcbxU
ND5soyoTprxD2iwePmmxI7EKO2IIpqkw3hkcUS0XI6bLT2j8/zusEBHUP4RF8D9I
+FCpE9miQZielOjeTLlCRiU5VlZDEg5FusTuy+EJlN4k1HJxiO/L31NVX3iG0xPs
2x4E0q5QT85xEQRwzJFUPU64hPzPFeSGENfAsiq0tzRdsqgOFuQulnp31Vt/nba3
D+D96/dRxo/OZ/s1o2zt08J9zI5tV64sdxrxXSni/+Pnc/qc2/ZrGM3pPIw4taUg
/35orlmDqseNvPyZ5BKuHc68G+1Ma3uxQTbhGfcESvOEZ+T/Ezd6wL+BGMoL/jjq
QKBrRDORAt2t4JmaNoq3t+LGyE4Kdi7RxUmnawYImwzmMKS+qAk0f9mTVcYST0BM
DWFClp8FW4IAVzGX0AWw2uz6e0T/kSkI1xCT8dzXfM7GhAUF8LPJABgmlLJRm/0N
HnzGRDwl0xPbbe9VNvhI+yCaI7HYkSuDlVHW1oujd/AoRcso5LV6TMAgnPUYyvm7
d1HZlbDP2G35Ypq+Z/EdQIb7kWvoDHd2Az3Hvslo5Chawx41S+s=
=IQqi
-----END PGP SIGNATURE-----

Merge 5.10.141 into android12-5.10-lts

Changes in 5.10.141
	mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
	x86/nospec: Unwreck the RSB stuffing
	x86/nospec: Fix i386 RSB stuffing
	crypto: lib - remove unneeded selection of XOR_BLOCKS
	s390/mm: do not trigger write fault when vma does not allow VM_WRITE
	kbuild: Fix include path in scripts/Makefile.modpost
	Bluetooth: L2CAP: Fix build errors in some archs
	Revert "PCI/portdrv: Don't disable AER reporting in get_port_device_capability()"
	HID: steam: Prevent NULL pointer dereference in steam_{recv,send}_report
	udmabuf: Set the DMA mask for the udmabuf device (v2)
	media: pvrusb2: fix memory leak in pvr_probe
	HID: hidraw: fix memory leak in hidraw_release()
	net: fix refcount bug in sk_psock_get (2)
	fbdev: fb_pm2fb: Avoid potential divide by zero error
	ftrace: Fix NULL pointer dereference in is_ftrace_trampoline when ftrace is dead
	bpf: Don't redirect packets with invalid pkt_len
	mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
	mmc: mtk-sd: Clear interrupts when cqe off/disable
	drm/amd/display: Avoid MPC infinite loop
	drm/amd/display: For stereo keep "FLIP_ANY_FRAME"
	drm/amd/display: clear optc underflow before turn off odm clock
	neigh: fix possible DoS due to net iface start/stop loop
	s390/hypfs: avoid error message under KVM
	drm/amd/pm: add missing ->fini_microcode interface for Sienna Cichlid
	drm/amd/display: Fix pixel clock programming
	drm/amdgpu: Increase tlb flush timeout for sriov
	netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y
	lib/vdso: Mark do_hres_timens() and do_coarse_timens() __always_inline()
	kprobes: don't call disarm_kprobe() for disabled kprobes
	io_uring: disable polling pollfree files
	xfs: remove infinite loop when reserving free block pool
	xfs: always succeed at setting the reserve pool size
	xfs: fix overfilling of reserve pool
	xfs: fix soft lockup via spinning in filestream ag selection loop
	xfs: revert "xfs: actually bump warning counts when we send warnings"
	net/af_packet: check len when min_header_len equals to 0
	net: neigh: don't call kfree_skb() under spin_lock_irqsave()
	Linux 5.10.141

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8b6a1e0bd31df051b90433857f126c183771d367
commit 5d60de7a5f

Makefile (2 changes)
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 140
SUBLEVEL = 141
EXTRAVERSION =
NAME = Dare mighty things
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
int rc;

if (diag204_probe()) {
pr_err("The hardware system does not support hypfs\n");
pr_info("The hardware system does not support hypfs\n");
return -ENODATA;
}

@@ -496,9 +496,9 @@ static int __init hypfs_init(void)
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
hypfs_dbfs_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
device_initcall(hypfs_init)
@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
if ((trans_exc_code & store_indication) == 0x400)
access = VM_WRITE;
if (access == VM_WRITE)
flags |= FAULT_FLAG_WRITE;
mmap_read_lock(mm);
@@ -35,33 +35,56 @@
#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */

/*
* Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
*/
#define __FILL_RETURN_SLOT \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 772f; \
int3; \
772:

/*
* Stuff the entire RSB.
*
* Google experimented with loop-unrolling and this turned out to be
* the optimal version - two calls, each with their own speculation
* trap should their return address end up getting used, in a loop.
*/
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
mov $(nr/2), reg; \
771: \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 772f; \
773: /* speculation trap */ \
UNWIND_HINT_EMPTY; \
pause; \
lfence; \
jmp 773b; \
772: \
ANNOTATE_INTRA_FUNCTION_CALL; \
call 774f; \
775: /* speculation trap */ \
UNWIND_HINT_EMPTY; \
pause; \
lfence; \
jmp 775b; \
774: \
add $(BITS_PER_LONG/8) * 2, sp; \
dec reg; \
jnz 771b; \
/* barrier for jnz misprediction */ \
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr) \
mov $(nr/2), reg; \
771: \
__FILL_RETURN_SLOT \
__FILL_RETURN_SLOT \
add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
dec reg; \
jnz 771b; \
/* barrier for jnz misprediction */ \
lfence;
#else
/*
* i386 doesn't unconditionally have LFENCE, as such it can't
* do a loop.
*/
#define __FILL_RETURN_BUFFER(reg, nr) \
.rept nr; \
__FILL_RETURN_SLOT; \
.endr; \
add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
#endif

/*
* Stuff a single RSB slot.
*
* To mitigate Post-Barrier RSB speculation, one CALL instruction must be
* forced to retire before letting a RET instruction execute.
*
* On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
* before this point.
*/
#define __FILL_ONE_RETURN \
__FILL_RETURN_SLOT \
add $(BITS_PER_LONG/8), %_ASM_SP; \
lfence;

#ifdef __ASSEMBLY__
@@ -120,28 +143,15 @@
#endif
.endm

.macro ISSUE_UNBALANCED_RET_GUARD
ANNOTATE_INTRA_FUNCTION_CALL
call .Lunbalanced_ret_guard_\@
int3
.Lunbalanced_ret_guard_\@:
add $(BITS_PER_LONG/8), %_ASM_SP
lfence
.endm

/*
* A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
* monstrosity above, manually.
*/
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
.ifb \ftr2
ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
.else
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
.endif
__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lunbalanced_\@:
ISSUE_UNBALANCED_RET_GUARD
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
__stringify(__FILL_ONE_RETURN), \ftr2

.Lskip_rsb_\@:
.endm
@@ -6197,6 +6197,7 @@ const struct file_operations binder_fops = {
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
.may_pollfree = true,
};

DEFINE_SHOW_ATTRIBUTE(state);
@@ -327,7 +327,23 @@ static struct miscdevice udmabuf_misc = {

static int __init udmabuf_dev_init(void)
{
return misc_register(&udmabuf_misc);
int ret;

ret = misc_register(&udmabuf_misc);
if (ret < 0) {
pr_err("Could not initialize udmabuf device\n");
return ret;
}

ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
DMA_BIT_MASK(64));
if (ret < 0) {
pr_err("Could not setup DMA mask for udmabuf device\n");
misc_deregister(&udmabuf_misc);
return ret;
}

return 0;
}

static void __exit udmabuf_dev_exit(void)
@@ -283,7 +283,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};

#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */

@@ -371,6 +371,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;

@@ -389,7 +390,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,

amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
return -ETIME;

@@ -839,6 +839,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;

@@ -878,7 +879,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,

amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
up_read(&adev->reset_sem);
@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
switch (pix_clk_params->color_depth) {
case COLOR_DEPTH_101010:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_121212:
actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
break;
case COLOR_DEPTH_161616:
actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;

@@ -125,6 +125,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;

/* avoid circular linked list */
ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
if (tmp_mpcc == tmp_mpcc->mpcc_bot)
break;

tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;

@@ -464,6 +464,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
OTG_CLOCK_ON, 1,
1, 1000);
} else {

//last chance to clear underflow, otherwise, it will always there due to clock is off.
if (optc->funcs->is_optc_underflow_occurred(optc) == true)
optc->funcs->clear_optc_underflow(optc);

REG_UPDATE_2(OTG_CLOCK_CONTROL,
OTG_CLOCK_GATE_DIS, 0,
OTG_CLOCK_EN, 0);

@@ -533,6 +533,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
while (tmp_mpcc != NULL) {
if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
return tmp_mpcc;

/* avoid circular linked list */
ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
if (tmp_mpcc == tmp_mpcc->mpcc_bot)
break;

tmp_mpcc = tmp_mpcc->mpcc_bot;
}
return NULL;

@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
VMID, address->vmid);

if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);

} else {
@@ -2759,6 +2759,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
.fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
int ret;

r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
if (!r) {
hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
return -EINVAL;
}

if (hid_report_len(r) < 64)
return -EINVAL;

@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
int ret;

r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
if (!r) {
hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
return -EINVAL;
}

if (hid_report_len(r) < 64)
return -EINVAL;
@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
unsigned int minor = iminor(inode);
struct hidraw_list *list = file->private_data;
unsigned long flags;
int i;

mutex_lock(&minors_lock);

spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
for (i = list->tail; i < list->head; i++)
kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
@@ -2610,6 +2610,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
del_timer_sync(&hdw->encoder_run_timer);
del_timer_sync(&hdw->encoder_wait_timer);
flush_work(&hdw->workpoll);
v4l2_device_unregister(&hdw->v4l2_dev);
usb_free_urb(hdw->ctl_read_urb);
usb_free_urb(hdw->ctl_write_urb);
kfree(hdw->ctl_read_buffer);
@@ -2293,6 +2293,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
/* disable busy check */
sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);

val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);

if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
@@ -2693,11 +2696,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
int ret;
u32 val;

if (mmc->caps2 & MMC_CAP2_CQE) {
ret = cqhci_suspend(mmc);
if (ret)
return ret;
val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
}

return pm_runtime_force_suspend(dev);
@@ -222,8 +222,15 @@ static int get_port_device_capability(struct pci_dev *dev)

#ifdef CONFIG_PCIEAER
if (dev->aer_cap && pci_aer_available() &&
(pcie_ports_native || host->native_aer))
(pcie_ports_native || host->native_aer)) {
services |= PCIE_PORT_SERVICE_AER;

/*
* Disable AER on this port in case it's been enabled by the
* BIOS (the AER service driver will enable it when necessary).
*/
pci_disable_pcie_error_reporting(dev);
}
#endif

/*
@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
return -EINVAL;
}

if (!var->pixclock) {
DPRINTK("pixclock is zero\n");
return -EINVAL;
}

if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
DPRINTK("pixclock too high (%ldKHz)\n",
PICOS2KHZ(var->pixclock));
@@ -5198,6 +5198,11 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
struct io_ring_ctx *ctx = req->ctx;
bool cancel = false;

if (req->file->f_op->may_pollfree) {
spin_lock_irq(&ctx->completion_lock);
return -EOPNOTSUPP;
}

INIT_HLIST_NODE(&req->hash_node);
io_init_poll_iocb(poll, mask, wake_func);
poll->file = req->file;
@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
.poll = signalfd_poll,
.read = signalfd_read,
.llseek = noop_llseek,
.may_pollfree = true,
};

static int do_signalfd4(int ufd, sigset_t *mask, int flags)
@@ -128,11 +128,12 @@ xfs_filestream_pick_ag(
if (!pag->pagf_init) {
err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
if (err) {
xfs_perag_put(pag);
if (err != -EAGAIN)
if (err != -EAGAIN) {
xfs_perag_put(pag);
return err;
}
/* Couldn't lock the AGF, skip this AG. */
continue;
goto next_ag;
}
}
@@ -376,46 +376,36 @@ xfs_reserve_blocks(
* If the request is larger than the current reservation, reserve the
* blocks before we update the reserve counters. Sample m_fdblocks and
* perform a partial reservation if the request exceeds free space.
*
* The code below estimates how many blocks it can request from
* fdblocks to stash in the reserve pool. This is a classic TOCTOU
* race since fdblocks updates are not always coordinated via
* m_sb_lock. Set the reserve size even if there's not enough free
* space to fill it because mod_fdblocks will refill an undersized
* reserve when it can.
*/
error = -ENOSPC;
do {
free = percpu_counter_sum(&mp->m_fdblocks) -
mp->m_alloc_set_aside;
if (free <= 0)
break;

delta = request - mp->m_resblks;
lcounter = free - delta;
if (lcounter < 0)
/* We can't satisfy the request, just get what we can */
fdblks_delta = free;
else
fdblks_delta = delta;

free = percpu_counter_sum(&mp->m_fdblocks) -
xfs_fdblocks_unavailable(mp);
delta = request - mp->m_resblks;
mp->m_resblks = request;
if (delta > 0 && free > 0) {
/*
* We'll either succeed in getting space from the free block
* count or we'll get an ENOSPC. If we get a ENOSPC, it means
* things changed while we were calculating fdblks_delta and so
* we should try again to see if there is anything left to
* reserve.
* count or we'll get an ENOSPC. Don't set the reserved flag
* here - we don't want to reserve the extra reserve blocks
* from the reserve.
*
* Don't set the reserved flag here - we don't want to reserve
* the extra reserve blocks from the reserve.....
* The desired reserve size can change after we drop the lock.
* Use mod_fdblocks to put the space into the reserve or into
* fdblocks as appropriate.
*/
fdblks_delta = min(free, delta);
spin_unlock(&mp->m_sb_lock);
error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
if (!error)
xfs_mod_fdblocks(mp, fdblks_delta, 0);
spin_lock(&mp->m_sb_lock);
} while (error == -ENOSPC);

/*
* Update the reserve counters if blocks have been successfully
* allocated.
*/
if (!error && fdblks_delta) {
mp->m_resblks += fdblks_delta;
mp->m_resblks_avail += fdblks_delta;
}

out:
if (outval) {
outval->resblks = mp->m_resblks;
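For illustration, a minimal sketch of the arithmetic the rewritten xfs_reserve_blocks() performs in a single pass (the helper name is hypothetical and not part of the patch):

/* Hypothetical helper: how much to move from fdblocks into the reserve
 * pool in one pass - the shortfall, but never more than is actually free. */
static inline int64_t example_reserve_delta(int64_t request, int64_t resblks,
					    int64_t free)
{
	int64_t delta = request - resblks;	/* shortfall in the pool */

	if (delta <= 0 || free <= 0)
		return 0;			/* nothing to transfer now */
	return free < delta ? free : delta;
}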
@@ -406,6 +406,14 @@ extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
xfs_agnumber_t *maxagi);
extern void xfs_unmountfs(xfs_mount_t *);

/* Accessor added for 5.10.y backport */
static inline uint64_t
xfs_fdblocks_unavailable(
struct xfs_mount *mp)
{
return mp->m_alloc_set_aside;
}

extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
bool reserved);
extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

@@ -615,7 +615,6 @@ xfs_dqresv_check(
return QUOTA_NL_ISOFTLONGWARN;
}

res->warnings++;
return QUOTA_NL_ISOFTWARN;
}
@@ -1893,6 +1893,7 @@ struct file_operations {
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
bool may_pollfree;

ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
@@ -43,12 +43,15 @@ struct anon_vma {
atomic_t refcount;

/*
* Count of child anon_vmas and VMAs which points to this anon_vma.
* Count of child anon_vmas. Equals to the count of all anon_vmas that
* have ->parent pointing to this one, including itself.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
unsigned degree;
unsigned long num_children;
/* Count of VMAs whose ->anon_vma pointer points to this object. */
unsigned long num_active_vmas;

struct anon_vma *parent; /* Parent of this anon_vma */
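For illustration, the reuse test that anon_vma_clone() performs later in this series, expressed with the two new counters (the helper name is hypothetical, not part of the patch):

/* Hypothetical helper mirroring the check in anon_vma_clone(): an
 * anon_vma may be reused only if no VMA points at it directly and it
 * has at most one child (its self-parent link). */
static inline bool example_anon_vma_reusable(const struct anon_vma *av)
{
	return av->num_active_vmas == 0 && av->num_children < 2;
}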
@@ -2245,6 +2245,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_assert_len(struct sk_buff *skb)
{
#ifdef CONFIG_DEBUG_NET
if (WARN_ONCE(!skb->len, "%s\n", __func__))
DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
#endif /* CONFIG_DEBUG_NET */
}

/*
* Add data to an sk_buff
*/
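skb_assert_len() above is the debug-build side of the zero-length checks added elsewhere in this release (bpf test_run, __dev_queue_xmit(), packet_snd()). An illustrative caller, assuming the CONFIG_DEBUG_NET semantics defined above; the function name is made up:

/* Illustration only: reject empty packets before they reach a driver and
 * let skb_assert_len() warn and dump the skb under CONFIG_DEBUG_NET. */
static int example_xmit_checked(struct sk_buff *skb)
{
	if (!skb->len)
		return -EINVAL;
	skb_assert_len(skb);
	return dev_queue_xmit(skb);
}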
@@ -281,7 +281,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
return rcu_dereference_sk_user_data(sk);
return __rcu_dereference_sk_user_data_with_flags(sk,
SK_USER_DATA_PSOCK);
}

static inline void sk_psock_queue_msg(struct sk_psock *psock,
@@ -548,14 +548,26 @@ enum sk_pacing {
SK_PACING_FQ = 2,
};

/* Pointer stored in sk_user_data might not be suitable for copying
* when cloning the socket. For instance, it can point to a reference
* counted object. sk_user_data bottom bit is set if pointer must not
* be copied.
/* flag bits in sk_user_data
*
* - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
* not be suitable for copying when cloning the socket. For instance,
* it can point to a reference counted object. sk_user_data bottom
* bit is set if pointer must not be copied.
*
* - SK_USER_DATA_BPF: Mark whether sk_user_data field is
* managed/owned by a BPF reuseport array. This bit should be set
* when sk_user_data's sk is added to the bpf's reuseport_array.
*
* - SK_USER_DATA_PSOCK: Mark whether pointer stored in
* sk_user_data points to psock type. This bit should be set
* when sk_user_data is assigned to a psock object.
*/
#define SK_USER_DATA_NOCOPY 1UL
#define SK_USER_DATA_BPF 2UL /* Managed by BPF */
#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
#define SK_USER_DATA_BPF 2UL
#define SK_USER_DATA_PSOCK 4UL
#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
SK_USER_DATA_PSOCK)

/**
* sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -568,24 +580,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

/**
* __rcu_dereference_sk_user_data_with_flags - return the pointer
* only if argument flags all has been set in sk_user_data. Otherwise
* return NULL
*
* @sk: socket
* @flags: flag bits
*/
static inline void *
__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
uintptr_t flags)
{
uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));

WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);

if ((sk_user_data & flags) == flags)
return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
return NULL;
}

#define rcu_dereference_sk_user_data(sk) \
__rcu_dereference_sk_user_data_with_flags(sk, 0)
#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
({ \
void *__tmp = rcu_dereference(__sk_user_data((sk))); \
(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \
uintptr_t __tmp1 = (uintptr_t)(ptr), \
__tmp2 = (uintptr_t)(flags); \
WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \
WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \
rcu_assign_pointer(__sk_user_data((sk)), \
__tmp1 | __tmp2); \
})
#define rcu_assign_sk_user_data(sk, ptr) \
({ \
uintptr_t __tmp = (uintptr_t)(ptr); \
WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
rcu_assign_pointer(__sk_user_data((sk)), __tmp); \
})
#define rcu_assign_sk_user_data_nocopy(sk, ptr) \
({ \
uintptr_t __tmp = (uintptr_t)(ptr); \
WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \
rcu_assign_pointer(__sk_user_data((sk)), \
__tmp | SK_USER_DATA_NOCOPY); \
})
__rcu_assign_sk_user_data_with_flags(sk, ptr, 0)

/*
* SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
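A short usage sketch of the two flag-aware helpers introduced above, modelled on the sk_psock changes later in this series (the example_* names are illustrative):

/* Store a pointer tagged as a psock, and read it back only when the
 * PSOCK bit is present; otherwise NULL is returned. */
static inline void example_attach_psock(struct sock *sk, void *psock)
{
	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
}

static inline void *example_lookup_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_PSOCK);
}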
@@ -1786,11 +1786,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
/* Try to disarm and disable this/parent probe */
if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
/*
* If kprobes_all_disarmed is set, orig_p
* should have already been disarmed, so
* skip unneed disarming process.
* Don't be lazy here. Even if 'kprobes_all_disarmed'
* is false, 'orig_p' might not have been armed yet.
* Note arm_all_kprobes() __tries__ to arm all kprobes
* on the best effort basis.
*/
if (!kprobes_all_disarmed) {
if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
ret = disarm_kprobe(orig_p, true);
if (ret) {
p->flags &= ~KPROBE_FLAG_DISABLED;
@@ -2899,6 +2899,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)

ftrace_startup_enable(command);

/*
* If ftrace is in an undefined state, we just remove ops from list
* to prevent the NULL pointer, instead of totally rolling it back and
* free trampoline, because those actions could cause further damage.
*/
if (unlikely(ftrace_disabled)) {
__unregister_ftrace_function(ops);
return -ENODEV;
}

ops->flags &= ~FTRACE_OPS_FL_ADDING;

return 0;
@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA

config CRYPTO_LIB_CHACHA_GENERIC
tristate
select XOR_BLOCKS
help
This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a
@@ -46,8 +46,8 @@ static inline bool vdso_cycles_ok(u64 cycles)
#endif

#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_timens_vdso_data();
const struct timens_offset *offs = &vdns->offset[clk];
@@ -97,8 +97,8 @@ static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
return -EINVAL;
}
@@ -159,8 +159,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
}

#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_timens_vdso_data();
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
@@ -188,8 +188,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
return -1;
}
mm/mmap.c (12 changes)
@@ -2773,6 +2773,18 @@ static void unmap_region(struct mm_struct *mm,
tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);

/*
* Ensure we have no stale TLB entries by the time this mapping is
* removed from the rmap.
* Note that we don't have to worry about nested flushes here because
* we're holding the mm semaphore for removing the mapping - so any
* concurrent flush in this region has to be coming through the rmap,
* and we synchronize against that using the rmap lock.
*/
if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
tlb_flush_mmu(&tlb);

free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb, start, end);
mm/rmap.c (29 changes)
@@ -91,7 +91,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
anon_vma->degree = 1; /* Reference for first vma */
anon_vma->num_children = 0;
anon_vma->num_active_vmas = 0;
anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
@@ -199,6 +200,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
anon_vma->num_children++; /* self-parent link for new root */
allocated = anon_vma;
}

@@ -208,8 +210,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
/* vma reference or self-parent link for new root */
anon_vma->degree++;
anon_vma->num_active_vmas++;
allocated = NULL;
avc = NULL;
}
@@ -294,19 +295,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma_chain_link(dst, avc, anon_vma);

/*
* Reuse existing anon_vma if its degree lower than two,
* that means it has no vma and only one anon_vma child.
* Reuse existing anon_vma if it has no vma and only one
* anon_vma child.
*
* Do not chose parent anon_vma, otherwise first child
* will always reuse it. Root anon_vma is never reused:
* Root anon_vma is never reused:
* it has self-parent reference and at least one child.
*/
if (!dst->anon_vma && src->anon_vma &&
anon_vma != src->anon_vma && anon_vma->degree < 2)
anon_vma->num_children < 2 &&
anon_vma->num_active_vmas == 0)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
dst->anon_vma->degree++;
dst->anon_vma->num_active_vmas++;
unlock_anon_vma_root(root);
return 0;

@@ -356,6 +357,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
anon_vma->num_active_vmas++;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
@@ -376,7 +378,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma->parent->degree++;
anon_vma->parent->num_children++;
anon_vma_unlock_write(anon_vma);

return 0;
@@ -408,7 +410,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* to free them outside the lock.
*/
if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
anon_vma->parent->degree--;
anon_vma->parent->num_children--;
continue;
}

@@ -416,7 +418,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
anon_vma_chain_free(avc);
}
if (vma->anon_vma)
vma->anon_vma->degree--;
vma->anon_vma->num_active_vmas--;
unlock_anon_vma_root(root);

/*
@@ -427,7 +429,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;

VM_WARN_ON(anon_vma->degree);
VM_WARN_ON(anon_vma->num_children);
VM_WARN_ON(anon_vma->num_active_vmas);
put_anon_vma(anon_vma);

list_del(&avc->same_vma);
@@ -1988,11 +1988,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
c = l2cap_chan_hold_unless_zero(c);
if (c) {
read_unlock(&chan_list_lock);
return c;
}
if (!l2cap_chan_hold_unless_zero(c))
continue;

read_unlock(&chan_list_lock);
return c;
}

/* Closest match */
@@ -441,6 +441,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

if (!skb->len)
return -EINVAL;

if (!__skb)
return 0;
@@ -4102,6 +4102,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
bool again = false;

skb_reset_mac_header(skb);
skb_assert_len(skb);

if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
@@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
{
struct sk_buff_head tmp;
unsigned long flags;
struct sk_buff *skb;

while ((skb = skb_dequeue(list)) != NULL) {
skb_queue_head_init(&tmp);
spin_lock_irqsave(&list->lock, flags);
skb = skb_peek(list);
while (skb != NULL) {
struct sk_buff *skb_next = skb_peek_next(skb, list);
if (net == NULL || net_eq(dev_net(skb->dev), net)) {
__skb_unlink(skb, list);
__skb_queue_tail(&tmp, skb);
}
skb = skb_next;
}
spin_unlock_irqrestore(&list->lock, flags);

while ((skb = __skb_dequeue(&tmp))) {
dev_put(skb->dev);
kfree_skb(skb);
}
@@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev, skip_perm);
pneigh_ifdown_and_unlock(tbl, dev);

del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
if (skb_queue_empty_lockless(&tbl->proxy_queue))
del_timer_sync(&tbl->proxy_timer);
return 0;
}

@@ -1743,7 +1758,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
/* It is not clean... Fix it to unload IPv6 module safely */
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
pneigh_queue_purge(&tbl->proxy_queue, NULL);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
pr_crit("neighbour leakage\n");
@@ -612,7 +612,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
refcount_set(&psock->refcnt, 1);

rcu_assign_sk_user_data_nocopy(sk, psock);
__rcu_assign_sk_user_data_with_flags(sk, psock,
SK_USER_DATA_NOCOPY |
SK_USER_DATA_PSOCK);
sock_hold(sk);

out:
@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES

config NF_CONNTRACK_PROCFS
bool "Supply CT list in procfs (OBSOLETE)"
default y
depends on PROC_FS
help
This option enables for the list of known conntrack entries
@@ -2986,8 +2986,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (err)
goto out_free;

if (sock->type == SOCK_RAW &&
!dev_validate_header(dev, skb->data, len)) {
if ((sock->type == SOCK_RAW &&
!dev_validate_header(dev, skb->data, len)) || !skb->len) {
err = -EINVAL;
goto out_free;
}
@@ -93,8 +93,7 @@ obj := $(KBUILD_EXTMOD)
src := $(obj)

# Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
$(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)

# modpost option for external modules
MODPOST += -e