This is the 5.4.191 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmJpLh8ACgkQONu9yGCS
aT4c2RAAipfvQHTVXY0hA9eXQUE9JVZQoKsh/m/SF5Q46oADN8y/JDwMEhbyrE5R
tyOxSyXWTZ6gIgjevqG0FnRfH2E1E++0rH0l3snCDCPSq11LoK+rV7K1tWIm6nJQ
AMgc/ooWgI9Ah4PfVei2hEvHy+Ejho8YNs+aw9wA3z95kySUE2PmNpwIkSluN3wr
dH5jqi4J7xzc+DSU/hI24PFPdW4TQjYbw0D6a4HJAm4cbv7lHDRwN/Y1OTMfmKT4
A3pG6ITTCAC9oQeLAu786fJgK+RFdMHj9VPgRZdZK18SiQ5jSJlGPetqklCcrL/7
kR3hMl1tHR6NldNyaCTsqiAJXngbz5oIZh+zt8a1QMm7TtcAd1Zktp8Kt/ommWqs
jv3IsZmcZ2VNhfcRy+yj8b20Yc+IrwG5An+5U4I7Rt236GmWB3GcZkV9QTSd9k+Y
hFN/LU3p8T2T7v9kddsnofm8cnTmc6C6aTpfSQYjrbT3sJ5Glok1saYX8uYffLN+
7Q+UfgLfTELr7JLZqdLtcasyZIkQvGR6HQsoxyrB5lbMy77t5eedjheu+ai5Rl6j
3yM3o0xKYV6O5lrFK0PS4IcagCpwPsZX6ZwB4fnGa1Zpd2s1axAINrPyHTKYsIX5
H4B0daJltyuUB7XQqLVwJQFgAEKtEMaSVno+B8EVwPkcBz4AYd0=
=FAKD
-----END PGP SIGNATURE-----

Merge 5.4.191 into android11-5.4-lts

Changes in 5.4.191
    etherdevice: Adjust ether_addr* prototypes to silence -Wstringop-overead
    mm: page_alloc: fix building error on -Werror=array-compare
    tracing: Dump stacktrace trigger to the corresponding instance
    can: usb_8dev: usb_8dev_start_xmit(): fix double dev_kfree_skb() in error path
    gfs2: assign rgrp glock before compute_bitstructs
    tcp: fix race condition when creating child sockets from syncookies
    net/sched: cls_u32: fix netns refcount changes in u32_change()
    tcp: Fix potential use-after-free due to double kfree()
    ALSA: usb-audio: Clear MIDI port active flag after draining
    ASoC: atmel: Remove system clock tree configuration for at91sam9g20ek
    ASoC: msm8916-wcd-digital: Check failure for devm_snd_soc_register_component
    dmaengine: imx-sdma: Fix error checking in sdma_event_remap
    dmaengine: mediatek:Fix PM usage reference leak of mtk_uart_apdma_alloc_chan_resources
    igc: Fix infinite loop in release_swfw_sync
    igc: Fix BUG: scheduling while atomic
    rxrpc: Restore removed timer deletion
    net/smc: Fix sock leak when release after smc_shutdown()
    net/packet: fix packet_sock xmit return value checking
    net/sched: cls_u32: fix possible leak in u32_init_knode()
    l3mdev: l3mdev_master_upper_ifindex_by_index_rcu should be using netdev_master_upper_dev_get_rcu
    netlink: reset network and mac headers in netlink_dump()
    selftests: mlxsw: vxlan_flooding: Prevent flooding of unwanted packets
    ARM: vexpress/spc: Avoid negative array index when !SMP
    reset: tegra-bpmp: Restore Handle errors in BPMP response
    platform/x86: samsung-laptop: Fix an unsigned comparison which can never be negative
    ALSA: usb-audio: Fix undefined behavior due to shift overflowing the constant
    vxlan: fix error return code in vxlan_fdb_append
    cifs: Check the IOCB_DIRECT flag, not O_DIRECT
    mt76: Fix undefined behavior due to shift overflowing the constant
    brcmfmac: sdio: Fix undefined behavior due to shift overflowing the constant
    dpaa_eth: Fix missing of_node_put in dpaa_get_ts_info()
    drm/msm/mdp5: check the return of kzalloc()
    net: macb: Restart tx only if queue pointer is lagging
    scsi: qedi: Fix failed disconnect handling
    stat: fix inconsistency between struct stat and struct compat_stat
    EDAC/synopsys: Read the error count from the correct register
    oom_kill.c: futex: delay the OOM reaper to allow time for proper futex cleanup
    ata: pata_marvell: Check the 'bmdma_addr' beforing reading
    dma: at_xdmac: fix a missing check on list iterator
    drm/panel/raspberrypi-touchscreen: Avoid NULL deref if not initialised
    drm/panel/raspberrypi-touchscreen: Initialise the bridge in prepare
    KVM: PPC: Fix TCE handling for VFIO
    drm/vc4: Use pm_runtime_resume_and_get to fix pm_runtime_get_sync() usage
    powerpc/perf: Fix power9 event alternatives
    xtensa: patch_text: Fixup last cpu should be master
    xtensa: fix a7 clobbering in coprocessor context load/store
    openvswitch: fix OOB access in reserve_sfa_size()
    ASoC: soc-dapm: fix two incorrect uses of list iterator
    e1000e: Fix possible overflow in LTR decoding
    ARC: entry: fix syscall_trace_exit argument
    arm_pmu: Validate single/group leader events
    ext4: fix symlink file size not match to file content
    ext4: fix use-after-free in ext4_search_dir
    ext4: limit length to bitmap_maxbytes - blocksize in punch_hole
    ext4, doc: fix incorrect h_reserved size
    ext4: fix overhead calculation to account for the reserved gdt blocks
    ext4: force overhead calculation if the s_overhead_cluster makes no sense
    jbd2: fix a potential race while discarding reserved buffers after an abort
    spi: atmel-quadspi: Fix the buswidth adjustment between spi-mem and controller
    staging: ion: Prevent incorrect reference counting behavour
    block/compat_ioctl: fix range check in BLKGETSIZE
    Revert "net: micrel: fix KS8851_MLL Kconfig"
    Linux 5.4.191

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Id8dee2348cd339ea32e592787839af337292ad17
@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
     - Checksum of the extended attribute block.
   * - 0x14
     - \_\_u32
-    - h\_reserved[2]
+    - h\_reserved[3]
     - Zero.
 
 The checksum is calculated against the FS UUID, the 64-bit block number
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 190
+SUBLEVEL = 191
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -199,6 +199,7 @@ tracesys_exit:
 	st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
 
 	;POST Sys Call Ptrace Hook
+	mov r0, sp		; pt_regs needed
 	bl  @syscall_trace_exit
 	b   ret_from_exception ; NOT ret_from_system_call at is saves r0 which
 	                       ; we'd done before calling post hook above
@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
 		}
 
 		cluster = topology_physical_package_id(cpu_dev->id);
-		if (init_opp_table[cluster])
+		if (cluster < 0 || init_opp_table[cluster])
 			continue;
 
 		if (ve_init_opp_table(cpu_dev))
@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
 	tbl[idx % TCES_PER_PAGE] = tce;
 }
 
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
-		unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+		struct iommu_table *tbl, unsigned long entry)
 {
-	unsigned long hpa = 0;
-	enum dma_data_direction dir = DMA_NONE;
+	unsigned long i;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
 
-	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+	for (i = 0; i < subpages; ++i) {
+		unsigned long hpa = 0;
+		enum dma_data_direction dir = DMA_NONE;
+
+		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+	}
 }
 
 static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
 			break;
 	}
 
+	iommu_tce_kill(tbl, io_entry, subpages);
+
 	return ret;
 }
 
@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
 			break;
 	}
 
+	iommu_tce_kill(tbl, io_entry, subpages);
+
 	return ret;
 }
 
@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
 					entry, ua, dir);
 
-		iommu_tce_kill(stit->tbl, entry, 1);
-
 		if (ret != H_SUCCESS) {
-			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
 			goto unlock_exit;
 		}
 	}
@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 */
 		if (get_user(tce, tces + i)) {
 			ret = H_TOO_HARD;
-			goto invalidate_exit;
+			goto unlock_exit;
 		}
 		tce = be64_to_cpu(tce);
 
 		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
 			ret = H_PARAMETER;
-			goto invalidate_exit;
+			goto unlock_exit;
 		}
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 					iommu_tce_direction(tce));
 
 			if (ret != H_SUCCESS) {
-				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
-						entry);
-				goto invalidate_exit;
+				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+						entry + i);
+				goto unlock_exit;
 			}
 		}
 
 		kvmppc_tce_put(stt, entry + i, tce);
 	}
 
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill(stit->tbl, entry, npages);
-
 unlock_exit:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 				continue;
 
 			if (ret == H_TOO_HARD)
-				goto invalidate_exit;
+				return ret;
 
 			WARN_ON_ONCE(1);
-			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
 		}
 	}
 
 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
 		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
 
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
@@ -251,13 +251,19 @@ extern void iommu_tce_kill_rm(struct iommu_table *tbl,
 	tbl->it_ops->tce_kill(tbl, entry, pages, true);
 }
 
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
-		unsigned long entry)
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
+		struct iommu_table *tbl, unsigned long entry)
 {
-	unsigned long hpa = 0;
-	enum dma_data_direction dir = DMA_NONE;
+	unsigned long i;
+	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
 
-	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
+	for (i = 0; i < subpages; ++i) {
+		unsigned long hpa = 0;
+		enum dma_data_direction dir = DMA_NONE;
+
+		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
+	}
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -320,6 +326,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
 			break;
 	}
 
+	iommu_tce_kill_rm(tbl, io_entry, subpages);
+
 	return ret;
 }
 
@@ -383,6 +391,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
 			break;
 	}
 
+	iommu_tce_kill_rm(tbl, io_entry, subpages);
+
 	return ret;
 }
 
@@ -428,10 +438,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
 					stit->tbl, entry, ua, dir);
 
-		iommu_tce_kill_rm(stit->tbl, entry, 1);
-
 		if (ret != H_SUCCESS) {
-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
 			return ret;
 		}
 	}
@@ -571,7 +579,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		ua = 0;
 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
 			ret = H_PARAMETER;
-			goto invalidate_exit;
+			goto unlock_exit;
 		}
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -580,19 +588,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 					iommu_tce_direction(tce));
 
 			if (ret != H_SUCCESS) {
-				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
-						entry);
-				goto invalidate_exit;
+				kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
+						entry + i);
+				goto unlock_exit;
 			}
 		}
 
 		kvmppc_rm_tce_put(stt, entry + i, tce);
 	}
 
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill_rm(stit->tbl, entry, npages);
-
 unlock_exit:
 	if (rmap)
 		unlock_rmap(rmap);
@@ -635,20 +639,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 				continue;
 
 			if (ret == H_TOO_HARD)
-				goto invalidate_exit;
+				return ret;
 
 			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
 		}
 	}
 
 	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
 		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
 
-invalidate_exit:
-	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
-
 	return ret;
 }
@@ -131,11 +131,11 @@ int p9_dd22_bl_ev[] = {
 
 /* Table of alternatives, sorted by column 0 */
 static const unsigned int power9_event_alternatives[][MAX_ALT] = {
+	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
 	{ PM_INST_DISP,			PM_INST_DISP_ALT },
 	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
-	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
 	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
-	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
+	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
 };
 
 static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
@@ -31,15 +31,13 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
 typedef u64 __attribute__((aligned(4))) compat_u64;
 
 struct compat_stat {
-	compat_dev_t	st_dev;
-	u16		__pad1;
+	u32		st_dev;
 	compat_ino_t	st_ino;
 	compat_mode_t	st_mode;
 	compat_nlink_t	st_nlink;
 	__compat_uid_t	st_uid;
 	__compat_gid_t	st_gid;
-	compat_dev_t	st_rdev;
-	u16		__pad2;
+	u32		st_rdev;
 	u32		st_size;
 	u32		st_blksize;
 	u32		st_blocks;
@@ -37,7 +37,7 @@
 	.if XTENSA_HAVE_COPROCESSOR(x);					\
 		.align 4;						\
 	.Lsave_cp_regs_cp##x:						\
-		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
+		xchal_cp##x##_store a2 a3 a4 a5 a6;			\
 		jx	a0;						\
 	.endif
 
@@ -54,7 +54,7 @@
 	.if XTENSA_HAVE_COPROCESSOR(x);					\
 		.align 4;						\
 	.Lload_cp_regs_cp##x:						\
-		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
+		xchal_cp##x##_load a2 a3 a4 a5 a6;			\
 		jx	a0;						\
 	.endif
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
 {
 	struct patch *patch = data;
 
-	if (atomic_inc_return(&patch->cpu_count) == 1) {
+	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
 		local_patch_text(patch->addr, patch->data, patch->sz);
 		atomic_inc(&patch->cpu_count);
 	} else {
@@ -393,7 +393,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return 0;
 	case BLKGETSIZE:
 		size = i_size_read(bdev->bd_inode);
-		if ((size >> 9) > ~0UL)
+		if ((size >> 9) > ~(compat_ulong_t)0)
 			return -EFBIG;
 		return compat_put_ulong(arg, size >> 9);
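Why the cast matters: on an LP64 kernel ~0UL is a 64-bit all-ones value, so the old overflow check could never fire for a 32-bit compat caller. A minimal stand-alone sketch of the arithmetic (the 3 TiB device size and the assumption that unsigned int is 32-bit are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t compat_ulong_t;   /* 32-bit userspace's unsigned long */

    int main(void)
    {
            uint64_t size = 3ULL << 40;     /* a hypothetical 3 TiB block device */
            uint64_t sectors = size >> 9;

            /* Old check: ~0UL is 64 bits wide on an LP64 kernel, never true. */
            printf("old check fires: %d\n", sectors > ~0UL);
            /* New check: compare against the largest value compat userspace
             * can hold in its unsigned long. */
            printf("new check fires: %d\n", sectors > ~(compat_ulong_t)0);
            return 0;
    }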
@@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
 	switch(ap->port_no)
 	{
 	case 0:
+		if (!ap->ioaddr.bmdma_addr)
+			return ATA_CBL_PATA_UNK;
 		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
 			return ATA_CBL_PATA40;
 		return ATA_CBL_PATA80;
@@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
-	struct at_xdmac_desc	*desc, *_desc;
+	struct at_xdmac_desc	*desc, *_desc, *iter;
 	struct list_head	*descs_list;
 	enum dma_status		ret;
 	int			residue, retry;
@@ -1505,12 +1505,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	 * microblock.
 	 */
 	descs_list = &desc->descs_list;
-	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+			desc = iter;
 			break;
+		}
 	}
 	residue += cur_ubc << dwidth;
 
 	dma_set_residue(txstate, residue);
@@ -1784,7 +1784,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
 	u32 reg, val, shift, num_map, i;
 	int ret = 0;
 
-	if (IS_ERR(np) || IS_ERR(gpr_np))
+	if (IS_ERR(np) || !gpr_np)
 		goto out;
 
 	event_remap = of_find_property(np, propname, NULL);
@@ -1832,7 +1832,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
 	}
 
 out:
-	if (!IS_ERR(gpr_np))
+	if (gpr_np)
 		of_node_put(gpr_np);
 
 	return ret;
@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
 	unsigned int status;
 	int ret;
 
-	ret = pm_runtime_get_sync(mtkd->ddev.dev);
+	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
 	if (ret < 0) {
 		pm_runtime_put_noidle(chan->device->dev);
 		return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
 	ret = readx_poll_timeout(readl, c->base + VFF_EN,
 			  status, !status, 10, 100);
 	if (ret)
-		return ret;
+		goto err_pm;
 
 	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
 			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
 	if (ret < 0) {
 		dev_err(chan->device->dev, "Can't request dma IRQ\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_pm;
 	}
 
 	if (mtkd->support_33bits)
 		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
 
+err_pm:
+	pm_runtime_put_noidle(mtkd->ddev.dev);
 	return ret;
 }
@@ -163,6 +163,11 @@
 #define ECC_STAT_CECNT_SHIFT		8
 #define ECC_STAT_BITNUM_MASK		0x7F
 
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK		0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT		16
+#define ECC_ERRCNT_CECNT_MASK		0xFFFF
+
 /* DDR QOS Interrupt register definitions */
 #define DDR_QOS_IRQ_STAT_OFST		0x20200
 #define DDR_QOSUE_MASK			0x4
@@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
 	base = priv->baseaddr;
 	p = &priv->stat;
 
+	regval = readl(base + ECC_ERRCNT_OFST);
+	p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+	p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+	if (!p->ce_cnt)
+		goto ue_err;
+
 	regval = readl(base + ECC_STAT_OFST);
 	if (!regval)
 		return 1;
 
-	p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
-	p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
-	if (!p->ce_cnt)
-		goto ue_err;
-
 	p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
 
 	regval = readl(base + ECC_CEADDR0_OFST);
@@ -179,7 +179,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
 		drm_framebuffer_put(plane->state->fb);
 
 	kfree(to_mdp5_plane_state(plane->state));
+	plane->state = NULL;
 	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return;
 
 	/* assign default blend parameters */
 	mdp5_state->alpha = 255;
@@ -232,7 +232,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
 
 	ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
 	if (ret)
-		dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+		dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
 }
 
 static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -268,7 +268,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
 	return 0;
 }
 
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
 {
 	struct rpi_touchscreen *ts = panel_to_ts(panel);
 	int i;
@@ -298,6 +298,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
 	rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
 	msleep(100);
 
+	return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+	struct rpi_touchscreen *ts = panel_to_ts(panel);
+
 	/* Turn on the backlight. */
 	rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
 
@@ -352,7 +359,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
 static const struct drm_panel_funcs rpi_touchscreen_funcs = {
 	.disable = rpi_touchscreen_disable,
 	.unprepare = rpi_touchscreen_noop,
-	.prepare = rpi_touchscreen_noop,
+	.prepare = rpi_touchscreen_prepare,
 	.enable = rpi_touchscreen_enable,
 	.get_modes = rpi_touchscreen_get_modes,
 };
@@ -831,7 +831,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 	unsigned long phy_clock;
 	int ret;
 
-	ret = pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
 	if (ret) {
 		DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
 		return;
@@ -670,9 +670,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
 	atomic_inc(&priv->active_tx_urbs);
 
 	err = usb_submit_urb(urb, GFP_ATOMIC);
-	if (unlikely(err))
-		goto failed;
-	else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+	if (unlikely(err)) {
+		can_free_echo_skb(netdev, context->echo_index);
+
+		usb_unanchor_urb(urb);
+		usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+		atomic_dec(&priv->active_tx_urbs);
+
+		if (err == -ENODEV)
+			netif_device_detach(netdev);
+		else
+			netdev_warn(netdev, "failed tx_urb %d\n", err);
+		stats->tx_dropped++;
+	} else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
 		/* Slow down tx path */
 		netif_stop_queue(netdev);
 
@@ -691,19 +702,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
 
 	return NETDEV_TX_BUSY;
 
-failed:
-	can_free_echo_skb(netdev, context->echo_index);
-
-	usb_unanchor_urb(urb);
-	usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
-
-	atomic_dec(&priv->active_tx_urbs);
-
-	if (err == -ENODEV)
-		netif_device_detach(netdev);
-	else
-		netdev_warn(netdev, "failed tx_urb %d\n", err);
-
 nomembuf:
 	usb_free_urb(urb);
 
@@ -1378,6 +1378,7 @@ static void macb_tx_restart(struct macb_queue *queue)
 	unsigned int head = queue->tx_head;
 	unsigned int tail = queue->tx_tail;
 	struct macb *bp = queue->bp;
+	unsigned int head_idx, tbqp;
 
 	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 		queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1385,6 +1386,13 @@ static void macb_tx_restart(struct macb_queue *queue)
 	if (head == tail)
 		return;
 
+	tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+	tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+	head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+	if (tbqp == head_idx)
+		return;
+
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 }
@@ -506,11 +506,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
 	info->phc_index = -1;
 
 	fman_node = of_get_parent(mac_node);
-	if (fman_node)
+	if (fman_node) {
 		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+		of_node_put(fman_node);
+	}
 
-	if (ptp_node)
+	if (ptp_node) {
 		ptp_dev = of_find_device_by_node(ptp_node);
+		of_node_put(ptp_node);
+	}
 
 	if (ptp_dev)
 		ptp = platform_get_drvdata(ptp_dev);
@@ -995,8 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
 	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
 	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
-	u16 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
-	u16 lat_enc_d = 0;	/* latency decoded */
+	u32 max_ltr_enc_d = 0;	/* maximum LTR decoded by platform */
+	u32 lat_enc_d = 0;	/* latency decoded */
 	u16 lat_enc = 0;	/* latency encoded */
 
 	if (link) {
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
 {
 	u32 swfw_sync;
 
-	while (igc_get_hw_semaphore_i225(hw))
-		; /* Empty */
+	/* Releasing the resource requires first getting the HW semaphore.
+	 * If we fail to get the semaphore, there is nothing we can do,
+	 * except log an error and quit. We are not allowed to hang here
+	 * indefinitely, as it may cause denial of service or system crash.
+	 */
+	if (igc_get_hw_semaphore_i225(hw)) {
+		hw_dbg("Failed to release SW_FW_SYNC.\n");
+		return;
+	}
 
 	swfw_sync = rd32(IGC_SW_FW_SYNC);
 	swfw_sync &= ~mask;
@@ -569,7 +569,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
 	 * the lower time out
 	 */
 	for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-		usleep_range(500, 1000);
+		udelay(50);
 		mdic = rd32(IGC_MDIC);
 		if (mdic & IGC_MDIC_READY)
 			break;
@@ -626,7 +626,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
 	 * the lower time out
 	 */
 	for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-		usleep_range(500, 1000);
+		udelay(50);
 		mdic = rd32(IGC_MDIC);
 		if (mdic & IGC_MDIC_READY)
 			break;
@@ -37,7 +37,6 @@ config KS8851
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
 	depends on HAS_IOMEM
-	depends on PTP_1588_CLOCK_OPTIONAL
 	select MII
 	---help---
 	  This platform driver is for Micrel KS8851 Address/data bus
@@ -679,11 +679,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
 	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
 	if (rd == NULL)
-		return -ENOBUFS;
+		return -ENOMEM;
 
 	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
 		kfree(rd);
-		return -ENOBUFS;
+		return -ENOMEM;
 	}
 
 	rd->remote_ip = *ip;
@@ -550,7 +550,7 @@ enum brcmf_sdio_frmtype {
 	BRCMF_SDIO_FT_SUB,
 };
 
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((unsigned int)(chip) << 16) | (pmu))
 
 /* SDIO Pad drive strength to select value mappings */
 struct sdiod_drive_str {
@@ -76,7 +76,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
 
 	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
-	mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+	mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
 
 	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
 	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
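The several "shift overflowing the constant" fixes in this release (mt76, brcmfmac, ALSA) address the same C pitfall: a bare constant such as 0xf has type signed int, and shifting a set bit into bit 31 overflows the sign bit. A minimal stand-alone sketch of the idiom:

    #include <stdio.h>

    int main(void)
    {
            /* 0xf << 28 would shift a set bit into the sign bit of a signed
             * int, which is undefined behaviour (UBSan reports it). Making
             * the constant unsigned keeps the shift well defined. */
            unsigned int mask = 0xfU << 28;

            printf("mask = 0x%x\n", mask);
            return 0;
    }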
@@ -322,6 +322,9 @@ validate_group(struct perf_event *event)
 	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
+	if (event == leader)
+		return 0;
+
 	for_each_sibling_event(sibling, leader) {
 		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
@@ -411,12 +414,7 @@ __hw_perf_event_init(struct perf_event *event)
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
 
-	if (event->group_leader != event) {
-		if (validate_group(event) != 0)
-			return -EINVAL;
-	}
-
-	return 0;
+	return validate_group(event);
 }
 
 static int armpmu_event_init(struct perf_event *event)
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
 
 	if (value > samsung->kbd_led.max_brightness)
 		value = samsung->kbd_led.max_brightness;
-	else if (value < 0)
-		value = 0;
 
 	samsung->kbd_led_wk = value;
 	queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
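The deleted branch is the whole samsung-laptop fix: value has an unsigned type, so value < 0 is always false and the clamp is dead code the compiler warns about. A tiny stand-alone sketch (the variable names are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int value = 3;         /* brightness values are unsigned */
            unsigned int max_brightness = 8;

            if (value > max_brightness)
                    value = max_brightness;
            /* An `else if (value < 0)` clamp here would be dead code: an
             * unsigned value is never negative, and -Wtype-limits flags it. */

            printf("clamped brightness: %u\n", value);
            return 0;
    }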
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
 	struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
 	struct mrq_reset_request request;
 	struct tegra_bpmp_message msg;
+	int err;
 
 	memset(&request, 0, sizeof(request));
 	request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
 	msg.tx.data = &request;
 	msg.tx.size = sizeof(request);
 
-	return tegra_bpmp_transfer(bpmp, &msg);
+	err = tegra_bpmp_transfer(bpmp, &msg);
+	if (err)
+		return err;
+	if (msg.rx.ret)
+		return -EINVAL;
+
+	return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
@@ -797,6 +797,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
 	return qedi_iscsi_send_ioreq(task);
 }
 
+static void qedi_offload_work(struct work_struct *work)
+{
+	struct qedi_endpoint *qedi_ep =
+		container_of(work, struct qedi_endpoint, offload_work);
+	struct qedi_ctx *qedi;
+	int wait_delay = 5 * HZ;
+	int ret;
+
+	qedi = qedi_ep->qedi;
+
+	ret = qedi_iscsi_offload_conn(qedi_ep);
+	if (ret) {
+		QEDI_ERR(&qedi->dbg_ctx,
+			 "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+			 qedi_ep->iscsi_cid, qedi_ep, ret);
+		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+		return;
+	}
+
+	ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+					       (qedi_ep->state ==
+					       EP_STATE_OFLDCONN_COMPL),
+					       wait_delay);
+	if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+		QEDI_ERR(&qedi->dbg_ctx,
+			 "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+			 qedi_ep->iscsi_cid, qedi_ep);
+	}
+}
+
 static struct iscsi_endpoint *
 qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 		int non_blocking)
@@ -840,6 +871,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 	}
 	qedi_ep = ep->dd_data;
 	memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+	INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
 	qedi_ep->state = EP_STATE_IDLE;
 	qedi_ep->iscsi_cid = (u32)-1;
 	qedi_ep->qedi = qedi;
@@ -996,12 +1028,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
 	qedi_ep = ep->dd_data;
 	qedi = qedi_ep->qedi;
 
+	flush_work(&qedi_ep->offload_work);
+
 	if (qedi_ep->state == EP_STATE_OFLDCONN_START)
 		goto ep_exit_recover;
 
-	if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
-		flush_work(&qedi_ep->offload_work);
-
 	if (qedi_ep->conn) {
 		qedi_conn = qedi_ep->conn;
 		conn = qedi_conn->cls_conn->dd_data;
@@ -1161,37 +1192,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
 	return rc;
 }
 
-static void qedi_offload_work(struct work_struct *work)
-{
-	struct qedi_endpoint *qedi_ep =
-		container_of(work, struct qedi_endpoint, offload_work);
-	struct qedi_ctx *qedi;
-	int wait_delay = 5 * HZ;
-	int ret;
-
-	qedi = qedi_ep->qedi;
-
-	ret = qedi_iscsi_offload_conn(qedi_ep);
-	if (ret) {
-		QEDI_ERR(&qedi->dbg_ctx,
-			 "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
-			 qedi_ep->iscsi_cid, qedi_ep, ret);
-		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-		return;
-	}
-
-	ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
-					       (qedi_ep->state ==
-					       EP_STATE_OFLDCONN_COMPL),
-					       wait_delay);
-	if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
-		qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-		QEDI_ERR(&qedi->dbg_ctx,
-			 "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
-			 qedi_ep->iscsi_cid, qedi_ep);
-	}
-}
-
 static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 {
 	struct qedi_ctx *qedi;
@@ -1307,7 +1307,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 			  qedi_ep->dst_addr, qedi_ep->dst_port);
 	}
 
-	INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
 	queue_work(qedi->offload_thread, &qedi_ep->offload_work);
 
 	ret = 0;
@@ -202,6 +202,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
 static bool atmel_qspi_supports_op(struct spi_mem *mem,
 				   const struct spi_mem_op *op)
 {
+	if (!spi_mem_default_supports_op(mem, op))
+		return false;
+
 	if (atmel_qspi_find_mode(op) < 0)
 		return false;
 
@@ -249,6 +249,9 @@ void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 	void *vaddr;
 
 	if (buffer->kmap_cnt) {
+		if (buffer->kmap_cnt == INT_MAX)
+			return ERR_PTR(-EOVERFLOW);
+
 		buffer->kmap_cnt++;
 		return buffer->vaddr;
 	}
@@ -889,7 +889,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 	ssize_t rc;
 	struct inode *inode = file_inode(iocb->ki_filp);
 
-	if (iocb->ki_filp->f_flags & O_DIRECT)
+	if (iocb->ki_flags & IOCB_DIRECT)
 		return cifs_user_readv(iocb, iter);
 
 	rc = cifs_revalidate_mapping(inode);
@@ -1959,6 +1959,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
  * Structure of a directory entry
  */
 #define EXT4_NAME_LEN 255
+/*
+ * Base length of the ext4 directory entry excluding the name length
+ */
+#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
 
 struct ext4_dir_entry {
 	__le32	inode;			/* Inode number */
@@ -4365,7 +4365,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
-	loff_t first_block_offset, last_block_offset;
+	loff_t first_block_offset, last_block_offset, max_length;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	handle_t *handle;
 	unsigned int credits;
 	int ret = 0;
@@ -4411,6 +4412,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 			   offset;
 	}
 
+	/*
+	 * For punch hole the length + offset needs to be within one block
+	 * before last range. Adjust the length if it goes beyond that limit.
+	 */
+	max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+	if (offset + length > max_length)
+		length = max_length - offset;
+
 	if (offset & (sb->s_blocksize - 1) ||
 	    (offset + length) & (sb->s_blocksize - 1)) {
 		/*
@@ -1462,10 +1462,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 
 	de = (struct ext4_dir_entry_2 *)search_buf;
 	dlimit = search_buf + buf_size;
-	while ((char *) de < dlimit) {
+	while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
 		/* this code is executed quadratically often */
 		/* do minimal checking `by hand' */
-		if ((char *) de + de->name_len <= dlimit &&
+		if (de->name + de->name_len <= dlimit &&
 		    ext4_match(dir, fname, de)) {
 			/* found a match - just to be sure, do
 			 * a full check */
@@ -100,8 +100,10 @@ static void ext4_finish_bio(struct bio *bio)
 				continue;
 			}
 			clear_buffer_async_write(bh);
-			if (bio->bi_status)
+			if (bio->bi_status) {
 				set_buffer_write_io_error(bh);
+				buffer_io_error(bh);
+			}
 		} while ((bh = bh->b_this_page) != head);
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
 		local_irq_restore(flags);
@@ -3551,9 +3551,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
 	ext4_fsblk_t		first_block, last_block, b;
 	ext4_group_t		i, ngroups = ext4_get_groups_count(sb);
 	int			s, j, count = 0;
+	int			has_super = ext4_bg_has_super(sb, grp);
 
 	if (!ext4_has_feature_bigalloc(sb))
-		return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+		return (has_super + ext4_bg_num_gdb(sb, grp) +
+			(has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
 			sbi->s_itb_per_group + 2);
 
 	first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
@@ -4572,9 +4574,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	 * Get the # of file system overhead blocks from the
 	 * superblock if present.
 	 */
-	if (es->s_overhead_clusters)
-		sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
-	else {
+	sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+	/* ignore the precalculated value if it is ridiculous */
+	if (sbi->s_overhead > ext4_blocks_count(es))
+		sbi->s_overhead = 0;
+	/*
+	 * If the bigalloc feature is not enabled recalculating the
+	 * overhead doesn't take long, so we might as well just redo
+	 * it to make sure we are using the correct value.
+	 */
+	if (!ext4_has_feature_bigalloc(sb))
+		sbi->s_overhead = 0;
+	if (sbi->s_overhead == 0) {
 		err = ext4_calculate_overhead(sb);
 		if (err)
 			goto failed_mount_wq;
@@ -925,15 +925,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
 	spin_lock_init(&rgd->rd_rsspin);
 
-	error = compute_bitstructs(rgd);
-	if (error)
-		goto fail;
-
 	error = gfs2_glock_get(sdp, rgd->rd_addr,
 			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
 	if (error)
 		goto fail;
 
+	error = compute_bitstructs(rgd);
+	if (error)
+		goto fail_glock;
+
 	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
 	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
 	if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -950,6 +950,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	}
 
 	error = 0; /* someone else read in the rgrp; free it and ignore it */
+fail_glock:
 	gfs2_glock_put(rgd->rd_gl);
 
 fail:
@@ -451,7 +451,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	}
 	spin_unlock(&commit_transaction->t_handle_lock);
 	commit_transaction->t_state = T_SWITCH;
-	write_unlock(&journal->j_state_lock);
 
 	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
 		  journal->j_max_transaction_buffers);
@@ -471,6 +470,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	 * has reserved.  This is consistent with the existing behaviour
 	 * that multiple jbd2_journal_get_write_access() calls to the same
 	 * buffer are perfectly permissible.
+	 * We use journal->j_state_lock here to serialize processing of
+	 * t_reserved_list with eviction of buffers from journal_unmap_buffer().
 	 */
 	while (commit_transaction->t_reserved_list) {
 		jh = commit_transaction->t_reserved_list;
@@ -490,6 +491,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		jbd2_journal_refile_buffer(journal, jh);
 	}
 
+	write_unlock(&journal->j_state_lock);
 	/*
 	 * Now try to drop any written-back buffers from the journal's
 	 * checkpoint lists.  We do this *before* commit because it potentially
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -290,9 +290,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
 #  define choose_32_64(a,b) b
 #endif
 
-#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
-
 #ifndef INIT_STRUCT_STAT_PADDING
 #  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
 #endif
@@ -301,7 +298,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 {
 	struct stat tmp;
 
-	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
+	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+		return -EOVERFLOW;
+	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
 		return -EOVERFLOW;
 #if BITS_PER_LONG == 32
 	if (stat->size > MAX_NON_LFS)
@@ -309,7 +308,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 #endif
 
 	INIT_STRUCT_STAT_PADDING(tmp);
-	tmp.st_dev = encode_dev(stat->dev);
+	tmp.st_dev = new_encode_dev(stat->dev);
 	tmp.st_ino = stat->ino;
 	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
 		return -EOVERFLOW;
@@ -319,7 +318,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 		return -EOVERFLOW;
 	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
 	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-	tmp.st_rdev = encode_dev(stat->rdev);
+	tmp.st_rdev = new_encode_dev(stat->rdev);
 	tmp.st_size = stat->size;
 	tmp.st_atime = stat->atime.tv_sec;
 	tmp.st_mtime = stat->mtime.tv_sec;
@@ -593,11 +592,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 {
 	struct compat_stat tmp;
 
-	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+		return -EOVERFLOW;
+	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
 		return -EOVERFLOW;
 
 	memset(&tmp, 0, sizeof(tmp));
-	tmp.st_dev = old_encode_dev(stat->dev);
+	tmp.st_dev = new_encode_dev(stat->dev);
 	tmp.st_ino = stat->ino;
 	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
 		return -EOVERFLOW;
@@ -607,7 +608,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 		return -EOVERFLOW;
 	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
 	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-	tmp.st_rdev = old_encode_dev(stat->rdev);
+	tmp.st_rdev = new_encode_dev(stat->rdev);
 	if ((u64) stat->size > MAX_NON_LFS)
 		return -EOVERFLOW;
 	tmp.st_size = stat->size;
@@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
 #endif
 }
 
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
 #ifdef __BIG_ENDIAN
@@ -341,8 +341,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
  * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
  */
 
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
-					   const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
 	u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
@@ -1271,6 +1271,7 @@ struct task_struct {
 	int				pagefault_disabled;
 #ifdef CONFIG_MMU
 	struct task_struct		*oom_reaper_list;
+	struct timer_list		oom_reaper_timer;
 #endif
 #ifdef CONFIG_VMAP_STACK
 	struct vm_struct		*stack_vm_area;
@@ -247,8 +247,9 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
 			 unsigned long high_limit);
 int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
 
-bool inet_ehash_insert(struct sock *sk, struct sock *osk);
-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
+bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
+			 bool *found_dup_sk);
 int __inet_hash(struct sock *sk, struct sock *osk);
 int inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
@@ -1219,6 +1219,13 @@ static void
 stacktrace_trigger(struct event_trigger_data *data, void *rec,
 		   struct ring_buffer_event *event)
 {
-	trace_dump_stack(STACK_SKIP);
+	struct trace_event_file *file = data->private_data;
+	unsigned long flags;
+
+	if (file) {
+		local_save_flags(flags);
+		__trace_stack(file->tr, flags, STACK_SKIP, preempt_count());
+	} else
+		trace_dump_stack(STACK_SKIP);
 }
@@ -631,7 +631,7 @@ static void oom_reap_task(struct task_struct *tsk)
 	 */
 	set_bit(MMF_OOM_SKIP, &mm->flags);
 
-	/* Drop a reference taken by wake_oom_reaper */
+	/* Drop a reference taken by queue_oom_reaper */
 	put_task_struct(tsk);
 }
 
@@ -641,12 +641,12 @@ static int oom_reaper(void *unused)
 		struct task_struct *tsk = NULL;
 
 		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
-		spin_lock(&oom_reaper_lock);
+		spin_lock_irq(&oom_reaper_lock);
 		if (oom_reaper_list != NULL) {
 			tsk = oom_reaper_list;
 			oom_reaper_list = tsk->oom_reaper_list;
 		}
-		spin_unlock(&oom_reaper_lock);
+		spin_unlock_irq(&oom_reaper_lock);
 
 		if (tsk)
 			oom_reap_task(tsk);
@@ -655,20 +655,46 @@ static int oom_reaper(void *unused)
 	return 0;
 }
 
-static void wake_oom_reaper(struct task_struct *tsk)
+static void wake_oom_reaper(struct timer_list *timer)
 {
-	/* mm is already queued? */
-	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
-		return;
+	struct task_struct *tsk = container_of(timer, struct task_struct,
+					       oom_reaper_timer);
+	struct mm_struct *mm = tsk->signal->oom_mm;
+	unsigned long flags;
 
-	get_task_struct(tsk);
+	/* The victim managed to terminate on its own - see exit_mmap */
+	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+		put_task_struct(tsk);
+		return;
+	}
 
-	spin_lock(&oom_reaper_lock);
+	spin_lock_irqsave(&oom_reaper_lock, flags);
 	tsk->oom_reaper_list = oom_reaper_list;
 	oom_reaper_list = tsk;
-	spin_unlock(&oom_reaper_lock);
+	spin_unlock_irqrestore(&oom_reaper_lock, flags);
 	trace_wake_reaper(tsk->pid);
 	wake_up(&oom_reaper_wait);
 }
 
+/*
+ * Give the OOM victim time to exit naturally before invoking the oom_reaping.
+ * The timers timeout is arbitrary... the longer it is, the longer the worst
+ * case scenario for the OOM can take. If it is too small, the oom_reaper can
+ * get in the way and release resources needed by the process exit path.
+ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
+ * before the exit path is able to wake the futex waiters.
+ */
+#define OOM_REAPER_DELAY (2*HZ)
+static void queue_oom_reaper(struct task_struct *tsk)
+{
+	/* mm is already queued? */
+	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+		return;
+
+	get_task_struct(tsk);
+
+	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
+	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+	add_timer(&tsk->oom_reaper_timer);
+}
+
 static int __init oom_init(void)
@@ -678,7 +704,7 @@ static int __init oom_init(void)
 }
 subsys_initcall(oom_init)
 #else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk)
 {
 }
 #endif /* CONFIG_MMU */
@@ -927,7 +953,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 	rcu_read_unlock();
 
 	if (can_oom_reap)
-		wake_oom_reaper(victim);
+		queue_oom_reaper(victim);
 
 	mmdrop(mm);
 	put_task_struct(victim);
@@ -963,7 +989,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	task_lock(victim);
 	if (task_will_free_mem(victim)) {
 		mark_oom_victim(victim);
-		wake_oom_reaper(victim);
+		queue_oom_reaper(victim);
 		task_unlock(victim);
 		put_task_struct(victim);
 		return;
@@ -1061,7 +1087,7 @@ bool out_of_memory(struct oom_control *oc)
 	 */
 	if (task_will_free_mem(current)) {
 		mark_oom_victim(current);
-		wake_oom_reaper(current);
+		queue_oom_reaper(current);
 		return true;
 	}
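The oom_kill change above swaps an immediate reaper wakeup for a timer, so the victim gets roughly two seconds to exit on its own (and run its futex cleanup) before the reaper frees its anonymous memory. A rough userspace analogue of that grace-period pattern, using pthreads (the names and delays are assumptions for illustration, not kernel code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool victim_exited;

    static void *victim(void *arg)
    {
            (void)arg;
            usleep(100 * 1000);     /* exit-path cleanup, e.g. waking waiters */
            atomic_store(&victim_exited, 1);
            return NULL;
    }

    static void *reaper(void *arg)
    {
            (void)arg;
            sleep(2);               /* the OOM_REAPER_DELAY analogue */
            if (atomic_load(&victim_exited))
                    puts("victim exited on its own; nothing to reap");
            else
                    puts("grace period over; reclaiming the victim's memory");
            return NULL;
    }

    int main(void)
    {
            pthread_t v, r;

            pthread_create(&v, NULL, victim, NULL);
            pthread_create(&r, NULL, reaper, NULL);
            pthread_join(v, NULL);
            pthread_join(r, NULL);
            return 0;
    }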
@@ -7677,7 +7677,7 @@ void __init mem_init_print_info(const char *str)
 	 */
 #define adj_init_size(start, end, size, pos, adj) \
 	do { \
-		if (start <= pos && pos < end && size > adj) \
+		if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
 			size -= adj; \
 	} while (0)
 
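This macro's arguments are linker-symbol arrays, and GCC 12's -Warray-compare objects to comparing two arrays directly; taking &arr[0] decays each operand to an ordinary pointer. A small stand-alone sketch of the idiom (kept inside one object, since relationally comparing pointers into distinct objects is itself not well defined):

    #include <stdio.h>

    /* Stand-in for linker symbols such as __init_begin[] / __init_end[]. */
    static char region[32];
    #define region_start (region)
    #define region_end   (region + sizeof(region))

    int main(void)
    {
            char *pos = region + 8;

            /* GCC 12 with -Warray-compare warns on `array1 <= array2` style
             * comparisons; &arr[0] makes the pointer conversion explicit. */
            if (&region_start[0] <= &pos[0] && &pos[0] < &region_end[0])
                    printf("pos lies inside the region\n");
            return 0;
    }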
@@ -427,7 +427,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
-	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
 	if (*own_req)
 		ireq->ireq_opt = NULL;
 	else
@@ -538,7 +538,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 		dccp_done(newsk);
 		goto out;
 	}
-	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
 	/* Clone pktoptions received with SYN, if we own the req */
 	if (*own_req && ireq->pktopts) {
 		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
@@ -791,7 +791,7 @@ static void reqsk_queue_hash_req(struct request_sock *req,
 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
 	mod_timer(&req->rsk_timer, jiffies + timeout);
 
-	inet_ehash_insert(req_to_sk(req), NULL);
+	inet_ehash_insert(req_to_sk(req), NULL, NULL);
 	/* before letting lookups find us, make sure all req fields
 	 * are committed to memory and refcnt initialized.
 	 */
@@ -20,6 +20,9 @@
 #include <net/addrconf.h>
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/inet6_hashtables.h>
+#endif
 #include <net/secure_seq.h>
 #include <net/ip.h>
 #include <net/tcp.h>
@@ -470,10 +473,52 @@ static u32 inet_sk_port_offset(const struct sock *sk)
 					  inet->inet_dport);
 }
 
-/* insert a socket into ehash, and eventually remove another one
- * (The another one can be a SYN_RECV or TIMEWAIT
+/* Searches for an exsiting socket in the ehash bucket list.
+ * Returns true if found, false otherwise.
  */
-bool inet_ehash_insert(struct sock *sk, struct sock *osk)
+static bool inet_ehash_lookup_by_sk(struct sock *sk,
+				    struct hlist_nulls_head *list)
+{
+	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
+	const int sdif = sk->sk_bound_dev_if;
+	const int dif = sk->sk_bound_dev_if;
+	const struct hlist_nulls_node *node;
+	struct net *net = sock_net(sk);
+	struct sock *esk;
+
+	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
+
+	sk_nulls_for_each_rcu(esk, node, list) {
+		if (esk->sk_hash != sk->sk_hash)
+			continue;
+		if (sk->sk_family == AF_INET) {
+			if (unlikely(INET_MATCH(esk, net, acookie,
+						sk->sk_daddr,
+						sk->sk_rcv_saddr,
+						ports, dif, sdif))) {
+				return true;
+			}
+		}
+#if IS_ENABLED(CONFIG_IPV6)
+		else if (sk->sk_family == AF_INET6) {
+			if (unlikely(INET6_MATCH(esk, net,
+						 &sk->sk_v6_daddr,
+						 &sk->sk_v6_rcv_saddr,
+						 ports, dif, sdif))) {
+				return true;
+			}
+		}
+#endif
+	}
+	return false;
+}
+
+/* Insert a socket into ehash, and eventually remove another one
+ * (The another one can be a SYN_RECV or TIMEWAIT)
+ * If an existing socket already exists, socket sk is not inserted,
+ * and sets found_dup_sk parameter to true.
+ */
+bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
@@ -492,16 +537,23 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk)
 	if (osk) {
 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
 		ret = sk_nulls_del_node_init_rcu(osk);
+	} else if (found_dup_sk) {
+		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
+		if (*found_dup_sk)
+			ret = false;
 	}
+
 	if (ret)
 		__sk_nulls_add_node_rcu(sk, list);
+
 	spin_unlock(lock);
+
 	return ret;
 }
 
-bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
 {
-	bool ok = inet_ehash_insert(sk, osk);
+	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
 
 	if (ok) {
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -545,7 +597,7 @@ int __inet_hash(struct sock *sk, struct sock *osk)
 	int err = 0;
 
 	if (sk->sk_state != TCP_LISTEN) {
-		inet_ehash_nolisten(sk, osk);
+		inet_ehash_nolisten(sk, osk, NULL);
 		return 0;
 	}
 	WARN_ON(!sk_unhashed(sk));
@@ -641,7 +693,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		tb = inet_csk(sk)->icsk_bind_hash;
 		spin_lock_bh(&head->lock);
 		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-			inet_ehash_nolisten(sk, NULL);
+			inet_ehash_nolisten(sk, NULL, NULL);
 			spin_unlock_bh(&head->lock);
 			return 0;
 		}
@@ -720,7 +772,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->inet_sport = htons(port);
-			inet_ehash_nolisten(sk, (struct sock *)tw);
+			inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
 		}
 		if (tw)
 			inet_twsk_bind_unhash(tw, hinfo);
@@ -1426,6 +1426,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  bool *own_req)
 {
 	struct inet_request_sock *ireq;
+	bool found_dup_sk = false;
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
 	struct sock *newsk;
@@ -1496,12 +1497,22 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
-	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
+				       &found_dup_sk);
 	if (likely(*own_req)) {
 		tcp_move_syn(newtp, req);
 		ireq->ireq_opt = NULL;
 	} else {
 		newinet->inet_opt = NULL;
+
+		if (!req_unhash && found_dup_sk) {
+			/* This code path should only be executed in the
+			 * syncookie case only
+			 */
+			bh_unlock_sock(newsk);
+			sock_put(newsk);
+			newsk = NULL;
+		}
 	}
 	return newsk;
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1142,6 +1142,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
 	struct ipv6_txoptions *opt;
 	struct inet_sock *newinet;
+	bool found_dup_sk = false;
 	struct tcp_sock *newtp;
 	struct sock *newsk;
 #ifdef CONFIG_TCP_MD5SIG
@@ -1308,7 +1309,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 		tcp_done(newsk);
 		goto out;
 	}
-	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
+				       &found_dup_sk);
 	if (*own_req) {
 		tcp_move_syn(newtp, req);
 
@@ -1323,6 +1325,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				skb_set_owner_r(newnp->pktoptions, newsk);
 			}
 		}
+	} else {
+		if (!req_unhash && found_dup_sk) {
+			/* This code path should only be executed in the
+			 * syncookie case only
+			 */
+			bh_unlock_sock(newsk);
+			sock_put(newsk);
+			newsk = NULL;
+		}
 	}
 
 	return newsk;
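
The three files above carry a single fix ("tcp: fix race condition when creating child sockets from syncookies"). With syncookies in use there is no request socket in the ehash table, so two ACKs for the same connection can be processed concurrently and each create its own child socket. inet_ehash_insert() therefore gained a found_dup_sk out-parameter: when there is no request socket to replace (osk == NULL) it scans the bucket for an existing match, and the losing caller in tcp_v4/v6_syn_recv_sock() unlocks and drops the socket it just created. Below is a minimal userspace sketch of that contract, with a mutex standing in for the bucket spinlock; all names (struct node, bucket_insert) are invented for illustration and are not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of the pattern above: insert into a hash
 * bucket, but report an already-present duplicate through an
 * out-parameter so the caller can discard its own copy. */
struct node {
	unsigned int key;
	struct node *next;
};

static struct node *bucket;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the inet_ehash_insert() contract: returns true if the node
 * was inserted, sets *found_dup when an equivalent entry existed. */
static bool bucket_insert(struct node *n, bool *found_dup)
{
	struct node *it;

	pthread_mutex_lock(&bucket_lock);
	for (it = bucket; it; it = it->next) {
		if (it->key == n->key) {
			if (found_dup)
				*found_dup = true;
			pthread_mutex_unlock(&bucket_lock);
			return false;
		}
	}
	n->next = bucket;
	bucket = n;
	pthread_mutex_unlock(&bucket_lock);
	return true;
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));
	bool dup = false;

	a->key = b->key = 42;	/* two "children" for the same 4-tuple */
	bucket_insert(a, NULL);
	if (!bucket_insert(b, &dup) && dup) {
		/* caller-side policy, as in tcp_v4/v6_syn_recv_sock():
		 * the loser frees its own copy instead of leaking it */
		free(b);
		printf("duplicate detected, second socket dropped\n");
	}
	return 0;
}

Note that the fast path stays untouched: the duplicate scan only runs on the osk == NULL path, which is reachable only for syncookie-generated children.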

--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -54,7 +54,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
 
 	dev = dev_get_by_index_rcu(net, ifindex);
 	while (dev && !netif_is_l3_master(dev))
-		dev = netdev_master_upper_dev_get(dev);
+		dev = netdev_master_upper_dev_get_rcu(dev);
 
 	return dev ? dev->ifindex : 0;
 }

--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2253,6 +2253,13 @@ static int netlink_dump(struct sock *sk)
 	 * single netdev. The outcome is MSG_TRUNC error.
 	 */
 	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+
+	/* Make sure malicious BPF programs can not read unitialized memory
+	 * from skb->head -> skb->data
+	 */
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+
 	netlink_skb_set_owner_r(skb, sk);
 
 	if (nlk->dump_done_errno > 0) {
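
The two added resets re-anchor the network and mac header offsets after skb_reserve(), so a BPF socket filter inspecting the dump skb cannot read whatever bytes happen to sit between skb->head and skb->data. A standalone analogy of the stale-offset problem follows; it uses no kernel APIs, and struct buf with its fields is invented purely for the demo.

#include <stdio.h>
#include <string.h>

/* A buffer whose "headroom" is carved out after the fact. If the
 * metadata offsets are not re-anchored, a reader walking from the old
 * offsets sees stale bytes: the class of leak the two
 * skb_reset_*_header() calls close. */
struct buf {
	unsigned char data[64];
	size_t hdr_off;		/* plays the role of skb's header offsets */
	size_t payload_off;
};

int main(void)
{
	struct buf b;

	/* first use leaves "secrets" in the low bytes */
	memset(b.data, 0xAA, sizeof(b.data));
	b.hdr_off = 0;

	/* reuse: payload now starts at 32, but hdr_off still points at 0 */
	b.payload_off = 32;
	memset(b.data + b.payload_off, 0, sizeof(b.data) - b.payload_off);

	printf("stale byte via old offset: %#x\n", (unsigned)b.data[b.hdr_off]);

	/* the fix: re-anchor the header offset at the payload start */
	b.hdr_off = b.payload_off;
	printf("after reset: %#x\n", (unsigned)b.data[b.hdr_off]);
	return 0;
}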

--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2329,7 +2329,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+		if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
 			OVS_NLERR(log, "Flow action size exceeds max %u",
 				  MAX_ACTIONS_BUFSIZE);
 			return ERR_PTR(-EMSGSIZE);
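
The rewritten condition avoids unsigned underflow: next_offset can exceed MAX_ACTIONS_BUFSIZE (the ksize()-based doubling lets the buffer grow past the cap), and then MAX_ACTIONS_BUFSIZE - next_offset wraps to a huge value, the bounds check passes, and the subsequent clamp produces a buffer too small for the write. Checking next_offset + req_size > MAX_ACTIONS_BUFSIZE keeps the comparison on the safe side. A compilable toy of the two forms, with LIMIT and the sample values entirely made up:

#include <stdio.h>

/* Demonstrates the bounds-check rewrite above: with unsigned
 * arithmetic, "limit - offset < need" underflows once offset exceeds
 * limit, and the check silently passes. */
#define LIMIT 32768u

int main(void)
{
	unsigned int offset = 32772;	/* already past the limit */
	unsigned int need = 16;

	if ((LIMIT - offset) < need)	/* underflows to ~4 billion */
		puts("old check: rejected");
	else
		puts("old check: accepted (out-of-bounds!)");

	if ((offset + need) > LIMIT)	/* the fixed form */
		puts("new check: rejected");
	return 0;
}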

--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2790,7 +2790,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 		status = TP_STATUS_SEND_REQUEST;
 		err = po->xmit(skb);
-		if (unlikely(err > 0)) {
-			err = net_xmit_errno(err);
+		if (unlikely(err != 0)) {
+			if (err > 0)
+				err = net_xmit_errno(err);
 			if (err && __packet_get_status(po, ph) ==
 				   TP_STATUS_AVAILABLE) {
@@ -2992,8 +2993,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		skb->no_fcs = 1;
 
 	err = po->xmit(skb);
-	if (err > 0 && (err = net_xmit_errno(err)) != 0)
-		goto out_unlock;
+	if (unlikely(err != 0)) {
+		if (err > 0)
+			err = net_xmit_errno(err);
+		if (err)
+			goto out_unlock;
+	}
 
 	dev_put(dev);
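
Both hunks fix the same misreading of the xmit return convention: a packet transmit hook can return 0 on success, a positive NET_XMIT_* congestion code from the qdisc layer, or a negative errno from the driver. The old checks (err > 0) translated the positive codes but let negative errors fall through as success, so tpacket_snd()/packet_snd() could report bytes as sent after a hard failure. A compilable sketch of the corrected decision ladder, where the NET_XMIT_DROP value and the errno mapping are simplified stand-ins:

#include <errno.h>
#include <stdio.h>

#define NET_XMIT_DROP 1			/* stand-in for the qdisc code */

static int net_xmit_errno_demo(int code)
{
	return code == NET_XMIT_DROP ? -ENOBUFS : 0;
}

static int send_checked(int err)
{
	if (err != 0) {			/* the fixed check */
		if (err > 0)		/* qdisc code -> errno */
			err = net_xmit_errno_demo(err);
		if (err)
			return err;	/* real failure, propagate */
	}
	return 0;
}

int main(void)
{
	printf("driver ok:      %d\n", send_checked(0));
	printf("qdisc drop:     %d\n", send_checked(NET_XMIT_DROP));
	printf("driver -ENOMEM: %d\n", send_checked(-ENOMEM));
	return 0;
}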

--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -116,7 +116,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
 	struct rxrpc_net *rxnet = rxrpc_net(net);
 
 	rxnet->live = false;
 	del_timer_sync(&rxnet->peer_keepalive_timer);
 	cancel_work_sync(&rxnet->peer_keepalive_work);
+	/* Remove the timer again as the worker may have restarted it. */
+	del_timer_sync(&rxnet->peer_keepalive_timer);
 	rxrpc_destroy_all_calls(rxnet);
 	rxrpc_destroy_all_connections(rxnet);

--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
 	return 0;
 }
 
-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+static void __u32_destroy_key(struct tc_u_knode *n)
 {
 	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 
 	tcf_exts_destroy(&n->exts);
-	tcf_exts_put_net(&n->exts);
 	if (ht && --ht->refcnt == 0)
 		kfree(ht);
+	kfree(n);
+}
+
+static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+{
+	tcf_exts_put_net(&n->exts);
 #ifdef CONFIG_CLS_U32_PERF
 	if (free_pf)
 		free_percpu(n->pf);
@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
 	if (free_pf)
 		free_percpu(n->pcpu_success);
 #endif
-	kfree(n);
-	return 0;
+	__u32_destroy_key(n);
 }
 
 /* u32_delete_key_rcu should be called when free'ing a copied
@@ -812,10 +816,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
 	new->flags = n->flags;
 	RCU_INIT_POINTER(new->ht_down, ht);
 
-	/* bump reference count as long as we hold pointer to structure */
-	if (ht)
-		ht->refcnt++;
-
 #ifdef CONFIG_CLS_U32_PERF
 	/* Statistics may be incremented by readers during update
 	 * so we must keep them in tact. When the node is later destroyed
@@ -837,6 +837,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
 		return NULL;
 	}
 
+	/* bump reference count as long as we hold pointer to structure */
+	if (ht)
+		ht->refcnt++;
+
 	return new;
 }
 
@@ -903,13 +907,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
 			tca[TCA_RATE], ovr, extack);
 
 	if (err) {
-		u32_destroy_key(new, false);
+		__u32_destroy_key(new);
 		return err;
 	}
 
 	err = u32_replace_hw_knode(tp, new, flags, extack);
 	if (err) {
-		u32_destroy_key(new, false);
+		__u32_destroy_key(new);
 		return err;
 	}
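
u32_change() builds a private copy of the knode via u32_init_knode(); if binding the exts or offloading the filter fails, that copy used to be torn down with u32_destroy_key(), whose tcf_exts_put_net() drops a netns reference the copy never took. The split above leaves __u32_destroy_key() to free memory only, and moving the ht->refcnt++ below the last failure point stops the hash-table refcount from leaking on error. A toy model of why an unpublished copy needs the lighter destructor; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct shared {
	int refcnt;
};

struct knode {
	struct shared *net_ref;	/* reference-counted resource */
	char *payload;		/* plain memory, always owned */
};

/* __destroy: free memory only -- safe for unpublished copies */
static void knode__destroy(struct knode *n)
{
	free(n->payload);
	free(n);
}

/* destroy: drop the shared reference, then free */
static void knode_destroy(struct knode *n)
{
	n->net_ref->refcnt--;
	knode__destroy(n);
}

int main(void)
{
	struct shared net = { .refcnt = 1 };
	struct knode *orig = calloc(1, sizeof(*orig));
	struct knode *copy = calloc(1, sizeof(*copy));

	orig->net_ref = &net;	/* the original took the reference */
	copy->net_ref = &net;	/* the copy merely points at it */

	knode__destroy(copy);	/* error path: free the copy only */
	knode_destroy(orig);	/* normal teardown drops the ref */
	printf("refcnt at exit: %d (0 = balanced)\n", net.refcnt);
	return 0;
}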

--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1698,8 +1698,10 @@ static int smc_shutdown(struct socket *sock, int how)
 	if (smc->use_fallback) {
 		rc = kernel_sock_shutdown(smc->clcsock, how);
 		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
-		if (sk->sk_shutdown == SHUTDOWN_MASK)
+		if (sk->sk_shutdown == SHUTDOWN_MASK) {
 			sk->sk_state = SMC_CLOSED;
+			sock_put(sk);
+		}
 		goto out;
 	}
 	switch (how) {
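
For a fallback socket, a full shutdown(SHUT_RDWR) now walks the socket to SMC_CLOSED and also drops the reference that the active state had been holding; without the added sock_put(), release alone never dropped it and the sock leaked. A toy refcount model of that ownership rule, with states and names invented for the demo:

#include <stdbool.h>
#include <stdio.h>

enum state { ACTIVE, CLOSED };

struct conn {
	enum state st;
	int refs;
};

static void conn_put(struct conn *c)
{
	c->refs--;
}

static void conn_shutdown(struct conn *c, bool fixed)
{
	if (c->st == ACTIVE) {
		c->st = CLOSED;
		if (fixed)
			conn_put(c);	/* the added sock_put(sk) */
	}
}

static void conn_release(struct conn *c)
{
	/* release drops only its own reference; it assumes whoever
	 * moved the state to CLOSED already dropped the "active" one */
	conn_put(c);
}

int main(void)
{
	struct conn c = { .st = ACTIVE, .refs = 2 };

	conn_shutdown(&c, true);
	conn_release(&c);
	printf("refs at exit: %d (0 = freed, >0 = leaked)\n", c.refs);
	return 0;
}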

--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -46,35 +46,6 @@
  */
 #undef ENABLE_MIC_INPUT
 
-static struct clk *mclk;
-
-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
-					struct snd_soc_dapm_context *dapm,
-					enum snd_soc_bias_level level)
-{
-	static int mclk_on;
-	int ret = 0;
-
-	switch (level) {
-	case SND_SOC_BIAS_ON:
-	case SND_SOC_BIAS_PREPARE:
-		if (!mclk_on)
-			ret = clk_enable(mclk);
-		if (ret == 0)
-			mclk_on = 1;
-		break;
-
-	case SND_SOC_BIAS_OFF:
-	case SND_SOC_BIAS_STANDBY:
-		if (mclk_on)
-			clk_disable(mclk);
-		mclk_on = 0;
-		break;
-	}
-
-	return ret;
-}
-
 static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
 	SND_SOC_DAPM_MIC("Int Mic", NULL),
 	SND_SOC_DAPM_SPK("Ext Spk", NULL),
@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
 	.owner = THIS_MODULE,
 	.dai_link = &at91sam9g20ek_dai,
 	.num_links = 1,
-	.set_bias_level = at91sam9g20ek_set_bias_level,
 
 	.dapm_widgets = at91sam9g20ek_dapm_widgets,
 	.num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct device_node *codec_np, *cpu_np;
-	struct clk *pllb;
 	struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
 	int ret;
 
@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	/*
-	 * Codec MCLK is supplied by PCK0 - set it up.
-	 */
-	mclk = clk_get(NULL, "pck0");
-	if (IS_ERR(mclk)) {
-		dev_err(&pdev->dev, "Failed to get MCLK\n");
-		ret = PTR_ERR(mclk);
-		goto err;
-	}
-
-	pllb = clk_get(NULL, "pllb");
-	if (IS_ERR(pllb)) {
-		dev_err(&pdev->dev, "Failed to get PLLB\n");
-		ret = PTR_ERR(pllb);
-		goto err_mclk;
-	}
-	ret = clk_set_parent(mclk, pllb);
-	clk_put(pllb);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "Failed to set MCLK parent\n");
-		goto err_mclk;
-	}
-
-	clk_set_rate(mclk, MCLK_RATE);
-
 	card->dev = &pdev->dev;
 
 	/* Parse device node info */
@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 
 	return ret;
 
-err_mclk:
-	clk_put(mclk);
-	mclk = NULL;
 err:
 	atmel_ssc_put_audio(0);
 	return ret;
@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
 {
 	struct snd_soc_card *card = platform_get_drvdata(pdev);
 
-	clk_disable(mclk);
-	mclk = NULL;
 	snd_soc_unregister_card(card);
 	atmel_ssc_put_audio(0);

--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
 
 	dev_set_drvdata(dev, priv);
 
-	return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
+	ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
 				       msm8916_wcd_digital_dai,
 				       ARRAY_SIZE(msm8916_wcd_digital_dai));
+	if (ret)
+		goto err_mclk;
+
+	return 0;
+
+err_mclk:
+	clk_disable_unprepare(priv->mclk);
 err_clk:
 	clk_disable_unprepare(priv->ahbclk);
 	return ret;
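
Previously the probe returned the result of devm_snd_soc_register_component() directly, so a registration failure left both mclk and ahbclk prepared and enabled. The fix routes the failure through a new err_mclk label placed ahead of the existing err_clk one, unwinding the clocks in reverse order of acquisition. The same ladder in miniature, where malloc/free stand in for the clk get/enable pairs and all names are placeholders:

#include <stdio.h>
#include <stdlib.h>

static int probe_demo(void)
{
	char *ahbclk, *mclk;
	int ret;

	ahbclk = malloc(16);	/* stands in for enabling ahbclk */
	if (!ahbclk)
		return -1;

	mclk = malloc(16);	/* stands in for enabling mclk */
	if (!mclk) {
		ret = -1;
		goto err_clk;
	}

	ret = -1;		/* pretend registration failed */
	if (ret)
		goto err_mclk;

	return 0;

err_mclk:
	free(mclk);		/* clk_disable_unprepare(priv->mclk) */
err_clk:
	free(ahbclk);		/* clk_disable_unprepare(priv->ahbclk) */
	return ret;
}

int main(void)
{
	printf("probe_demo() = %d\n", probe_demo());
	return 0;
}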

--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1676,8 +1676,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
 		switch (w->id) {
 		case snd_soc_dapm_pre:
 			if (!w->event)
-				list_for_each_entry_safe_continue(w, n, list,
-								  power_list);
+				continue;
 
 			if (event == SND_SOC_DAPM_STREAM_START)
 				ret = w->event(w,
@@ -1689,8 +1688,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
 
 		case snd_soc_dapm_post:
 			if (!w->event)
-				list_for_each_entry_safe_continue(w, n, list,
-								  power_list);
+				continue;
 
 			if (event == SND_SOC_DAPM_STREAM_START)
 				ret = w->event(w,
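
The deleted statements look like a plain "skip this widget", but list_for_each_entry_safe_continue() expands to a for-loop header; used as a statement with a trailing semicolon it becomes an empty-bodied loop that spins the cursor to the end of the list, after which the surrounding code would treat the list head as if it were a widget. The intended behavior is simply continue. A self-contained demonstration with a simplified index-based macro standing in for the list iterator:

#include <stdio.h>

#define arr_for_each_continue(i, n) for ((i)++; (i) < (n); (i)++)

static int sum_events(int fixed)
{
	/* the last slot plays the role of the list-head sentinel */
	int vals[] = { 1, 0, 2, 0, 3, -100 };
	int n = 5, i, sum = 0;

	for (i = 0; i < n; i++) {
		if (!vals[i]) {
			if (fixed) {
				continue;
			} else {
				/* BUG: empty loop advances i to n */
				arr_for_each_continue(i, n);
			}
		}
		sum += vals[i];	/* buggy path reads the sentinel */
	}
	return sum;
}

int main(void)
{
	printf("buggy sum: %d\n", sum_events(0));	/* -99 */
	printf("fixed sum: %d\n", sum_events(1));	/* 6 */
	return 0;
}

With the buggy branch, the inner loop leaves the cursor on the sentinel, which is exactly how the kernel version ended up dereferencing the list head as a struct snd_soc_dapm_widget.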

--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1210,6 +1210,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
 		} while (drain_urbs && timeout);
 		finish_wait(&ep->drain_wait, &wait);
 	}
+	port->active = 0;
 	spin_unlock_irq(&ep->buffer_lock);
 }

--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -8,7 +8,7 @@
  */
 
 /* handling of USB vendor/product ID pairs as 32-bit numbers */
-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
+#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
 #define USB_ID_VENDOR(id) ((id) >> 16)
 #define USB_ID_PRODUCT(id) ((u16)(id))
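
The cast matters because (vendor) << 16 is evaluated in signed int after integer promotion; any vendor ID with the top bit set shifts into the sign bit, which is undefined behavior in C and is what compilers and sanitizers flag. Casting the vendor side to unsigned int keeps the whole expression in defined unsigned arithmetic. A two-line demonstration, with arbitrary example values:

#include <stdio.h>

#define USB_ID_BAD(vendor, product)  (((vendor) << 16) | (product))
#define USB_ID_GOOD(vendor, product) (((unsigned int)(vendor) << 16) | (product))

int main(void)
{
	unsigned short vendor = 0x8086, product = 0x1234;

	/* 0x8086 << 16 exceeds INT_MAX: undefined behavior, left disabled */
	/* printf("%#x\n", USB_ID_BAD(vendor, product)); */
	printf("%#x\n", USB_ID_GOOD(vendor, product));
	return 0;
}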

--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
@@ -172,6 +172,17 @@ flooding_filters_add()
 	local lsb
 	local i
 
+	# Prevent unwanted packets from entering the bridge and interfering
+	# with the test.
+	tc qdisc add dev br0 clsact
+	tc filter add dev br0 egress protocol all pref 1 handle 1 \
+		matchall skip_hw action drop
+	tc qdisc add dev $h1 clsact
+	tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+		flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+	tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+		matchall skip_hw action drop
+
 	tc qdisc add dev $rp2 clsact
 
 	for i in $(eval echo {1..$num_remotes}); do
@@ -194,6 +205,12 @@ flooding_filters_del()
 	done
 
 	tc qdisc del dev $rp2 clsact
+
+	tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+	tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+	tc qdisc del dev $h1 clsact
+	tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+	tc qdisc del dev br0 clsact
 }
 
 flooding_check_packets()