Merge 5.10.29 into android12-5.10

Changes in 5.10.29
	ARM: dts: am33xx: add aliases for mmc interfaces
	bus: ti-sysc: Fix warning on unbind if reset is not deasserted
	platform/x86: intel-hid: Support Lenovo ThinkPad X1 Tablet Gen 2
	bpf, x86: Use kvmalloc_array instead kmalloc_array in bpf_jit_comp
	net/mlx5e: Enforce minimum value check for ICOSQ size
	net: pxa168_eth: Fix a potential data race in pxa168_eth_remove
	kunit: tool: Fix a python tuple typing error
	mISDN: fix crash in fritzpci
	mac80211: Check crypto_aead_encrypt for errors
	mac80211: choose first enabled channel for monitor
	drm/msm/dsi_pll_7nm: Fix variable usage for pll_lockdet_rate
	drm/msm/adreno: a5xx_power: Don't apply A540 lm_setup to other GPUs
	drm/msm: Ratelimit invalid-fence message
	netfilter: conntrack: Fix gre tunneling over ipv6
	netfilter: nftables: skip hook overlap logic if flowtable is stale
	net: ipa: fix init header command validation
	platform/x86: thinkpad_acpi: Allow the FnLock LED to change state
	kselftest/arm64: sve: Do not use non-canonical FFR register value
	drm/msm/disp/dpu1: icc path needs to be set before dpu runtime resume
	x86/build: Turn off -fcf-protection for realmode targets
	block: clear GD_NEED_PART_SCAN later in bdev_disk_changed
	platform/x86: intel_pmc_core: Ignore GBE LTR on Tiger Lake platforms
	ptp_qoriq: fix overflow in ptp_qoriq_adjfine() u64 calcalation
	scsi: target: pscsi: Clean up after failure in pscsi_map_sg()
	selftests/vm: fix out-of-tree build
	ia64: mca: allocate early mca with GFP_ATOMIC
	ia64: fix format strings for err_inject
	cifs: revalidate mapping when we open files for SMB1 POSIX
	cifs: Silently ignore unknown oplock break handle
	io_uring: fix timeout cancel return code
	math: Export mul_u64_u64_div_u64
	tools/resolve_btfids: Build libbpf and libsubcmd in separate directories
	tools/resolve_btfids: Check objects before removing
	tools/resolve_btfids: Set srctree variable unconditionally
	kbuild: Add resolve_btfids clean to root clean target
	kbuild: Do not clean resolve_btfids if the output does not exist
	tools/resolve_btfids: Add /libbpf to .gitignore
	bpf, x86: Validate computation of branch displacements for x86-64
	bpf, x86: Validate computation of branch displacements for x86-32
	init/Kconfig: make COMPILE_TEST depend on !S390
	init/Kconfig: make COMPILE_TEST depend on HAS_IOMEM
	Linux 5.10.29

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib3f9484150747de48eca46c69d690830b790418e
Merged by Greg Kroah-Hartman, 2021-04-11 09:08:17 +02:00
commit c62f09151d
37 changed files with 241 additions and 121 deletions


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 28
+SUBLEVEL = 29
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -1129,6 +1129,17 @@ ifdef CONFIG_STACK_VALIDATION
 endif
 endif
+PHONY += resolve_btfids_clean
+resolve_btfids_O = $(abspath $(objtree))/tools/bpf/resolve_btfids
+# tools/bpf/resolve_btfids directory might not exist
+# in output directory, skip its clean in that case
+resolve_btfids_clean:
+ifneq ($(wildcard $(resolve_btfids_O)),)
+$(Q)$(MAKE) -sC $(srctree)/tools/bpf/resolve_btfids O=$(resolve_btfids_O) clean
+endif
 ifdef CONFIG_BPF
 ifdef CONFIG_DEBUG_INFO_BTF
 ifeq ($(has_libelf),1)
@@ -1564,7 +1575,7 @@ vmlinuxclean:
 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
 $(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
-clean: archclean vmlinuxclean
+clean: archclean vmlinuxclean resolve_btfids_clean
 # mrproper - Delete all generated files, including .config
 #


@@ -40,6 +40,9 @@ aliases {
 ethernet1 = &cpsw_emac1;
 spi0 = &spi0;
 spi1 = &spi1;
+mmc0 = &mmc1;
+mmc1 = &mmc2;
+mmc2 = &mmc3;
 };
 cpus {


@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \
 char *buf) \
 { \
 u32 cpu=dev->id; \
-return sprintf(buf, "%lx\n", name[cpu]); \
+return sprintf(buf, "%llx\n", name[cpu]); \
 }
 #define store(name) \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 #ifdef ERR_INJ_DEBUG
 printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
 err_data_buffer[cpu].data1,
 err_data_buffer[cpu].data2,
 err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 #ifdef ERR_INJ_DEBUG
 printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
 return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 char *buf)
 {
 unsigned int cpu=dev->id;
-return sprintf(buf, "%lx\n", phys_addr[cpu]);
+return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }
 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-printk("Virtual address %lx is not existing.\n",virt_addr);
+printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
 return -EINVAL;
 }
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
 unsigned int cpu=dev->id;
-return sprintf(buf, "%lx, %lx, %lx\n",
+return sprintf(buf, "%llx, %llx, %llx\n",
 err_data_buffer[cpu].data1,
 err_data_buffer[cpu].data2,
 err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
 int ret;
 #ifdef ERR_INJ_DEBUG
-printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
 err_data_buffer[cpu].data1,
 err_data_buffer[cpu].data2,
 err_data_buffer[cpu].data3,
 cpu);
 #endif
-ret=sscanf(buf, "%lx, %lx, %lx",
+ret = sscanf(buf, "%llx, %llx, %llx",
 &err_data_buffer[cpu].data1,
 &err_data_buffer[cpu].data2,
 &err_data_buffer[cpu].data3);


@@ -1822,7 +1822,7 @@ ia64_mca_cpu_init(void *cpu_data)
 data = mca_bootmem();
 first_time = 0;
 } else
-data = (void *)__get_free_pages(GFP_KERNEL,
+data = (void *)__get_free_pages(GFP_ATOMIC,
 get_order(sz));
 if (!data)
 panic("Could not allocate MCA memory for cpu %d\n",


@@ -34,7 +34,7 @@ M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
 REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
 -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
--mno-mmx -mno-sse
+-mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector


@@ -1476,7 +1476,16 @@ xadd: if (is_imm8(insn->off))
 }
 if (image) {
-if (unlikely(proglen + ilen > oldproglen)) {
+/*
+* When populating the image, assert that:
+*
+* i) We do not write beyond the allocated space, and
+* ii) addrs[i] did not change from the prior run, in order
+* to validate assumptions made for computing branch
+* displacements.
+*/
+if (unlikely(proglen + ilen > oldproglen ||
+proglen + ilen != addrs[i])) {
 pr_err("bpf_jit: fatal error\n");
 return -EFAULT;
 }
@@ -2038,7 +2047,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 extra_pass = true;
 goto skip_init_addrs;
 }
-addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
 if (!addrs) {
 prog = orig_prog;
 goto out_addrs;
@@ -2128,7 +2137,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 if (image)
 bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
-kfree(addrs);
+kvfree(addrs);
 kfree(jit_data);
 prog->aux->jit_data = NULL;
 }

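The comment added above states the JIT's two-pass contract: instruction sizes are settled over repeated sizing passes, and the pass that finally writes the image must land every instruction exactly on the end offset recorded in addrs[]. The following is a standalone toy sketch of that converge-then-verify pattern; it is illustrative only, not the kernel JIT, and all names, sizes, and the sample "program" are made up.

/*
 * Toy model (not the kernel JIT) of the multi-pass sizing scheme:
 * addrs[i] records the end offset of each instruction, sizing passes
 * repeat until those offsets stop changing, and the final image pass
 * checks that nothing drifted and nothing overruns the allocation.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NINSN 5

/* toy program: instruction i is a jump to instruction target[i] */
static const int target[NINSN] = { 4, 0, 3, 1, 2 };

/* a jump encodes in 2 bytes if its displacement fits in s8, else 5 */
static int insn_size(int disp)
{
	return (disp >= -128 && disp <= 127) ? 2 : 5;
}

static int do_pass(int *addrs, uint8_t *image, int oldproglen)
{
	int proglen = 0;

	for (int i = 0; i < NINSN; i++) {
		/* start of the target = end offset of the insn before it */
		int t_start = target[i] ? addrs[target[i] - 1] : 0;
		/* displacement is measured from the end of insn i (last pass) */
		int disp = t_start - addrs[i];
		int ilen = insn_size(disp);

		if (image) {
			/* the checks the patch adds before writing the image */
			if (proglen + ilen > oldproglen ||
			    proglen + ilen != addrs[i]) {
				fprintf(stderr, "fatal: layout changed while emitting\n");
				exit(1);
			}
			memset(image + proglen, 0x90, ilen); /* stand-in opcodes */
		}
		proglen += ilen;
		addrs[i] = proglen;
	}
	return proglen;
}

int main(void)
{
	int addrs[NINSN] = { 0 };
	int proglen = 0, oldproglen = 0;
	uint8_t *image;

	/* sizing passes: iterate until the layout converges */
	for (int pass = 0; pass < 16; pass++) {
		proglen = do_pass(addrs, NULL, 0);
		if (proglen == oldproglen)
			break;
		oldproglen = proglen;
	}

	/* final pass populates the image and must reproduce the layout */
	image = calloc(1, proglen);
	assert(image);
	do_pass(addrs, image, proglen);
	printf("emitted %d bytes over converged layout\n", proglen);
	free(image);
	return 0;
}

If the displacement-dependent sizes did not converge, the emit pass would hit the same fatal-error branch the two JIT patches below add for x86-64 and x86-32.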

@@ -2278,7 +2278,16 @@ emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
 }
 if (image) {
-if (unlikely(proglen + ilen > oldproglen)) {
+/*
+* When populating the image, assert that:
+*
+* i) We do not write beyond the allocated space, and
+* ii) addrs[i] did not change from the prior run, in order
+* to validate assumptions made for computing branch
+* displacements.
+*/
+if (unlikely(proglen + ilen > oldproglen ||
+proglen + ilen != addrs[i])) {
 pr_err("bpf_jit: fatal error\n");
 return -EFAULT;
 }


@@ -3044,7 +3044,9 @@ static int sysc_remove(struct platform_device *pdev)
 pm_runtime_put_sync(&pdev->dev);
 pm_runtime_disable(&pdev->dev);
-reset_control_assert(ddata->rsts);
+if (!reset_control_status(ddata->rsts))
+reset_control_assert(ddata->rsts);
 unprepare:
 sysc_unprepare(ddata);


@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
 /* Set up the limits management */
 if (adreno_is_a530(adreno_gpu))
 a530_lm_setup(gpu);
-else
+else if (adreno_is_a540(adreno_gpu))
 a540_lm_setup(gpu);
 /* Set up SP/TP power collpase */


@@ -43,6 +43,8 @@
 #define DPU_DEBUGFS_DIR "msm_dpu"
 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */
 static int dpu_kms_hw_init(struct msm_kms *kms);
 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@@ -929,6 +931,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 DPU_DEBUG("REG_DMA is not defined");
 }
+if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+dpu_kms_parse_data_bus_icc_path(dpu_kms);
 pm_runtime_get_sync(&dpu_kms->pdev->dev);
 dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1030,9 +1035,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 dpu_vbif_init_memtypes(dpu_kms);
-if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
-dpu_kms_parse_data_bus_icc_path(dpu_kms);
 pm_runtime_put_sync(&dpu_kms->pdev->dev);
 return 0;
@@ -1189,10 +1191,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 ddev = dpu_kms->dev;
 WARN_ON(!(dpu_kms->num_paths));
 /* Min vote of BW is required before turning on AXI clk */
 for (i = 0; i < dpu_kms->num_paths; i++)
-icc_set_bw(dpu_kms->path[i], 0,
-dpu_kms->catalog->perf.min_dram_ib);
+icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
 if (rc) {


@@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
 pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
 pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
 pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
-pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
 pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
 pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
 pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);


@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
 int ret;
 if (fence > fctx->last_fence) {
-DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
 fctx->name, fence, fctx->last_fence);
 return -EINVAL;
 }


@@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac)
 {
 if (isac->type & IPAC_TYPE_ISACX)
 WriteISAC(isac, ISACX_MASK, 0xff);
-else
+else if (isac->type != 0)
 WriteISAC(isac, ISAC_MASK, 0xff);
 if (isac->dch.timer.function != NULL) {
 del_timer(&isac->dch.timer);


@@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
 clk_disable_unprepare(pep->clk);
 mdiobus_unregister(pep->smi_bus);
 mdiobus_free(pep->smi_bus);
-unregister_netdev(dev);
 cancel_work_sync(&pep->tx_timeout_task);
+unregister_netdev(dev);
 free_netdev(dev);
 return 0;
 }


@@ -2326,8 +2326,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
 {
 switch (params->rq_wq_type) {
 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-return order_base_2(MLX5E_UMR_WQEBBS) +
-mlx5e_get_rq_log_wq_sz(rqp->rqc);
+return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+order_base_2(MLX5E_UMR_WQEBBS) +
+mlx5e_get_rq_log_wq_sz(rqp->rqc));
 default: /* MLX5_WQ_TYPE_CYCLIC */
 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 }


@@ -175,21 +175,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
 : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 if (mem->offset > offset_max ||
 ipa->mem_offset > offset_max - mem->offset) {
-dev_err(dev, "IPv%c %s%s table region offset too large "
-"(0x%04x + 0x%04x > 0x%04x)\n",
-ipv6 ? '6' : '4', hashed ? "hashed " : "",
-route ? "route" : "filter",
-ipa->mem_offset, mem->offset, offset_max);
+dev_err(dev, "IPv%c %s%s table region offset too large\n",
+ipv6 ? '6' : '4', hashed ? "hashed " : "",
+route ? "route" : "filter");
+dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
+ipa->mem_offset, mem->offset, offset_max);
 return false;
 }
 if (mem->offset > ipa->mem_size ||
 mem->size > ipa->mem_size - mem->offset) {
-dev_err(dev, "IPv%c %s%s table region out of range "
-"(0x%04x + 0x%04x > 0x%04x)\n",
-ipv6 ? '6' : '4', hashed ? "hashed " : "",
-route ? "route" : "filter",
-mem->offset, mem->size, ipa->mem_size);
+dev_err(dev, "IPv%c %s%s table region out of range\n",
+ipv6 ? '6' : '4', hashed ? "hashed " : "",
+route ? "route" : "filter");
+dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
+mem->offset, mem->size, ipa->mem_size);
 return false;
 }
@@ -205,22 +207,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
 u32 size_max;
 u32 size;
+/* In ipa_cmd_hdr_init_local_add() we record the offset and size
+* of the header table memory area. Make sure the offset and size
+* fit in the fields that need to hold them, and that the entire
+* range is within the overall IPA memory range.
+*/
 offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
 if (mem->offset > offset_max ||
 ipa->mem_offset > offset_max - mem->offset) {
-dev_err(dev, "header table region offset too large "
-"(0x%04x + 0x%04x > 0x%04x)\n",
-ipa->mem_offset + mem->offset, offset_max);
+dev_err(dev, "header table region offset too large\n");
+dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
+ipa->mem_offset, mem->offset, offset_max);
 return false;
 }
 size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
 size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
 size += ipa->mem[IPA_MEM_AP_HEADER].size;
-if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
-dev_err(dev, "header table region out of range "
-"(0x%04x + 0x%04x > 0x%04x)\n",
-mem->offset, size, ipa->mem_size);
+if (size > size_max) {
+dev_err(dev, "header table region size too large\n");
+dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
+return false;
+}
+if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
+dev_err(dev, "header table region out of range\n");
+dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
+mem->offset, size, ipa->mem_size);
 return false;
 }


@@ -86,6 +86,13 @@ static const struct dmi_system_id button_array_table[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
 },
 },
+{
+.ident = "Lenovo ThinkPad X1 Tablet Gen 2",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+},
+},
 { }
 };


@@ -863,34 +863,45 @@ static int pmc_core_pll_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
-const char __user *userbuf,
-size_t count, loff_t *ppos)
+static int pmc_core_send_ltr_ignore(u32 value)
 {
 struct pmc_dev *pmcdev = &pmc;
 const struct pmc_reg_map *map = pmcdev->map;
-u32 val, buf_size, fd;
-int err;
-buf_size = count < 64 ? count : 64;
-err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
-if (err)
-return err;
+u32 reg;
+int err = 0;
 mutex_lock(&pmcdev->lock);
-if (val > map->ltr_ignore_max) {
+if (value > map->ltr_ignore_max) {
 err = -EINVAL;
 goto out_unlock;
 }
-fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
-fd |= (1U << val);
-pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
+reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
+reg |= BIT(value);
+pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
 out_unlock:
 mutex_unlock(&pmcdev->lock);
+return err;
+}
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+const char __user *userbuf,
+size_t count, loff_t *ppos)
+{
+u32 buf_size, value;
+int err;
+buf_size = min_t(u32, count, 64);
+err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+if (err)
+return err;
+err = pmc_core_send_ltr_ignore(value);
 return err == 0 ? count : err;
 }
@@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev)
 pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
 dmi_check_system(pmc_core_dmi_table);
+/*
+* On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
+* a cable is attached. Tell the PMC to ignore it.
+*/
+if (pmcdev->map == &tgl_reg_map) {
+dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
+pmc_core_send_ltr_ignore(3);
+}
 pmc_core_dbgfs_register(pmcdev);
 device_initialized = true;


@@ -4079,13 +4079,19 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 case TP_HKEY_EV_KEY_NUMLOCK:
 case TP_HKEY_EV_KEY_FN:
-case TP_HKEY_EV_KEY_FN_ESC:
 /* key press events, we just ignore them as long as the EC
 * is still reporting them in the normal keyboard stream */
 *send_acpi_ev = false;
 *ignore_acpi_ev = true;
 return true;
+case TP_HKEY_EV_KEY_FN_ESC:
+/* Get the media key status to foce the status LED to update */
+acpi_evalf(hkey_handle, NULL, "GMKS", "v");
+*send_acpi_ev = false;
+*ignore_acpi_ev = true;
+return true;
 case TP_HKEY_EV_TABLET_CHANGED:
 tpacpi_input_send_tabletsw();
 hotkey_tablet_mode_notify_change();


@@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 tmr_add = ptp_qoriq->tmr_add;
 adj = tmr_add;
-/* calculate diff as adj*(scaled_ppm/65536)/1000000
-* and round() to the nearest integer
+/*
+* Calculate diff and round() to the nearest integer
+*
+* diff = adj * (ppb / 1000000000)
+* = adj * scaled_ppm / 65536000000
 */
-adj *= scaled_ppm;
-diff = div_u64(adj, 8000000);
-diff = (diff >> 13) + ((diff >> 12) & 1);
+diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000);
+diff = DIV64_U64_ROUND_UP(diff, 2);
 tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
 ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add);
 return 0;

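The replacement divides by 32768000000 (half of 65536000000) and then halves the result with a round-up division, which rounds to the nearest integer while mul_u64_u64_div_u64() keeps the adj * scaled_ppm product in a wide intermediate instead of overflowing u64. A small userspace model of that arithmetic follows; it assumes a compiler with unsigned __int128, and both helpers here are stand-ins, not the kernel implementations.

/*
 * Userspace model (not kernel code) of the patched rounding:
 *   diff = adj * scaled_ppm / 65536000000, rounded to nearest.
 */
#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's mul_u64_u64_div_u64(): 128-bit product */
static uint64_t mul_u64_u64_div_u64(uint64_t a, uint64_t b, uint64_t c)
{
	return (uint64_t)(((unsigned __int128)a * b) / c);
}

/* stand-in for DIV64_U64_ROUND_UP() */
static uint64_t div64_u64_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t adj = 0x800000000ULL;  /* example tmr_add value */
	uint64_t scaled_ppm = 65536000; /* 1000 ppm in scaled-ppm units */

	/* reference: round(adj * scaled_ppm / 65536000000) via 128-bit math */
	unsigned __int128 num = (unsigned __int128)adj * scaled_ppm;
	uint64_t expect = (uint64_t)((num + 65536000000ULL / 2) / 65536000000ULL);

	/* the sequence used by the patched driver */
	uint64_t diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000ULL);
	diff = div64_u64_round_up(diff, 2);

	printf("expected %llu, got %llu\n",
	       (unsigned long long)expect, (unsigned long long)diff);
	return 0;
}

This is also why the "math: Export mul_u64_u64_div_u64" change rides along in the same release: the helper the new code calls has to be available to modular builds of the driver.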

@@ -939,6 +939,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 return 0;
 fail:
+if (bio)
+bio_put(bio);
+while (req->bio) {
+bio = req->bio;
+req->bio = bio->bi_next;
+bio_put(bio);
+}
+req->biotail = NULL;
 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }


@@ -1397,13 +1397,13 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
 lockdep_assert_held(&bdev->bd_mutex);
-clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 rescan:
 ret = blk_drop_partitions(bdev);
 if (ret)
 return ret;
+clear_bit(GD_NEED_PART_SCAN, &disk->state);
 /*
 * Historically we only set the capacity to zero for devices that
 * support partitions (independ of actually having partitions created).


@@ -164,6 +164,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
 goto posix_open_ret;
 }
 } else {
+cifs_revalidate_mapping(*pinode);
 cifs_fattr_to_inode(*pinode, &fattr);
 }


@@ -745,8 +745,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 }
 }
 spin_unlock(&cifs_tcp_ses_lock);
-cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
-return false;
+cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+return true;
 }
 void


@@ -1489,7 +1489,7 @@ static void io_queue_async_work(struct io_kiocb *req)
 io_queue_linked_timeout(link);
 }
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
 {
 struct io_timeout_data *io = req->async_data;
 int ret;
@@ -1499,7 +1499,7 @@ static void io_kill_timeout(struct io_kiocb *req)
 atomic_set(&req->ctx->cq_timeouts,
 atomic_read(&req->ctx->cq_timeouts) + 1);
 list_del_init(&req->timeout.list);
-io_cqring_fill_event(req, 0);
+io_cqring_fill_event(req, status);
 io_put_req_deferred(req, 1);
 }
 }
@@ -1516,7 +1516,7 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 spin_lock_irq(&ctx->completion_lock);
 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 if (io_match_task(req, tsk, files)) {
-io_kill_timeout(req);
+io_kill_timeout(req, -ECANCELED);
 canceled++;
 }
 }
@@ -1568,7 +1568,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 break;
 list_del_init(&req->timeout.list);
-io_kill_timeout(req);
+io_kill_timeout(req, 0);
 } while (!list_empty(&ctx->timeout_list));
 ctx->cq_last_tm_flush = seq;


@@ -114,8 +114,7 @@ config INIT_ENV_ARG_LIMIT
 config COMPILE_TEST
 bool "Compile also drivers which will not load"
-depends on !UML
-default n
+depends on HAS_IOMEM
 help
 Some drivers can be compiled on a different platform than they are
 intended to be run on. Despite they cannot be loaded there (or even


@@ -230,4 +230,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
 return res + div64_u64(a * b, c);
 }
+EXPORT_SYMBOL(mul_u64_u64_div_u64);
 #endif


@@ -23,6 +23,7 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
 struct aead_request *aead_req;
 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
 u8 *__aad;
+int ret;
 aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
 if (!aead_req)
@@ -40,10 +41,10 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
 aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
 aead_request_set_ad(aead_req, sg[0].length);
-crypto_aead_encrypt(aead_req);
+ret = crypto_aead_encrypt(aead_req);
 kfree_sensitive(aead_req);
-return 0;
+return ret;
 }
 int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,


@@ -22,6 +22,7 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
 struct aead_request *aead_req;
 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
 const __le16 *fc;
+int ret;
 if (data_len < GMAC_MIC_LEN)
 return -EINVAL;
@@ -59,10 +60,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
 aead_request_set_crypt(aead_req, sg, sg, 0, iv);
 aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
-crypto_aead_encrypt(aead_req);
+ret = crypto_aead_encrypt(aead_req);
 kfree_sensitive(aead_req);
-return 0;
+return ret;
 }
 struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],


@@ -982,8 +982,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 continue;
 if (!dflt_chandef.chan) {
+/*
+* Assign the first enabled channel to dflt_chandef
+* from the list of channels
+*/
+for (i = 0; i < sband->n_channels; i++)
+if (!(sband->channels[i].flags &
+IEEE80211_CHAN_DISABLED))
+break;
+/* if none found then use the first anyway */
+if (i == sband->n_channels)
+i = 0;
 cfg80211_chandef_create(&dflt_chandef,
-&sband->channels[0],
+&sband->channels[i],
 NL80211_CHAN_NO_HT);
 /* init channel we're on */
 if (!local->use_chanctx && !local->_oper_chandef.chan) {


@@ -218,9 +218,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
 enum ip_conntrack_info ctinfo,
 const struct nf_hook_state *state)
 {
-if (state->pf != NFPROTO_IPV4)
-return -NF_ACCEPT;
 if (!nf_ct_is_confirmed(ct)) {
 unsigned int *timeouts = nf_ct_timeout_lookup(ct);


@@ -6573,6 +6573,9 @@ static int nft_register_flowtable_net_hooks(struct net *net,
 list_for_each_entry(hook, hook_list, list) {
 list_for_each_entry(ft, &table->flowtables, list) {
+if (!nft_is_active_next(net, ft))
+continue;
 list_for_each_entry(hook2, &ft->hook_list, list) {
 if (hook->ops.dev == hook2->ops.dev &&
 hook->ops.pf == hook2->ops.pf) {


@@ -1,4 +1,3 @@
-/FEATURE-DUMP.libbpf
-/bpf_helper_defs.h
 /fixdep
 /resolve_btfids
+/libbpf/


@@ -2,11 +2,7 @@
 include ../../scripts/Makefile.include
 include ../../scripts/Makefile.arch
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-endif
+srctree := $(abspath $(CURDIR)/../../../)
 ifeq ($(V),1)
 Q =
@@ -22,28 +18,29 @@ AR = $(HOSTAR)
 CC = $(HOSTCC)
 LD = $(HOSTLD)
 ARCH = $(HOSTARCH)
+RM ?= rm
 OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
 LIBBPF_SRC := $(srctree)/tools/lib/bpf/
 SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
-BPFOBJ := $(OUTPUT)/libbpf.a
-SUBCMDOBJ := $(OUTPUT)/libsubcmd.a
+BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
+SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a
 BINARY := $(OUTPUT)/resolve_btfids
 BINARY_IN := $(BINARY)-in.o
 all: $(BINARY)
-$(OUTPUT):
+$(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd:
 $(call msg,MKDIR,,$@)
-$(Q)mkdir -p $(OUTPUT)
+$(Q)mkdir -p $(@)
-$(SUBCMDOBJ): fixdep FORCE
-$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT)
+$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
+$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
-$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
 $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
 CFLAGS := -g \
@@ -57,24 +54,27 @@ LIBS = -lelf -lz
 export srctree OUTPUT CFLAGS Q
 include $(srctree)/tools/build/Makefile.include
-$(BINARY_IN): fixdep FORCE
+$(BINARY_IN): fixdep FORCE | $(OUTPUT)
 $(Q)$(MAKE) $(build)=resolve_btfids
 $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
 $(call msg,LINK,$@)
 $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
-libsubcmd-clean:
-$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean
+clean_objects := $(wildcard $(OUTPUT)/*.o \
+$(OUTPUT)/.*.o.cmd \
+$(OUTPUT)/.*.o.d \
+$(OUTPUT)/libbpf \
+$(OUTPUT)/libsubcmd \
+$(OUTPUT)/resolve_btfids)
-libbpf-clean:
-$(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean
-clean: libsubcmd-clean libbpf-clean fixdep-clean
+ifneq ($(clean_objects),)
+clean: fixdep-clean
 $(call msg,CLEAN,$(BINARY))
-$(Q)$(RM) -f $(BINARY); \
-$(RM) -rf $(if $(OUTPUT),$(OUTPUT),.)/feature; \
-find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+$(Q)$(RM) -rf $(clean_objects)
+else
+clean:
+endif
 tags:
 $(call msg,GEN,,tags)


@@ -12,7 +12,7 @@ import re
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
-KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
+KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value'])
 class KconfigEntry(KconfigEntryBase):


@@ -284,16 +284,28 @@ endfunction
 // Set up test pattern in the FFR
 // x0: pid
 // x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
 // Beware: corrupts P0.
 function setup_ffr
 mov x4, x30
 bl pattern
+and w0, w0, #0x3
+bfi w0, w2, #2, #2
+mov w1, #1
+lsl w1, w1, w0
+sub w1, w1, #1
 ldr x0, =ffrref
-ldr x1, =scratch
-rdvl x2, #1
-lsr x2, x2, #3
-bl memcpy
+strh w1, [x0], 2
+rdvl x1, #1
+lsr x1, x1, #3
+sub x1, x1, #2
+bl memclr
 mov x0, #0
 ldr x1, =ffrref

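The new comment describes the canonical FFR value the test now generates: a 4-bit signature built from the PID and generation gives the count of low "1" bits in the first 16-bit chunk, and everything above it is zero. A minimal C model of that pattern follows; it is illustrative only, the function name and buffer size are made up, and it is not part of the selftest.

/*
 * Illustrative model of the canonical FFR pattern built by the new
 * assembly: sig low "1" bits in the first 16-bit chunk, zeros above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void make_ffr_pattern(uint16_t *ffr, size_t nchunks,
			     unsigned int pid, unsigned int generation)
{
	/* and w0, w0, #0x3 ; bfi w0, w2, #2, #2 */
	unsigned int sig = (pid & 0x3) | ((generation & 0x3) << 2);

	/* upper part of the register is cleared (memclr in the test) */
	memset(ffr, 0, nchunks * sizeof(*ffr));

	/* mov w1, #1 ; lsl w1, w1, w0 ; sub w1, w1, #1 ; strh w1, [x0] */
	ffr[0] = (uint16_t)((1u << sig) - 1);
}

int main(void)
{
	uint16_t ffr[8]; /* arbitrary size; the real size tracks the vector length */

	make_ffr_pattern(ffr, 8, 1234, 7);
	printf("first FFR chunk = 0x%04x\n", ffr[0]);
	return 0;
}

Because every value produced this way has only contiguous low bits set, it is a legal FFR contents on any implementation, which is the point of the fix.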

@@ -99,7 +99,7 @@ endef
 ifeq ($(CAN_BUILD_I386),1)
 $(BINARIES_32): CFLAGS += -m32
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
-$(BINARIES_32): %_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c
 $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
@@ -107,7 +107,7 @@ endif
 ifeq ($(CAN_BUILD_X86_64),1)
 $(BINARIES_64): CFLAGS += -m64
 $(BINARIES_64): LDLIBS += -lrt -ldl
-$(BINARIES_64): %_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c
 $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif
endif