Merge 02874ca52d ("tracing: Consider the NULL character when validating the event length") into android12-5.10-lts

Steps on the way to 5.10.229

Resolves conflicts in:
	drivers/net/macsec.c

Change-Id: Ibc2583ddd810808fa9ce619e71935aeb5f97805a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2024-11-21 22:24:39 +00:00
commit c515597aec
34 changed files with 240 additions and 190 deletions

View File

@@ -77,7 +77,7 @@ &gpio {
};
&hdmi {
hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>;
power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
status = "okay";
};

View File

@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=--no-undefined -X
LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour

View File

@@ -10,11 +10,9 @@
#include <asm/insn.h>
#include <asm/probes.h>
#define MAX_UINSN_BYTES AARCH64_INSN_SIZE
#define UPROBE_SWBP_INSN BRK64_OPCODE_UPROBES
#define UPROBE_SWBP_INSN cpu_to_le32(BRK64_OPCODE_UPROBES)
#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
#define UPROBE_XOL_SLOT_BYTES AARCH64_INSN_SIZE
typedef u32 uprobe_opcode_t;
@@ -23,8 +21,8 @@ struct arch_uprobe_task {
struct arch_uprobe {
union {
u8 insn[MAX_UINSN_BYTES];
u8 ixol[MAX_UINSN_BYTES];
__le32 insn;
__le32 ixol;
};
struct arch_probe_insn api;
bool simulate;

View File

@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
switch (arm_probe_decode_insn(insn, &auprobe->api)) {
case INSN_REJECTED:
@@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
insn = le32_to_cpu(auprobe->insn);
addr = instruction_pointer(regs);
if (auprobe->api.handler)

View File

@@ -75,6 +75,7 @@ struct perf_sf_sde_regs {
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
#define perf_arch_fetch_caller_regs(regs, __ip) do { \
(regs)->psw.mask = 0; \
(regs)->psw.addr = (__ip); \
(regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
offsetof(struct stack_frame, back_chain); \

View File

@@ -794,46 +794,102 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
return 1;
}
static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *pages, unsigned long nr_pages,
/**
* guest_range_to_gpas() - Calculate guest physical addresses of page fragments
* covering a logical range
* @vcpu: virtual cpu
* @ga: guest address, start of range
* @ar: access register
* @gpas: output argument, may be NULL
* @len: length of range in bytes
* @asce: address-space-control element to use for translation
* @mode: access mode
*
* Translate a logical range to a series of guest absolute addresses,
* such that the concatenation of page fragments starting at each gpa make up
* the whole range.
* The translation is performed as if done by the cpu for the given @asce, @ar,
* @mode and state of the @vcpu.
* If the translation causes an exception, its program interruption code is
* returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
* such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
* a correct exception into the guest.
* The resulting gpas are stored into @gpas, unless it is NULL.
*
* Note: All fragments except the first one start at the beginning of a page.
* When deriving the boundaries of a fragment from a gpa, all but the last
* fragment end at the end of the page.
*
* Return:
* * 0 - success
* * <0 - translation could not be performed, for example if guest
* memory could not be accessed
* * >0 - an access exception occurred. In this case the returned value
* is the program interruption code and the contents of pgm may
* be used to inject an exception into the guest.
*/
static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
unsigned long *gpas, unsigned long len,
const union asce asce, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned int offset = offset_in_page(ga);
unsigned int fragment_len;
int lap_enabled, rc = 0;
enum prot_type prot;
unsigned long gpa;
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
while (min(PAGE_SIZE - offset, len) > 0) {
fragment_len = min(PAGE_SIZE - offset, len);
ga = kvm_s390_logical_to_effective(vcpu, ga);
if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
PROT_TYPE_LA);
ga &= PAGE_MASK;
if (psw_bits(*psw).dat) {
rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
if (rc < 0)
return rc;
} else {
*pages = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, *pages))
gpa = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, gpa))
rc = PGM_ADDRESSING;
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
ga += PAGE_SIZE;
pages++;
nr_pages--;
if (gpas)
*gpas++ = gpa;
offset = 0;
ga += fragment_len;
len -= fragment_len;
}
return 0;
}
static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
void *data, unsigned int len)
{
const unsigned int offset = offset_in_page(gpa);
const gfn_t gfn = gpa_to_gfn(gpa);
int rc;
if (!gfn_to_memslot(kvm, gfn))
return PGM_ADDRESSING;
if (mode == GACC_STORE)
rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
else
rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
return rc;
}
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
unsigned long _len, nr_pages, gpa, idx;
unsigned long pages_array[2];
unsigned long *pages;
unsigned long nr_pages, idx;
unsigned long gpa_array[2];
unsigned int fragment_len;
unsigned long *gpas;
int need_ipte_lock;
union asce asce;
int rc;
@@ -845,50 +901,45 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
pages = pages_array;
if (nr_pages > ARRAY_SIZE(pages_array))
pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!pages)
gpas = gpa_array;
if (nr_pages > ARRAY_SIZE(gpa_array))
gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
if (!gpas)
return -ENOMEM;
need_ipte_lock = psw_bits(*psw).dat && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
for (idx = 0; idx < nr_pages && !rc; idx++) {
gpa = *(pages + idx) + (ga & ~PAGE_MASK);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
if (mode == GACC_STORE)
rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
else
rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
len -= _len;
ga += _len;
data += _len;
fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len);
len -= fragment_len;
data += fragment_len;
}
if (need_ipte_lock)
ipte_unlock(vcpu);
if (nr_pages > ARRAY_SIZE(pages_array))
vfree(pages);
if (nr_pages > ARRAY_SIZE(gpa_array))
vfree(gpas);
return rc;
}
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
void *data, unsigned long len, enum gacc_mode mode)
{
unsigned long _len, gpa;
unsigned int fragment_len;
unsigned long gpa;
int rc = 0;
while (len && !rc) {
gpa = kvm_s390_real_to_abs(vcpu, gra);
_len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
if (mode)
rc = write_guest_abs(vcpu, gpa, data, _len);
else
rc = read_guest_abs(vcpu, gpa, data, _len);
len -= _len;
gra += _len;
data += _len;
fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
len -= fragment_len;
gra += fragment_len;
data += fragment_len;
}
if (rc > 0)
vcpu->arch.pgm.code = rc;
return rc;
}
@@ -904,8 +955,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
enum prot_type prot;
union asce asce;
int rc;
@@ -913,23 +962,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
if (mode == GACC_STORE)
return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
mode, PROT_TYPE_LA);
}
if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */
rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
if (rc > 0)
return trans_exc(vcpu, rc, gva, 0, mode, prot);
} else {
*gpa = kvm_s390_real_to_abs(vcpu, gva);
if (kvm_is_error_gpa(vcpu->kvm, *gpa))
return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
}
return rc;
return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode);
}
/**
@@ -938,17 +971,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode)
{
unsigned long gpa;
unsigned long currlen;
union asce asce;
int rc = 0;
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
ipte_lock(vcpu);
while (length > 0 && !rc) {
currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
gva += currlen;
length -= currlen;
}
rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode);
ipte_unlock(vcpu);
return rc;

View File

@@ -344,11 +344,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @data (kernel space) to @gra (guest real address).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest low address and key protection are not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying from @data failed, or
* PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to guest memory.
*/
@@ -367,11 +368,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
* @len: number of bytes to copy
*
* Copy @len bytes from @gra (guest real address) to @data (kernel space).
* It is up to the caller to ensure that the entire guest memory range is
* valid memory before calling this function.
* Guest key protection is not checked.
*
* Returns zero on success or -EFAULT on error.
* Returns zero on success, -EFAULT when copying to @data failed, or
* PGM_ADRESSING in case @gra is outside a memslot. In this case, pgm check info
* is also stored to allow injecting into the guest (if applicable) using
* kvm_s390_inject_prog_cond().
*
* If an error occurs data may have been copied partially to kernel space.
*/

View File

@@ -2774,10 +2774,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_put_queue(bfqq);
}
static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
struct bfq_io_cq *bic,
struct bfq_queue *bfqq)
{
struct bfq_queue *new_bfqq = bfqq->new_bfqq;
bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
(unsigned long)new_bfqq->pid);
/* Save weight raising and idle window of the merged queues */
@@ -2845,6 +2847,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
new_bfqq->pid = -1;
bfqq->bic = NULL;
bfq_release_process_ref(bfqd, bfqq);
return new_bfqq;
}
static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -2880,14 +2884,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
* fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
new_bfqq);
/*
* If we get here, bio will be queued into new_queue,
* so use new_bfqq to decide whether bio and rq can be
* merged.
*/
bfqq = new_bfqq;
while (bfqq != new_bfqq)
bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
/*
* Change also bqfd->bio_bfqq, as
@@ -5441,6 +5439,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
struct bfq_queue *old_bfqq = bfqq;
/*
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
@@ -5456,18 +5455,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
* then complete the merge and redirect it to
* new_bfqq.
*/
if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
bfqq, new_bfqq);
if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) {
while (bfqq != new_bfqq)
bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
}
bfq_clear_bfqq_just_created(bfqq);
bfq_clear_bfqq_just_created(old_bfqq);
/*
* rq is about to be enqueued into new_bfqq,
* release rq reference on bfqq
*/
bfq_put_queue(bfqq);
bfq_put_queue(old_bfqq);
rq->elv.priv[1] = new_bfqq;
bfqq = new_bfqq;
}
bfq_update_io_thinktime(bfqd, bfqq);

View File

@@ -685,7 +685,7 @@ static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
struct drm_display_mode *mode = msm_host->mode;
u32 pclk_rate;
pclk_rate = mode->clock * 1000;
pclk_rate = mode->clock * 1000u;
/*
* For dual DSI mode, the current DRM mode has the complete width of the

View File

@@ -135,7 +135,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
flags |= VBOX_MOUSE_POINTER_VISIBLE;
}
p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
/*
* The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
* from having a 4 bytes fixed array at the end to using a proper VLA
* at the end. These 4 extra bytes were not subtracted from sizeof(*p)
* before the switch to the VLA, so this way the behavior is unchanged.
* Chances are these 4 extra bytes are not necessary but they are kept
* to avoid regressions.
*/
p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
VBVA_MOUSE_POINTER_SHAPE);
if (!p)
return -ENOMEM;

View File

@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
* Bytes in the gap between the AND and the XOR mask are undefined.
* XOR mask scanlines have no gap between them and size of XOR mask is:
* xor_len = width * 4 * height.
*
* Preallocate 4 bytes for accessing actual data as p->data.
*/
u8 data[4];
u8 data[];
} __packed;
/* pointer is visible */

View File

@@ -167,7 +167,7 @@ struct bnxt_qplib_swqe {
};
u32 q_key;
u32 dst_qp;
u16 avid;
u32 avid;
} send;
/* Send Raw Ethernet and QP1 */

View File

@@ -245,7 +245,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* failed with status */
dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
cookie, opcode, evnt->status);
rc = -EFAULT;
rc = -EIO;
}
return rc;

View File

@@ -243,6 +243,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
sginfo.pgsize = npde * pg_size;
sginfo.npages = 1;
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
if (rc)
goto fail;
/* Alloc PBL pages */
sginfo.npages = npbl;
@@ -254,22 +256,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
if (hwq_attr->type == HWQ_TYPE_MR) {
/* For MR it is expected that we supply only 1 contigous
* page i.e only 1 entry in the PDL that will contain
* all the PBLs for the user supplied memory region
*/
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
i++)
dst_virt_ptr[0][i] = src_phys_ptr[i] |
flag;
} else {
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
i++)
dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
src_phys_ptr[i] |
PTU_PDE_VALID;
}
for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
/* Alloc or init PTEs */
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
hwq_attr->sginfo);

View File

@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENOMEM;
if (n->dev->flags & IFF_LOOPBACK) {
if (iptype == 4)
pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
else if (IS_ENABLED(CONFIG_IPV6))
for_each_netdev(&init_net, pdev) {
if (ipv6_chk_addr(&init_net,
@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
err = -ENODEV;
goto out;
}
if (is_vlan_dev(pdev))
pdev = vlan_dev_real_dev(pdev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, rt_tos2priority(tos));
if (!ep->l2t) {
dev_put(pdev);
if (!ep->l2t)
goto out;
}
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
ep->rss_qid = cdev->rdev.lldi.rxq_ids[
cxgb4_port_idx(pdev) * step];
set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
dev_put(pdev);
} else {
pdev = get_real_dev(n->dev);
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,

View File

@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len > MAX_FRAME_SIZE)) {
dev->stats.tx_errors++;
goto out;
goto len_error;
}
/* Save skb pointer. */
@@ -575,6 +575,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
map_error:
if (net_ratelimit())
dev_warn(greth->dev, "Could not create TX DMA mapping\n");
len_error:
dev_kfree_skb(skb);
out:
return err;

View File

@@ -1343,6 +1343,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
dev_kfree_skb_any(skb);
goto out;
}

View File

@@ -727,7 +727,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
if (!dev->queues) {
dev->transport->free_device(dev);
hba->backend->ops->free_device(dev);
return NULL;
}

View File

@@ -1988,7 +1988,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
}
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
TCMU_MCGRP_CONFIG, GFP_KERNEL);
TCMU_MCGRP_CONFIG);
/* Wait during an add as the listener may not be up yet */
if (ret == 0 ||

View File

@@ -561,6 +561,7 @@ static void typec_altmode_release(struct device *dev)
typec_altmode_put_partner(alt);
altmode_id_remove(alt->adev.dev.parent, alt->id);
put_device(alt->adev.dev.parent);
kfree(alt);
}
@@ -610,6 +611,8 @@ typec_register_altmode(struct device *parent,
alt->adev.dev.type = &typec_altmode_dev_type;
dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);
get_device(alt->adev.dev.parent);
/* Link partners and plugs with the ports */
if (!is_port)
typec_altmode_set_partner(alt);

View File

@@ -2961,6 +2961,15 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
return rc;
if (indatalen) {
unsigned int len;
if (WARN_ON_ONCE(smb3_encryption_required(tcon) &&
(check_add_overflow(total_len - 1,
ALIGN(indatalen, 8), &len) ||
len > MAX_CIFS_SMALL_BUFFER_SIZE))) {
cifs_small_buf_release(req);
return -EIO;
}
/*
* indatalen is usually small at a couple of bytes max, so
* just allocate through generic pool

View File

@@ -146,13 +146,11 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
goto out;
/*
* may_open() has already checked for this, so it should be
* impossible to trip now. But we need to be extra cautious
* and check again at the very end too.
* Check do_open_execat() for an explanation.
*/
error = -EACCES;
if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
path_noexec(&file->f_path)))
if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
path_noexec(&file->f_path))
goto exit;
fsnotify_open(file);
@@ -921,16 +919,16 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
file = do_filp_open(fd, name, &open_exec_flags);
if (IS_ERR(file))
goto out;
return file;
/*
* may_open() has already checked for this, so it should be
* impossible to trip now. But we need to be extra cautious
* and check again at the very end too.
* In the past the regular type check was here. It moved to may_open() in
* 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
* an invariant that all non-regular files error out before we get here.
*/
err = -EACCES;
if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
path_noexec(&file->f_path)))
if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
path_noexec(&file->f_path))
goto exit;
err = deny_write_access(file);
@@ -940,7 +938,6 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
if (name->name[0] != '\0')
fsnotify_open(file);
out:
return file;
exit:

View File

@@ -94,7 +94,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
if (offset + ret > dio->i_size &&
!(dio->flags & IOMAP_DIO_WRITE))
ret = dio->i_size - offset;
iocb->ki_pos += ret;
}
/*
@@ -120,15 +119,18 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
}
inode_dio_end(file_inode(iocb->ki_filp));
if (ret > 0) {
iocb->ki_pos += ret;
/*
* If this is a DSYNC write, make sure we push it to stable storage now
* that we've written data.
* If this is a DSYNC write, make sure we push it to stable
* storage now that we've written data.
*/
if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
if (dio->flags & IOMAP_DIO_NEED_SYNC)
ret = generic_write_sync(iocb, ret);
}
kfree(dio);
return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

View File

@@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
}
bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
if (!bmp->db_numag || bmp->db_numag > MAXAG) {
err = -EINVAL;
goto err_release_metapage;
}

View File

@@ -337,13 +337,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
* @flags: allocation flags
*
* This function must hold the RTNL or rcu_read_lock().
*/
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags);
unsigned int group);
/**
* genlmsg_unicast - unicast a netlink message

View File

@@ -256,7 +256,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
if (len == 0) {
trace_probe_log_err(offset, NO_EVENT_NAME);
return -EINVAL;
} else if (len > MAX_EVENT_NAME_LEN) {
} else if (len >= MAX_EVENT_NAME_LEN) {
trace_probe_log_err(offset, EVENT_TOO_LONG);
return -EINVAL;
}
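For context on the tracing hunk above (the change named in this merge's subject line): MAX_EVENT_NAME_LEN sizes a buffer that must also hold the terminating NUL character, so a name of exactly MAX_EVENT_NAME_LEN characters has to be rejected as well, hence ">=" rather than ">". Below is a minimal, hypothetical user-space sketch of that off-by-one; it is not part of the diff, and the helper name and constant value are assumptions for illustration only.

/* Illustrative sketch only -- not taken from the kernel sources. */
#include <stdio.h>
#include <string.h>

#define MAX_EVENT_NAME_LEN 64	/* assumed buffer size, including the trailing '\0' */

/* Hypothetical helper mirroring the fixed length check. */
static int event_name_fits(const char *name)
{
	size_t len = strlen(name);

	return len != 0 && len < MAX_EVENT_NAME_LEN;
}

int main(void)
{
	char name[MAX_EVENT_NAME_LEN + 1];

	memset(name, 'a', MAX_EVENT_NAME_LEN);
	name[MAX_EVENT_NAME_LEN] = '\0';	/* 64 chars: the '\0' would not fit, reject */
	printf("%d\n", event_name_fits(name));	/* prints 0 */

	name[MAX_EVENT_NAME_LEN - 1] = '\0';	/* 63 chars: fits together with the '\0' */
	printf("%d\n", event_name_fits(name));	/* prints 1 */
	return 0;
}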

View File

@@ -745,8 +745,7 @@ static int __init bnep_init(void)
if (flt[0])
BT_INFO("BNEP filters: %s", flt);
bnep_sock_init();
return 0;
return bnep_sock_init();
}
static void __exit bnep_exit(void)

View File

@@ -273,6 +273,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
/* Account for reference dev->ip_ptr (below) */
refcount_set(&in_dev->refcnt, 1);
if (dev != blackhole_netdev) {
err = devinet_sysctl_register(in_dev);
if (err) {
in_dev->dead = 1;
@@ -284,6 +285,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
ip_mc_init_dev(in_dev);
if (dev->flags & IFF_UP)
ip_mc_up(in_dev);
}
/* we can receive as soon as ip_ptr is set -- do this last */
rcu_assign_pointer(dev->ip_ptr, in_dev);
@@ -328,6 +330,19 @@ static void inetdev_destroy(struct in_device *in_dev)
call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
}
static int __init inet_blackhole_dev_init(void)
{
int err = 0;
rtnl_lock();
if (!inetdev_init(blackhole_netdev))
err = -ENOMEM;
rtnl_unlock();
return err;
}
late_initcall(inet_blackhole_dev_init);
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
const struct in_ifaddr *ifa;

View File

@@ -115,7 +115,7 @@ static int l2tp_tunnel_notify(struct genl_family *family,
NLM_F_ACK, tunnel, cmd);
if (ret >= 0) {
ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
ret = genlmsg_multicast_allns(family, msg, 0, 0);
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;
@@ -143,7 +143,7 @@ static int l2tp_session_notify(struct genl_family *family,
NLM_F_ACK, session, cmd);
if (ret >= 0) {
ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
ret = genlmsg_multicast_allns(family, msg, 0, 0);
/* We don't care if no one is listening */
if (ret == -ESRCH)
ret = 0;

View File

@@ -1097,15 +1097,11 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
if (IS_ERR(msg))
return PTR_ERR(msg);
if (!family->netnsok) {
if (!family->netnsok)
genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
0, GFP_KERNEL);
} else {
rcu_read_lock();
genlmsg_multicast_allns(&genl_ctrl, msg, 0,
0, GFP_ATOMIC);
rcu_read_unlock();
}
else
genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
return 0;
}
@@ -1414,23 +1410,23 @@ static int __init genl_init(void)
core_initcall(genl_init);
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
gfp_t flags)
static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
bool delivered = false;
int err;
rcu_read_lock();
for_each_net_rcu(net) {
if (prev) {
tmp = skb_clone(skb, flags);
tmp = skb_clone(skb, GFP_ATOMIC);
if (!tmp) {
err = -ENOMEM;
goto error;
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
portid, group, GFP_ATOMIC);
if (!err)
delivered = true;
else if (err != -ESRCH)
@@ -1439,26 +1435,30 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
prev = net;
}
err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
rcu_read_unlock();
err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
if (!err)
delivered = true;
else if (err != -ESRCH)
return err;
return delivered ? 0 : -ESRCH;
error:
rcu_read_unlock();
kfree_skb(skb);
return err;
}
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
unsigned int group, gfp_t flags)
unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
return genlmsg_mcast(skb, portid, group, flags);
return genlmsg_mcast(skb, portid, group);
}
EXPORT_SYMBOL(genlmsg_multicast_allns);

View File

@@ -744,7 +744,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
write_lock(&sn->pnetids_ndev.lock);
list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
if (smc_pnet_match(pnetid, pe->pnetid)) {
if (smc_pnet_match(pnetid, pi->pnetid)) {
refcount_inc(&pi->refcnt);
kfree(pe);
goto unlock;

View File

@@ -15883,10 +15883,8 @@ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(&nl80211_fam, msg, 0,
NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
rcu_read_unlock();
NL80211_MCGRP_REGULATORY);
return;
@@ -16394,10 +16392,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
genlmsg_end(msg, hdr);
rcu_read_lock();
genlmsg_multicast_allns(&nl80211_fam, msg, 0,
NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
rcu_read_unlock();
NL80211_MCGRP_REGULATORY);
return;

View File

@@ -490,6 +490,9 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
val_cr4 |= FSL_SAI_CR4_FRSZ(slots);
/* Set to avoid channel swap */
val_cr4 |= FSL_SAI_CR4_FCONT;
/* Set to output mode to avoid tri-stated data pins */
if (tx)
val_cr4 |= FSL_SAI_CR4_CHMOD;
@@ -515,7 +518,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
FSL_SAI_CR3_TRCE((1 << pins) - 1));
regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
FSL_SAI_CR4_CHMOD_MASK,
FSL_SAI_CR4_CHMOD_MASK | FSL_SAI_CR4_FCONT_MASK,
val_cr4);
regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |

View File

@@ -132,6 +132,7 @@
/* SAI Transmit and Receive Configuration 4 Register */
#define FSL_SAI_CR4_FCONT_MASK BIT(28)
#define FSL_SAI_CR4_FCONT BIT(28)
#define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
#define FSL_SAI_CR4_FCOMB_SOFT BIT(27)