commit c515597aec
Merge 02874ca52d ("tracing: Consider the NULL character when validating the event length") into android12-5.10-lts

Steps on the way to 5.10.229

Resolves conflicts in:
    drivers/net/macsec.c

Change-Id: Ibc2583ddd810808fa9ce619e71935aeb5f97805a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -77,7 +77,7 @@ &gpio {
 };
 
 &hdmi {
-    hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
+    hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>;
     power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
     status = "okay";
 };
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux :=--no-undefined -X
+LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer
 
 ifeq ($(CONFIG_RELOCATABLE), y)
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -10,11 +10,9 @@
 #include <asm/insn.h>
 #include <asm/probes.h>
 
-#define MAX_UINSN_BYTES		AARCH64_INSN_SIZE
-
-#define UPROBE_SWBP_INSN	BRK64_OPCODE_UPROBES
+#define UPROBE_SWBP_INSN	cpu_to_le32(BRK64_OPCODE_UPROBES)
 #define UPROBE_SWBP_INSN_SIZE	AARCH64_INSN_SIZE
-#define UPROBE_XOL_SLOT_BYTES	MAX_UINSN_BYTES
+#define UPROBE_XOL_SLOT_BYTES	AARCH64_INSN_SIZE
 
 typedef u32 uprobe_opcode_t;
 
@@ -23,8 +21,8 @@ struct arch_uprobe_task {
 
 struct arch_uprobe {
     union {
-        u8 insn[MAX_UINSN_BYTES];
-        u8 ixol[MAX_UINSN_BYTES];
+        __le32 insn;
+        __le32 ixol;
     };
     struct arch_probe_insn api;
     bool simulate;
@@ -42,7 +42,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
     else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
         return -EINVAL;
 
-    insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+    insn = le32_to_cpu(auprobe->insn);
 
     switch (arm_probe_decode_insn(insn, &auprobe->api)) {
     case INSN_REJECTED:
@@ -108,7 +108,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
     if (!auprobe->simulate)
         return false;
 
-    insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+    insn = le32_to_cpu(auprobe->insn);
     addr = instruction_pointer(regs);
 
     if (auprobe->api.handler)
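The arm64 uprobes hunks above replace a byte-array opcode with a __le32 plus cpu_to_le32()/le32_to_cpu(), so the stored breakpoint instruction has one fixed byte order on little- and big-endian builds alike. A minimal userspace sketch of the same round trip, using glibc's htole32()/le32toh() as stand-ins for the kernel helpers (the opcode value is illustrative, not BRK64_OPCODE_UPROBES):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t opcode = 0xd4200000;      /* an AArch64 BRK-class encoding, for illustration */
    uint32_t stored = htole32(opcode); /* fixed little-endian form, like cpu_to_le32() */
    uint8_t bytes[4];

    memcpy(bytes, &stored, sizeof(bytes));
    /* These four bytes are identical on little- and big-endian hosts. */
    printf("stored bytes: %02x %02x %02x %02x\n",
           bytes[0], bytes[1], bytes[2], bytes[3]);

    /* Reading back recovers the CPU-order value, like le32_to_cpu(). */
    printf("round trip ok: %d\n", le32toh(stored) == opcode);
    return 0;
}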
@@ -75,6 +75,7 @@ struct perf_sf_sde_regs {
 #define SAMPLE_FREQ_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
 
 #define perf_arch_fetch_caller_regs(regs, __ip) do {			\
+    (regs)->psw.mask = 0;						\
     (regs)->psw.addr = (__ip);						\
     (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) -	\
     offsetof(struct stack_frame, back_chain);				\
@@ -794,46 +794,102 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
     return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
-                unsigned long *pages, unsigned long nr_pages,
-                const union asce asce, enum gacc_mode mode)
+/**
+ * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
+ * covering a logical range
+ * @vcpu: virtual cpu
+ * @ga: guest address, start of range
+ * @ar: access register
+ * @gpas: output argument, may be NULL
+ * @len: length of range in bytes
+ * @asce: address-space-control element to use for translation
+ * @mode: access mode
+ *
+ * Translate a logical range to a series of guest absolute addresses,
+ * such that the concatenation of page fragments starting at each gpa make up
+ * the whole range.
+ * The translation is performed as if done by the cpu for the given @asce, @ar,
+ * @mode and state of the @vcpu.
+ * If the translation causes an exception, its program interruption code is
+ * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
+ * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
+ * a correct exception into the guest.
+ * The resulting gpas are stored into @gpas, unless it is NULL.
+ *
+ * Note: All fragments except the first one start at the beginning of a page.
+ * When deriving the boundaries of a fragment from a gpa, all but the last
+ * fragment end at the end of the page.
+ *
+ * Return:
+ * * 0		- success
+ * * <0		- translation could not be performed, for example if guest
+ *		  memory could not be accessed
+ * * >0		- an access exception occurred. In this case the returned value
+ *		  is the program interruption code and the contents of pgm may
+ *		  be used to inject an exception into the guest.
+ */
+static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
+                   unsigned long *gpas, unsigned long len,
+                   const union asce asce, enum gacc_mode mode)
 {
     psw_t *psw = &vcpu->arch.sie_block->gpsw;
+    unsigned int offset = offset_in_page(ga);
+    unsigned int fragment_len;
     int lap_enabled, rc = 0;
     enum prot_type prot;
+    unsigned long gpa;
 
     lap_enabled = low_address_protection_enabled(vcpu, asce);
-    while (nr_pages) {
+    while (min(PAGE_SIZE - offset, len) > 0) {
+        fragment_len = min(PAGE_SIZE - offset, len);
         ga = kvm_s390_logical_to_effective(vcpu, ga);
         if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
             return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
                      PROT_TYPE_LA);
-        ga &= PAGE_MASK;
         if (psw_bits(*psw).dat) {
-            rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
+            rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
             if (rc < 0)
                 return rc;
         } else {
-            *pages = kvm_s390_real_to_abs(vcpu, ga);
-            if (kvm_is_error_gpa(vcpu->kvm, *pages))
+            gpa = kvm_s390_real_to_abs(vcpu, ga);
+            if (kvm_is_error_gpa(vcpu->kvm, gpa))
                 rc = PGM_ADDRESSING;
         }
         if (rc)
             return trans_exc(vcpu, rc, ga, ar, mode, prot);
-        ga += PAGE_SIZE;
-        pages++;
-        nr_pages--;
+        if (gpas)
+            *gpas++ = gpa;
+        offset = 0;
+        ga += fragment_len;
+        len -= fragment_len;
     }
     return 0;
 }
 
+static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
+                 void *data, unsigned int len)
+{
+    const unsigned int offset = offset_in_page(gpa);
+    const gfn_t gfn = gpa_to_gfn(gpa);
+    int rc;
+
+    if (!gfn_to_memslot(kvm, gfn))
+        return PGM_ADDRESSING;
+    if (mode == GACC_STORE)
+        rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
+    else
+        rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
+    return rc;
+}
+
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
          unsigned long len, enum gacc_mode mode)
 {
     psw_t *psw = &vcpu->arch.sie_block->gpsw;
-    unsigned long _len, nr_pages, gpa, idx;
-    unsigned long pages_array[2];
-    unsigned long *pages;
+    unsigned long nr_pages, idx;
+    unsigned long gpa_array[2];
+    unsigned int fragment_len;
+    unsigned long *gpas;
     int need_ipte_lock;
     union asce asce;
     int rc;
@@ -845,50 +901,45 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
     if (rc)
         return rc;
     nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
-    pages = pages_array;
-    if (nr_pages > ARRAY_SIZE(pages_array))
-        pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
-    if (!pages)
+    gpas = gpa_array;
+    if (nr_pages > ARRAY_SIZE(gpa_array))
+        gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
+    if (!gpas)
         return -ENOMEM;
     need_ipte_lock = psw_bits(*psw).dat && !asce.r;
     if (need_ipte_lock)
         ipte_lock(vcpu);
-    rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
+    rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
     for (idx = 0; idx < nr_pages && !rc; idx++) {
-        gpa = *(pages + idx) + (ga & ~PAGE_MASK);
-        _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-        if (mode == GACC_STORE)
-            rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
-        else
-            rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
-        len -= _len;
-        ga += _len;
-        data += _len;
+        fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
+        rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len);
+        len -= fragment_len;
+        data += fragment_len;
     }
     if (need_ipte_lock)
         ipte_unlock(vcpu);
-    if (nr_pages > ARRAY_SIZE(pages_array))
-        vfree(pages);
+    if (nr_pages > ARRAY_SIZE(gpa_array))
+        vfree(gpas);
     return rc;
 }
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
               void *data, unsigned long len, enum gacc_mode mode)
 {
-    unsigned long _len, gpa;
+    unsigned int fragment_len;
+    unsigned long gpa;
     int rc = 0;
 
     while (len && !rc) {
         gpa = kvm_s390_real_to_abs(vcpu, gra);
-        _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-        if (mode)
-            rc = write_guest_abs(vcpu, gpa, data, _len);
-        else
-            rc = read_guest_abs(vcpu, gpa, data, _len);
-        len -= _len;
-        gra += _len;
-        data += _len;
+        fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
+        rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
+        len -= fragment_len;
+        gra += fragment_len;
+        data += fragment_len;
     }
+    if (rc > 0)
+        vcpu->arch.pgm.code = rc;
     return rc;
 }
 
@@ -904,8 +955,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
                 unsigned long *gpa, enum gacc_mode mode)
 {
-    psw_t *psw = &vcpu->arch.sie_block->gpsw;
-    enum prot_type prot;
     union asce asce;
     int rc;
 
@@ -913,23 +962,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
     rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
     if (rc)
         return rc;
-    if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
-        if (mode == GACC_STORE)
-            return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
-                     mode, PROT_TYPE_LA);
-    }
-
-    if (psw_bits(*psw).dat && !asce.r) {	/* Use DAT? */
-        rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
-        if (rc > 0)
-            return trans_exc(vcpu, rc, gva, 0, mode, prot);
-    } else {
-        *gpa = kvm_s390_real_to_abs(vcpu, gva);
-        if (kvm_is_error_gpa(vcpu->kvm, *gpa))
-            return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
-    }
-
-    return rc;
+    return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode);
 }
 
 /**
@@ -938,17 +971,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
             unsigned long length, enum gacc_mode mode)
 {
-    unsigned long gpa;
-    unsigned long currlen;
+    union asce asce;
     int rc = 0;
 
+    rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
+    if (rc)
+        return rc;
     ipte_lock(vcpu);
-    while (length > 0 && !rc) {
-        currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
-        rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
-        gva += currlen;
-        length -= currlen;
-    }
+    rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode);
     ipte_unlock(vcpu);
 
     return rc;
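The new guest_range_to_gpas() above covers a byte range with page fragments: the first fragment runs from the starting offset to its page boundary, every later fragment starts page aligned, and the final one may be short. A standalone sketch of that fragment arithmetic (PAGE_SIZE and offset_in_page() are local stand-ins here, and the addresses are made up):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(a) ((a) & (PAGE_SIZE - 1)) /* local stand-in */

/* Print the fragments covering [ga, ga + len), mirroring the loop shape
 * of guest_range_to_gpas(): min(PAGE_SIZE - offset, len) per iteration. */
static void split_range(unsigned long ga, unsigned long len)
{
    unsigned long offset = offset_in_page(ga);
    unsigned long fragment_len;

    while (len > 0) {
        fragment_len = PAGE_SIZE - offset;
        if (fragment_len > len)
            fragment_len = len;
        printf("fragment at %#lx, %lu bytes\n", ga, fragment_len);
        ga += fragment_len;
        len -= fragment_len;
        offset = 0; /* all later fragments are page aligned */
    }
}

int main(void)
{
    split_range(0x1ff0, 0x30); /* 16 bytes in one page, 32 in the next */
    return 0;
}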
@@ -344,11 +344,12 @@ int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
  * @len: number of bytes to copy
  *
  * Copy @len bytes from @data (kernel space) to @gra (guest real address).
- * It is up to the caller to ensure that the entire guest memory range is
- * valid memory before calling this function.
  * Guest low address and key protection are not checked.
  *
- * Returns zero on success or -EFAULT on error.
+ * Returns zero on success, -EFAULT when copying from @data failed, or
+ * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
+ * is also stored to allow injecting into the guest (if applicable) using
+ * kvm_s390_inject_prog_cond().
  *
  * If an error occurs data may have been copied partially to guest memory.
  */
@@ -367,11 +368,12 @@ int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
  * @len: number of bytes to copy
  *
  * Copy @len bytes from @gra (guest real address) to @data (kernel space).
- * It is up to the caller to ensure that the entire guest memory range is
- * valid memory before calling this function.
  * Guest key protection is not checked.
  *
- * Returns zero on success or -EFAULT on error.
+ * Returns zero on success, -EFAULT when copying to @data failed, or
+ * PGM_ADDRESSING in case @gra is outside a memslot. In this case, pgm check info
+ * is also stored to allow injecting into the guest (if applicable) using
+ * kvm_s390_inject_prog_cond().
  *
  * If an error occurs data may have been copied partially to kernel space.
  */
@@ -2774,10 +2774,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
     bfq_put_queue(bfqq);
 }
 
-static void
-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-        struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
+                     struct bfq_io_cq *bic,
+                     struct bfq_queue *bfqq)
 {
+    struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+
     bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
         (unsigned long)new_bfqq->pid);
     /* Save weight raising and idle window of the merged queues */
@@ -2845,6 +2847,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
     new_bfqq->pid = -1;
     bfqq->bic = NULL;
     bfq_release_process_ref(bfqd, bfqq);
+
+    return new_bfqq;
 }
 
 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -2880,14 +2884,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
          * fulfilled, i.e., bic can be redirected to new_bfqq
          * and bfqq can be put.
          */
-        bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
-                new_bfqq);
-        /*
-         * If we get here, bio will be queued into new_queue,
-         * so use new_bfqq to decide whether bio and rq can be
-         * merged.
-         */
-        bfqq = new_bfqq;
+        while (bfqq != new_bfqq)
+            bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
 
         /*
          * Change also bqfd->bio_bfqq, as
@@ -5441,6 +5439,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
     bool waiting, idle_timer_disabled = false;
 
     if (new_bfqq) {
+        struct bfq_queue *old_bfqq = bfqq;
         /*
          * Release the request's reference to the old bfqq
          * and make sure one is taken to the shared queue.
@@ -5456,18 +5455,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
          * then complete the merge and redirect it to
          * new_bfqq.
          */
-        if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
-            bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
-                    bfqq, new_bfqq);
+        if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) {
+            while (bfqq != new_bfqq)
+                bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
+        }
 
-        bfq_clear_bfqq_just_created(bfqq);
+        bfq_clear_bfqq_just_created(old_bfqq);
         /*
          * rq is about to be enqueued into new_bfqq,
          * release rq reference on bfqq
          */
-        bfq_put_queue(bfqq);
+        bfq_put_queue(old_bfqq);
         rq->elv.priv[1] = new_bfqq;
-        bfqq = new_bfqq;
     }
 
     bfq_update_io_thinktime(bfqd, bfqq);
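The bfq hunks above turn a single bfq_merge_bfqqs() call into a loop, because a queue's new_bfqq pointer can itself lead to a queue that was merged further down a chain. A toy sketch of walking such a chain one hop at a time (the types and names are illustrative, not the bfq API):

#include <stdio.h>
#include <stddef.h>

struct queue {
    int id;
    struct queue *new_queue; /* next hop in the merge chain, or NULL */
};

/* One merge step: hand over to the next queue in the chain,
 * mirroring "bfqq = bfq_merge_bfqqs(...)" returning bfqq->new_bfqq. */
static struct queue *merge_step(struct queue *q)
{
    printf("merging queue %d into queue %d\n", q->id, q->new_queue->id);
    return q->new_queue;
}

int main(void)
{
    struct queue c = { 3, NULL };
    struct queue b = { 2, &c };
    struct queue a = { 1, &b };
    struct queue *q = &a, *target = &c;

    while (q != target)     /* like: while (bfqq != new_bfqq) */
        q = merge_step(q);
    printf("settled on queue %d\n", q->id);
    return 0;
}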
@@ -685,7 +685,7 @@ static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
     struct drm_display_mode *mode = msm_host->mode;
     u32 pclk_rate;
 
-    pclk_rate = mode->clock * 1000;
+    pclk_rate = mode->clock * 1000u;
 
     /*
      * For dual DSI mode, the current DRM mode has the complete width of the
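The 1000u suffix above makes the kHz-to-Hz multiplication unsigned: for a large enough mode->clock (an int), clock * 1000 in signed arithmetic would be undefined behaviour, while the unsigned form is well defined and the result is assigned to a u32 anyway. A hedged sketch with a made-up clock value:

#include <stdio.h>

int main(void)
{
    int clock_khz = 2500000;	/* hypothetical 2.5 GHz pixel clock, in kHz */

    /* clock_khz * 1000 would overflow a signed int (undefined behaviour);
     * the u suffix promotes the arithmetic to unsigned, which is defined. */
    unsigned int pclk_rate = clock_khz * 1000u;

    printf("pclk_rate = %u Hz\n", pclk_rate);
    return 0;
}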
@@ -135,7 +135,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
         flags |= VBOX_MOUSE_POINTER_VISIBLE;
     }
 
-    p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+    /*
+     * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
+     * from having a 4 bytes fixed array at the end to using a proper VLA
+     * at the end. These 4 extra bytes were not subtracted from sizeof(*p)
+     * before the switch to the VLA, so this way the behavior is unchanged.
+     * Chances are these 4 extra bytes are not necessary but they are kept
+     * to avoid regressions.
+     */
+    p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
                    VBVA_MOUSE_POINTER_SHAPE);
     if (!p)
         return -ENOMEM;
@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
      * Bytes in the gap between the AND and the XOR mask are undefined.
      * XOR mask scanlines have no gap between them and size of XOR mask is:
      * xor_len = width * 4 * height.
-     *
-     * Preallocate 4 bytes for accessing actual data as p->data.
      */
-    u8 data[4];
+    u8 data[];
 } __packed;
 
 /* pointer is visible */
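With the switch to a flexible array member above, u8 data[] no longer contributes to sizeof(*p), which is why hgsmi_buffer_alloc() gains an explicit + 4 to keep the allocation size byte-for-byte unchanged. A toy struct (not the VirtualBox one) makes the sizeof difference visible:

#include <stdio.h>
#include <stdint.h>

struct shape_fixed {
    uint32_t flags;
    uint8_t data[4];	/* old style: 4 bytes counted inside sizeof */
};

struct shape_vla {
    uint32_t flags;
    uint8_t data[];	/* flexible array: 0 bytes inside sizeof */
};

int main(void)
{
    size_t pixel_len = 100;

    printf("sizeof fixed: %zu, sizeof vla: %zu\n",
           sizeof(struct shape_fixed), sizeof(struct shape_vla));
    /* The "+ 4" keeps the total equal to the pre-VLA allocation size. */
    printf("old total: %zu, new total: %zu\n",
           sizeof(struct shape_fixed) + pixel_len,
           sizeof(struct shape_vla) + pixel_len + 4);
    return 0;
}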
@@ -167,7 +167,7 @@ struct bnxt_qplib_swqe {
         };
         u32 q_key;
         u32 dst_qp;
-        u16 avid;
+        u32 avid;
     } send;
 
     /* Send Raw Ethernet and QP1 */
@@ -245,7 +245,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
         /* failed with status */
         dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
             cookie, opcode, evnt->status);
-        rc = -EFAULT;
+        rc = -EIO;
     }
 
     return rc;
@@ -243,6 +243,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
         sginfo.pgsize = npde * pg_size;
         sginfo.npages = 1;
         rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+        if (rc)
+            goto fail;
 
         /* Alloc PBL pages */
         sginfo.npages = npbl;
@@ -254,22 +256,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
         dst_virt_ptr =
             (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
         src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
-        if (hwq_attr->type == HWQ_TYPE_MR) {
-        /* For MR it is expected that we supply only 1 contigous
-         * page i.e only 1 entry in the PDL that will contain
-         * all the PBLs for the user supplied memory region
-         */
-            for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
-                 i++)
-                dst_virt_ptr[0][i] = src_phys_ptr[i] |
-                    flag;
-        } else {
-            for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
-                 i++)
-                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
-                    src_phys_ptr[i] |
-                    PTU_PDE_VALID;
-        }
+        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+            dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
         /* Alloc or init PTEs */
         rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
                  hwq_attr->sginfo);
@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
     err = -ENOMEM;
     if (n->dev->flags & IFF_LOOPBACK) {
         if (iptype == 4)
-            pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+            pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
         else if (IS_ENABLED(CONFIG_IPV6))
             for_each_netdev(&init_net, pdev) {
                 if (ipv6_chk_addr(&init_net,
@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
             err = -ENODEV;
             goto out;
         }
+        if (is_vlan_dev(pdev))
+            pdev = vlan_dev_real_dev(pdev);
         ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
                     n, pdev, rt_tos2priority(tos));
-        if (!ep->l2t) {
-            dev_put(pdev);
+        if (!ep->l2t)
             goto out;
-        }
         ep->mtu = pdev->mtu;
         ep->tx_chan = cxgb4_port_chan(pdev);
         ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
         ep->rss_qid = cdev->rdev.lldi.rxq_ids[
             cxgb4_port_idx(pdev) * step];
         set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
-        dev_put(pdev);
     } else {
         pdev = get_real_dev(n->dev);
         ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
     if (unlikely(skb->len > MAX_FRAME_SIZE)) {
         dev->stats.tx_errors++;
-        goto out;
+        goto len_error;
     }
 
     /* Save skb pointer. */
@@ -575,6 +575,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 map_error:
     if (net_ratelimit())
         dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+len_error:
     dev_kfree_skb(skb);
 out:
     return err;
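The greth hunks add a len_error label so the oversized-frame path frees the skb instead of leaking it, with map_error falling through to the same cleanup. The general error-ladder shape, sketched with malloc/free standing in for skb handling and a simulated mapping failure:

#include <stdio.h>
#include <stdlib.h>

#define MAX_FRAME_SIZE 1518

/* Error-ladder sketch: later failures jump to labels that undo exactly
 * the state that exists at their jump sites, so cleanup lives in one place. */
static int xmit(size_t len)
{
    int err = -1;
    char *buf = malloc(len);

    if (!buf)
        goto out;		/* nothing allocated yet */
    if (len > MAX_FRAME_SIZE)
        goto len_error;		/* buf exists and must be freed */
    if (len % 2)		/* stand-in for a DMA mapping failure */
        goto map_error;
    printf("sent %zu bytes\n", len);
    free(buf);			/* consumed on success */
    return 0;
map_error:
    fprintf(stderr, "could not create TX DMA mapping\n");
len_error:
    free(buf);			/* shared error-path cleanup */
out:
    return err;
}

int main(void)
{
    xmit(64);
    xmit(4000);	/* oversized: freed via len_error, no leak */
    return 0;
}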
@@ -1343,6 +1343,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
         netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
               skb->data, skb_len);
         ret = NETDEV_TX_OK;
+        dev_kfree_skb_any(skb);
         goto out;
     }
 
@@ -727,7 +727,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 
     dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
     if (!dev->queues) {
-        dev->transport->free_device(dev);
+        hba->backend->ops->free_device(dev);
         return NULL;
     }
 
@@ -1988,7 +1988,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
     }
 
     ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
-                      TCMU_MCGRP_CONFIG, GFP_KERNEL);
+                      TCMU_MCGRP_CONFIG);
 
     /* Wait during an add as the listener may not be up yet */
     if (ret == 0 ||
@@ -561,6 +561,7 @@ static void typec_altmode_release(struct device *dev)
         typec_altmode_put_partner(alt);
 
     altmode_id_remove(alt->adev.dev.parent, alt->id);
+    put_device(alt->adev.dev.parent);
     kfree(alt);
 }
 
@@ -610,6 +611,8 @@ typec_register_altmode(struct device *parent,
     alt->adev.dev.type = &typec_altmode_dev_type;
     dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);
 
+    get_device(alt->adev.dev.parent);
+
     /* Link partners and plugs with the ports */
     if (!is_port)
         typec_altmode_set_partner(alt);
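The typec hunks pair a get_device() on the parent at registration with a put_device() in the release callback, so the parent cannot go away while the altmode still references it. The discipline, sketched with a plain reference counter rather than the driver-core API:

#include <stdio.h>
#include <stdlib.h>

struct object {
    int refcnt;
    const char *name;
};

static struct object *get_obj(struct object *o)
{
    o->refcnt++;		/* like get_device() at registration */
    return o;
}

static void put_obj(struct object *o)
{
    if (--o->refcnt == 0) {	/* like put_device() in release() */
        printf("freeing %s\n", o->name);
        free(o);
    }
}

int main(void)
{
    struct object *parent = malloc(sizeof(*parent));

    parent->refcnt = 1;		/* creator's reference */
    parent->name = "parent";

    get_obj(parent);		/* child registration pins the parent */
    put_obj(parent);		/* creator drops its reference ... */
    put_obj(parent);		/* ... parent survives until the child releases */
    return 0;
}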
@@ -2961,6 +2961,15 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
         return rc;
 
     if (indatalen) {
+        unsigned int len;
+
+        if (WARN_ON_ONCE(smb3_encryption_required(tcon) &&
+                 (check_add_overflow(total_len - 1,
+                             ALIGN(indatalen, 8), &len) ||
+                  len > MAX_CIFS_SMALL_BUFFER_SIZE))) {
+            cifs_small_buf_release(req);
+            return -EIO;
+        }
         /*
          * indatalen is usually small at a couple of bytes max, so
          * just allocate through generic pool
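check_add_overflow() in the new SMB2_ioctl_init() guard stores the (wrapped) sum through its third argument and returns true if the addition overflowed; on current compilers it is implemented via the compiler's overflow builtins. A userspace sketch using __builtin_add_overflow(), the primitive underneath the kernel macro:

#include <stdio.h>
#include <limits.h>

int main(void)
{
    unsigned int len;

    /* No overflow: returns 0 and stores the sum. */
    if (!__builtin_add_overflow(100u, 28u, &len))
        printf("ok, len = %u\n", len);

    /* Overflow: returns 1, so a caller can reject the request. */
    if (__builtin_add_overflow(UINT_MAX - 3, 8u, &len))
        printf("overflow detected, rejecting\n");

    return 0;
}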
fs/exec.c
@@ -146,13 +146,11 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
         goto out;
 
     /*
-     * may_open() has already checked for this, so it should be
-     * impossible to trip now. But we need to be extra cautious
-     * and check again at the very end too.
+     * Check do_open_execat() for an explanation.
      */
     error = -EACCES;
-    if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
-             path_noexec(&file->f_path)))
+    if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
+        path_noexec(&file->f_path))
         goto exit;
 
     fsnotify_open(file);
@@ -921,16 +919,16 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
 
     file = do_filp_open(fd, name, &open_exec_flags);
     if (IS_ERR(file))
-        goto out;
+        return file;
 
     /*
-     * may_open() has already checked for this, so it should be
-     * impossible to trip now. But we need to be extra cautious
-     * and check again at the very end too.
+     * In the past the regular type check was here. It moved to may_open() in
+     * 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
+     * an invariant that all non-regular files error out before we get here.
      */
     err = -EACCES;
-    if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
-             path_noexec(&file->f_path)))
+    if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
+        path_noexec(&file->f_path))
         goto exit;
 
     err = deny_write_access(file);
@@ -940,7 +938,6 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
     if (name->name[0] != '\0')
         fsnotify_open(file);
 
-out:
     return file;
 
 exit:
@@ -94,7 +94,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
         if (offset + ret > dio->i_size &&
             !(dio->flags & IOMAP_DIO_WRITE))
             ret = dio->i_size - offset;
-        iocb->ki_pos += ret;
     }
 
     /*
@@ -120,15 +119,18 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
     }
 
     inode_dio_end(file_inode(iocb->ki_filp));
-    /*
-     * If this is a DSYNC write, make sure we push it to stable storage now
-     * that we've written data.
-     */
-    if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
-        ret = generic_write_sync(iocb, ret);
 
+    if (ret > 0) {
+        iocb->ki_pos += ret;
+
+        /*
+         * If this is a DSYNC write, make sure we push it to stable
+         * storage now that we've written data.
+         */
+        if (dio->flags & IOMAP_DIO_NEED_SYNC)
+            ret = generic_write_sync(iocb, ret);
+    }
     kfree(dio);
 
     return ret;
 }
 EXPORT_SYMBOL_GPL(iomap_dio_complete);
@@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
     }
 
     bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
-    if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
+    if (!bmp->db_numag || bmp->db_numag > MAXAG) {
         err = -EINVAL;
         goto err_release_metapage;
     }
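The dbMount() fix relaxes >= to > because db_numag is a count of allocation groups, not an index: an array sized MAXAG legitimately holds MAXAG entries, so only counts above MAXAG (or zero) are invalid. A small sketch of the count-versus-index distinction (MAXAG's real value differs; 128 is illustrative):

#include <stdio.h>

#define MAXAG 128

int main(void)
{
    int agtable[MAXAG];
    int numag = MAXAG;	/* a count of in-use entries: MAXAG is legal */

    /* The count check: only numag == 0 or numag > MAXAG is invalid. */
    if (numag == 0 || numag > MAXAG) {
        printf("invalid count\n");
        return 1;
    }

    /* Indices, by contrast, must stay strictly below MAXAG. */
    for (int i = 0; i < numag; i++)
        agtable[i] = i;
    printf("last valid index: %d\n", agtable[numag - 1]);
    return 0;
}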
@@ -337,13 +337,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
  * @skb: netlink message as socket buffer
  * @portid: own netlink portid to avoid sending to yourself
  * @group: offset of multicast group in groups array
- * @flags: allocation flags
  *
  * This function must hold the RTNL or rcu_read_lock().
  */
 int genlmsg_multicast_allns(const struct genl_family *family,
                 struct sk_buff *skb, u32 portid,
-                unsigned int group, gfp_t flags);
+                unsigned int group);
 
 /**
  * genlmsg_unicast - unicast a netlink message
@@ -256,7 +256,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
     if (len == 0) {
         trace_probe_log_err(offset, NO_EVENT_NAME);
         return -EINVAL;
-    } else if (len > MAX_EVENT_NAME_LEN) {
+    } else if (len >= MAX_EVENT_NAME_LEN) {
         trace_probe_log_err(offset, EVENT_TOO_LONG);
         return -EINVAL;
     }
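This is the headline fix of the merge: an event name of exactly MAX_EVENT_NAME_LEN characters used to pass the old > check, leaving no room for the trailing '\0' in a MAX_EVENT_NAME_LEN-sized buffer. A sketch of the off-by-one with a deliberately tiny limit (the real constant and copy path differ):

#include <stdio.h>
#include <string.h>

#define MAX_EVENT_NAME_LEN 8	/* small value to make the point visible */

static int validate(const char *name)
{
    size_t len = strlen(name);
    char buf[MAX_EVENT_NAME_LEN];

    if (len == 0 || len >= MAX_EVENT_NAME_LEN)	/* was: len > MAX... */
        return -1;	/* >= leaves one byte for the '\0' below */

    memcpy(buf, name, len);
    buf[len] = '\0';	/* safe only because len <= MAX - 1 */
    printf("accepted \"%s\"\n", buf);
    return 0;
}

int main(void)
{
    validate("event1");		/* 6 chars: fits with its terminator */
    if (validate("event123"))	/* 8 chars: would overflow buf by one */
        printf("rejected 8-char name\n");
    return 0;
}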
@@ -745,8 +745,7 @@ static int __init bnep_init(void)
     if (flt[0])
         BT_INFO("BNEP filters: %s", flt);
 
-    bnep_sock_init();
-    return 0;
+    return bnep_sock_init();
 }
 
 static void __exit bnep_exit(void)
@@ -273,17 +273,19 @@ static struct in_device *inetdev_init(struct net_device *dev)
     /* Account for reference dev->ip_ptr (below) */
     refcount_set(&in_dev->refcnt, 1);
 
-    err = devinet_sysctl_register(in_dev);
-    if (err) {
-        in_dev->dead = 1;
-        neigh_parms_release(&arp_tbl, in_dev->arp_parms);
-        in_dev_put(in_dev);
-        in_dev = NULL;
-        goto out;
+    if (dev != blackhole_netdev) {
+        err = devinet_sysctl_register(in_dev);
+        if (err) {
+            in_dev->dead = 1;
+            neigh_parms_release(&arp_tbl, in_dev->arp_parms);
+            in_dev_put(in_dev);
+            in_dev = NULL;
+            goto out;
+        }
+        ip_mc_init_dev(in_dev);
+        if (dev->flags & IFF_UP)
+            ip_mc_up(in_dev);
     }
-    ip_mc_init_dev(in_dev);
-    if (dev->flags & IFF_UP)
-        ip_mc_up(in_dev);
 
     /* we can receive as soon as ip_ptr is set -- do this last */
     rcu_assign_pointer(dev->ip_ptr, in_dev);
@@ -328,6 +330,19 @@ static void inetdev_destroy(struct in_device *in_dev)
     call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
 }
 
+static int __init inet_blackhole_dev_init(void)
+{
+    int err = 0;
+
+    rtnl_lock();
+    if (!inetdev_init(blackhole_netdev))
+        err = -ENOMEM;
+    rtnl_unlock();
+
+    return err;
+}
+late_initcall(inet_blackhole_dev_init);
+
 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
 {
     const struct in_ifaddr *ifa;
@@ -115,7 +115,7 @@ static int l2tp_tunnel_notify(struct genl_family *family,
                NLM_F_ACK, tunnel, cmd);
 
     if (ret >= 0) {
-        ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+        ret = genlmsg_multicast_allns(family, msg, 0, 0);
         /* We don't care if no one is listening */
         if (ret == -ESRCH)
             ret = 0;
@@ -143,7 +143,7 @@ static int l2tp_session_notify(struct genl_family *family,
                NLM_F_ACK, session, cmd);
 
     if (ret >= 0) {
-        ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+        ret = genlmsg_multicast_allns(family, msg, 0, 0);
         /* We don't care if no one is listening */
         if (ret == -ESRCH)
             ret = 0;
@@ -1097,15 +1097,11 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
     if (IS_ERR(msg))
         return PTR_ERR(msg);
 
-    if (!family->netnsok) {
+    if (!family->netnsok)
         genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
                     0, GFP_KERNEL);
-    } else {
-        rcu_read_lock();
-        genlmsg_multicast_allns(&genl_ctrl, msg, 0,
-                    0, GFP_ATOMIC);
-        rcu_read_unlock();
-    }
+    else
+        genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
 
     return 0;
 }
@@ -1414,23 +1410,23 @@ static int __init genl_init(void)
 
 core_initcall(genl_init);
 
-static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
-             gfp_t flags)
+static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
 {
     struct sk_buff *tmp;
     struct net *net, *prev = NULL;
     bool delivered = false;
     int err;
 
+    rcu_read_lock();
     for_each_net_rcu(net) {
         if (prev) {
-            tmp = skb_clone(skb, flags);
+            tmp = skb_clone(skb, GFP_ATOMIC);
             if (!tmp) {
                 err = -ENOMEM;
                 goto error;
             }
             err = nlmsg_multicast(prev->genl_sock, tmp,
-                          portid, group, flags);
+                          portid, group, GFP_ATOMIC);
             if (!err)
                 delivered = true;
             else if (err != -ESRCH)
@@ -1439,26 +1435,30 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 
         prev = net;
     }
+    err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
+
+    rcu_read_unlock();
 
-    err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
     if (!err)
         delivered = true;
     else if (err != -ESRCH)
         return err;
     return delivered ? 0 : -ESRCH;
 error:
+    rcu_read_unlock();
+
     kfree_skb(skb);
     return err;
 }
 
 int genlmsg_multicast_allns(const struct genl_family *family,
                 struct sk_buff *skb, u32 portid,
-                unsigned int group, gfp_t flags)
+                unsigned int group)
 {
     if (WARN_ON_ONCE(group >= family->n_mcgrps))
         return -EINVAL;
     group = family->mcgrp_offset + group;
-    return genlmsg_mcast(skb, portid, group, flags);
+    return genlmsg_mcast(skb, portid, group);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
 
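After the rework above, genlmsg_mcast() enters rcu_read_lock() itself, and since sleeping is forbidden inside that section every allocation is pinned to GFP_ATOMIC; that is why the gfp_t parameter disappears from the public genlmsg_multicast_allns() signature and from all callers. A compile-anywhere sketch of the shape, with printf stubs standing in for the real RCU primitives:

#include <stdio.h>

/* Stand-ins so the sketch compiles in userspace; in the kernel these are
 * the real RCU primitives and allocation flags. */
#define rcu_read_lock()   printf("rcu_read_lock\n")
#define rcu_read_unlock() printf("rcu_read_unlock\n")
#define GFP_ATOMIC 0	/* non-sleeping allocation class */

static int alloc_and_send(int net, int gfp)
{
    /* Under rcu_read_lock() we may not sleep, hence GFP_ATOMIC. */
    printf("send to net %d with gfp %d\n", net, gfp);
    return 0;
}

/* The helper owns the critical section: callers no longer pass a gfp_t,
 * because no allocation class other than GFP_ATOMIC is legal here. */
static int mcast_allns(void)
{
    int net;

    rcu_read_lock();
    for (net = 0; net < 3; net++)	/* like for_each_net_rcu() */
        alloc_and_send(net, GFP_ATOMIC);
    rcu_read_unlock();
    return 0;
}

int main(void)
{
    return mcast_allns();
}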
@@ -744,7 +744,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
 
     write_lock(&sn->pnetids_ndev.lock);
     list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
-        if (smc_pnet_match(pnetid, pe->pnetid)) {
+        if (smc_pnet_match(pnetid, pi->pnetid)) {
             refcount_inc(&pi->refcnt);
             kfree(pe);
             goto unlock;
@@ -15883,10 +15883,8 @@ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
 
     genlmsg_end(msg, hdr);
 
-    rcu_read_lock();
     genlmsg_multicast_allns(&nl80211_fam, msg, 0,
-                NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
-    rcu_read_unlock();
+                NL80211_MCGRP_REGULATORY);
 
     return;
 
@@ -16394,10 +16392,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
 
     genlmsg_end(msg, hdr);
 
-    rcu_read_lock();
     genlmsg_multicast_allns(&nl80211_fam, msg, 0,
-                NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
-    rcu_read_unlock();
+                NL80211_MCGRP_REGULATORY);
 
     return;
 
@@ -490,6 +490,9 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
 
     val_cr4 |= FSL_SAI_CR4_FRSZ(slots);
 
+    /* Set to avoid channel swap */
+    val_cr4 |= FSL_SAI_CR4_FCONT;
+
     /* Set to output mode to avoid tri-stated data pins */
     if (tx)
         val_cr4 |= FSL_SAI_CR4_CHMOD;
@@ -515,7 +518,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
                FSL_SAI_CR3_TRCE((1 << pins) - 1));
     regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
                FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
-               FSL_SAI_CR4_CHMOD_MASK,
+               FSL_SAI_CR4_CHMOD_MASK | FSL_SAI_CR4_FCONT_MASK,
                val_cr4);
     regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
                FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
@@ -132,6 +132,7 @@
 
 /* SAI Transmit and Receive Configuration 4 Register */
 
+#define FSL_SAI_CR4_FCONT_MASK	BIT(28)
 #define FSL_SAI_CR4_FCONT	BIT(28)
 #define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
 #define FSL_SAI_CR4_FCOMB_SOFT  BIT(27)
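regmap_update_bits(map, reg, mask, val) is a read-modify-write that only touches the bits set in mask, which is why the fsl_sai change above must also add FSL_SAI_CR4_FCONT_MASK to the mask argument: otherwise the FCONT bit set in val_cr4 would never reach the register. The core arithmetic, as a standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define FCONT_MASK BIT(28)	/* mirrors FSL_SAI_CR4_FCONT_MASK */

/* The read-modify-write at the heart of regmap_update_bits(). */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
    return (reg & ~mask) | (val & mask);
}

int main(void)
{
    uint32_t reg = 0, val = BIT(28) | BIT(3);	/* FCONT plus another bit */

    /* Mask without FCONT: bit 28 of val is silently dropped. */
    printf("without mask bit: %#x\n", update_bits(reg, BIT(3), val));

    /* Mask including FCONT_MASK: the bit lands in the register. */
    printf("with mask bit:    %#x\n",
           update_bits(reg, BIT(3) | FCONT_MASK, val));
    return 0;
}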