This is the 5.10.124 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmKzB2QACgkQONu9yGCS
aT4veQ/+KF0bH3WbgK07ZD1KTGKLzpwsIbaOLyGbiD91b2DfZgbGN6fzsZGgwALs
4B2olDy2r06aTzPWxZ4eqBms59l9kK0ES0FYlNU3LjxBYA6mtL6ryEEUjXdaX+zp
Vd4TGdAEMWKvXmpa2Vq6U5v4NBRIC85iwzS6foAkffgv+8tj2ApYQWIth4fN0mm6
d5aRAtcyPEtQ+hI8V3jufM2ZWnuzuy892xKG2WiAHXLJhJ4fOURe/CRrhIQCx8VW
53cZzAuXf3XfoK60DBvLvB856oUUTPu0MUpj6x1bSpxe25RZZDErt07cCo4FTTRM
sOaAdHSpiu3NJRoufzWPCIKLb4dlMjsaQ3QpXGazjfbd+nCVGRDHvUF1pPdUyssy
3QPXo/d8VeFbZLGwC6FJrnybF01VpGqz+89nW3sBFxg8Qx0FN061uebC6LVNgabT
9szE1n9R6QvfFFeZ4RQeiWNZTGazPR5+tdlsue/RoYB9/5bOhDOCl91qxhiqKa/M
1wgZfhxPuxrMDAMo2gWueUT6DZW/psMGDNpgrMDxuVp9EbDLJKXupDP9NOLg0RNv
ar0gCCkdxUakpg+LdN5mznpdQo+t1mYy1PB6/5c/P7xEi50rhkJFPq/K180oldZV
WtIqyp4eeOGbXsu/I+BaJV1sCIeWuia0WUuwBtExQosK/8JHb8g=
=yAJW
-----END PGP SIGNATURE-----

Merge 5.10.124 into android12-5.10-lts

Changes in 5.10.124
	9p: missing chunk of "fs/9p: Don't update file type when updating file attributes"
	nfsd: Replace use of rwsem with errseq_t
	bpf: Fix incorrect memory charge cost calculation in stack_map_alloc()
	arm64: dts: imx8mm-beacon: Enable RTS-CTS on UART3
	powerpc/kasan: Silence KASAN warnings in __get_wchan()
	ASoC: nau8822: Add operation for internal PLL off and on
	dma-debug: make things less spammy under memory pressure
	ASoC: cs42l52: Fix TLV scales for mixer controls
	ASoC: cs35l36: Update digital volume TLV
	ASoC: cs53l30: Correct number of volume levels on SX controls
	ASoC: cs42l52: Correct TLV for Bypass Volume
	ASoC: cs42l56: Correct typo in minimum level for SX volume controls
	ASoC: cs42l51: Correct minimum value for SX volume control
	ata: libata-core: fix NULL pointer deref in ata_host_alloc_pinfo()
	quota: Prevent memory allocation recursion while holding dq_lock
	ASoC: wm8962: Fix suspend while playing music
	ASoC: es8328: Fix event generation for deemphasis control
	ASoC: wm_adsp: Fix event generation for wm_adsp_fw_put()
	Input: soc_button_array - also add Lenovo Yoga Tablet2 1051F to dmi_use_low_level_irq
	scsi: vmw_pvscsi: Expand vcpuHint to 16 bits
	scsi: lpfc: Fix port stuck in bypassed state after LIP in PT2PT topology
	scsi: lpfc: Allow reduced polling rate for nvme_admin_async_event cmd completion
	scsi: ipr: Fix missing/incorrect resource cleanup in error case
	scsi: pmcraid: Fix missing resource cleanup in error case
	ALSA: hda/realtek - Add HW8326 support
	virtio-mmio: fix missing put_device() when vm_cmdline_parent registration failed
	nfc: nfcmrvl: Fix memory leak in nfcmrvl_play_deferred
	ipv6: Fix signed integer overflow in l2tp_ip6_sendmsg
	net: ethernet: mtk_eth_soc: fix misuse of mem alloc interface netdev[napi]_alloc_frag
	mellanox: mlx5: avoid uninitialized variable warning with gcc-12
	MIPS: Loongson-3: fix compile mips cpu_hwmon as module build error.
	gpio: dwapb: Don't print error on -EPROBE_DEFER
	random: credit cpu and bootloader seeds by default
	pNFS: Don't keep retrying if the server replied NFS4ERR_LAYOUTUNAVAILABLE
	pNFS: Avoid a live lock condition in pnfs_update_layout()
	clocksource: hyper-v: unexport __init-annotated hv_init_clocksource()
	i40e: Fix adding ADQ filter to TC0
	i40e: Fix calculating the number of queue pairs
	i40e: Fix call trace in setup_tx_descriptors
	Drivers: hv: vmbus: Release cpu lock in error case
	tty: goldfish: Fix free_irq() on remove
	misc: atmel-ssc: Fix IRQ check in ssc_probe
	drm/i915/reset: Fix error_state_read ptr + offset use
	nvme: use sysfs_emit instead of sprintf
	nvme: add device name to warning in uuid_show()
	mlxsw: spectrum_cnt: Reorder counter pools
	net: bgmac: Fix an erroneous kfree() in bgmac_remove()
	net: ax25: Fix deadlock caused by skb_recv_datagram in ax25_recvmsg
	arm64: ftrace: fix branch range checks
	arm64: ftrace: consistently handle PLTs.
	certs/blacklist_hashes.c: fix const confusion in certs blacklist
	block: Fix handling of offline queues in blk_mq_alloc_request_hctx()
	faddr2line: Fix overlapping text section failures, the sequel
	i2c: npcm7xx: Add check for platform_driver_register
	irqchip/gic/realview: Fix refcount leak in realview_gic_of_init
	irqchip/gic-v3: Fix error handling in gic_populate_ppi_partitions
	irqchip/gic-v3: Fix refcount leak in gic_populate_ppi_partitions
	i2c: designware: Use standard optional ref clock implementation
	mei: me: add raptor lake point S DID
	comedi: vmk80xx: fix expression for tx buffer size
	crypto: memneq - move into lib/
	USB: serial: option: add support for Cinterion MV31 with new baseline
	USB: serial: io_ti: add Agilent E5805A support
	usb: dwc2: Fix memory leak in dwc2_hcd_init
	usb: gadget: lpc32xx_udc: Fix refcount leak in lpc32xx_udc_probe
	serial: 8250: Store to lsr_save_flags after lsr read
	dm mirror log: round up region bitmap size to BITS_PER_LONG
	drm/amd/display: Cap OLED brightness per max frame-average luminance
	ext4: fix bug_on ext4_mb_use_inode_pa
	ext4: make variable "count" signed
	ext4: add reserved GDT blocks check
	KVM: arm64: Don't read a HW interrupt pending state in user context
	KVM: x86: Account a variety of miscellaneous allocations
	KVM: SVM: Use kzalloc for sev ioctl interfaces to prevent kernel data leak
	ALSA: hda/realtek: fix right sounds and mute/micmute LEDs for HP machine
	virtio-pci: Remove wrong address verification in vp_del_vqs()
	dma-direct: don't over-decrypt memory
	net/sched: act_police: more accurate MTU policing
	net: openvswitch: fix misuse of the cached connection on tuple changes
	Revert "PCI: Make pci_enable_ptm() private"
	igc: Enable PCIe PTM
	powerpc/book3e: get rid of #include <generated/compile.h>
	clk: imx8mp: fix usb_root_clk parent
	Linux 5.10.124

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I805be804872dfaf5e17d772f439ccf6d5061670e
commit 4e3458d6d3
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 123
SUBLEVEL = 124
EXTRAVERSION =
NAME = Dare mighty things
@@ -167,6 +167,7 @@ &uart3 {
pinctrl-0 = <&pinctrl_uart3>;
assigned-clocks = <&clk IMX8MM_CLK_UART3>;
assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
uart-has-rtscts;
status = "okay";
};

@@ -237,6 +238,8 @@ pinctrl_uart3: uart3grp {
fsl,pins = <
MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x40
MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x40
MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x40
MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x40
>;
};
@@ -76,6 +76,66 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
return NULL;
}

/*
* Find the address the callsite must branch to in order to reach '*addr'.
*
* Due to the limited range of 'BL' instructions, modules may be placed too far
* away to branch directly and must use a PLT.
*
* Returns true when '*addr' contains a reachable target address, or has been
* modified to contain a PLT address. Returns false otherwise.
*/
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
struct module *mod,
unsigned long *addr)
{
unsigned long pc = rec->ip;
long offset = (long)*addr - (long)pc;
struct plt_entry *plt;

/*
* When the target is within range of the 'BL' instruction, use 'addr'
* as-is and branch to that directly.
*/
if (offset >= -SZ_128M && offset < SZ_128M)
return true;

/*
* When the target is outside of the range of a 'BL' instruction, we
* must use a PLT to reach it. We can only place PLTs for modules, and
* only when module PLT support is built-in.
*/
if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
return false;

/*
* 'mod' is only set at module load time, but if we end up
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*
* NOTE: __module_text_address() must be called with preemption
* disabled, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
if (!mod) {
preempt_disable();
mod = __module_text_address(pc);
preempt_enable();
}

if (WARN_ON(!mod))
return false;

plt = get_ftrace_plt(mod, *addr);
if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
return false;
}

*addr = (unsigned long)plt;
return true;
}

/*
* Turn on the call to ftrace_caller() in instrumented function
*/
@@ -83,40 +143,9 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long pc = rec->ip;
u32 old, new;
long offset = (long)pc - (long)addr;

if (offset < -SZ_128M || offset >= SZ_128M) {
struct module *mod;
struct plt_entry *plt;

if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
return -EINVAL;

/*
* On kernels that support module PLTs, the offset between the
* branch instruction and its target may legally exceed the
* range of an ordinary relative 'bl' opcode. In this case, we
* need to branch via a trampoline in the module.
*
* NOTE: __module_text_address() must be called with preemption
* disabled, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
preempt_disable();
mod = __module_text_address(pc);
preempt_enable();

if (WARN_ON(!mod))
return -EINVAL;

plt = get_ftrace_plt(mod, addr);
if (!plt) {
pr_err("ftrace: no module PLT for %ps\n", (void *)addr);
return -EINVAL;
}

addr = (unsigned long)plt;
}
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;

old = aarch64_insn_gen_nop();
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
@@ -131,6 +160,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long pc = rec->ip;
u32 old, new;

if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
return -EINVAL;
if (!ftrace_find_callable_addr(rec, NULL, &addr))
return -EINVAL;

old = aarch64_insn_gen_branch_imm(pc, old_addr,
AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
@@ -180,54 +214,15 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
unsigned long pc = rec->ip;
bool validate = true;
u32 old = 0, new;
long offset = (long)pc - (long)addr;

if (offset < -SZ_128M || offset >= SZ_128M) {
u32 replaced;

if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
return -EINVAL;

/*
* 'mod' is only set at module load time, but if we end up
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*/
if (!mod) {
preempt_disable();
mod = __module_text_address(pc);
preempt_enable();

if (WARN_ON(!mod))
return -EINVAL;
}

/*
* The instruction we are about to patch may be a branch and
* link instruction that was redirected via a PLT entry. In
* this case, the normal validation will fail, but we can at
* least check that we are dealing with a branch and link
* instruction that points into the right module.
*/
if (aarch64_insn_read((void *)pc, &replaced))
return -EFAULT;

if (!aarch64_insn_is_bl(replaced) ||
!within_module(pc + aarch64_get_branch_offset(replaced),
mod))
return -EINVAL;

validate = false;
} else {
old = aarch64_insn_gen_branch_imm(pc, addr,
AARCH64_INSN_BRANCH_LINK);
}
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;

old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();

return ftrace_modify_code(pc, old, new, validate);
return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
@@ -418,11 +418,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
vgic_mmio_read_pending, vgic_mmio_write_spending,
NULL, vgic_uaccess_write_spending, 1,
vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
vgic_mmio_read_pending, vgic_mmio_write_cpending,
NULL, vgic_uaccess_write_cpending, 1,
vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
vgic_mmio_read_active, vgic_mmio_write_sactive,
@@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
return 0;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
static unsigned long __read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
bool is_user)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 value = 0;
@@ -248,7 +249,7 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
IRQCHIP_STATE_PENDING,
&val);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
} else if (vgic_irq_is_mapped_level(irq)) {
} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
val = vgic_get_phys_line_level(irq);
} else {
val = irq_is_pending(irq);
@@ -263,6 +264,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
return value;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
return __read_pending(vcpu, addr, len, false);
}

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
return __read_pending(vcpu, addr, len, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
return (vgic_irq_is_sgi(irq->intid) &&
@@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
@@ -2108,12 +2108,12 @@ static unsigned long __get_wchan(struct task_struct *p)
return 0;

do {
sp = *(unsigned long *)sp;
sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
p->state == TASK_RUNNING)
return 0;
if (count > 0) {
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}
@@ -18,7 +18,6 @@
#include <asm/prom.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>

struct regions {
@@ -36,10 +35,6 @@ struct regions {
int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

static __init void kaslr_get_cmdline(void *fdt)
@@ -72,7 +67,8 @@ static unsigned long __init get_boot_seed(void *fdt)
{
unsigned long hash = 0;

hash = rotate_xor(hash, build_str, sizeof(build_str));
/* build-specific string for starting entropy. */
hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

return hash;
@@ -1198,8 +1198,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;

ret = -ENOMEM;
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
save = kzalloc(sizeof(*save), GFP_KERNEL);
ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
if (!ctl || !save)
goto out_free;

@@ -537,7 +537,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
}

ret = -ENOMEM;
blob = kmalloc(params.len, GFP_KERNEL);
blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
if (!blob)
goto e_free;

@@ -676,7 +676,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
if (!IS_ALIGNED(dst_paddr, 16) ||
!IS_ALIGNED(paddr, 16) ||
!IS_ALIGNED(size, 16)) {
tpage = (void *)alloc_page(GFP_KERNEL);
tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!tpage)
return -ENOMEM;

@@ -619,7 +619,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
* evmcs in singe VM shares same assist page.
*/
if (!*p_hv_pa_pg)
*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);

if (!*p_hv_pa_pg)
return -ENOMEM;
@@ -470,6 +470,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
if (!blk_mq_hw_queue_mapped(data.hctx))
goto out_queue_exit;
cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
if (cpu >= nr_cpu_ids)
goto out_queue_exit;
data.ctx = __blk_mq_get_ctx(q, cpu);

if (!q->elevator)
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "blacklist.h"

const char __initdata *const blacklist_hashes[] = {
const char __initconst *const blacklist_hashes[] = {
#include CONFIG_SYSTEM_BLACKLIST_HASH_LIST
, NULL
};
@ -15,6 +15,7 @@ source "crypto/async_tx/Kconfig"
|
||||
#
|
||||
menuconfig CRYPTO
|
||||
tristate "Cryptographic API"
|
||||
select LIB_MEMNEQ
|
||||
help
|
||||
This option provides the core Cryptographic API.
|
||||
|
||||
|
@ -4,7 +4,7 @@
|
||||
#
|
||||
|
||||
obj-$(CONFIG_CRYPTO) += crypto.o
|
||||
crypto-y := api.o cipher.o compress.o memneq.o
|
||||
crypto-y := api.o cipher.o compress.o
|
||||
|
||||
obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
|
||||
obj-$(CONFIG_CRYPTO_FIPS) += fips.o
|
||||
|
@@ -5475,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi,
int n_ports)
{
const struct ata_port_info *pi;
const struct ata_port_info *pi = &ata_dummy_port_info;
struct ata_host *host;
int i, j;

@@ -5483,7 +5483,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev,
if (!host)
return NULL;

for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
for (i = 0, j = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];

if (ppi[j])
@@ -471,29 +471,41 @@ config ADI
and SSM (Silicon Secured Memory). Intended consumers of this
driver include crash and makedumpfile.

endmenu

config RANDOM_TRUST_CPU
bool "Trust the CPU manufacturer to initialize Linux's CRNG"
bool "Initialize RNG using CPU RNG instructions"
default y
depends on ARCH_RANDOM
default n
help
Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
for the purposes of initializing Linux's CRNG. Since this is not
something that can be independently audited, this amounts to trusting
that CPU manufacturer (perhaps with the insistence or mandate
of a Nation State's intelligence or law enforcement agencies)
has not installed a hidden back door to compromise the CPU's
random number generation facilities. This can also be configured
at boot with "random.trust_cpu=on/off".
Initialize the RNG using random numbers supplied by the CPU's
RNG instructions (e.g. RDRAND), if supported and available. These
random numbers are never used directly, but are rather hashed into
the main input pool, and this happens regardless of whether or not
this option is enabled. Instead, this option controls whether the
they are credited and hence can initialize the RNG. Additionally,
other sources of randomness are always used, regardless of this
setting. Enabling this implies trusting that the CPU can supply high
quality and non-backdoored random numbers.

Say Y here unless you have reason to mistrust your CPU or believe
its RNG facilities may be faulty. This may also be configured at
boot time with "random.trust_cpu=on/off".

config RANDOM_TRUST_BOOTLOADER
bool "Trust the bootloader to initialize Linux's CRNG"
bool "Initialize RNG using bootloader-supplied seed"
default y
help
Some bootloaders can provide entropy to increase the kernel's initial
device randomness. Say Y here to assume the entropy provided by the
booloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
only mixes the entropy pool. This can also be configured at boot with
"random.trust_bootloader=on/off".
Initialize the RNG using a seed supplied by the bootloader or boot
environment (e.g. EFI or a bootloader-generated device tree). This
seed is not used directly, but is rather hashed into the main input
pool, and this happens regardless of whether or not this option is
enabled. Instead, this option controls whether the seed is credited
and hence can initialize the RNG. Additionally, other sources of
randomness are always used, regardless of this setting. Enabling
this implies trusting that the bootloader can supply high quality and
non-backdoored seeds.

Say Y here unless you have reason to mistrust your bootloader or
believe its RNG facilities may be faulty. This may also be configured
at boot time with "random.trust_bootloader=on/off".

endmenu
@@ -691,7 +691,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0);
hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0);
hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0);
hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0);
hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0);
hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0);
hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0);

@@ -472,4 +472,3 @@ void __init hv_init_clocksource(void)
hv_sched_clock_offset = hv_read_reference_counter();
hv_setup_sched_clock(read_hv_sched_clock_msr);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);
@@ -646,10 +646,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio)
gpio->clks[1].id = "db";
err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS,
gpio->clks);
if (err) {
dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n");
return err;
}
if (err)
return dev_err_probe(gpio->dev, err,
"Cannot get APB/Debounce clocks\n");

err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
if (err) {
@@ -2141,7 +2141,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {

static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
u32 max_cll, min_cll, max, min, q, r;
u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
@@ -2164,7 +2164,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps;
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

if (caps->ext_caps->bits.oled == 1 /*||
@@ -2192,8 +2192,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
* The results of the above expressions can be verified at
* pre_computed_values.
*/
q = max_cll >> 5;
r = max_cll % 32;
q = max_avg >> 5;
r = max_avg % 32;
max = (1 << q) * pre_computed_values[r];

// min luminance: maxLum * (CV/255)^2 / 100
@@ -500,7 +500,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
ssize_t ret;
ssize_t ret = 0;

/*
* FIXME: Concurrent clients triggering resets and reading + clearing
* dumps can cause inconsistent sysfs reads when a user calls in with a
* non-zero offset to complete a prior partial read but the
* gpu_coredump has been cleared or replaced.
*/

gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
@@ -512,8 +519,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
const char *str = "No error state collected\n";
size_t len = strlen(str);

ret = min_t(size_t, count, len - off);
memcpy(buf, str + off, ret);
if (off < len) {
ret = min_t(size_t, count, len - off);
memcpy(buf, str + off, ret);
}
}

return ret;
@@ -606,6 +606,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
if (newchannel->offermsg.offer.sub_channel_index == 0) {
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
/*
* Don't call free_channel(), because newchannel->kobj
* is not initialized yet.
@@ -474,9 +474,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
{
int ret;

if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);

if (prepare) {
/* Optional interface clock */
ret = clk_prepare_enable(dev->pclk);
@@ -266,8 +266,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
}

dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_prepare_clk(dev, true)) {
dev->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
goto exit_reset;
}

ret = i2c_dw_prepare_clk(dev, true);
if (ret)
goto exit_reset;

if (dev->clk) {
u64 clk_khz;

dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
@@ -2369,8 +2369,7 @@ static struct platform_driver npcm_i2c_bus_driver = {
static int __init npcm_i2c_init(void)
{
npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL);
platform_driver_register(&npcm_i2c_bus_driver);
return 0;
return platform_driver_register(&npcm_i2c_bus_driver);
}
module_init(npcm_i2c_init);
@@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
},
{
/*
* Lenovo Yoga Tab2 1051L, something messes with the home-button
* Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
* IRQ settings, leading to a non working home-button.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
},
},
{} /* Terminating entry */
@@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent)

/* The PB11MPCore GIC needs to be configured in the syscon */
map = syscon_node_to_regmap(np);
of_node_put(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
@@ -1848,7 +1848,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)

gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
if (!gic_data.ppi_descs)
return;
goto out_put_node;

nr_parts = of_get_child_count(parts_node);

@@ -1889,12 +1889,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
continue;

cpu = of_cpu_node_to_id(cpu_node);
if (WARN_ON(cpu < 0))
if (WARN_ON(cpu < 0)) {
of_node_put(cpu_node);
continue;
}

pr_cont("%pOF[%d] ", cpu_node, cpu);

cpumask_set_cpu(cpu, &part->mask);
of_node_put(cpu_node);
}

pr_cont("}\n");
@@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
/*
* Work out how many "unsigned long"s we need to hold the bitset.
*/
bitset_size = dm_round_up(region_count,
sizeof(*lc->clean_bits) << BYTE_SHIFT);
bitset_size = dm_round_up(region_count, BITS_PER_LONG);
bitset_size >>= BYTE_SHIFT;

lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
@@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev)
clk_disable_unprepare(ssc->clk);

ssc->irq = platform_get_irq(pdev, 0);
if (!ssc->irq) {
if (ssc->irq < 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
return -ENXIO;
return ssc->irq;
}

mutex_lock(&user_lock);
@@ -109,6 +109,8 @@
#define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */
#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */

#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */

/*
* MEI HW Section
*/
@@ -115,6 +115,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},

{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},

/* required last entry */
{0, }
};
@@ -323,7 +323,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
kfree(bgmac);
}

static struct bcma_driver bgmac_bcma_driver = {
@@ -2582,15 +2582,16 @@ static void i40e_diag_test(struct net_device *netdev,

set_bit(__I40E_TESTING, pf->state);

if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
dev_warn(&pf->pdev->dev,
"Cannot start offline testing when PF is in reset state.\n");
goto skip_ol_tests;
}

if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}

@@ -2637,9 +2638,17 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
}

skip_ol_tests:

netif_info(pf, drv, netdev, "testing finished\n");
return;

skip_ol_tests:
data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, pf->state);
netif_info(pf, drv, netdev, "testing failed\n");
}

static void i40e_get_wol(struct net_device *netdev,
@@ -8163,6 +8163,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}

if (!tc) {
dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
return -EINVAL;
}

if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
@@ -2228,7 +2228,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}

if (vf->adq_enabled) {
for (i = 0; i < I40E_MAX_VF_VSI; i++)
for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
aq_ret = I40E_ERR_PARAM;
@@ -9,6 +9,7 @@
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <net/pkt_sched.h>

#include <net/ipv6.h>
@@ -5041,6 +5042,10 @@ static int igc_probe(struct pci_dev *pdev,

pci_enable_pcie_error_reporting(pdev);

err = pci_enable_ptm(pdev, NULL);
if (err < 0)
dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");

pci_set_master(pdev);

err = -ENOMEM;
@@ -806,6 +806,17 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
unsigned long data;

data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
get_order(size));

return (void *)data;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
@@ -1303,7 +1314,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;

/* alloc new buffer */
new_data = napi_alloc_frag(ring->frag_size);
if (ring->frag_size <= PAGE_SIZE)
new_data = napi_alloc_frag(ring->frag_size);
else
new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
@@ -1700,7 +1714,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;

for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (ring->frag_size <= PAGE_SIZE)
ring->data[i] = netdev_alloc_frag(ring->frag_size);
else
ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
@@ -274,7 +274,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker;
struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;

@@ -8,8 +8,8 @@
#include "spectrum.h"

enum mlxsw_sp_counter_sub_pool_id {
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
MLXSW_SP_COUNTER_SUB_POOL_RIF,
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};

int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
@@ -401,13 +401,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
int err;

while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
usb_anchor_urb(urb, &drv_data->tx_anchor);

err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
if (err) {
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
usb_free_urb(urb);
break;
}

drv_data->tx_in_flight++;
usb_free_urb(urb);
}

/* Cleanup the rest deferred urbs. */
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
kfree(urb->setup_packet);
usb_free_urb(urb);
}
usb_scuttle_anchored_urbs(&drv_data->deferred);
}

static int nfcmrvl_resume(struct usb_interface *intf)
@@ -2813,8 +2813,8 @@ static ssize_t subsys_##field##_show(struct device *dev, \
{ \
struct nvme_subsystem *subsys = \
container_of(dev, struct nvme_subsystem, dev); \
return sprintf(buf, "%.*s\n", \
(int)sizeof(subsys->field), subsys->field); \
return sysfs_emit(buf, "%.*s\n", \
(int)sizeof(subsys->field), subsys->field); \
} \
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

@@ -3335,13 +3335,13 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
int model_len = sizeof(subsys->model);

if (!uuid_is_null(&ids->uuid))
return sprintf(buf, "uuid.%pU\n", &ids->uuid);
return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
return sprintf(buf, "eui.%16phN\n", ids->nguid);
return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
return sprintf(buf, "eui.%8phN\n", ids->eui64);
return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
subsys->serial[serial_len - 1] == '\0'))
@@ -3350,7 +3350,7 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
subsys->model[model_len - 1] == '\0'))
model_len--;

return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
serial_len, subsys->serial, model_len, subsys->model,
head->ns_id);
}
@@ -3359,7 +3359,7 @@ static DEVICE_ATTR_RO(wwid);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR_RO(nguid);

@@ -3372,25 +3372,25 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
* we have no UUID set
*/
if (uuid_is_null(&ids->uuid)) {
printk_ratelimited(KERN_WARNING
"No UUID available providing old NGUID\n");
return sprintf(buf, "%pU\n", ids->nguid);
dev_warn_ratelimited(dev,
"No UUID available providing old NGUID\n");
return sysfs_emit(buf, "%pU\n", ids->nguid);
}
return sprintf(buf, "%pU\n", &ids->uuid);
return sysfs_emit(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR_RO(eui);

static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR_RO(nsid);

@@ -3455,7 +3455,7 @@ static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
return sprintf(buf, "%.*s\n", \
return sysfs_emit(buf, "%.*s\n", \
(int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
@@ -3469,7 +3469,7 @@ static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
return sprintf(buf, "%d\n", ctrl->field); \
return sysfs_emit(buf, "%d\n", ctrl->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

@@ -3517,9 +3517,9 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,

if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
state_name[ctrl->state])
return sprintf(buf, "%s\n", state_name[ctrl->state]);
return sysfs_emit(buf, "%s\n", state_name[ctrl->state]);

return sprintf(buf, "unknown state\n");
return sysfs_emit(buf, "unknown state\n");
}

static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
@@ -3571,9 +3571,9 @@ static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
struct nvmf_ctrl_options *opts = ctrl->opts;

if (ctrl->opts->max_reconnects == -1)
return sprintf(buf, "off\n");
return sprintf(buf, "%d\n",
opts->max_reconnects * opts->reconnect_delay);
return sysfs_emit(buf, "off\n");
return sysfs_emit(buf, "%d\n",
opts->max_reconnects * opts->reconnect_delay);
}

static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
@@ -3603,8 +3603,8 @@ static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

if (ctrl->opts->reconnect_delay == -1)
return sprintf(buf, "off\n");
return sprintf(buf, "%d\n", ctrl->opts->reconnect_delay);
return sysfs_emit(buf, "off\n");
return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
}

static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
@@ -624,8 +624,8 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);

return sprintf(buf, "%s\n",
nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
return sysfs_emit(buf, "%s\n",
nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
@@ -650,7 +650,7 @@ SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

@@ -659,7 +659,7 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
{
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);
@@ -591,11 +591,8 @@ static inline void pcie_ecrc_get_policy(char *str) { }

#ifdef CONFIG_PCIE_PTM
void pci_ptm_init(struct pci_dev *dev);
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
#else
static inline void pci_ptm_init(struct pci_dev *dev) { }
static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
{ return -EINVAL; }
#endif

struct pci_dev_reset_methods {
@@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES

config CPU_HWMON
tristate "Loongson-3 CPU HWMon Driver"
bool "Loongson-3 CPU HWMon Driver"
depends on MACH_LOONGSON64
select HWMON
default y
@@ -9792,7 +9792,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL);

if (!ioa_cfg->hrrq[i].host_rrq) {
while (--i > 0)
while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@@ -10065,7 +10065,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
while (--i >= 0)
while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
@@ -4281,6 +4281,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
#define wqe_ffrq_SHIFT 6
#define wqe_ffrq_MASK 0x00000001
#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
@@ -857,7 +857,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nvmet_invalidate_host(phba, ndlp);

if (ndlp->nlp_DID == Fabric_DID) {
if (vport->port_state <= LPFC_FDISC)
if (vport->port_state <= LPFC_FDISC ||
vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
@@ -1315,7 +1315,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
struct nvme_common_command *sqe;
struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;

@@ -1371,8 +1372,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++;
}

if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
sqe = &((struct nvme_fc_cmd_iu *)
nCmd->cmdaddr)->sqe.common;
if (sqe->opcode == nvme_admin_async_event)
bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
}

/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -4528,7 +4528,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0;

out_unwind:
while (--i > 0)
while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;
@@ -333,8 +333,8 @@ struct PVSCSIRingReqDesc {
u8 tag;
u8 bus;
u8 target;
u8 vcpuHint;
u8 unused[59];
u16 vcpuHint;
u8 unused[58];
} __packed;

/*
@@ -685,7 +685,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
if (!devpriv->usb_rx_buf)
return -ENOMEM;

size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE);
size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE);
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
if (!devpriv->usb_tx_buf)
return -ENOMEM;
@@ -428,7 +428,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
free_irq(qtty->irq, pdev);
free_irq(qtty->irq, qtty);
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
@@ -1532,6 +1532,8 @@ static inline void __stop_tx(struct uart_8250_port *p)

if (em485) {
unsigned char lsr = serial_in(p, UART_LSR);
p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;

/*
* To provide required timeing and allow FIFO transfer,
* __stop_tx_rs485() must be called only when both FIFO and
@@ -5076,7 +5076,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -EINVAL;
goto error1;
goto error2;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
@@ -3015,6 +3015,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
}

udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
of_node_put(isp1301_node);
if (!udc->isp1301_i2c_client) {
return -EPROBE_DEFER;
}
@@ -168,6 +168,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};

@@ -206,6 +207,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) },
{ }
};

@@ -212,6 +212,7 @@
//
// Definitions for other product IDs
#define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device
#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4)

#define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \
@@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2

@@ -1979,6 +1981,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff),
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff),
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
@@ -689,6 +689,7 @@ static int vm_cmdline_set(const char *device,
if (!vm_cmdline_parent_registered) {
err = device_register(&vm_cmdline_parent);
if (err) {
put_device(&vm_cmdline_parent);
pr_err("Failed to register parent device!\n");
return err;
}
@@ -254,8 +254,7 @@ void vp_del_vqs(struct virtio_device *vdev)

if (vp_dev->msix_affinity_masks) {
for (i = 0; i < vp_dev->msix_vectors; i++)
if (vp_dev->msix_affinity_masks[i])
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
free_cpumask_var(vp_dev->msix_affinity_masks[i]);
}

if (vp_dev->msix_enabled) {
@@ -657,14 +657,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
if (stat->st_result_mask & P9_STATS_NLINK)
set_nlink(inode, stat->st_nlink);
if (stat->st_result_mask & P9_STATS_MODE) {
inode->i_mode = stat->st_mode;
if ((S_ISBLK(inode->i_mode)) ||
(S_ISCHR(inode->i_mode)))
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
mode = stat->st_mode & S_IALLUGO;
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
stat->st_result_mask & P9_STATS_SIZE)
v9fs_i_size_write(inode, stat->st_size);
@@ -3520,6 +3520,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
size = size >> bsbits;
start = start_off >> bsbits;

/*
* For tiny groups (smaller than 8MB) the chosen allocation
* alignment may be larger than group size. Make sure the
* alignment does not move allocation to a different group which
* makes mballoc fail assertions later.
*/
start = max(start, rounddown(ac->ac_o_ex.fe_logical,
(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));

/* don't cover already allocated blocks in selected range */
if (ar->pleft && start <= ar->lleft) {
size -= ar->lleft + 1 - start;
@@ -1917,7 +1917,8 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct dx_hash_info *hinfo, ext4_lblk_t *newblock)
{
unsigned blocksize = dir->i_sb->s_blocksize;
unsigned count, continued;
unsigned continued;
int count;
struct buffer_head *bh2;
u32 hash2;
struct dx_map_entry *map;
@@ -52,6 +52,16 @@ int ext4_resize_begin(struct super_block *sb)
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;

/*
* If the reserved GDT blocks is non-zero, the resize_inode feature
* should always be set.
*/
if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
!ext4_has_feature_resize_inode(sb)) {
ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
return -EFSCORRUPTED;
}

/*
* If we are not using the primary superblock/GDT copy don't resize,
* because the user tools have no way of handling this. Probably a
@ -283,6 +283,7 @@ static u32 initiate_file_draining(struct nfs_client *clp,
|
||||
rv = NFS4_OK;
|
||||
break;
|
||||
case -ENOENT:
|
||||
set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
|
||||
/* Embrace your forgetfulness! */
|
||||
rv = NFS4ERR_NOMATCHING_LAYOUT;
|
||||
|
||||
|
@@ -469,6 +469,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
 		pnfs_clear_lseg_state(lseg, lseg_list);
 	pnfs_clear_layoutreturn_info(lo);
 	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
+	set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
 	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
 	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
 		pnfs_clear_layoutreturn_waitbit(lo);
@@ -1923,8 +1924,9 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)

 static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
 {
-	if (atomic_dec_and_test(&lo->plh_outstanding))
-		wake_up_var(&lo->plh_outstanding);
+	if (atomic_dec_and_test(&lo->plh_outstanding) &&
+	    test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
+		wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
 }

 static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
@@ -2031,11 +2033,11 @@ pnfs_update_layout(struct inode *ino,
 	 * If the layout segment list is empty, but there are outstanding
 	 * layoutget calls, then they might be subject to a layoutrecall.
 	 */
-	if ((list_empty(&lo->plh_segs) || !pnfs_layout_is_valid(lo)) &&
+	if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
 	    atomic_read(&lo->plh_outstanding) != 0) {
 		spin_unlock(&ino->i_lock);
-		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
-					!atomic_read(&lo->plh_outstanding)));
+		lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN,
+					TASK_KILLABLE));
 		if (IS_ERR(lseg))
 			goto out_put_layout_hdr;
 		pnfs_put_layout_hdr(lo);
@@ -2155,6 +2157,12 @@ pnfs_update_layout(struct inode *ino,
 	case -ERECALLCONFLICT:
 	case -EAGAIN:
 		break;
+	case -ENODATA:
+		/* The server returned NFS4ERR_LAYOUTUNAVAILABLE */
+		pnfs_layout_set_fail_bit(
+			lo, pnfs_iomode_to_fail_bit(iomode));
+		lseg = NULL;
+		goto out_put_layout_hdr;
 	default:
 		if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
 			pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
@@ -2408,7 +2416,8 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
 		goto out_forget;
 	}

-	if (!pnfs_layout_is_valid(lo) && !pnfs_is_first_layoutget(lo))
+	if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
+	    !pnfs_is_first_layoutget(lo))
 		goto out_forget;

 	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
@@ -107,6 +107,7 @@ enum {
 	NFS_LAYOUT_FIRST_LAYOUTGET,	/* Serialize first layoutget */
 	NFS_LAYOUT_INODE_FREEING,	/* The inode is being freed */
 	NFS_LAYOUT_HASHED,		/* The layout visible */
+	NFS_LAYOUT_DRAIN,
 };

 enum layoutdriver_policy_flags {
@@ -194,7 +194,6 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
 			__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
 		}
 		nf->nf_mark = NULL;
-		init_rwsem(&nf->nf_rwsem);
 		trace_nfsd_file_alloc(nf);
 	}
 	return nf;
@@ -46,7 +46,6 @@ struct nfsd_file {
 	refcount_t		nf_ref;
 	unsigned char		nf_may;
 	struct nfsd_file_mark	*nf_mark;
-	struct rw_semaphore	nf_rwsem;
 };

 int nfsd_file_cache_init(void);
@@ -1380,6 +1380,8 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)

 static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
 {
+	struct file *dst = copy->nf_dst->nf_file;
+	struct file *src = copy->nf_src->nf_file;
 	ssize_t bytes_copied = 0;
 	size_t bytes_total = copy->cp_count;
 	u64 src_pos = copy->cp_src_pos;
@@ -1388,9 +1390,8 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
 	do {
 		if (kthread_should_stop())
 			break;
-		bytes_copied = nfsd_copy_file_range(copy->nf_src->nf_file,
-				src_pos, copy->nf_dst->nf_file, dst_pos,
-				bytes_total);
+		bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
+				bytes_total);
 		if (bytes_copied <= 0)
 			break;
 		bytes_total -= bytes_copied;
@@ -535,10 +535,11 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
 {
 	struct file *src = nf_src->nf_file;
 	struct file *dst = nf_dst->nf_file;
+	errseq_t since;
 	loff_t cloned;
 	__be32 ret = 0;

-	down_write(&nf_dst->nf_rwsem);
+	since = READ_ONCE(dst->f_wb_err);
 	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
 	if (cloned < 0) {
 		ret = nfserrno(cloned);
@@ -552,6 +553,8 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
 		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
 		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);

+		if (!status)
+			status = filemap_check_wb_err(dst->f_mapping, since);
 		if (!status)
 			status = commit_inode_metadata(file_inode(src));
 		if (status < 0) {
@@ -561,7 +564,6 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
 		}
 	}
 out_err:
-	up_write(&nf_dst->nf_rwsem);
 	return ret;
 }

@@ -980,6 +982,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
 	struct file *file = nf->nf_file;
 	struct svc_export *exp;
 	struct iov_iter iter;
+	errseq_t since;
 	__be32 nfserr;
 	int host_err;
 	int use_wgather;
@@ -1009,21 +1012,18 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
 		flags |= RWF_SYNC;

 	iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
+	since = READ_ONCE(file->f_wb_err);
 	if (flags & RWF_SYNC) {
-		down_write(&nf->nf_rwsem);
 		host_err = vfs_iter_write(file, &iter, &pos, flags);
 		if (host_err < 0)
 			nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
						 nfsd_net_id));
-		up_write(&nf->nf_rwsem);
 	} else {
-		down_read(&nf->nf_rwsem);
 		if (verf)
 			nfsd_copy_boot_verifier(verf,
					net_generic(SVC_NET(rqstp),
					nfsd_net_id));
 		host_err = vfs_iter_write(file, &iter, &pos, flags);
-		up_read(&nf->nf_rwsem);
 	}
 	if (host_err < 0) {
 		nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
@@ -1033,6 +1033,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
 	*cnt = host_err;
 	nfsdstats.io_write += *cnt;
 	fsnotify_modify(file);
+	host_err = filemap_check_wb_err(file->f_mapping, since);
+	if (host_err < 0)
+		goto out_nfserr;

 	if (stable && use_wgather) {
 		host_err = wait_for_concurrent_writes(file);
@@ -1113,19 +1116,6 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
 }

 #ifdef CONFIG_NFSD_V3
-static int
-nfsd_filemap_write_and_wait_range(struct nfsd_file *nf, loff_t offset,
-				  loff_t end)
-{
-	struct address_space *mapping = nf->nf_file->f_mapping;
-	int ret = filemap_fdatawrite_range(mapping, offset, end);
-
-	if (ret)
-		return ret;
-	filemap_fdatawait_range_keep_errors(mapping, offset, end);
-	return 0;
-}
-
 /*
  * Commit all pending writes to stable storage.
  *
@@ -1156,25 +1146,25 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	if (err)
 		goto out;
 	if (EX_ISSYNC(fhp->fh_export)) {
-		int err2 = nfsd_filemap_write_and_wait_range(nf, offset, end);
+		errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
+		int err2;

-		down_write(&nf->nf_rwsem);
-		if (!err2)
-			err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+		err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
 		switch (err2) {
 		case 0:
 			nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
						nfsd_net_id));
+			err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
						    since);
 			break;
 		case -EINVAL:
 			err = nfserr_notsupp;
 			break;
 		default:
+			err = nfserrno(err2);
 			nfsd_reset_boot_verifier(net_generic(nf->nf_net,
						nfsd_net_id));
 		}
-		up_write(&nf->nf_rwsem);
-		err = nfserrno(err2);
 	} else
 		nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
					nfsd_net_id));
@@ -79,6 +79,7 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "../internal.h" /* ugh */

 #include <linux/uaccess.h>
@@ -427,9 +428,11 @@ EXPORT_SYMBOL(mark_info_dirty);
 int dquot_acquire(struct dquot *dquot)
 {
 	int ret = 0, ret2 = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
 		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
 		if (ret < 0)
@@ -460,6 +463,7 @@ int dquot_acquire(struct dquot *dquot)
 	smp_mb__before_atomic();
 	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -471,9 +475,11 @@ EXPORT_SYMBOL(dquot_acquire);
 int dquot_commit(struct dquot *dquot)
 {
 	int ret = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	if (!clear_dquot_dirty(dquot))
 		goto out_lock;
 	/* Inactive dquot can be only if there was error during read/init
@@ -483,6 +489,7 @@ int dquot_commit(struct dquot *dquot)
 	else
 		ret = -EIO;
 out_lock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -494,9 +501,11 @@ EXPORT_SYMBOL(dquot_commit);
 int dquot_release(struct dquot *dquot)
 {
 	int ret = 0, ret2 = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	/* Check whether we are not racing with some other dqget() */
 	if (dquot_is_busy(dquot))
 		goto out_dqlock;
@@ -512,6 +521,7 @@ int dquot_release(struct dquot *dquot)
 	}
 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_dqlock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -1622,6 +1622,13 @@ static inline bool pci_aer_available(void) { return false; }

 bool pci_ats_disabled(void);

+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+#endif
+
 void pci_cfg_access_lock(struct pci_dev *dev);
 bool pci_cfg_access_trylock(struct pci_dev *dev);
 void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -121,7 +121,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(-E2BIG);

 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
-	err = bpf_map_charge_init(&mem, cost);
+	err = bpf_map_charge_init(&mem, cost + attr->max_entries *
+				  (sizeof(struct stack_map_bucket) + (u64)value_size));
 	if (err)
 		return ERR_PTR(err);
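Aside (not part of the diff): the stack_map_alloc hunk above exists because the old charge covered only the bucket pointer array plus the map header, while the buckets and the stack data they hold were never accounted. A rough user-space sketch of the two formulas follows; every size below is an illustrative assumption, not a kernel constant.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t max_entries = 8192;     /* attr->max_entries (example) */
		uint64_t value_size  = 8 * 127;  /* bytes of stored stack trace (example) */
		uint64_t n_buckets   = 16384;    /* next power of two >= max_entries */
		uint64_t ptr_size    = 8;        /* sizeof(struct stack_map_bucket *) */
		uint64_t bucket_hdr  = 16;       /* sizeof(struct stack_map_bucket), assumed */
		uint64_t smap_hdr    = 256;      /* sizeof(*smap), assumed */

		/* What the code charged before and after the fix. */
		uint64_t old_cost = n_buckets * ptr_size + smap_hdr;
		uint64_t new_cost = old_cost + max_entries * (bucket_hdr + value_size);

		printf("charged before fix: %llu bytes\n", (unsigned long long)old_cost);
		printf("charged after fix:  %llu bytes\n", (unsigned long long)new_cost);
		return 0;
	}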
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)

 	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
-		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
 	}
@@ -191,7 +191,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 			goto out_free_pages;
 		if (force_dma_unencrypted(dev)) {
 			err = set_memory_decrypted((unsigned long)ret,
-						   1 << get_order(size));
+						   PFN_UP(size));
 			if (err)
 				goto out_free_pages;
 		}
@@ -213,7 +213,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_decrypted((unsigned long)ret,
-					   1 << get_order(size));
+					   PFN_UP(size));
 		if (err)
 			goto out_free_pages;
 	}
@@ -234,7 +234,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 out_encrypt_pages:
 	if (force_dma_unencrypted(dev)) {
 		err = set_memory_encrypted((unsigned long)page_address(page),
-					   1 << get_order(size));
+					   PFN_UP(size));
 		/* If memory cannot be re-encrypted, it must be leaked */
 		if (err)
 			return NULL;
@@ -247,8 +247,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	unsigned int page_order = get_order(size);
-
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -269,7 +267,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		return;

 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+		set_memory_encrypted((unsigned long)cpu_addr, PFN_UP(size));

 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
@@ -305,8 +303,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,

 	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
-				1 << get_order(size)))
+		if (set_memory_decrypted((unsigned long)ret, PFN_UP(size)))
 			goto out_free_pages;
 	}
 	memset(ret, 0, size);
@@ -322,7 +319,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
 		enum dma_data_direction dir)
 {
-	unsigned int page_order = get_order(size);
 	void *vaddr = page_address(page);

 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
@@ -331,7 +327,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
 		return;

 	if (force_dma_unencrypted(dev))
-		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+		set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));

 	dma_free_contiguous(dev, page, size);
 }
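Aside (not part of the diff): every dma-direct hunk above makes the same substitution because 1 << get_order(size) is the page count of the power-of-two allocation order, while PFN_UP(size) is the number of pages actually backing size bytes, so the old calls could flip the encryption attribute on more memory than the buffer owns. A minimal user-space sketch of the difference, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

	/* Smallest order such that (1 << order) pages cover size bytes,
	 * mirroring what get_order() computes in the kernel. */
	static unsigned int get_order_approx(unsigned long size)
	{
		unsigned int order = 0;

		while ((PAGE_SIZE << order) < size)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long size = 5 * PAGE_SIZE;   /* example: a 5-page buffer */

		printf("pages touched before the fix: %lu\n",
		       1UL << get_order_approx(size)); /* 8 pages */
		printf("pages touched after the fix:  %lu\n",
		       PFN_UP(size));                  /* 5 pages */
		return 0;
	}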
@@ -103,6 +103,9 @@ config INDIRECT_PIO

 source "lib/crypto/Kconfig"

+config LIB_MEMNEQ
+	bool
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
@@ -248,6 +248,7 @@ obj-$(CONFIG_DIMLIB) += dim/
 obj-$(CONFIG_SIGNATURE) += digsig.o

 lib-$(CONFIG_CLZ_TAB) += clz_tab.o
+lib-$(CONFIG_LIB_MEMNEQ) += memneq.o

 obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
 obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
@@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519
 	tristate "Curve25519 scalar multiplication library"
 	depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
 	select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+	select LIB_MEMNEQ
 	help
 	  Enable the Curve25519 library interface. This interface may be
 	  fulfilled by either the generic implementation or an arch-specific
@@ -1653,9 +1653,12 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 			int flags)
 {
 	struct sock *sk = sock->sk;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *last;
+	struct sk_buff_head *sk_queue;
 	int copied;
 	int err = 0;
+	int off = 0;
+	long timeo;

 	lock_sock(sk);
 	/*
@@ -1667,11 +1670,29 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 		goto out;
 	}

-	/* Now we can treat all alike */
-	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
-				flags & MSG_DONTWAIT, &err);
-	if (skb == NULL)
-		goto out;
+	/* We need support for non-blocking reads. */
+	sk_queue = &sk->sk_receive_queue;
+	skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off, &err, &last);
+	/* If no packet is available, release_sock(sk) and try again. */
+	if (!skb) {
+		if (err != -EAGAIN)
+			goto out;
+		release_sock(sk);
+		timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+		while (timeo && !__skb_wait_for_more_packets(sk, sk_queue, &err,
+							     &timeo, last)) {
+			skb = __skb_try_recv_datagram(sk, sk_queue, flags, &off,
+						      &err, &last);
+			if (skb)
+				break;
+
+			if (err != -EAGAIN)
+				goto done;
+		}
+		if (!skb)
+			goto done;
+		lock_sock(sk);
+	}

 	if (!sk_to_ax25(sk)->pidincl)
 		skb_pull(skb, 1);		/* Remove PID */
@@ -1718,6 +1739,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 out:
 	release_sock(sk);

+done:
 	return err;
 }

@@ -502,14 +502,15 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	struct ipcm6_cookie ipc6;
 	int addr_len = msg->msg_namelen;
 	int transhdrlen = 4; /* zero session-id */
-	int ulen = len + transhdrlen;
+	int ulen;
 	int err;

 	/* Rough check on arithmetic overflow,
 	 * better check is made in ip6_append_data().
 	 */
-	if (len > INT_MAX)
+	if (len > INT_MAX - transhdrlen)
 		return -EMSGSIZE;
+	ulen = len + transhdrlen;

 	/* Mirror BSD error message compatibility */
 	if (msg->msg_flags & MSG_OOB)
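Aside (not part of the diff): the l2tp_ip6_sendmsg hunk above moves the addition after the bound check because assigning len + transhdrlen into a signed int can wrap to a negative value for a huge len, whereas comparing len against INT_MAX - transhdrlen never overflows. A small illustration with hypothetical values:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		size_t len = (size_t)INT_MAX - 1;  /* oversized user-supplied length (example) */
		int transhdrlen = 4;

		/* Old pattern: check, then add; the sum no longer fits in an int
		 * and typically ends up negative after conversion. */
		if (!(len > INT_MAX)) {
			int ulen = (int)(len + transhdrlen);
			printf("old check passes, ulen = %d\n", ulen);
		}

		/* New pattern: reject before adding, so the sum always fits. */
		if (len > (size_t)(INT_MAX - transhdrlen))
			printf("new check rejects the request with -EMSGSIZE\n");
		return 0;
	}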
@@ -372,6 +372,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 		update_ip_l4_checksum(skb, nh, *addr, new_addr);
 	csum_replace4(&nh->check, *addr, new_addr);
 	skb_clear_hash(skb);
+	ovs_ct_clear(skb, NULL);
 	*addr = new_addr;
 }

@@ -419,6 +420,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

 	skb_clear_hash(skb);
+	ovs_ct_clear(skb, NULL);
 	memcpy(addr, new_addr, sizeof(__be32[4]));
 }

@@ -659,6 +661,7 @@ static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
 			__be16 new_port, __sum16 *check)
 {
+	ovs_ct_clear(skb, NULL);
 	inet_proto_csum_replace2(check, skb, *port, new_port, false);
 	*port = new_port;
 }
@@ -698,6 +701,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		uh->dest = dst;
 		flow_key->tp.src = src;
 		flow_key->tp.dst = dst;
+		ovs_ct_clear(skb, NULL);
 	}

 	skb_clear_hash(skb);
@@ -760,6 +764,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
 	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

 	skb_clear_hash(skb);
+	ovs_ct_clear(skb, NULL);
+
 	flow_key->tp.src = sh->source;
 	flow_key->tp.dst = sh->dest;

@@ -1324,7 +1324,8 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
 	if (skb_nfct(skb)) {
 		nf_conntrack_put(skb_nfct(skb));
 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
-		ovs_ct_fill_key(skb, key);
+		if (key)
+			ovs_ct_fill_key(skb, key);
 	}

 	return 0;
@@ -213,6 +213,20 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	return err;
 }

+static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit)
+{
+	u32 len;
+
+	if (skb_is_gso(skb))
+		return skb_gso_validate_mac_len(skb, limit);
+
+	len = qdisc_pkt_len(skb);
+	if (skb_at_tc_ingress(skb))
+		len += skb->mac_len;
+
+	return len <= limit;
+}
+
 static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
 			  struct tcf_result *res)
 {
@@ -235,7 +249,7 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
 			goto inc_overlimits;
 	}

-	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
+	if (tcf_police_mtu_check(skb, p->tcfp_mtu)) {
 		if (!p->rate_present) {
 			ret = p->tcfp_result;
 			goto end;
@@ -95,17 +95,25 @@ __faddr2line() {
	local print_warnings=$4

	local sym_name=${func_addr%+*}
-	local offset=${func_addr#*+}
-	offset=${offset%/*}
+	local func_offset=${func_addr#*+}
+	func_offset=${func_offset%/*}
	local user_size=
+	local file_type
+	local is_vmlinux=0
	[[ $func_addr =~ "/" ]] && user_size=${func_addr#*/}

-	if [[ -z $sym_name ]] || [[ -z $offset ]] || [[ $sym_name = $func_addr ]]; then
+	if [[ -z $sym_name ]] || [[ -z $func_offset ]] || [[ $sym_name = $func_addr ]]; then
		warn "bad func+offset $func_addr"
		DONE=1
		return
	fi

+	# vmlinux uses absolute addresses in the section table rather than
+	# section offsets.
+	local file_type=$(${READELF} --file-header $objfile |
+		${AWK} '$1 == "Type:" { print $2; exit }')
+	[[ $file_type = "EXEC" ]] && is_vmlinux=1
+
	# Go through each of the object's symbols which match the func name.
	# In rare cases there might be duplicates, in which case we print all
	# matches.
@@ -114,9 +122,11 @@ __faddr2line() {
		local sym_addr=0x${fields[1]}
		local sym_elf_size=${fields[2]}
		local sym_sec=${fields[6]}
+		local sec_size
+		local sec_name

		# Get the section size:
-		local sec_size=$(${READELF} --section-headers --wide $objfile |
+		sec_size=$(${READELF} --section-headers --wide $objfile |
			sed 's/\[ /\[/' |
			${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print "0x" $6; exit }')

@@ -126,6 +136,17 @@ __faddr2line() {
			return
		fi

+		# Get the section name:
+		sec_name=$(${READELF} --section-headers --wide $objfile |
+			sed 's/\[ /\[/' |
+			${AWK} -v sec=$sym_sec '$1 == "[" sec "]" { print $2; exit }')
+
+		if [[ -z $sec_name ]]; then
+			warn "bad section name: section: $sym_sec"
+			DONE=1
+			return
+		fi
+
		# Calculate the symbol size.
		#
		# Unfortunately we can't use the ELF size, because kallsyms
@@ -174,10 +195,10 @@ __faddr2line() {

		sym_size=0x$(printf %x $sym_size)

-		# Calculate the section address from user-supplied offset:
-		local addr=$(($sym_addr + $offset))
+		# Calculate the address from user-supplied offset:
+		local addr=$(($sym_addr + $func_offset))
		if [[ -z $addr ]] || [[ $addr = 0 ]]; then
-			warn "bad address: $sym_addr + $offset"
+			warn "bad address: $sym_addr + $func_offset"
			DONE=1
			return
		fi
@@ -191,9 +212,9 @@ __faddr2line() {
		fi

		# Make sure the provided offset is within the symbol's range:
-		if [[ $offset -gt $sym_size ]]; then
+		if [[ $func_offset -gt $sym_size ]]; then
			[[ $print_warnings = 1 ]] &&
-				echo "skipping $sym_name address at $addr due to size mismatch ($offset > $sym_size)"
+				echo "skipping $sym_name address at $addr due to size mismatch ($func_offset > $sym_size)"
			continue
		fi

@@ -202,11 +223,13 @@ __faddr2line() {
		[[ $FIRST = 0 ]] && echo
		FIRST=0

-		echo "$sym_name+$offset/$sym_size:"
+		echo "$sym_name+$func_offset/$sym_size:"

		# Pass section address to addr2line and strip absolute paths
		# from the output:
-		local output=$(${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;")
+		local args="--functions --pretty-print --inlines --exe=$objfile"
+		[[ $is_vmlinux = 0 ]] && args="$args --section=$sec_name"
+		local output=$(${ADDR2LINE} $args $addr | sed "s; $dir_prefix\(\./\)*; ;")
		[[ -z $output ]] && continue

		# Default output (non --list):
@@ -660,6 +660,7 @@ static const struct hda_vendor_id hda_vendor_ids[] = {
 	{ 0x14f1, "Conexant" },
 	{ 0x17e8, "Chrontel" },
 	{ 0x1854, "LG" },
+	{ 0x19e5, "Huawei" },
 	{ 0x1aec, "Wolfson Microelectronics" },
 	{ 0x1af4, "QEMU" },
 	{ 0x434d, "C-Media" },
@@ -439,6 +439,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
 	case 0x10ec0245:
 	case 0x10ec0255:
 	case 0x10ec0256:
+	case 0x19e58326:
 	case 0x10ec0257:
 	case 0x10ec0282:
 	case 0x10ec0283:
@@ -576,6 +577,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
 	switch (codec->core.vendor_id) {
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 	case 0x10ec0283:
 	case 0x10ec0286:
 	case 0x10ec0288:
@@ -3252,6 +3254,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x48, 0x0);
 		alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
 		break;
@@ -3280,6 +3283,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x48, 0xd011);
 		alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
 		break;
@@ -4849,6 +4853,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, coef0256);
 		break;
 	case 0x10ec0234:
@@ -4964,6 +4969,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x45, 0xc489);
 		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
 		alc_process_coef_fw(codec, coef0256);
@@ -5114,6 +5120,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
 		alc_write_coef_idx(codec, 0x45, 0xc089);
 		msleep(50);
@@ -5213,6 +5220,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, coef0256);
 		break;
 	case 0x10ec0234:
@@ -5327,6 +5335,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, coef0256);
 		break;
 	case 0x10ec0234:
@@ -5428,6 +5437,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
 		alc_write_coef_idx(codec, 0x06, 0x6104);
 		alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
@@ -5722,6 +5732,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, alc256fw);
 		break;
 	}
@@ -6325,6 +6336,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
 	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
 		alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
 		break;
@@ -8781,6 +8793,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
 	SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
+	SND_PCI_QUIRK(0x103c, 0x89aa, "HP EliteBook 630 G9", ALC236_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9813,6 +9826,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		spec->codec_variant = ALC269_TYPE_ALC256;
 		spec->shutup = alc256_shutup;
 		spec->init_hook = alc256_init;
@@ -11255,6 +11269,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
 	HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
 	HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
+	HDA_CODEC_ENTRY(0x19e58326, "HW8326", patch_alc269),
 	{} /* terminator */
 };
 MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_realtek);
@@ -444,7 +444,8 @@ static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg)
 	}
 }

-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0);
+static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 912,
+				  TLV_DB_MINMAX_ITEM(-10200, 1200));
 static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);

 static const char * const cs35l36_pcm_sftramp_text[] = {
@@ -146,7 +146,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
 			0, 0xA0, 96, adc_att_tlv),
 	SOC_DOUBLE_R_SX_TLV("PGA Volume",
 			CS42L51_ALC_PGA_CTL, CS42L51_ALC_PGB_CTL,
-			0, 0x1A, 30, pga_tlv),
+			0, 0x19, 30, pga_tlv),
 	SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0),
 	SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0),
 	SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0),
@@ -137,7 +137,9 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);

 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);

-static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(pass_tlv, -6000, 50, 0);
+
+static DECLARE_TLV_DB_SCALE(mix_tlv, -5150, 50, 0);

 static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);

@@ -351,7 +353,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 			      CS42L52_SPKB_VOL, 0, 0x40, 0xC0, hl_tlv),

 	SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
-			      CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pga_tlv),
+			      CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pass_tlv),

 	SOC_DOUBLE("Bypass Mute", CS42L52_MISC_CTL, 4, 5, 1, 0),

@@ -364,7 +366,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 			      CS42L52_ADCB_VOL, 0, 0xA0, 0x78, ipd_tlv),
 	SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume",
 			     CS42L52_ADCA_MIXER_VOL, CS42L52_ADCB_MIXER_VOL,
-				0, 0x19, 0x7F, ipd_tlv),
+				0, 0x19, 0x7F, mix_tlv),

 	SOC_DOUBLE("ADC Switch", CS42L52_ADC_MISC_CTL, 0, 1, 1, 0),

@@ -391,9 +391,9 @@ static const struct snd_kcontrol_new cs42l56_snd_controls[] = {
 	SOC_DOUBLE("ADC Boost Switch", CS42L56_GAIN_BIAS_CTL, 3, 2, 1, 1),

 	SOC_DOUBLE_R_SX_TLV("Headphone Volume", CS42L56_HPA_VOLUME,
-			      CS42L56_HPB_VOLUME, 0, 0x84, 0x48, hl_tlv),
+			      CS42L56_HPB_VOLUME, 0, 0x44, 0x48, hl_tlv),
 	SOC_DOUBLE_R_SX_TLV("LineOut Volume", CS42L56_LOA_VOLUME,
-			      CS42L56_LOB_VOLUME, 0, 0x84, 0x48, hl_tlv),
+			      CS42L56_LOB_VOLUME, 0, 0x44, 0x48, hl_tlv),

 	SOC_SINGLE_TLV("Bass Shelving Volume", CS42L56_TONE_CTL,
 			0, 0x00, 1, tone_tlv),
@@ -347,22 +347,22 @@ static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
 	SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),

 	SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
-		    CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 	SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
-		    CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 	SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
-		    CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 	SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
-		    CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),

 	SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
-		    CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 	SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
-		    CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 	SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
-		    CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 	SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
-		    CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 };

 static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {
@@ -161,13 +161,16 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
 	if (deemph > 1)
 		return -EINVAL;

+	if (es8328->deemph == deemph)
+		return 0;
+
 	ret = es8328_set_deemph(component);
 	if (ret < 0)
 		return ret;

 	es8328->deemph = deemph;

-	return 0;
+	return 1;
 }

@@ -740,6 +740,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
 		pll_param->pll_int, pll_param->pll_frac,
 		pll_param->mclk_scaler, pll_param->pre_factor);

+	snd_soc_component_update_bits(component,
+		NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_OFF);
 	snd_soc_component_update_bits(component,
 		NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK,
 		(pll_param->pre_factor ? NAU8822_PLLMCLK_DIV2 : 0) |
@@ -757,6 +759,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
 		pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT);
 	snd_soc_component_update_bits(component,
 		NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL);
+	snd_soc_component_update_bits(component,
+		NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_ON);

 	return 0;
 }
@@ -90,6 +90,9 @@
 #define NAU8822_REFIMP_3K			0x3
 #define NAU8822_IOBUF_EN			(0x1 << 2)
 #define NAU8822_ABIAS_EN			(0x1 << 3)
+#define NAU8822_PLL_EN_MASK			(0x1 << 5)
+#define NAU8822_PLL_ON				(0x1 << 5)
+#define NAU8822_PLL_OFF				(0x0 << 5)

 /* NAU8822_REG_AUDIO_INTERFACE (0x4) */
 #define NAU8822_AIFMT_MASK			(0x3 << 3)
@@ -3864,6 +3864,7 @@ static int wm8962_runtime_suspend(struct device *dev)
 #endif

 static const struct dev_pm_ops wm8962_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
 	SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
 };

@@ -800,7 +800,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
 	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
-	int ret = 0;
+	int ret = 1;

 	if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw)
 		return 0;