commit 3eba5ed860

Merge a763d5a5ab ("Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi") into android-mainline

Steps on the way to 5.16-rc5

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: If9710821309694d52ff569055887915cd31dae98
 .mailmap | 2 ++
--- a/.mailmap
+++ b/.mailmap
@@ -126,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
+Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
@@ -136,7 +136,7 @@ examples:
         samsung,syscon-phandle = <&pmu_system_controller>;

         /* NTC thermistor is a hwmon device */
-        ncp15wb473 {
+        thermistor {
             compatible = "murata,ncp15wb473";
             pullup-uv = <1800000>;
             pullup-ohm = <47000>;
@@ -142,7 +142,7 @@ examples:
         down {
             label = "GPIO Key DOWN";
             linux,code = <108>;
-            interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+            interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
         };
     };

@@ -79,6 +79,8 @@ properties:

       properties:
         data-lanes:
+          description:
+            Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines.
           items:
             minItems: 1
             maxItems: 4
@@ -91,18 +93,6 @@ properties:
       required:
         - data-lanes

-allOf:
-  - if:
-      properties:
-        compatible:
-          contains:
-            const: fsl,imx7-mipi-csi2
-    then:
-      properties:
-        data-lanes:
-          items:
-            maxItems: 2
-
   port@1:
     $ref: /schemas/graph.yaml#/properties/port
     description:
@@ -91,6 +91,14 @@ properties:
       compensate for the board being designed with the lanes
       swapped.

+  enet-phy-lane-no-swap:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      If set, indicates that PHY will disable swap of the
+      TX/RX lanes. This property allows the PHY to work correcly after
+      e.g. wrong bootstrap configuration caused by issues in PCB
+      layout design.
+
   eee-broken-100tx:
     $ref: /schemas/types.yaml#/definitions/flag
     description:
@@ -105,7 +105,7 @@ examples:
         reg = <0x65>;
         interrupt-parent = <&gpio1>;
         interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
-        ti,watchdog-timer = <0>;
+        ti,watchdog-timeout-ms = <0>;
         ti,sc-ocp-limit-microamp = <2000000>;
         ti,sc-ovp-limit-microvolt = <17800000>;
         monitored-battery = <&bat>;
@@ -19,6 +19,9 @@ properties:
   clocks:
     maxItems: 1

+  interrupts:
+    maxItems: 1
+
   "#sound-dai-cells":
     const: 0

@@ -10286,9 +10286,9 @@ F: lib/Kconfig.kcsan
 F: scripts/Makefile.kcsan

 KDUMP
-M: Dave Young <dyoung@redhat.com>
 M: Baoquan He <bhe@redhat.com>
 R: Vivek Goyal <vgoyal@redhat.com>
+R: Dave Young <dyoung@redhat.com>
 L: kexec@lists.infradead.org
 S: Maintained
 W: http://lse.sourceforge.net/kdump/
@@ -7,6 +7,7 @@
  * Copyright The Asahi Linux Contributors
  */

+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
@@ -281,7 +282,7 @@ pcie0: pcie@690000000 {
     port00: pci@0,0 {
         device_type = "pci";
         reg = <0x0 0x0 0x0 0x0 0x0>;
-        reset-gpios = <&pinctrl_ap 152 0>;
+        reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>;
         max-link-speed = <2>;

         #address-cells = <3>;
@@ -301,7 +302,7 @@ port00: pci@0,0 {
     port01: pci@1,0 {
         device_type = "pci";
         reg = <0x800 0x0 0x0 0x0 0x0>;
-        reset-gpios = <&pinctrl_ap 153 0>;
+        reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>;
         max-link-speed = <2>;

         #address-cells = <3>;
@@ -321,7 +322,7 @@ port01: pci@1,0 {
     port02: pci@2,0 {
         device_type = "pci";
         reg = <0x1000 0x0 0x0 0x0 0x0>;
-        reset-gpios = <&pinctrl_ap 33 0>;
+        reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>;
         max-link-speed = <1>;

         #address-cells = <3>;
@@ -97,7 +97,7 @@
     KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT   KVM_ARCH_REQ(26)
 #define KVM_REQ_TLB_FLUSH_GUEST \
-    KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+    KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY           KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED  KVM_ARCH_REQ(29)
 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool

         all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

+        if (all_cpus)
+            goto check_and_send_ipi;
+
         if (!sparse_banks_len)
             goto ret_success;

-        if (!all_cpus &&
-            kvm_read_guest(kvm,
+        if (kvm_read_guest(kvm,
                            hc->ingpa + offsetof(struct hv_send_ipi_ex,
                                                 vp_set.bank_contents),
                            sparse_banks,
@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
             return HV_STATUS_INVALID_HYPERCALL_INPUT;
     }

+check_and_send_ipi:
     if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
         return HV_STATUS_INVALID_HYPERCALL_INPUT;

@@ -2646,15 +2646,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
         if (!loaded_vmcs->msr_bitmap)
             goto out_vmcs;
         memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
-        if (IS_ENABLED(CONFIG_HYPERV) &&
-            static_branch_unlikely(&enable_evmcs) &&
-            (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
-            struct hv_enlightened_vmcs *evmcs =
-                (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
-            evmcs->hv_enlightenments_control.msr_bitmap = 1;
-        }
     }

     memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
@@ -6842,6 +6833,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
     if (err < 0)
         goto free_pml;

+    /*
+     * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
+     * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
+     * feature only for vmcs01, KVM currently isn't equipped to realize any
+     * performance benefits from enabling it for vmcs02.
+     */
+    if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+        (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+        struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
+
+        evmcs->hv_enlightenments_control.msr_bitmap = 1;
+    }
+
     /* The MSR bitmap starts with all ones */
     bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
     bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
         return 1;

-    if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+    if (!(cr0 & X86_CR0_PG) &&
+        (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
         return 1;

     static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -7121,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                            unsigned short port, void *val, unsigned int count)
 {
     if (vcpu->arch.pio.count) {
-        /* Complete previous iteration. */
+        /*
+         * Complete a previous iteration that required userspace I/O.
+         * Note, @count isn't guaranteed to match pio.count as userspace
+         * can modify ECX before rerunning the vCPU. Ignore any such
+         * shenanigans as KVM doesn't support modifying the rep count,
+         * and the emulator ensures @count doesn't overflow the buffer.
+         */
     } else {
         int r = __emulator_pio_in(vcpu, size, port, count);
         if (!r)
@@ -7130,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
         /* Results already available, fall through. */
     }

-    WARN_ON(count != vcpu->arch.pio.count);
     complete_emulator_pio_in(vcpu, val);
     return 1;
 }
@@ -341,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
     } else {
         ret = bio_iov_iter_get_pages(bio, iter);
         if (unlikely(ret)) {
-            bio->bi_status = BLK_STS_IOERR;
-            bio_endio(bio);
+            bio_put(bio);
             return ret;
         }
     }
@@ -220,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
             pgrp = task_pgrp(current);
         else
             pgrp = find_vpid(who);
+        read_lock(&tasklist_lock);
         do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
             tmpio = get_task_ioprio(p);
             if (tmpio < 0)
@@ -229,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
             else
                 ret = ioprio_best(ret, tmpio);
         } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+        read_unlock(&tasklist_lock);
+
         break;
     case IOPRIO_WHO_USER:
         uid = make_kuid(current_user_ns(), who);
@@ -4579,23 +4579,20 @@ static int binder_thread_release(struct binder_proc *proc,
     __release(&t->lock);

     /*
-     * If this thread used poll, make sure we remove the waitqueue
-     * from any epoll data structures holding it with POLLFREE.
-     * waitqueue_active() is safe to use here because we're holding
-     * the inner lock.
+     * If this thread used poll, make sure we remove the waitqueue from any
+     * poll data structures holding it.
      */
-    if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-        waitqueue_active(&thread->wait)) {
-        wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-    }
+    if (thread->looper & BINDER_LOOPER_STATE_POLL)
+        wake_up_pollfree(&thread->wait);

     binder_inner_proc_unlock(thread->proc);

     /*
-     * This is needed to avoid races between wake_up_poll() above and
-     * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-     * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-     * lock, so we can be sure it's done after calling synchronize_rcu().
+     * This is needed to avoid races between wake_up_pollfree() above and
+     * someone else removing the last entry from the queue for other reasons
+     * (e.g. ep_remove_wait_queue() being called due to an epoll file
+     * descriptor being closed). Such other users hold an RCU read lock, so
+     * we can be sure they're done after we call synchronize_rcu().
      */
     if (thread->looper & BINDER_LOOPER_STATE_POLL)
         synchronize_rcu();
@@ -94,6 +94,7 @@ struct ceva_ahci_priv {
 static unsigned int ceva_ahci_read_id(struct ata_device *dev,
                                       struct ata_taskfile *tf, u16 *id)
 {
+    __le16 *__id = (__le16 *)id;
     u32 err_mask;

     err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
      * Since CEVA controller does not support device sleep feature, we
      * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
      */
-    id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+    __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));

     return 0;
 }
@@ -3920,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
     { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
     /* Odd clown on sil3726/4726 PMPs */
     { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+    /* Similar story with ASMedia 1092 */
+    { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },

     /* Weird ATAPI devices */
     { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
     .probe = imx8qxp_lpcg_clk_probe,
 };

-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+module_platform_driver(imx8qxp_lpcg_clk_driver);

 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
     },
     .probe = imx8qxp_clk_probe,
 };
-builtin_platform_driver(imx8qxp_clk_driver);
+module_platform_driver(imx8qxp_clk_driver);

 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
 void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                              const struct alpha_pll_config *config)
 {
+    /*
+     * If the bootloader left the PLL enabled it's likely that there are
+     * RCGs that will lock up if we disable the PLL below.
+     */
+    if (trion_pll_is_enabled(pll, regmap)) {
+        pr_debug("Trion PLL is already enabled, skipping configuration\n");
+        return;
+    }
+
     clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
     regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
     clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
     val &= mask;

     if (mux->parent_map)
-        return qcom_find_src_index(hw, mux->parent_map, val);
+        return qcom_find_cfg_index(hw, mux->parent_map, val);

     return val;
 }
@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
 }
 EXPORT_SYMBOL_GPL(qcom_find_src_index);

+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
+{
+    int i, num_parents = clk_hw_get_num_parents(hw);
+
+    for (i = 0; i < num_parents; i++)
+        if (cfg == map[i].cfg)
+            return i;
+
+    return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
+
 struct regmap *
 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
@@ -49,6 +49,8 @@ extern void
 qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
                                u8 src);
+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
+                               u8 cfg);

 extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
                                       const char *name, unsigned long rate);
@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
         .name = "gcc_sdcc1_apps_clk_src",
         .parent_data = gcc_parent_data_1,
         .num_parents = ARRAY_SIZE(gcc_parent_data_1),
-        .ops = &clk_rcg2_ops,
+        .ops = &clk_rcg2_floor_ops,
     },
 };

@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
         .name = "gcc_sdcc1_ice_core_clk_src",
         .parent_data = gcc_parent_data_0,
         .num_parents = ARRAY_SIZE(gcc_parent_data_0),
-        .ops = &clk_rcg2_floor_ops,
+        .ops = &clk_rcg2_ops,
     },
 };

@@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)

     regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
     if (IS_ERR(regclk)) {
-        kfree(name);
         pr_err("error setting up syscon ICST clock %s\n", name);
+        kfree(name);
         return;
     }
     of_clk_add_provider(np, of_clk_src_simple_get, regclk);
@@ -2576,7 +2576,8 @@ static int dm_resume(void *handle)
          */
         link_enc_cfg_init(dm->dc, dc_state);

-        amdgpu_dm_outbox_init(adev);
+        if (dc_enable_dmub_notifications(adev->dm.dc))
+            amdgpu_dm_outbox_init(adev);

         r = dm_dmub_hw_init(adev);
         if (r)
@@ -2625,6 +2626,10 @@ static int dm_resume(void *handle)
     /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
     dc_resource_state_construct(dm->dc, dm_state->context);

+    /* Re-enable outbox interrupts for DPIA. */
+    if (dc_enable_dmub_notifications(adev->dm.dc))
+        amdgpu_dm_outbox_init(adev);
+
     /* Before powering on DC we need to re-initialize DMUB. */
     r = dm_dmub_hw_init(adev);
     if (r)
@@ -226,6 +226,8 @@ static inline void get_edp_links(const struct dc *dc,
     *edp_num = 0;
     for (i = 0; i < dc->link_count; i++) {
         // report any eDP links, even unconnected DDI's
+        if (!dc->links[i])
+            continue;
         if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
             edp_links[*edp_num] = dc->links[i];
             if (++(*edp_num) == MAX_NUM_EDP)
@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,

     if (*fence) {
         ret = dma_fence_chain_find_seqno(fence, point);
-        if (!ret)
+        if (!ret) {
+            /* If the requested seqno is already signaled
+             * drm_syncobj_find_fence may return a NULL
+             * fence. To make sure the recipient gets
+             * signalled, use a new fence instead.
+             */
+            if (!*fence)
+                *fence = dma_fence_get_stub();
+
             goto out;
+        }
         dma_fence_put(*fence);
     } else {
         ret = -EINVAL;
@@ -3277,6 +3277,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
     out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
     if (IS_ERR(out_fence)) {
         err = PTR_ERR(out_fence);
+        out_fence = NULL;
         if (eb.requests[0])
             goto err_request;
         else
@@ -1127,6 +1127,15 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
                 GAMT_CHKN_BIT_REG,
                 GAMT_CHKN_DISABLE_L3_COH_PIPE);

+    /* Wa_1407352427:icl,ehl */
+    wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+                PSDUNIT_CLKGATE_DIS);
+
+    /* Wa_1406680159:icl,ehl */
+    wa_write_or(wal,
+                SUBSLICE_UNIT_LEVEL_CLKGATE,
+                GWUNIT_CLKGATE_DIS);
+
     /* Wa_1607087056:icl,ehl,jsl */
     if (IS_ICELAKE(i915) ||
         IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1852,15 +1861,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
         wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                     VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

-        /* Wa_1407352427:icl,ehl */
-        wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
-                    PSDUNIT_CLKGATE_DIS);
-
-        /* Wa_1406680159:icl,ehl */
-        wa_write_or(wal,
-                    SUBSLICE_UNIT_LEVEL_CLKGATE,
-                    GWUNIT_CLKGATE_DIS);
-
         /*
          * Wa_1408767742:icl[a2..forever],ehl[all]
          * Wa_1605460711:icl[a0..c0]
@@ -1103,7 +1103,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
      * as an indication that we're about to swap out.
      */
     memset(&place, 0, sizeof(place));
-    place.mem_type = TTM_PL_SYSTEM;
+    place.mem_type = bo->resource->mem_type;
     if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
         return -EBUSY;

@@ -1135,6 +1135,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         struct ttm_place hop;

         memset(&hop, 0, sizeof(hop));
+        place.mem_type = TTM_PL_SYSTEM;
         ret = ttm_resource_alloc(bo, &place, &evict_mem);
         if (unlikely(ret))
             goto out;
@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
     corsairpsu_check_cmd_support(priv);

     priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
-                                                      &corsairpsu_chip_info, 0);
+                                                      &corsairpsu_chip_info, NULL);

     if (IS_ERR(priv->hwmon_dev)) {
         ret = PTR_ERR(priv->hwmon_dev);
@@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev)
 {
     struct dell_smm_data *data = dev_get_drvdata(dev);

-    /* Register the proc entry */
-    proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
-    devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+    /* Only register exit function if creation was successful */
+    if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
+        devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
 }

 #else
@@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)

     nct6775_wmi_set_bank(data, reg);

-    err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+    err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp);
     if (err)
         return 0;

@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
         return ret;
     }

-    ctx->pwm_value = MAX_PWM;
-
     pwm_init_state(ctx->pwm, &ctx->pwm_state);

     /*
@@ -23,7 +23,7 @@
 /*
  * I2C command delays (in microseconds)
  */
-#define SHT4X_MEAS_DELAY	1000
+#define SHT4X_MEAS_DELAY_HPM	8200	/* see t_MEAS,h in datasheet */
 #define SHT4X_DELAY_EXTRA	10000

 /*
@@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data)
     if (ret < 0)
         goto unlock;

-    usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+    usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);

     ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
     if (ret != SHT4X_RESPONSE_LENGTH) {
@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
     status = readb(i2c->base + MPC_I2C_SR);
     if (status & CSR_MIF) {
         /* Wait up to 100us for transfer to properly complete */
-        readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
+        readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
         writeb(0, i2c->base + MPC_I2C_SR);
         mpc_i2c_do_intr(i2c, status);
         return IRQ_HANDLED;
@@ -22,24 +22,24 @@
 /**
  * struct virtio_i2c - virtio I2C data
  * @vdev: virtio device for this controller
- * @completion: completion of virtio I2C message
  * @adap: I2C adapter for this controller
  * @vq: the virtio virtqueue for communication
  */
 struct virtio_i2c {
     struct virtio_device *vdev;
-    struct completion completion;
     struct i2c_adapter adap;
     struct virtqueue *vq;
 };

 /**
  * struct virtio_i2c_req - the virtio I2C request structure
+ * @completion: completion of virtio I2C message
  * @out_hdr: the OUT header of the virtio I2C message
  * @buf: the buffer into which data is read, or from which it's written
  * @in_hdr: the IN header of the virtio I2C message
  */
 struct virtio_i2c_req {
+    struct completion completion;
     struct virtio_i2c_out_hdr out_hdr ____cacheline_aligned;
     uint8_t *buf ____cacheline_aligned;
     struct virtio_i2c_in_hdr in_hdr ____cacheline_aligned;
@@ -47,9 +47,11 @@ struct virtio_i2c_req {

 static void virtio_i2c_msg_done(struct virtqueue *vq)
 {
-    struct virtio_i2c *vi = vq->vdev->priv;
+    struct virtio_i2c_req *req;
+    unsigned int len;

-    complete(&vi->completion);
+    while ((req = virtqueue_get_buf(vq, &len)))
+        complete(&req->completion);
 }

 static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
     for (i = 0; i < num; i++) {
         int outcnt = 0, incnt = 0;

+        init_completion(&reqs[i].completion);
+
         /*
          * Only 7-bit mode supported for this moment. For the address
          * format, Please check the Virtio I2C Specification.
@@ -106,21 +110,15 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
                                     struct virtio_i2c_req *reqs,
                                     struct i2c_msg *msgs, int num)
 {
-    struct virtio_i2c_req *req;
     bool failed = false;
-    unsigned int len;
     int i, j = 0;

     for (i = 0; i < num; i++) {
-        /* Detach the ith request from the vq */
-        req = virtqueue_get_buf(vq, &len);
-
-        /*
-         * Condition req == &reqs[i] should always meet since we have
-         * total num requests in the vq. reqs[i] can never be NULL here.
-         */
-        if (!failed && (WARN_ON(req != &reqs[i]) ||
-                        req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+        struct virtio_i2c_req *req = &reqs[i];
+
+        wait_for_completion(&req->completion);
+
+        if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
             failed = true;

         i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
@@ -156,12 +154,8 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
      * remote here to clear the virtqueue, so we can try another set of
      * messages later on.
      */
-
-    reinit_completion(&vi->completion);
     virtqueue_kick(vq);

-    wait_for_completion(&vi->completion);
-
     count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);

 err_free:
@@ -210,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
     vdev->priv = vi;
     vi->vdev = vdev;

-    init_completion(&vi->completion);
-
     ret = virtio_i2c_setup_vqs(vi);
     if (ret)
         return ret;
@@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)

         if (!num_sectors || num_sectors > max_sectors)
             num_sectors = max_sectors;
+        rdev->sb_start = sb_start;
     }
     sb = page_address(rdev->sb_page);
     sb->data_size = cpu_to_le64(num_sectors);
@@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev)
     spin_lock(&mddev->lock);
     mddev->pers = NULL;
     spin_unlock(&mddev->lock);
-    pers->free(mddev, mddev->private);
+    if (mddev->private)
+        pers->free(mddev, mddev->private);
     mddev->private = NULL;
     if (pers->sync_request && mddev->to_remove == NULL)
         mddev->to_remove = &md_redundancy_group;
@@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
         sdr_set_field(host->base + PAD_DS_TUNE,
                       PAD_DS_TUNE_DLY1, i);
         ret = mmc_get_ext_csd(card, &ext_csd);
-        if (!ret)
+        if (!ret) {
             result_dly1 |= (1 << i);
+            kfree(ext_csd);
+        }
     }
     host->hs400_tuning = false;

@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)

     /* Issue CMD19 twice for each tap */
     for (i = 0; i < 2 * priv->tap_num; i++) {
-        int cmd_error;
+        int cmd_error = 0;

         /* Set sampling clock position */
         sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                                         struct request *rq)
 {
     if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+        ctrl->state != NVME_CTRL_DELETING &&
         ctrl->state != NVME_CTRL_DEAD &&
         !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
         !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -1749,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
          */
         if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
             return -EINVAL;
-        if (ctrl->max_integrity_segments)
-            ns->features |=
-                (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+        ns->features |= NVME_NS_EXT_LBAS;
+
+        /*
+         * The current fabrics transport drivers support namespace
+         * metadata formats only if nvme_ns_has_pi() returns true.
+         * Suppress support for all other formats so the namespace will
+         * have a 0 capacity and not be usable through the block stack.
+         *
+         * Note, this check will need to be modified if any drivers
+         * gain the ability to use other metadata formats.
+         */
+        if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+            ns->features |= NVME_NS_METADATA_SUPPORTED;
     } else {
         /*
          * For PCIe controllers, we can't easily remap the separate
@@ -2696,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,

         if (tmp->cntlid == ctrl->cntlid) {
             dev_err(ctrl->device,
-                    "Duplicate cntlid %u with %s, rejecting\n",
-                    ctrl->cntlid, dev_name(tmp->device));
+                    "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+                    ctrl->cntlid, dev_name(tmp->device),
+                    subsys->subnqn);
             return false;
         }

@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
     }
     if (ana_log_size > ctrl->ana_log_size) {
         nvme_mpath_stop(ctrl);
-        kfree(ctrl->ana_log_buf);
+        nvme_mpath_uninit(ctrl);
         ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
         if (!ctrl->ana_log_buf)
             return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
     kfree(ctrl->ana_log_buf);
     ctrl->ana_log_buf = NULL;
+    ctrl->ana_log_size = 0;
 }
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
         return true;
     if (ctrl->ops->flags & NVME_F_FABRICS &&
         ctrl->state == NVME_CTRL_DELETING)
-        return true;
+        return queue_live;
     return __nvme_check_ready(ctrl, rq, queue_live);
 }
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
     zone.len = ns->zsze;
     zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
     zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
-    zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+    if (zone.cond == BLK_ZONE_COND_FULL)
+        zone.wp = zone.start + zone.len;
+    else
+        zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));

     return cb(&zone, idx, data);
 }
@@ -922,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
     size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
     int ret;

-    if (!nvme_is_write(cmd->req.cmd) ||
+    /*
+     * This command has not been processed yet, hence we are trying to
+     * figure out if there is still pending data left to receive. If
+     * we don't, we can simply prepare for the next pdu and bail out,
+     * otherwise we will need to prepare a buffer and receive the
+     * stale data before continuing forward.
+     */
+    if (!nvme_is_write(cmd->req.cmd) || !data_len ||
         data_len > cmd->req.port->inline_data_size) {
         nvmet_prepare_receive_pdu(queue);
         return;
@@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child)
 }
 EXPORT_SYMBOL_GPL(of_irq_find_parent);

+/*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+ * their own parsing of the property).
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+ * drawing board.
+ */
+static const char * const of_irq_imap_abusers[] = {
+    "CBEA,platform-spider-pic",
+    "sti,platform-spider-pic",
+    "realtek,rtl-intc",
+    "fsl,ls1021a-extirq",
+    "fsl,ls1043a-extirq",
+    "fsl,ls1088a-extirq",
+    "renesas,rza1-irqc",
+    NULL,
+};
+
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
  * @addr: address specifier (start of "reg" property of the device) in be32 format
@@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
         /*
          * Now check if cursor is an interrupt-controller and
          * if it is then we are done, unless there is an
-         * interrupt-map which takes precedence.
+         * interrupt-map which takes precedence except on one
+         * of these broken platforms that want to parse
+         * interrupt-map themselves for $reason.
          */
         bool intc = of_property_read_bool(ipar, "interrupt-controller");

         imap = of_get_property(ipar, "interrupt-map", &imaplen);
-        if (imap == NULL && intc) {
+        if (intc &&
+            (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
             pr_debug(" -> got it !\n");
             return 0;
         }
@@ -32,7 +32,6 @@
 #define PCIE_CORE_DEV_ID_REG		0x0
 #define PCIE_CORE_CMD_STATUS_REG	0x4
 #define PCIE_CORE_DEV_REV_REG		0x8
-#define PCIE_CORE_EXP_ROM_BAR_REG	0x30
 #define PCIE_CORE_PCIEXP_CAP		0xc0
 #define PCIE_CORE_ERR_CAPCTL_REG	0x118
 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX	BIT(5)
@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
         *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
         return PCI_BRIDGE_EMUL_HANDLED;

-    case PCI_ROM_ADDRESS1:
-        *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
-        return PCI_BRIDGE_EMUL_HANDLED;
-
     case PCI_INTERRUPT_LINE: {
         /*
          * From the whole 32bit register we support reading from HW only
@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
         advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
         break;

-    case PCI_ROM_ADDRESS1:
-        advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
-        break;
-
     case PCI_INTERRUPT_LINE:
         if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
             u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
@@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
     int ret, i;

     reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
-                                   GPIOD_OUT_LOW, "#PERST");
+                                   GPIOD_OUT_LOW, "PERST#");
     if (IS_ERR(reset))
         return PTR_ERR(reset);

@@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,

     rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);

+    /* Assert PERST# before setting up the clock */
+    gpiod_set_value(reset, 1);
+
     ret = apple_pcie_setup_refclk(pcie, port);
     if (ret < 0)
         return ret;

+    /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+    usleep_range(100, 200);
+
+    /* Deassert PERST# */
     rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
-    gpiod_set_value(reset, 1);
+    gpiod_set_value(reset, 0);
+
+    /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+    msleep(100);

     ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
                                      stat & PORT_STATUS_READY, 100, 250000);
@@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
     if (rc) {
         pm8001_dbg(pm8001_ha, FAIL,
                    "pm8001_setup_irq failed [ret: %d]\n", rc);
-        goto err_out_shost;
+        goto err_out;
     }
     /* Request Interrupt */
     rc = pm8001_request_irq(pm8001_ha);
     if (rc)
-        goto err_out_shost;
+        goto err_out;

     count = pm8001_ha->max_q_num;
     /* Queues are chosen based on the number of cores/msix availability */
@@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
     pm8001_tag_init(pm8001_ha);
     return 0;

-err_out_shost:
-    scsi_remove_host(pm8001_ha->shost);
 err_out_nodev:
     for (i = 0; i < pm8001_ha->max_memcnt; i++) {
         if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
|
|||||||
{
|
{
|
||||||
struct qedi_work_map *work, *work_tmp;
|
	struct qedi_work_map *work, *work_tmp;
	u32 proto_itt = cqe->itid;
	itt_t protoitt = 0;
	int found = 0;
	struct qedi_cmd *qedi_cmd = NULL;
	u32 iscsi_cid;

@@ -812,16 +811,12 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 		return;

 check_cleanup_reqs:
-	if (qedi_conn->cmd_cleanup_req > 0) {
-		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+	if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+	    qedi_conn->cmd_cleanup_req) {
+		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 			  "Freeing tid=0x%x for cid=0x%x\n",
 			  cqe->itid, qedi_conn->iscsi_conn_id);
-		qedi_conn->cmd_cleanup_cmpl++;
 		wake_up(&qedi_conn->wait_queue);
-	} else {
-		QEDI_ERR(&qedi->dbg_ctx,
-			 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
-			 protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
 	}
 }

@@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 	}

 	qedi_conn->cmd_cleanup_req = 0;
-	qedi_conn->cmd_cleanup_cmpl = 0;
+	atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);

 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 		  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 		  qedi_conn->iscsi_conn_id);

 	rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
-						((qedi_conn->cmd_cleanup_req ==
-						  qedi_conn->cmd_cleanup_cmpl) ||
-						 test_bit(QEDI_IN_RECOVERY,
-							  &qedi->flags)),
-						5 * HZ);
+						(qedi_conn->cmd_cleanup_req ==
+						 atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+						test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+						5 * HZ);
 	if (rval) {
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 			  "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
 			  qedi_conn->cmd_cleanup_req,
-			  qedi_conn->cmd_cleanup_cmpl,
+			  atomic_read(&qedi_conn->cmd_cleanup_cmpl),
 			  qedi_conn->iscsi_conn_id);

 		return 0;
@@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 		  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
 		  qedi_conn->cmd_cleanup_req,
-		  qedi_conn->cmd_cleanup_cmpl,
+		  atomic_read(&qedi_conn->cmd_cleanup_cmpl),
 		  qedi_conn->iscsi_conn_id);

 	iscsi_host_for_each_session(qedi->shost,
@@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,

 	/* Enable IOs for all other sessions except current.*/
 	if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
 					      (qedi_conn->cmd_cleanup_req ==
-					       qedi_conn->cmd_cleanup_cmpl) ||
-					      test_bit(QEDI_IN_RECOVERY,
-						       &qedi->flags),
-					      5 * HZ)) {
+					       atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+					      test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+					      5 * HZ)) {
 		iscsi_host_for_each_session(qedi->shost,
 					    qedi_mark_device_available);
 		return -1;
@@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,

 	qedi_ep = qedi_conn->ep;
 	qedi_conn->cmd_cleanup_req = 0;
-	qedi_conn->cmd_cleanup_cmpl = 0;
+	atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);

 	if (!qedi_ep) {
 		QEDI_WARN(&qedi->dbg_ctx,

@@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
 	qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
 	qedi_conn->fw_cid = qedi_ep->fw_cid;
 	qedi_conn->cmd_cleanup_req = 0;
-	qedi_conn->cmd_cleanup_cmpl = 0;
+	atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);

 	if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
 		rc = -EINVAL;

@@ -155,7 +155,7 @@ struct qedi_conn {
 	spinlock_t list_lock;		/* internal conn lock */
 	u32 active_cmd_count;
 	u32 cmd_cleanup_req;
-	u32 cmd_cleanup_cmpl;
+	atomic_t cmd_cleanup_cmpl;

 	u32 iscsi_conn_id;
 	int itt;

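The qedi hunks above replace a plain u32 completion counter with an atomic_t so that concurrent cleanup responses cannot lose increments. A minimal sketch of the same "issue N requests, wake the waiter on the Nth completion" pattern is shown below; all names here are illustrative, not qedi code.

	#include <linux/atomic.h>
	#include <linux/wait.h>
	#include <linux/jiffies.h>

	struct cleanup_ctx {
		int cleanup_req;		/* completions expected, set by the waiter */
		atomic_t cleanup_cmpl;		/* completions observed so far */
		wait_queue_head_t waitq;
	};

	/* Completion path: may run concurrently on several CPUs. */
	static void cleanup_done(struct cleanup_ctx *c)
	{
		/*
		 * atomic_inc_return() makes the "was that the last one?" test
		 * race-free; a plain ++ on a u32 could lose updates here.
		 */
		if (atomic_inc_return(&c->cleanup_cmpl) == c->cleanup_req)
			wake_up(&c->waitq);
	}

	/* Waiter: called after issuing c->cleanup_req requests. */
	static int cleanup_wait(struct cleanup_ctx *c)
	{
		if (!wait_event_timeout(c->waitq,
					c->cleanup_req == atomic_read(&c->cleanup_cmpl),
					5 * HZ))
			return -ETIMEDOUT;
		return 0;
	}
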
@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
 	struct va_format vaf;
 	char pbuf[64];

+	if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+		return;
+
 	va_start(va, fmt);

 	vaf.fmt = fmt;

@@ -4342,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
 	rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
 			    max_zones);

-	arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+	arr = kzalloc(alloc_len, GFP_ATOMIC);
 	if (!arr) {
 		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
 				INSUFF_RES_ASCQ);

@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
 };

 static const struct mmio_reg tgl_fivr_mmio_regs[] = {
-	{ 0, 0x5A18, 3, 0x7, 12},	/* vco_ref_code_lo */
+	{ 0, 0x5A18, 3, 0x7, 11},	/* vco_ref_code_lo */
 	{ 0, 0x5A18, 8, 0xFF, 16},	/* vco_ref_code_hi */
 	{ 0, 0x5A08, 8, 0xFF, 0},	/* spread_spectrum_pct */
 	{ 0, 0x5A08, 1, 0x1, 8},	/* spread_spectrum_clk_enable */

186	fs/aio.c
@@ -181,8 +181,9 @@ struct poll_iocb {
 	struct file		*file;
 	struct wait_queue_head	*head;
 	__poll_t		events;
-	bool			done;
 	bool			cancelled;
+	bool			work_scheduled;
+	bool			work_need_resched;
 	struct wait_queue_entry	wait;
 	struct work_struct	work;
 };
@@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work)
 	iocb_put(iocb);
 }

+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken.  Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+	wait_queue_head_t *head;
+
+	/*
+	 * While we hold the waitqueue lock and the waitqueue is nonempty,
+	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+	 * lock in the first place can race with the waitqueue being freed.
+	 *
+	 * We solve this as eventpoll does: by taking advantage of the fact that
+	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+	 * we enter rcu_read_lock() and see that the pointer to the queue is
+	 * non-NULL, we can then lock it without the memory being freed out from
+	 * under us, then check whether the request is still on the queue.
+	 *
+	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+	 * case the caller deletes the entry from the queue, leaving it empty.
+	 * In that case, only RCU prevents the queue memory from being freed.
+	 */
+	rcu_read_lock();
+	head = smp_load_acquire(&req->head);
+	if (head) {
+		spin_lock(&head->lock);
+		if (!list_empty(&req->wait.entry))
+			return true;
+		spin_unlock(&head->lock);
+	}
+	rcu_read_unlock();
+	return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+	spin_unlock(&req->head->lock);
+	rcu_read_unlock();
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work)
 	 * avoid further branches in the fast path.
 	 */
 	spin_lock_irq(&ctx->ctx_lock);
-	if (!mask && !READ_ONCE(req->cancelled)) {
-		add_wait_queue(req->head, &req->wait);
-		spin_unlock_irq(&ctx->ctx_lock);
-		return;
-	}
+	if (poll_iocb_lock_wq(req)) {
+		if (!mask && !READ_ONCE(req->cancelled)) {
+			/*
+			 * The request isn't actually ready to be completed yet.
+			 * Reschedule completion if another wakeup came in.
+			 */
+			if (req->work_need_resched) {
+				schedule_work(&req->work);
+				req->work_need_resched = false;
+			} else {
+				req->work_scheduled = false;
+			}
+			poll_iocb_unlock_wq(req);
+			spin_unlock_irq(&ctx->ctx_lock);
+			return;
+		}
+		list_del_init(&req->wait.entry);
+		poll_iocb_unlock_wq(req);
+	} /* else, POLLFREE has freed the waitqueue, so we must complete */
 	list_del_init(&iocb->ki_list);
 	iocb->ki_res.res = mangle_poll(mask);
-	req->done = true;
 	spin_unlock_irq(&ctx->ctx_lock);

 	iocb_put(iocb);
@@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
 	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
 	struct poll_iocb *req = &aiocb->poll;

-	spin_lock(&req->head->lock);
-	WRITE_ONCE(req->cancelled, true);
-	if (!list_empty(&req->wait.entry)) {
-		list_del_init(&req->wait.entry);
-		schedule_work(&aiocb->poll.work);
-	}
-	spin_unlock(&req->head->lock);
+	if (poll_iocb_lock_wq(req)) {
+		WRITE_ONCE(req->cancelled, true);
+		if (!req->work_scheduled) {
+			schedule_work(&aiocb->poll.work);
+			req->work_scheduled = true;
+		}
+		poll_iocb_unlock_wq(req);
+	} /* else, the request was force-cancelled by POLLFREE already */

 	return 0;
 }
@@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	if (mask && !(mask & req->events))
 		return 0;

-	list_del_init(&req->wait.entry);
-
-	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+	/*
+	 * Complete the request inline if possible.  This requires that three
+	 * conditions be met:
+	 *   1. An event mask must have been passed.  If a plain wakeup was done
+	 *	instead, then mask == 0 and we have to call vfs_poll() to get
+	 *	the events, so inline completion isn't possible.
+	 *   2. The completion work must not have already been scheduled.
+	 *   3. ctx_lock must not be busy.  We have to use trylock because we
+	 *	already hold the waitqueue lock, so this inverts the normal
+	 *	locking order.  Use irqsave/irqrestore because not all
+	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
+	 *	yet IRQs have to be disabled before ctx_lock is obtained.
+	 */
+	if (mask && !req->work_scheduled &&
+	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 		struct kioctx *ctx = iocb->ki_ctx;

-		/*
-		 * Try to complete the iocb inline if we can. Use
-		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
-		 * call this function with IRQs disabled and because IRQs
-		 * have to be disabled before ctx_lock is obtained.
-		 */
+		list_del_init(&req->wait.entry);
 		list_del(&iocb->ki_list);
 		iocb->ki_res.res = mangle_poll(mask);
-		req->done = true;
-		if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+		if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
 			iocb = NULL;
 			INIT_WORK(&req->work, aio_poll_put_work);
 			schedule_work(&req->work);
@@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		if (iocb)
 			iocb_put(iocb);
 	} else {
-		schedule_work(&req->work);
+		/*
+		 * Schedule the completion work if needed.  If it was already
+		 * scheduled, record that another wakeup came in.
+		 *
+		 * Don't remove the request from the waitqueue here, as it might
+		 * not actually be complete yet (we won't know until vfs_poll()
+		 * is called), and we must not miss any wakeups.  POLLFREE is an
+		 * exception to this; see below.
+		 */
+		if (req->work_scheduled) {
+			req->work_need_resched = true;
+		} else {
+			schedule_work(&req->work);
+			req->work_scheduled = true;
+		}
+
+		/*
+		 * If the waitqueue is being freed early but we can't complete
+		 * the request inline, we have to tear down the request as best
+		 * we can.  That means immediately removing the request from its
+		 * waitqueue and preventing all further accesses to the
+		 * waitqueue via the request.  We also need to schedule the
+		 * completion work (done above).  Also mark the request as
+		 * cancelled, to potentially skip an unneeded call to ->poll().
+		 */
+		if (mask & POLLFREE) {
+			WRITE_ONCE(req->cancelled, true);
+			list_del_init(&req->wait.entry);
+
+			/*
+			 * Careful: this *must* be the last step, since as soon
+			 * as req->head is NULL'ed out, the request can be
+			 * completed and freed, since aio_poll_complete_work()
+			 * will no longer need to take the waitqueue lock.
+			 */
+			smp_store_release(&req->head, NULL);
+		}
 	}
 	return 1;
 }
@@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 struct aio_poll_table {
 	struct poll_table_struct	pt;
 	struct aio_kiocb		*iocb;
+	bool				queued;
 	int				error;
 };

@@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);

 	/* multiple wait queues per file are not supported */
-	if (unlikely(pt->iocb->poll.head)) {
+	if (unlikely(pt->queued)) {
 		pt->error = -EINVAL;
 		return;
 	}

+	pt->queued = true;
 	pt->error = 0;
 	pt->iocb->poll.head = head;
 	add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;

 	req->head = NULL;
-	req->done = false;
 	req->cancelled = false;
+	req->work_scheduled = false;
+	req->work_need_resched = false;

 	apt.pt._qproc = aio_poll_queue_proc;
 	apt.pt._key = req->events;
 	apt.iocb = aiocb;
+	apt.queued = false;
 	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */

 	/* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)

 	mask = vfs_poll(req->file, &apt.pt) & req->events;
 	spin_lock_irq(&ctx->ctx_lock);
-	if (likely(req->head)) {
-		spin_lock(&req->head->lock);
-		if (unlikely(list_empty(&req->wait.entry))) {
-			if (apt.error)
+	if (likely(apt.queued)) {
+		bool on_queue = poll_iocb_lock_wq(req);
+
+		if (!on_queue || req->work_scheduled) {
+			/*
+			 * aio_poll_wake() already either scheduled the async
+			 * completion work, or completed the request inline.
+			 */
+			if (apt.error) /* unsupported case: multiple queues */
 				cancel = true;
 			apt.error = 0;
 			mask = 0;
 		}
 		if (mask || apt.error) {
+			/* Steal to complete synchronously. */
 			list_del_init(&req->wait.entry);
 		} else if (cancel) {
+			/* Cancel if possible (may be too late though). */
 			WRITE_ONCE(req->cancelled, true);
-		} else if (!req->done) { /* actually waiting for an event */
+		} else if (on_queue) {
+			/*
+			 * Actually waiting for an event, so add the request to
+			 * active_reqs so that it can be cancelled if needed.
+			 */
 			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
 			aiocb->ki_cancel = aio_poll_cancel;
 		}
-		spin_unlock(&req->head->lock);
+		if (on_queue)
+			poll_iocb_unlock_wq(req);
 	}
 	if (mask) { /* no async, we'd stolen it */
 		aiocb->ki_res.res = mangle_poll(mask);

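Besides the POLLFREE handling, the aio hunks above introduce a wakeup-coalescing scheme: completion work is scheduled at most once, and later wakeups only mark that the work must run again. The sketch below shows that scheme in isolation with hypothetical names; it is not the aio code itself, just the shape of the idea.

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>

	struct coalesced_work {
		struct work_struct work;
		bool scheduled;		/* work is queued or currently running */
		bool need_resched;	/* another wakeup arrived meanwhile */
		spinlock_t lock;
	};

	/* Wakeup side: never queues the work twice. */
	static void coalesced_wakeup(struct coalesced_work *cw)
	{
		unsigned long flags;

		spin_lock_irqsave(&cw->lock, flags);
		if (cw->scheduled) {
			cw->need_resched = true;
		} else {
			schedule_work(&cw->work);
			cw->scheduled = true;
		}
		spin_unlock_irqrestore(&cw->lock, flags);
	}

	/* Called at the end of the work function, with cw->lock held. */
	static void coalesced_work_done(struct coalesced_work *cw)
	{
		if (cw->need_resched) {
			schedule_work(&cw->work);
			cw->need_resched = false;
		} else {
			cw->scheduled = false;
		}
	}
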
@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,

 	/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
 	ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
-	if (ret < 0)
+	if (ret < 0) {
 		btrfs_free_reserved_data_space_noquota(fs_info, len);
-	else
+		extent_changeset_free(*reserved);
+		*reserved = NULL;
+	} else {
 		ret = 0;
+	}
 	return ret;
 }

@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
 	if (ret < 0)
 		return ret;
 	ret = btrfs_delalloc_reserve_metadata(inode, len);
-	if (ret < 0)
+	if (ret < 0) {
 		btrfs_free_reserved_data_space(inode, *reserved, start, len);
+		extent_changeset_free(*reserved);
+		*reserved = NULL;
+	}
 	return ret;
 }

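Both btrfs hunks above enforce the same ownership rule: if the helper fails after allocating its out-parameter, it frees that allocation and clears the caller's pointer so the caller can neither leak nor double-free it. A generic, kernel-style sketch of that convention follows; every name in it is illustrative, not btrfs code.

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct changeset { int dummy; };

	/* Stand-in for the later reservation stage that may fail. */
	static int second_step(void)
	{
		return -ENOSPC;
	}

	static int reserve(struct changeset **out)
	{
		*out = kzalloc(sizeof(**out), GFP_KERNEL);
		if (!*out)
			return -ENOMEM;
		if (second_step() < 0) {
			kfree(*out);
			*out = NULL;	/* caller sees a clean failure */
			return -ENOSPC;
		}
		return 0;
	}
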
@@ -6051,6 +6051,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	int dev_ret = 0;
 	int ret = 0;

+	if (range->start == U64_MAX)
+		return -EINVAL;
+
 	/*
 	 * Check range overflow if range->len is set.
 	 * The default range->len is U64_MAX.

@@ -4313,6 +4313,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
 	if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
 		return;

+	/*
+	 * A read may stumble upon this buffer later, make sure that it gets an
+	 * error and knows there was an error.
+	 */
+	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+
+	/*
+	 * We need to set the mapping with the io error as well because a write
+	 * error will flip the file system readonly, and then syncfs() will
+	 * return a 0 because we are readonly if we don't modify the err seq for
+	 * the superblock.
+	 */
+	mapping_set_error(page->mapping, -EIO);
+
 	/*
 	 * If we error out, we should add back the dirty_metadata_bytes
 	 * to make it consistent.

@@ -3187,10 +3187,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
 		return -EPERM;

 	vol_args = memdup_user(arg, sizeof(*vol_args));
-	if (IS_ERR(vol_args)) {
-		ret = PTR_ERR(vol_args);
-		goto out;
-	}
+	if (IS_ERR(vol_args))
+		return PTR_ERR(vol_args);

 	if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
 		ret = -EOPNOTSUPP;

@@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
 	key.offset = ref_id;
 again:
 	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
-	BUG_ON(ret < 0);
+	if (ret < 0)
+		goto out;
 	if (ret == 0) {
 		leaf = path->nodes[0];
 		ref = btrfs_item_ptr(leaf, path->slots[0],

@@ -2908,6 +2908,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 						   path->nodes[*level]->len);
 				if (ret)
 					return ret;
+				btrfs_redirty_list_add(trans->transaction,
+						       next);
 			} else {
 				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
 					clear_extent_buffer_dirty(next);
@@ -2988,6 +2990,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 						   next->start, next->len);
 			if (ret)
 				goto out;
+			btrfs_redirty_list_add(trans->transaction, next);
 		} else {
 			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
 				clear_extent_buffer_dirty(next);
@@ -3438,8 +3441,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
 			  EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
 	extent_io_tree_release(&log->log_csum_range);

-	if (trans && log->node)
-		btrfs_redirty_list_add(trans->transaction, log->node);
 	btrfs_put_root(log);
 }

@@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
 	block_group->alloc_offset = block_group->zone_capacity;
 	block_group->free_space_ctl->free_space = 0;
 	btrfs_clear_treelog_bg(block_group);
+	btrfs_clear_data_reloc_bg(block_group);
 	spin_unlock(&block_group->lock);

 	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
@@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
 	ASSERT(block_group->alloc_offset == block_group->zone_capacity);
 	ASSERT(block_group->free_space_ctl->free_space == 0);
 	btrfs_clear_treelog_bg(block_group);
+	btrfs_clear_data_reloc_bg(block_group);
 	spin_unlock(&block_group->lock);

 	map = block_group->physical_map;

@@ -590,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 {
 	unsigned int tioffset; /* challenge message target info area */
 	unsigned int tilen; /* challenge message target info area length  */
-
 	CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+	__u32 server_flags;

 	if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
 		cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
@@ -609,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 		return -EINVAL;
 	}

+	server_flags = le32_to_cpu(pblob->NegotiateFlags);
+	cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+		 ses->ntlmssp->client_flags, server_flags);
+
+	if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+	    (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+		cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+			 __func__);
+		return -EINVAL;
+	}
+	if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+		cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+		return -EINVAL;
+	}
+	if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+		cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+			 __func__);
+		return -EOPNOTSUPP;
+	}
+	if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+	    !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+		pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+			     __func__);
+
+	ses->ntlmssp->server_flags = server_flags;
+
 	memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
-	/* BB we could decode pblob->NegotiateFlags; some may be useful */
 	/* In particular we can examine sign flags */
 	/* BB spec says that if AvId field of MsvAvTimestamp is populated then
 		we must set the MIC field of the AUTHENTICATE_MESSAGE */
-	ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
 	tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
 	tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
 	if (tioffset > blob_len || tioffset + tilen > blob_len) {
@@ -721,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
 	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
 		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
 		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-		NTLMSSP_NEGOTIATE_SEAL;
-	if (server->sign)
-		flags |= NTLMSSP_NEGOTIATE_SIGN;
+		NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+		NTLMSSP_NEGOTIATE_SIGN;
 	if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
 		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;

 	tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+	ses->ntlmssp->client_flags = flags;
 	sec_blob->NegotiateFlags = cpu_to_le32(flags);

 	/* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
@@ -779,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
 	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
 	sec_blob->MessageType = NtLmAuthenticate;

-	flags = NTLMSSP_NEGOTIATE_56 |
-		NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
-		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-		NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
-	if (ses->server->sign)
-		flags |= NTLMSSP_NEGOTIATE_SIGN;
-	if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
-		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+	flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+		NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;

 	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -834,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
 						      *pbuffer, &tmp,
 						      nls_cp);

-	if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
-	     (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
-	    && !calc_seckey(ses)) {
+	if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+	    (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+	    !calc_seckey(ses)) {
 		memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
 		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
 		sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);

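The sess.c hunks above make the client remember the flags it sent and validate the flags the server echoes back in the challenge. A standalone sketch of that kind of bitmask validation is shown below; the NEG_* values are placeholders for illustration only (the real NTLMSSP_NEGOTIATE_* constants live in fs/cifs/ntlmssp.h).

	#include <stdbool.h>
	#include <stdint.h>

	#define NEG_SIGN	0x00000010u	/* placeholder values */
	#define NEG_SEAL	0x00000020u
	#define NEG_128		0x20000000u
	#define NEG_56		0x80000000u

	/*
	 * Accept the server's challenge flags only if, when the client asked
	 * for signing or sealing, the server offered a 56- or 128-bit session
	 * key, mirroring the first check added above.
	 */
	static bool server_flags_ok(uint32_t client_flags, uint32_t server_flags)
	{
		if ((client_flags & (NEG_SEAL | NEG_SIGN)) &&
		    !(server_flags & (NEG_56 | NEG_128)))
			return false;
		return true;
	}
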
29	fs/io-wq.c
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
					 struct io_wqe_acct *acct,
					 struct io_cb_cancel_data *match);
 static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);

 static bool io_worker_get(struct io_worker *worker)
 {
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
 	    test_and_set_bit_lock(0, &worker->create_state))
 		goto fail_release;

+	atomic_inc(&wq->worker_refs);
 	init_task_work(&worker->create_work, func);
 	worker->create_index = acct->index;
 	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
-		clear_bit_unlock(0, &worker->create_state);
+		/*
+		 * EXIT may have been set after checking it above, check after
+		 * adding the task_work and remove any creation item if it is
+		 * now set. wq exit does that too, but we can have added this
+		 * work item after we canceled in io_wq_exit_workers().
+		 */
+		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+			io_wq_cancel_tw_create(wq);
+		io_worker_ref_put(wq);
 		return true;
 	}
+	io_worker_ref_put(wq);
 	clear_bit_unlock(0, &worker->create_state);
 fail_release:
 	io_worker_release(worker);
@@ -1198,13 +1209,9 @@ void io_wq_exit_start(struct io_wq *wq)
 	set_bit(IO_WQ_BIT_EXIT, &wq->state);
 }

-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
 {
 	struct callback_head *cb;
-	int node;
-
-	if (!wq->task)
-		return;

 	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
 		struct io_worker *worker;
@@ -1212,6 +1219,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
 		worker = container_of(cb, struct io_worker, create_work);
 		io_worker_cancel_cb(worker);
 	}
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+	int node;
+
+	if (!wq->task)
+		return;
+
+	io_wq_cancel_tw_create(wq);

 	rcu_read_lock();
 	for_each_node(node) {
@@ -9824,7 +9824,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)

 /*
 * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
 */
 static __cold void io_uring_cancel_generic(bool cancel_all,
 					   struct io_sq_data *sqd)
@@ -9866,8 +9866,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
 							     cancel_all);
 		}

-		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+		io_run_task_work();
 		io_uring_drop_tctx_refs(current);

 		/*
 		 * If we've seen completions, retry without waiting. This
 		 * avoids a race where a completion comes in before we did

@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
 int
 register_cld_notifier(void)
 {
+	WARN_ON(!nfsd_net_id);
 	return rpc_pipefs_notifier_register(&nfsd4_cld_block);
 }

@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 	return 0;
 }

+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+	return !(list_empty(&dp->dl_perfile));
+}
+
 static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)

 	lockdep_assert_held(&state_lock);

-	if (list_empty(&dp->dl_perfile))
+	if (!delegation_hashed(dp))
 		return false;

 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
 	 * queued for a lease break. Don't queue it again.
 	 */
 	spin_lock(&state_lock);
-	if (dp->dl_time == 0) {
+	if (delegation_hashed(dp) && dp->dl_time == 0) {
 		dp->dl_time = ktime_get_boottime_seconds();
 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
 	}
@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
 	int retval;
 	printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");

-	retval = register_cld_notifier();
-	if (retval)
-		return retval;
 	retval = nfsd4_init_slabs();
 	if (retval)
-		goto out_unregister_notifier;
+		return retval;
 	retval = nfsd4_init_pnfs();
 	if (retval)
 		goto out_free_slabs;
@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
 		goto out_free_exports;
 	retval = register_pernet_subsys(&nfsd_net_ops);
 	if (retval < 0)
+		goto out_free_filesystem;
+	retval = register_cld_notifier();
+	if (retval)
 		goto out_free_all;
 	return 0;
 out_free_all:
+	unregister_pernet_subsys(&nfsd_net_ops);
+out_free_filesystem:
 	unregister_filesystem(&nfsd_fs_type);
 out_free_exports:
 	remove_proc_entry("fs/nfs/exports", NULL);
@@ -1561,13 +1563,12 @@ static int __init init_nfsd(void)
 	nfsd4_exit_pnfs();
 out_free_slabs:
 	nfsd4_free_slabs();
-out_unregister_notifier:
-	unregister_cld_notifier();
 	return retval;
 }

 static void __exit exit_nfsd(void)
 {
+	unregister_cld_notifier();
 	unregister_pernet_subsys(&nfsd_net_ops);
 	nfsd_drc_slab_free();
 	remove_proc_entry("fs/nfs/exports", NULL);
@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
 	nfsd4_free_slabs();
 	nfsd4_exit_pnfs();
 	unregister_filesystem(&nfsd_fs_type);
-	unregister_cld_notifier();
 }

 MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");

@@ -35,17 +35,7 @@

 void signalfd_cleanup(struct sighand_struct *sighand)
 {
-	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
-	/*
-	 * The lockless check can race with remove_wait_queue() in progress,
-	 * but in this case its caller should run under rcu_read_lock() and
-	 * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
-	 */
-	if (likely(!waitqueue_active(wqh)))
-		return;
-
-	/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
-	wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+	wake_up_pollfree(&sighand->signalfd_wqh);
 }

 struct signalfd_ctx {

@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
 	ctx->y = y;
 }
 EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
-
-static int __init
-init_smbfs_common(void)
-{
-	return 0;
-}
-static void __init
-exit_smbfs_common(void)
-{
-}
-
-module_init(init_smbfs_common)
-module_exit(exit_smbfs_common)

@@ -161,6 +161,77 @@ struct tracefs_fs_info {
 	struct tracefs_mount_opts mount_opts;
 };

+static void change_gid(struct dentry *dentry, kgid_t gid)
+{
+	if (!dentry->d_inode)
+		return;
+	dentry->d_inode->i_gid = gid;
+}
+
+/*
+ * Taken from d_walk, but without the need for handling renames.
+ * Nothing can be renamed while walking the list, as tracefs
+ * does not support renames. This is only called when mounting
+ * or remounting the file system, to set all the files to
+ * the given gid.
+ */
+static void set_gid(struct dentry *parent, kgid_t gid)
+{
+	struct dentry *this_parent;
+	struct list_head *next;
+
+	this_parent = parent;
+	spin_lock(&this_parent->d_lock);
+
+	change_gid(this_parent, gid);
+repeat:
+	next = this_parent->d_subdirs.next;
+resume:
+	while (next != &this_parent->d_subdirs) {
+		struct list_head *tmp = next;
+		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+		next = tmp->next;
+
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+		change_gid(dentry, gid);
+
+		if (!list_empty(&dentry->d_subdirs)) {
+			spin_unlock(&this_parent->d_lock);
+			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
+			this_parent = dentry;
+			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+			goto repeat;
+		}
+		spin_unlock(&dentry->d_lock);
+	}
+	/*
+	 * All done at this level ... ascend and resume the search.
+	 */
+	rcu_read_lock();
+ascend:
+	if (this_parent != parent) {
+		struct dentry *child = this_parent;
+		this_parent = child->d_parent;
+
+		spin_unlock(&child->d_lock);
+		spin_lock(&this_parent->d_lock);
+
+		/* go into the first sibling still alive */
+		do {
+			next = child->d_child.next;
+			if (next == &this_parent->d_subdirs)
+				goto ascend;
+			child = list_entry(next, struct dentry, d_child);
+		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+		rcu_read_unlock();
+		goto resume;
+	}
+	rcu_read_unlock();
+	spin_unlock(&this_parent->d_lock);
+	return;
+}
+
 static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
 {
 	substring_t args[MAX_OPT_ARGS];
@@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
 			if (!gid_valid(gid))
 				return -EINVAL;
 			opts->gid = gid;
+			set_gid(tracefs_mount->mnt_root, gid);
 			break;
 		case Opt_mode:
 			if (match_octal(&args[0], &option))
@@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
 	inode->i_mode = mode;
 	inode->i_fop = fops ? fops : &tracefs_file_operations;
 	inode->i_private = data;
+	inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+	inode->i_gid = d_inode(dentry->d_parent)->i_gid;
 	d_instantiate(dentry, inode);
 	fsnotify_create(dentry->d_parent->d_inode, dentry);
 	return end_creating(dentry);
@@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
 	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
 	inode->i_op = ops;
 	inode->i_fop = &simple_dir_operations;
+	inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+	inode->i_gid = d_inode(dentry->d_parent)->i_gid;

 	/* directory inodes start off with i_nlink == 2 (for "." entry) */
 	inc_nlink(inode);

@@ -1765,7 +1765,10 @@ static int
 xfs_remount_ro(
 	struct xfs_mount	*mp)
 {
-	int error;
+	struct xfs_icwalk	icw = {
+		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
+	};
+	int			error;

 	/*
 	 * Cancel background eofb scanning so it cannot race with the final
@@ -1773,8 +1776,13 @@ xfs_remount_ro(
 	 */
 	xfs_blockgc_stop(mp);

-	/* Get rid of any leftover CoW reservations... */
-	error = xfs_blockgc_free_space(mp, NULL);
+	/*
+	 * Clear out all remaining COW staging extents and speculative post-EOF
+	 * preallocations so that we don't leave inodes requiring inactivation
+	 * cleanups during reclaim on a read-only mount.  We must process every
+	 * cached inode, so this requires a synchronous cache scan.
+	 */
+	error = xfs_blockgc_free_space(mp, &icw);
 	if (error) {
 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 		return error;

@@ -20,6 +20,7 @@
 */

 #include <linux/math.h>
+#include <linux/sched.h>

 extern unsigned long loops_per_jiffy;

@@ -58,7 +59,18 @@ void calibrate_delay(void);
 void __attribute__((weak)) calibration_delay_done(void);
 void msleep(unsigned int msecs);
 unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+			unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+	usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+	usleep_range_state(min, max, TASK_IDLE);
+}

 static inline void ssleep(unsigned int seconds)
 {

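With the header change above, usleep_range() keeps its old TASK_UNINTERRUPTIBLE behaviour while usleep_idle_range() sleeps in TASK_IDLE so the sleeper is not counted toward load average. A hedged usage sketch follows; the readiness callback is hypothetical, only the delay helpers come from the header above.

	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Poll a (hypothetical) readiness predicate without inflating loadavg. */
	static int wait_for_ready(bool (*ready)(void))
	{
		int tries;

		for (tries = 0; tries < 100; tries++) {
			if (ready())
				return 0;
			/* TASK_IDLE: uninterruptible, but not reported as "D" load */
			usleep_idle_range(1000, 2000);
		}
		return -ETIMEDOUT;
	}
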
@@ -51,9 +51,9 @@
 #define _LINUX_PERCPU_REFCOUNT_H

 #include <linux/atomic.h>
-#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/types.h>
 #include <linux/gfp.h>

 struct percpu_ref;

@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is

@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);

 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)				\
 	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))

+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	/*
+	 * For performance reasons, we don't always take the queue lock here.
+	 * Therefore, we might race with someone removing the last entry from
+	 * the queue, and proceed while they still hold the queue lock.
+	 * However, rcu_read_lock() is required to be held in such cases, so we
+	 * can safely proceed with an RCU-delayed free.
+	 */
+	if (waitqueue_active(wq_head))
+		__wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)						\
 ({										\
 	bool __cond = (condition);						\

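The kerneldoc above spells out a two-step contract for the provider of a short-lived waitqueue: notify pollers with wake_up_pollfree(), then RCU-delay the free of the memory that embeds the queue. A minimal sketch of that provider side is given below; the structure and destroy function are hypothetical, while wake_up_pollfree() and kfree_rcu() are the real APIs referenced above.

	#include <linux/wait.h>
	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct short_lived {
		wait_queue_head_t wqh;
		struct rcu_head rcu;
	};

	static void short_lived_destroy(struct short_lived *s)
	{
		wake_up_pollfree(&s->wqh);	/* force waiters off the queue */
		kfree_rcu(s, rcu);		/* free only after a grace period */
	}
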
@@ -29,7 +29,7 @@
 #define POLLRDHUP	0x2000
 #endif

-#define POLLFREE	(__force __poll_t)0x4000	/* currently only for epoll */
+#define POLLFREE	(__force __poll_t)0x4000

 #define POLL_BUSY_LOOP	(__force __poll_t)0x8000

@@ -66,10 +66,17 @@ struct rlimit64 {
 #define _STK_LIM	(8*1024*1024)

 /*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
 */
-#define MLOCK_LIMIT	((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT	(8*1024*1024)

 /*
 * Due to binary compatibility, the actual resource numbers

@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+	/* POLLFREE must have cleared the queue. */
+	WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any

@ -2057,26 +2057,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
|
|||||||
EXPORT_SYMBOL(msleep_interruptible);
|
EXPORT_SYMBOL(msleep_interruptible);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* usleep_range - Sleep for an approximate time
|
* usleep_range_state - Sleep for an approximate time in a given state
|
||||||
* @min: Minimum time in usecs to sleep
|
* @min: Minimum time in usecs to sleep
|
||||||
* @max: Maximum time in usecs to sleep
|
* @max: Maximum time in usecs to sleep
|
||||||
|
* @state: State of the current task that will be while sleeping
|
||||||
*
|
*
|
||||||
* In non-atomic context where the exact wakeup time is flexible, use
|
* In non-atomic context where the exact wakeup time is flexible, use
|
||||||
* usleep_range() instead of udelay(). The sleep improves responsiveness
|
* usleep_range_state() instead of udelay(). The sleep improves responsiveness
|
||||||
* by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
|
* by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
|
||||||
* power usage by allowing hrtimers to take advantage of an already-
|
* power usage by allowing hrtimers to take advantage of an already-
|
||||||
* scheduled interrupt instead of scheduling a new one just for this sleep.
|
* scheduled interrupt instead of scheduling a new one just for this sleep.
|
||||||
*/
|
*/
|
||||||
void __sched usleep_range(unsigned long min, unsigned long max)
|
void __sched usleep_range_state(unsigned long min, unsigned long max,
|
||||||
|
unsigned int state)
|
||||||
{
|
{
|
||||||
ktime_t exp = ktime_add_us(ktime_get(), min);
|
ktime_t exp = ktime_add_us(ktime_get(), min);
|
||||||
u64 delta = (u64)(max - min) * NSEC_PER_USEC;
|
u64 delta = (u64)(max - min) * NSEC_PER_USEC;
|
||||||
|
|
||||||
for (;;) {
|
for (;;) {
|
||||||
__set_current_state(TASK_UNINTERRUPTIBLE);
|
__set_current_state(state);
|
||||||
/* Do not return before the requested sleep time has elapsed */
|
/* Do not return before the requested sleep time has elapsed */
|
||||||
if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
|
if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(usleep_range);
|
EXPORT_SYMBOL(usleep_range_state);
|
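
Editor's note: the companion wrappers are not shown in this excerpt, but they are presumably thin inlines along the lines of the sketch below (the exact header placement is an assumption here). This is what explains the usleep_idle_range() call that the DAMON hunks later in this diff switch to.

/* Sketch of the expected wrappers around usleep_range_state(). */
static inline void usleep_range(unsigned long min, unsigned long max)
{
	/* Historical behaviour: sleep in uninterruptible state. */
	usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
}

static inline void usleep_idle_range(unsigned long min, unsigned long max)
{
	/* TASK_IDLE sleeps like TASK_UNINTERRUPTIBLE but is not counted in loadavg. */
	usleep_range_state(min, max, TASK_IDLE);
}
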
@@ -5217,6 +5217,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
 {
 	struct ftrace_direct_func *direct;
 	struct ftrace_func_entry *entry;
+	struct ftrace_hash *hash;
 	int ret = -ENODEV;
 
 	mutex_lock(&direct_mutex);
@@ -5225,7 +5226,8 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
 	if (!entry)
 		goto out_unlock;
 
-	if (direct_functions->count == 1)
+	hash = direct_ops.func_hash->filter_hash;
+	if (hash->count == 1)
 		unregister_ftrace_function(&direct_ops);
 
 	ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
@@ -5540,6 +5542,10 @@ int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
 	err = unregister_ftrace_function(ops);
 	remove_direct_functions_hash(hash, addr);
 	mutex_unlock(&direct_mutex);
+
+	/* cleanup for possible another register call */
+	ops->func = NULL;
+	ops->trampoline = 0;
 	return err;
 }
 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
@@ -1237,9 +1237,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
 						argv + consumed, &consumed,
 						&field_version);
 			if (IS_ERR(field)) {
-				argv_free(argv);
 				ret = PTR_ERR(field);
-				goto err;
+				goto err_free_arg;
 			}
 
 			/*
@@ -1262,18 +1261,19 @@ static int __create_synth_event(const char *name, const char *raw_fields)
 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
 				ret = -EINVAL;
-				goto err;
+				goto err_free_arg;
 			}
 
 			fields[n_fields++] = field;
 			if (n_fields == SYNTH_FIELDS_MAX) {
 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
 				ret = -EINVAL;
-				goto err;
+				goto err_free_arg;
 			}
 
 			n_fields_this_loop++;
 		}
+		argv_free(argv);
 
 		if (consumed < argc) {
 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
@@ -1281,7 +1281,6 @@ static int __create_synth_event(const char *name, const char *raw_fields)
 			goto err;
 		}
 
-		argv_free(argv);
 	}
 
 	if (n_fields == 0) {
@@ -1307,6 +1306,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
 	kfree(saved_fields);
 
 	return ret;
+ err_free_arg:
+	argv_free(argv);
  err:
 	for (i = 0; i < n_fields; i++)
 		free_synth_field(fields[i]);
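
Editor's note: the relabelled gotos follow the usual kernel cleanup-ladder idiom: each label undoes only what was still owned when the jump was taken, so paths that still hold argv jump to err_free_arg while paths that have already released it jump to the plain err label. A generic, self-contained sketch of the same idiom, with purely illustrative names that are not taken from the patch:

#include <errno.h>
#include <stdlib.h>

struct parsed_args { int n; /* ... */ };

/* Returns 0 on success, a negative errno value on failure. */
static int build_thing(const char *cmd)
{
	struct parsed_args *argv;
	int ret = 0;

	argv = malloc(sizeof(*argv));
	if (!argv)
		return -ENOMEM;

	if (!cmd || !cmd[0]) {
		ret = -EINVAL;
		goto err_free_arg;	/* argv is still owned here */
	}

	free(argv);		/* success path: ownership released early */
	return ret;

err_free_arg:
	free(argv);		/* failures that still own argv land here */
	return ret;		/* later failures would use a label below this one */
}
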
@@ -428,7 +428,7 @@ config THP_SWAP
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
-	depends on !SMP
+	depends on !SMP || !MMU
 	bool
 	default y
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
 	wb_shutdown(&bdi->wb);
 	cgwb_bdi_unregister(bdi);
 
+	/*
+	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+	 * update the global bdi_min_ratio.
+	 */
+	if (bdi->min_ratio)
+		bdi_set_min_ratio(bdi, 0);
+
 	if (bdi->dev) {
 		bdi_debug_unregister(bdi);
 		device_unregister(bdi->dev);
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
 	for (i = 0; i < nr_ids; i++) {
 		t = damon_new_target(ids[i]);
 		if (!t) {
-			pr_err("Failed to alloc damon_target\n");
 			/* The caller should do cleanup of the ids itself */
 			damon_for_each_target_safe(t, next, ctx)
 				damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
 		unsigned long aggr_int, unsigned long primitive_upd_int,
 		unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-	if (min_nr_reg < 3) {
-		pr_err("min_nr_regions (%lu) must be at least 3\n",
-				min_nr_reg);
+	if (min_nr_reg < 3)
 		return -EINVAL;
-	}
-	if (min_nr_reg > max_nr_reg) {
-		pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
-				min_nr_reg, max_nr_reg);
+	if (min_nr_reg > max_nr_reg)
 		return -EINVAL;
-	}
 
 	ctx->sample_interval = sample_int;
 	ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-	if (usecs > 100 * 1000)
-		schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+	/* See Documentation/timers/timers-howto.rst for the thresholds */
+	if (usecs > 20 * USEC_PER_MSEC)
+		schedule_timeout_idle(usecs_to_jiffies(usecs));
 	else
-		usleep_range(usecs, usecs + 1);
+		usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
 				ctx->callback.after_sampling(ctx))
 			done = true;
 
-		usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+		kdamond_usleep(ctx->sample_interval);
 
 		if (ctx->primitive.check_accesses)
 			max_nr_accesses = ctx->primitive.check_accesses(ctx);
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
 				&wmarks.low, &parsed);
 		if (ret != 18)
 			break;
-		if (!damos_action_valid(action)) {
-			pr_err("wrong action %d\n", action);
+		if (!damos_action_valid(action))
 			goto fail;
-		}
 
 		pos += parsed;
 		scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		struct damon_addr_range *three_regions,
 		unsigned long *expected, int nr_expected)
 {
-	struct damon_ctx *ctx = damon_new_ctx();
 	struct damon_target *t;
 	struct damon_region *r;
 	int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
 		damon_add_region(r, t);
 	}
-	damon_add_target(ctx, t);
 
 	damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
 		KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
 		KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
 	}
-
-	damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
 			new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+		unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-	struct damon_ctx *c = damon_new_ctx();
-	struct damon_target *t;
-	struct damon_region *r;
-	unsigned long i;
-
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-			-EINVAL);
-
-	t = damon_new_target(42);
-	r = damon_new_region(0, 100);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+	struct damon_target *t = damon_new_target(42);
+	struct damon_region *r = damon_new_region(start, end);
 
 	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
-
-	i = 0;
-	damon_for_each_region(r, t) {
-		KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-		KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
-	}
-	damon_free_target(t);
-
-	t = damon_new_target(42);
-	r = damon_new_region(5, 59);
-	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
-
-	i = 0;
-	damon_for_each_region(r, t) {
-		if (i == 4)
-			break;
-		KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-		KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
-	}
-	KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-	KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
-	damon_free_target(t);
-
-	t = damon_new_target(42);
-	r = damon_new_region(5, 6);
-	damon_add_region(r, t);
-	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
+	KUNIT_EXPECT_EQ(test,
+			damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
 	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
 	damon_for_each_region(r, t) {
-		KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-		KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
+		KUNIT_EXPECT_EQ(test, r->ar.start, start);
+		KUNIT_EXPECT_EQ(test, r->ar.end, end);
 	}
+
 	damon_free_target(t);
-	damon_destroy_ctx(c);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+		unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+	struct damon_target *t = damon_new_target(42);
+	struct damon_region *r = damon_new_region(start, end);
+	unsigned long expected_width = (end - start) / nr_pieces;
+	unsigned long i = 0;
+
+	damon_add_region(r, t);
+	KUNIT_EXPECT_EQ(test,
+			damon_va_evenly_split_region(t, r, nr_pieces), 0);
+	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
+
+	damon_for_each_region(r, t) {
+		if (i == nr_pieces - 1)
+			break;
+		KUNIT_EXPECT_EQ(test,
+				r->ar.start, start + i++ * expected_width);
+		KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
+	}
+	KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+	KUNIT_EXPECT_EQ(test, r->ar.end, end);
+	damon_free_target(t);
+}
+
+static void damon_test_split_evenly(struct kunit *test)
+{
+	KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+			-EINVAL);
+
+	damon_test_split_evenly_fail(test, 0, 100, 0);
+	damon_test_split_evenly_succ(test, 0, 100, 10);
+	damon_test_split_evenly_succ(test, 5, 59, 5);
+	damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
@@ -627,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
 	case DAMOS_STAT:
 		return 0;
 	default:
-		pr_warn("Wrong action %d\n", scheme->action);
 		return -EINVAL;
 	}
 
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
 			goto skip;
 		if (!PageUptodate(page) || PageReadahead(page))
 			goto skip;
-		if (PageHWPoison(page))
-			goto skip;
 		if (!trylock_page(page))
 			goto skip;
 		if (page->mapping != mapping)
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
 	int nr_nodes, node;
 
-	if (nid >= nr_online_nodes)
+	if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
 		return 0;
 	/* do node specific alloc */
 	if (nid != NUMA_NO_NODE) {
mm/memcontrol.c (106 changed lines)
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-				     struct pglist_data *pgdat,
-				     enum node_stat_item idx, int nr)
-{
-	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
-
-	rcu_read_lock();
-	memcg = obj_cgroup_memcg(objcg);
-	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	mod_memcg_lruvec_state(lruvec, idx, nr);
-	rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
-		*pflags = 0UL;
-		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
-	}
-
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-	if (likely(in_task()))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
  */
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+	struct memcg_stock_pcp *stock;
+
+	if (likely(in_task())) {
+		*pflags = 0UL;
+		preempt_disable();
+		stock = this_cpu_ptr(&memcg_stock);
+		return &stock->task_obj;
+	}
+
+	local_irq_save(*pflags);
+	stock = this_cpu_ptr(&memcg_stock);
+	return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+	if (likely(in_task()))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+				     struct pglist_data *pgdat,
+				     enum node_stat_item idx, int nr)
+{
+	struct mem_cgroup *memcg;
+	struct lruvec *lruvec;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
+	mod_memcg_lruvec_state(lruvec, idx, nr);
+	rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 				 gfp_t gfp, bool new_page)
 {
mm/slub.c (15 changed lines)
@@ -5081,6 +5081,7 @@ struct loc_track {
 	unsigned long max;
 	unsigned long count;
 	struct location *loc;
+	loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-	struct location *l;
-	unsigned int idx = *(unsigned int *)v;
 	struct loc_track *t = seq->private;
+	struct location *l;
+	unsigned long idx;
 
+	idx = (unsigned long) t->idx;
 	if (idx < t->count) {
 		l = &t->loc[idx];
 
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
 	struct loc_track *t = seq->private;
 
-	v = ppos;
-	++*ppos;
+	t->idx = ++(*ppos);
 	if (*ppos <= t->count)
-		return v;
+		return ppos;
 
 	return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+	struct loc_track *t = seq->private;
+
+	t->idx = *ppos;
 	return ppos;
 }
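
Editor's note: for readers unfamiliar with the seq_file contract, the bug being fixed is that ->show() recovered its position from the void * cookie while ->next() advanced *ppos, and the two could disagree; keeping the cursor in the iterator's private state makes every callback read the same position. A stand-alone sketch of that pattern, with hypothetical names and a loc_track-like array iterator assumed:

#include <linux/seq_file.h>

/* Hypothetical array-backed iterator keeping its cursor in private data. */
struct track {
	unsigned long count;	/* number of valid records */
	loff_t idx;		/* cursor shared by ->start/->next/->show */
};

static void *track_start(struct seq_file *seq, loff_t *ppos)
{
	struct track *t = seq->private;

	t->idx = *ppos;			/* remember where iteration begins */
	return ppos;			/* any non-NULL cookie will do */
}

static void *track_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	struct track *t = seq->private;

	t->idx = ++(*ppos);		/* keep the private cursor in step with *ppos */
	return *ppos <= t->count ? ppos : NULL;
}

static int track_show(struct seq_file *seq, void *v)
{
	struct track *t = seq->private;
	loff_t idx = t->idx;		/* trust the cursor, not the cookie */

	if (idx < t->count)
		seq_printf(seq, "record %lld\n", (long long)idx);
	return 0;
}
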
@@ -4,6 +4,7 @@ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
 
 CFLAGS_sample-trace-array.o := -I$(src)
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
samples/ftrace/ftrace-direct-multi-modify.c (new file, 152 lines)
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+void my_direct_func1(unsigned long ip)
+{
+	trace_printk("my direct func1 ip %lx\n", ip);
+}
+
+void my_direct_func2(unsigned long ip)
+{
+	trace_printk("my direct func2 ip %lx\n", ip);
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+#ifdef CONFIG_X86_64
+
+asm (
+"	.pushsection    .text, \"ax\", @progbits\n"
+"	.type		my_tramp1, @function\n"
+"	.globl		my_tramp1\n"
+"   my_tramp1:"
+"	pushq %rbp\n"
+"	movq %rsp, %rbp\n"
+"	pushq %rdi\n"
+"	movq 8(%rbp), %rdi\n"
+"	call my_direct_func1\n"
+"	popq %rdi\n"
+"	leave\n"
+"	ret\n"
+"	.size		my_tramp1, .-my_tramp1\n"
+"	.type		my_tramp2, @function\n"
+"\n"
+"	.globl		my_tramp2\n"
+"   my_tramp2:"
+"	pushq %rbp\n"
+"	movq %rsp, %rbp\n"
+"	pushq %rdi\n"
+"	movq 8(%rbp), %rdi\n"
+"	call my_direct_func2\n"
+"	popq %rdi\n"
+"	leave\n"
+"	ret\n"
+"	.size		my_tramp2, .-my_tramp2\n"
+"	.popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"	.pushsection	.text, \"ax\", @progbits\n"
+"	.type		my_tramp1, @function\n"
+"	.globl		my_tramp1\n"
+"   my_tramp1:"
+"	lgr		%r1,%r15\n"
+"	stmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	stg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	aghi		%r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"	stg		%r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"	lgr		%r2,%r0\n"
+"	brasl		%r14,my_direct_func1\n"
+"	aghi		%r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"	lmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	lg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	lgr		%r1,%r0\n"
+"	br		%r1\n"
+"	.size		my_tramp1, .-my_tramp1\n"
+"\n"
+"	.type		my_tramp2, @function\n"
+"	.globl		my_tramp2\n"
+"   my_tramp2:"
+"	lgr		%r1,%r15\n"
+"	stmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	stg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	aghi		%r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"	stg		%r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"	lgr		%r2,%r0\n"
+"	brasl		%r14,my_direct_func2\n"
+"	aghi		%r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"	lmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	lg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	lgr		%r1,%r0\n"
+"	br		%r1\n"
+"	.size		my_tramp2, .-my_tramp2\n"
+"	.popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+	(unsigned long)my_tramp1,
+	(unsigned long)my_tramp2,
+};
+
+static struct ftrace_ops direct;
+
+static int simple_thread(void *arg)
+{
+	static int t;
+	int ret = 0;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(2 * HZ);
+
+		if (ret)
+			continue;
+		t ^= 1;
+		ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+		if (!ret)
+			my_tramp = tramps[t];
+		WARN_ON_ONCE(ret);
+	}
+
+	return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_multi_init(void)
+{
+	int ret;
+
+	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+	ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+	ret = register_ftrace_direct_multi(&direct, my_tramp);
+
+	if (!ret)
+		simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+	return ret;
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+	kthread_stop(simple_tsk);
+	unregister_ftrace_direct_multi(&direct, my_tramp);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
 				  struct snd_ctl_elem_value *data,
 				  int type, int count)
 {
+	struct snd_ctl_elem_value32 __user *data32 = userdata;
 	int i, size;
 
 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
 		if (copy_to_user(valuep, data->value.bytes.data, size))
 			return -EFAULT;
 	}
+	if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
+		return -EFAULT;
 	return 0;
 }
 
@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
  *
  * Return the maximum value for field PAR.
  */
-static unsigned int
+static int
 snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
 			   snd_pcm_hw_param_t var, int *dir)
 {
@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 				     struct snd_pcm_hw_params *oss_params,
 				     struct snd_pcm_hw_params *slave_params)
 {
-	size_t s;
-	size_t oss_buffer_size, oss_period_size, oss_periods;
-	size_t min_period_size, max_period_size;
+	ssize_t s;
+	ssize_t oss_buffer_size;
+	ssize_t oss_period_size, oss_periods;
+	ssize_t min_period_size, max_period_size;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	size_t oss_frame_size;
 
 	oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
 			 params_channels(oss_params) / 8;
 
+	oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
+					SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+					NULL);
+	if (oss_buffer_size <= 0)
+		return -EINVAL;
 	oss_buffer_size = snd_pcm_plug_client_size(substream,
-						   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
-	if (!oss_buffer_size)
+						   oss_buffer_size * oss_frame_size);
+	if (oss_buffer_size <= 0)
 		return -EINVAL;
 	oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
 	if (atomic_read(&substream->mmap_count)) {
@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
 	min_period_size = snd_pcm_plug_client_size(substream,
 						   snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-	if (min_period_size) {
+	if (min_period_size > 0) {
 		min_period_size *= oss_frame_size;
 		min_period_size = roundup_pow_of_two(min_period_size);
 		if (oss_period_size < min_period_size)
@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
 	max_period_size = snd_pcm_plug_client_size(substream,
 						   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-	if (max_period_size) {
+	if (max_period_size > 0) {
 		max_period_size *= oss_frame_size;
 		max_period_size = rounddown_pow_of_two(max_period_size);
 		if (oss_period_size > max_period_size)
@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 	oss_periods = substream->oss.setup.periods;
 
 	s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
-	if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+	if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
 		s = runtime->oss.maxfrags;
 	if (oss_periods > s)
 		oss_periods = s;
@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
 		err = -EINVAL;
 		goto failure;
 	}
-	choose_rate(substream, sparams, runtime->oss.rate);
-	snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
+
+	err = choose_rate(substream, sparams, runtime->oss.rate);
+	if (err < 0)
+		goto failure;
+	err = snd_pcm_hw_param_near(substream, sparams,
+				    SNDRV_PCM_HW_PARAM_CHANNELS,
+				    runtime->oss.channels, NULL);
+	if (err < 0)
+		goto failure;
 
 	format = snd_pcm_oss_format_from(runtime->oss.format);
 
@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
 	if (runtime->oss.subdivision || runtime->oss.fragshift)
 		return -EINVAL;
 	fragshift = val & 0xffff;
-	if (fragshift >= 31)
+	if (fragshift >= 25) /* should be large enough */
 		return -EINVAL;
 	runtime->oss.fragshift = fragshift;
 	runtime->oss.maxfrags = (val >> 16) & 0xffff;
@@ -6503,22 +6503,26 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
-							     const struct hda_fixup *fix,
-							     int action)
+static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
+	WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
+	WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
+	WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
+	{}
+};
+
+static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
+					   const struct hda_fixup *fix,
+					   int action)
 {
 	/*
-	 * A certain other OS sets these coeffs to different values. On at least one TongFang
-	 * barebone these settings might survive even a cold reboot. So to restore a clean slate the
-	 * values are explicitly reset to default here. Without this, the external microphone is
-	 * always in a plugged-in state, while the internal microphone is always in an unplugged
-	 * state, breaking the ability to use the internal microphone.
+	 * A certain other OS sets these coeffs to different values. On at least
+	 * one TongFang barebone these settings might survive even a cold
+	 * reboot. So to restore a clean slate the values are explicitly reset
+	 * to default here. Without this, the external microphone is always in a
+	 * plugged-in state, while the internal microphone is always in an
+	 * unplugged state, breaking the ability to use the internal microphone.
 	 */
-	alc_write_coef_idx(codec, 0x24, 0x0000);
-	alc_write_coef_idx(codec, 0x26, 0x0000);
-	alc_write_coef_idx(codec, 0x29, 0x3000);
-	alc_write_coef_idx(codec, 0x37, 0xfe05);
-	alc_write_coef_idx(codec, 0x45, 0x5089);
+	alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
 }
 
 static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
@@ -6759,7 +6763,7 @@ enum {
 	ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
 	ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
 	ALC287_FIXUP_13S_GEN2_SPEAKERS,
-	ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+	ALC256_FIXUP_SET_COEF_DEFAULTS,
 	ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
 	ALC233_FIXUP_NO_AUDIO_JACK,
 };
@@ -8465,9 +8469,9 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC269_FIXUP_HEADSET_MODE,
 	},
-	[ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+	[ALC256_FIXUP_SET_COEF_DEFAULTS] = {
 		.type = HDA_FIXUP_FUNC,
-		.v.func = alc256_fixup_tongfang_reset_persistent_settings,
+		.v.func = alc256_fixup_set_coef_defaults,
 	},
 	[ALC245_FIXUP_HP_GPIO_LED] = {
 		.type = HDA_FIXUP_FUNC,
@@ -8929,7 +8933,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
 	SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
-	SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
+	SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
 	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -10231,6 +10235,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
 	}
 }
 
+static void alc897_hp_automute_hook(struct hda_codec *codec,
+					 struct hda_jack_callback *jack)
+{
+	struct alc_spec *spec = codec->spec;
+	int vref;
+
+	snd_hda_gen_hp_automute(codec, jack);
+	vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+	snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+			    vref);
+}
+
+static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+				     const struct hda_fixup *fix, int action)
+{
+	struct alc_spec *spec = codec->spec;
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+	}
+}
+
 static const struct coef_fw alc668_coefs[] = {
 	WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
 	WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
@@ -10311,6 +10336,8 @@ enum {
 	ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
 	ALC668_FIXUP_HEADSET_MIC,
 	ALC668_FIXUP_MIC_DET_COEF,
+	ALC897_FIXUP_LENOVO_HEADSET_MIC,
+	ALC897_FIXUP_HEADSET_MIC_PIN,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10717,6 +10744,19 @@ static const struct hda_fixup alc662_fixups[] = {
 			{}
 		},
 	},
+	[ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc897_fixup_lenovo_headset_mic,
+	},
+	[ALC897_FIXUP_HEADSET_MIC_PIN] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x1a, 0x03a11050 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+	},
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10761,6 +10801,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
 	SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+	SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+	SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+	SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+	SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
Some files were not shown because too many files have changed in this diff.