This is the 6.1.70 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmWSsnYACgkQONu9yGCS
aT7ZRw//bmrTWoNbFf/qdM11oPF9EHus9FUgSlP5yvNaa6jcPfwGx71NPXUkz+wU
xKobh1VwK7TJxq4JHFQeMmupW/8++NeWNygwtYsllwnsMGzHL+mz2Txysrr/mhMx
WUs6UVYXRxnuQJJDSqtTvMoyllpAJ1QQxJNuhKKOI1i+0DIu9YjQklD/4eW3cebv
8B9f3CeOyP/oL5Z0MqFTP8OnWx6X3jTbO4caor+qsyR+frgpXgBppTF76RHcd8lX
MLVlx7aqr4wcml/uUMsolw8Zjbb719mX+KW3LHltl8wHftZeinYUsu1afnlb5dG1
rAaVgut0PmjTAQ/KwIp54CGO2MADwApMCUXIm0yyKSpNfw+HKR10bpz64HOFp9KQ
368YpjDJ3onkQdrLjV57w37YBRLyWxipeBya2+S4rdyPSfuvPkPCRNVkEDnHVAnH
jxEhuoMZ2f/CIA8BT32y4DYDvEaIdfp7jVvEDFREDyIVXRMBhIneMhhyjU+Oe7Rw
1q/sfEJejXFa5VvC+Jl+K5LouP59M5MTq3RkCoYxZKz+bdfpOLEJ6AZJoZHcS02J
QlM/pL213nC1ye3tuWFu3tNPzPS/G6LNQfGgSsBUzRn9IX2osn/epNFnCHBIFqlK
apjrXObrmqKE6jNvy6ktHUDpnEXPZFpvirSXRN2Lk9SYh76bFP0=
=d63o
-----END PGP SIGNATURE-----

Merge 6.1.70 into android14-6.1-lts

Changes in 6.1.70
    kasan: disable kasan_non_canonical_hook() for HW tags
    bpf: Fix prog_array_map_poke_run map poke update
    HID: i2c-hid: acpi: Unify ACPI ID tables format
    HID: i2c-hid: Add IDEA5002 to i2c_hid_acpi_blacklist[]
    drm/amd/display: fix hw rotated modes when PSR-SU is enabled
    ARM: dts: dra7: Fix DRA7 L3 NoC node register size
    ARM: OMAP2+: Fix null pointer dereference and memory leak in omap_soc_device_init
    reset: Fix crash when freeing non-existent optional resets
    s390/vx: fix save/restore of fpu kernel context
    wifi: iwlwifi: pcie: add another missing bh-disable for rxq->lock
    wifi: mac80211: check if the existing link config remains unchanged
    wifi: mac80211: mesh: check element parsing succeeded
    wifi: mac80211: mesh_plink: fix matches_local logic
    Revert "net/mlx5e: fix double free of encap_header in update funcs"
    Revert "net/mlx5e: fix double free of encap_header"
    net/mlx5e: Fix slab-out-of-bounds in mlx5_query_nic_vport_mac_list()
    net/mlx5: Introduce and use opcode getter in command interface
    net/mlx5: Prevent high-rate FW commands from populating all slots
    net/mlx5: Re-organize mlx5_cmd struct
    net/mlx5e: Fix a race in command alloc flow
    net/mlx5e: fix a potential double-free in fs_udp_create_groups
    net/mlx5: Fix fw tracer first block check
    net/mlx5e: Correct snprintf truncation handling for fw_version buffer
    net/mlx5e: Correct snprintf truncation handling for fw_version buffer used by representors
    net: mscc: ocelot: fix eMAC TX RMON stats for bucket 256-511 and above
    octeontx2-pf: Fix graceful exit during PFC configuration failure
    net: Return error from sk_stream_wait_connect() if sk_wait_event() fails
    net: sched: ife: fix potential use-after-free
    ethernet: atheros: fix a memleak in atl1e_setup_ring_resources
    net/rose: fix races in rose_kill_by_device()
    Bluetooth: Fix deadlock in vhci_send_frame
    Bluetooth: hci_event: shut up a false-positive warning
    net: mana: select PAGE_POOL
    net: check vlan filter feature in vlan_vids_add_by_dev() and vlan_vids_del_by_dev()
    afs: Fix the dynamic root's d_delete to always delete unused dentries
    afs: Fix dynamic root lookup DNS check
    net: check dev->gso_max_size in gso_features_check()
    keys, dns: Allow key types (eg. DNS) to be reclaimed immediately on expiry
    afs: Fix overwriting of result of DNS query
    afs: Fix use-after-free due to get/remove race in volume tree
    ASoC: hdmi-codec: fix missing report for jack initial status
    ASoC: fsl_sai: Fix channel swap issue on i.MX8MP
    i2c: aspeed: Handle the coalesced stop conditions with the start conditions.
    x86/xen: add CPU dependencies for 32-bit build
    pinctrl: at91-pio4: use dedicated lock class for IRQ
    gpiolib: cdev: add gpio_device locking wrapper around gpio_ioctl()
    nvme-pci: fix sleeping function called from interrupt context
    drm/i915/mtl: limit second scaler vertical scaling in ver >= 14
    drm/i915: Relocate intel_atomic_setup_scalers()
    drm/i915: Fix intel_atomic_setup_scalers() plane_state handling
    drm/i915/dpt: Only do the POT stride remap when using DPT
    drm/i915/mtl: Add MTL for remapping CCS FBs
    drm/i915: Fix ADL+ tiled plane stride when the POT stride is smaller than the original
    interconnect: Treat xlate() returning NULL node as an error
    iio: imu: inv_mpu6050: fix an error code problem in inv_mpu6050_read_raw
    interconnect: qcom: sm8250: Enable sync_state
    Input: ipaq-micro-keys - add error handling for devm_kmemdup
    scsi: bnx2fc: Fix skb double free in bnx2fc_rcv()
    iio: common: ms_sensors: ms_sensors_i2c: fix humidity conversion time table
    iio: adc: ti_am335x_adc: Fix return value check of tiadc_request_dma()
    iio: triggered-buffer: prevent possible freeing of wrong buffer
    ALSA: usb-audio: Increase delay in MOTU M quirk
    usb-storage: Add quirk for incorrect WP on Kingston DT Ultimate 3.0 G3
    wifi: cfg80211: Add my certificate
    wifi: cfg80211: fix certs build to not depend on file order
    USB: serial: ftdi_sio: update Actisense PIDs constant names
    USB: serial: option: add Quectel EG912Y module support
    USB: serial: option: add Foxconn T99W265 with new baseline
    USB: serial: option: add Quectel RM500Q R13 firmware support
    ALSA: hda/realtek: Add quirk for ASUS ROG GV302XA
    Bluetooth: hci_event: Fix not checking if HCI_OP_INQUIRY has been sent
    Bluetooth: af_bluetooth: Fix Use-After-Free in bt_sock_recvmsg
    Bluetooth: L2CAP: Send reject on command corrupted request
    Bluetooth: MGMT/SMP: Fix address type when using SMP over BREDR/LE
    Bluetooth: Add more enc key size check
    net: usb: ax88179_178a: avoid failed operations when device is disconnected
    Input: soc_button_array - add mapping for airplane mode button
    net: 9p: avoid freeing uninit memory in p9pdu_vreadf
    net: rfkill: gpio: set GPIO direction
    net: ks8851: Fix TX stall caused by TX buffer overrun
    dt-bindings: nvmem: mxs-ocotp: Document fsl,ocotp
    smb: client: fix OOB in cifsd when receiving compounded resps
    smb: client: fix potential OOB in cifs_dump_detail()
    smb: client: fix OOB in SMB2_query_info_init()
    smb: client: fix OOB in smbCalcSize()
    drm/i915: Reject async flips with bigjoiner
    9p: prevent read overrun in protocol dump tracepoint
    RISC-V: Fix do_notify_resume / do_work_pending prototype
    loop: do not enforce max_loop hard limit by (new) default
    dm thin metadata: Fix ABBA deadlock by resetting dm_bufio_client
    Revert "drm/amd/display: Do not set DRR on pipe commit"
    btrfs: zoned: no longer count fresh BG region as zone unusable
    ubifs: fix possible dereference after free
    ublk: move ublk_cancel_dev() out of ub->mutex
    selftests: mptcp: join: fix subflow_send_ack lookup
    Revert "scsi: aacraid: Reply queue mapping to CPUs based on IRQ affinity"
    scsi: core: Always send batch on reset or error handling command
    tracing / synthetic: Disable events after testing in synth_event_gen_test_init()
    dm-integrity: don't modify bio's immutable bio_vec in integrity_metadata()
    pinctrl: starfive: jh7100: ignore disabled device tree nodes
    bus: ti-sysc: Flush posted write only after srst_udelay
    gpio: dwapb: mask/unmask IRQ when disable/enale it
    lib/vsprintf: Fix %pfwf when current node refcount == 0
    thunderbolt: Fix memory leak in margining_port_remove()
    KVM: arm64: vgic: Simplify kvm_vgic_destroy()
    KVM: arm64: vgic: Add a non-locking primitive for kvm_vgic_vcpu_destroy()
    KVM: arm64: vgic: Force vcpu vgic teardown on vcpu destroy
    x86/alternatives: Sync core before enabling interrupts
    mm/damon/core: make damon_start() waits until kdamond_fn() starts
    fuse: share lookup state between submount and its parent
    wifi: cfg80211: fix CQM for non-range use
    wifi: nl80211: fix deadlock in nl80211_set_cqm_rssi (6.6.x)
    loop: deprecate autoloading callback loop_probe()
    Linux 6.1.70

Change-Id: I72bfbd39ae932d290b13d6fdde8e6684a84ec9e1
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 1e63881f5c
@@ -14,9 +14,11 @@ allOf:

properties:
compatible:
enum:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
items:
- enum:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
- const: fsl,ocotp

"#address-cells":
const: 1
@@ -40,7 +42,7 @@ additionalProperties: false
examples:
- |
ocotp: efuse@8002c000 {
compatible = "fsl,imx28-ocotp";
compatible = "fsl,imx28-ocotp", "fsl,ocotp";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x8002c000 0x2000>;

Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 69
SUBLEVEL = 70
EXTRAVERSION =
NAME = Curry Ramen

@@ -144,7 +144,7 @@ ocp: ocp {

l3-noc@44000000 {
compatible = "ti,dra7-l3-noc";
reg = <0x44000000 0x1000>,
reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;

@@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)

soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
if (!soc_dev_attr->family) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0];

soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}

@@ -489,7 +489,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);

kvm_vgic_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu);
}

@@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
vgic_v4_teardown(kvm);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

@@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);

INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_unregister_redist_iodev(vcpu);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
}

static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu;
unsigned long i;
struct kvm *kvm = vcpu->kvm;

lockdep_assert_held(&kvm->arch.config_lock);

vgic_debug_destroy(kvm);

kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_destroy(vcpu);

kvm_vgic_dist_destroy(kvm);
mutex_lock(&kvm->slots_lock);
__kvm_vgic_vcpu_destroy(vcpu);
mutex_unlock(&kvm->slots_lock);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;

mutex_lock(&kvm->slots_lock);

vgic_debug_destroy(kvm);

kvm_for_each_vcpu(i, vcpu, kvm)
__kvm_vgic_vcpu_destroy(vcpu);

mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);

kvm_vgic_dist_destroy(kvm);

mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
}

/**
@@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
type = VGIC_V3;
}

if (ret) {
__kvm_vgic_destroy(kvm);
if (ret)
goto out;
}

dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);

ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
if (ret)
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;

goto out_slots;
out:
mutex_unlock(&kvm->arch.config_lock);
out_slots:
mutex_unlock(&kvm->slots_lock);

if (ret)
kvm_vgic_destroy(kvm);

return ret;
}

@@ -820,7 +820,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
return ret;
}

static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

@@ -239,6 +239,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);

@@ -7,6 +7,6 @@
#include <uapi/asm/ptrace.h>

asmlinkage __visible
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);

#endif

@@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)

#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)

struct kernel_fpu;

@@ -1015,8 +1015,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
} else {
local_irq_save(flags);
memcpy(addr, opcode, len);
local_irq_restore(flags);
sync_core();
local_irq_restore(flags);

/*
* Could also do a CLFLUSH here to speed up CPU recovery; but

@@ -9,6 +9,7 @@ config XEN
select PARAVIRT_CLOCK
select X86_HV_CALLBACK_VECTOR
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the

@@ -1777,14 +1777,43 @@ static const struct block_device_operations lo_fops = {
/*
* If max_loop is specified, create that many devices upfront.
* This also becomes a hard limit. If max_loop is not specified,
* the default isn't a hard limit (as before commit 85c50197716c
* changed the default value from 0 for max_loop=0 reasons), just
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
* init time. Loop devices can be requested on-demand with the
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
module_param(max_loop, int, 0444);

#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static bool max_loop_specified;

static int max_loop_param_set_int(const char *val,
const struct kernel_param *kp)
{
int ret;

ret = param_set_int(val, kp);
if (ret < 0)
return ret;

max_loop_specified = true;
return 0;
}

static const struct kernel_param_ops max_loop_param_ops = {
.set = max_loop_param_set_int,
.get = param_get_int,
};

module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
#else
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
#endif

module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");

@@ -2089,14 +2118,18 @@ static void loop_remove(struct loop_device *lo)
put_disk(lo->lo_disk);
}

#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
static void loop_probe(dev_t dev)
{
int idx = MINOR(dev) >> part_shift;

if (max_loop && idx >= max_loop)
if (max_loop_specified && max_loop && idx >= max_loop)
return;
loop_add(idx);
}
#else
#define loop_probe NULL
#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */

static int loop_control_remove(int idx)
{
@@ -2277,6 +2310,9 @@ module_exit(loop_exit);
static int __init max_loop_setup(char *str)
{
max_loop = simple_strtol(str, NULL, 0);
#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
max_loop_specified = true;
#endif
return 1;
}

@ -103,6 +103,9 @@ struct ublk_uring_cmd_pdu {
|
||||
*/
|
||||
#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
|
||||
|
||||
/* atomic RW with ubq->cancel_lock */
|
||||
#define UBLK_IO_FLAG_CANCELED 0x80000000
|
||||
|
||||
struct ublk_io {
|
||||
/* userspace buffer address from io cmd */
|
||||
__u64 addr;
|
||||
@ -126,6 +129,7 @@ struct ublk_queue {
|
||||
unsigned int max_io_sz;
|
||||
bool force_abort;
|
||||
unsigned short nr_io_ready; /* how many ios setup */
|
||||
spinlock_t cancel_lock;
|
||||
struct ublk_device *dev;
|
||||
struct ublk_io ios[];
|
||||
};
|
||||
@ -1045,28 +1049,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
|
||||
return ubq->nr_io_ready == ubq->q_depth;
|
||||
}
|
||||
|
||||
static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
|
||||
{
|
||||
io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
|
||||
}
|
||||
|
||||
static void ublk_cancel_queue(struct ublk_queue *ubq)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!ublk_queue_ready(ubq))
|
||||
return;
|
||||
|
||||
for (i = 0; i < ubq->q_depth; i++) {
|
||||
struct ublk_io *io = &ubq->ios[i];
|
||||
|
||||
if (io->flags & UBLK_IO_FLAG_ACTIVE)
|
||||
io_uring_cmd_complete_in_task(io->cmd,
|
||||
ublk_cmd_cancel_cb);
|
||||
}
|
||||
if (io->flags & UBLK_IO_FLAG_ACTIVE) {
|
||||
bool done;
|
||||
|
||||
/* all io commands are canceled */
|
||||
ubq->nr_io_ready = 0;
|
||||
spin_lock(&ubq->cancel_lock);
|
||||
done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
|
||||
if (!done)
|
||||
io->flags |= UBLK_IO_FLAG_CANCELED;
|
||||
spin_unlock(&ubq->cancel_lock);
|
||||
|
||||
if (!done)
|
||||
io_uring_cmd_done(io->cmd,
|
||||
UBLK_IO_RES_ABORT, 0,
|
||||
IO_URING_F_UNLOCKED);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Cancel all pending commands, must be called after del_gendisk() returns */
|
||||
@ -1113,7 +1117,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
|
||||
blk_mq_quiesce_queue(ub->ub_disk->queue);
|
||||
ublk_wait_tagset_rqs_idle(ub);
|
||||
ub->dev_info.state = UBLK_S_DEV_QUIESCED;
|
||||
ublk_cancel_dev(ub);
|
||||
/* we are going to release task_struct of ubq_daemon and resets
|
||||
* ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
|
||||
* Besides, monitor_work is not necessary in QUIESCED state since we have
|
||||
@ -1136,6 +1139,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
|
||||
__ublk_quiesce_dev(ub);
|
||||
unlock:
|
||||
mutex_unlock(&ub->mutex);
|
||||
ublk_cancel_dev(ub);
|
||||
}
|
||||
|
||||
static void ublk_unquiesce_dev(struct ublk_device *ub)
|
||||
@ -1175,8 +1179,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
|
||||
put_disk(ub->ub_disk);
|
||||
ub->ub_disk = NULL;
|
||||
unlock:
|
||||
ublk_cancel_dev(ub);
|
||||
mutex_unlock(&ub->mutex);
|
||||
ublk_cancel_dev(ub);
|
||||
cancel_delayed_work_sync(&ub->monitor_work);
|
||||
}
|
||||
|
||||
@ -1353,6 +1357,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
|
||||
void *ptr;
|
||||
int size;
|
||||
|
||||
spin_lock_init(&ubq->cancel_lock);
|
||||
ubq->flags = ub->dev_info.flags;
|
||||
ubq->q_id = q_id;
|
||||
ubq->q_depth = ub->dev_info.queue_depth;
|
||||
@ -1882,8 +1887,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
|
||||
int i;
|
||||
|
||||
WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
|
||||
|
||||
/* All old ioucmds have to be completed */
|
||||
WARN_ON_ONCE(ubq->nr_io_ready);
|
||||
ubq->nr_io_ready = 0;
|
||||
/* old daemon is PF_EXITING, put it now */
|
||||
put_task_struct(ubq->ubq_daemon);
|
||||
/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
@ -44,6 +45,7 @@ struct vhci_data {
|
||||
bool wakeup;
|
||||
__u16 msft_opcode;
|
||||
bool aosp_capable;
|
||||
atomic_t initialized;
|
||||
};
|
||||
|
||||
static int vhci_open_dev(struct hci_dev *hdev)
|
||||
@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
|
||||
mutex_lock(&data->open_mutex);
|
||||
skb_queue_tail(&data->readq, skb);
|
||||
mutex_unlock(&data->open_mutex);
|
||||
|
||||
wake_up_interruptible(&data->read_wait);
|
||||
if (atomic_read(&data->initialized))
|
||||
wake_up_interruptible(&data->read_wait);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -363,7 +364,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
|
||||
skb_put_u8(skb, 0xff);
|
||||
skb_put_u8(skb, opcode);
|
||||
put_unaligned_le16(hdev->id, skb_put(skb, 2));
|
||||
skb_queue_tail(&data->readq, skb);
|
||||
skb_queue_head(&data->readq, skb);
|
||||
atomic_inc(&data->initialized);
|
||||
|
||||
wake_up_interruptible(&data->read_wait);
|
||||
return 0;
|
||||
|
@ -2174,13 +2174,23 @@ static int sysc_reset(struct sysc *ddata)
|
||||
sysc_val = sysc_read_sysconfig(ddata);
|
||||
sysc_val |= sysc_mask;
|
||||
sysc_write(ddata, sysc_offset, sysc_val);
|
||||
/* Flush posted write */
|
||||
|
||||
/*
|
||||
* Some devices need a delay before reading registers
|
||||
* after reset. Presumably a srst_udelay is not needed
|
||||
* for devices that use a rstctrl register reset.
|
||||
*/
|
||||
if (ddata->cfg.srst_udelay)
|
||||
fsleep(ddata->cfg.srst_udelay);
|
||||
|
||||
/*
|
||||
* Flush posted write. For devices needing srst_udelay
|
||||
* this should trigger an interconnect error if the
|
||||
* srst_udelay value is needed but not configured.
|
||||
*/
|
||||
sysc_val = sysc_read_sysconfig(ddata);
|
||||
}
|
||||
|
||||
if (ddata->cfg.srst_udelay)
|
||||
fsleep(ddata->cfg.srst_udelay);
|
||||
|
||||
if (ddata->post_reset_quirk)
|
||||
ddata->post_reset_quirk(ddata);
|
||||
|
||||
|
@ -283,13 +283,15 @@ static void dwapb_irq_enable(struct irq_data *d)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
||||
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
|
||||
val = dwapb_read(gpio, GPIO_INTEN);
|
||||
val |= BIT(irqd_to_hwirq(d));
|
||||
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
|
||||
dwapb_write(gpio, GPIO_INTEN, val);
|
||||
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
|
||||
dwapb_write(gpio, GPIO_INTMASK, val);
|
||||
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
|
||||
}
|
||||
|
||||
@ -297,12 +299,14 @@ static void dwapb_irq_disable(struct irq_data *d)
|
||||
{
|
||||
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
|
||||
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
|
||||
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
|
||||
val = dwapb_read(gpio, GPIO_INTEN);
|
||||
val &= ~BIT(irqd_to_hwirq(d));
|
||||
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
|
||||
dwapb_write(gpio, GPIO_INTMASK, val);
|
||||
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
|
||||
dwapb_write(gpio, GPIO_INTEN, val);
|
||||
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
|
||||
}
|
||||
|
@ -2444,10 +2444,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* gpio_ioctl() - ioctl handler for the GPIO chardev
|
||||
*/
|
||||
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct gpio_chardev_data *cdev = file->private_data;
|
||||
struct gpio_device *gdev = cdev->gdev;
|
||||
@ -2484,6 +2481,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* gpio_ioctl() - ioctl handler for the GPIO chardev
|
||||
*/
|
||||
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
struct gpio_chardev_data *cdev = file->private_data;
|
||||
|
||||
return call_ioctl_locked(file, cmd, arg, cdev->gdev,
|
||||
gpio_ioctl_unlocked);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
|
@ -5104,6 +5104,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
return;
|
||||
|
||||
if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
|
||||
goto ffu;
|
||||
|
||||
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
|
||||
clips = drm_plane_get_damage_clips(new_plane_state);
|
||||
|
||||
|
@ -465,6 +465,7 @@ struct dc_cursor_mi_param {
|
||||
struct fixed31_32 v_scale_ratio;
|
||||
enum dc_rotation_angle rotation;
|
||||
bool mirror;
|
||||
struct dc_stream_state *stream;
|
||||
};
|
||||
|
||||
/* IPP related types */
|
||||
|
@ -3427,7 +3427,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
|
||||
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
|
||||
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
|
||||
.rotation = pipe_ctx->plane_state->rotation,
|
||||
.mirror = pipe_ctx->plane_state->horizontal_mirror
|
||||
.mirror = pipe_ctx->plane_state->horizontal_mirror,
|
||||
.stream = pipe_ctx->stream,
|
||||
};
|
||||
bool pipe_split_on = false;
|
||||
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
|
||||
|
@ -1075,8 +1075,16 @@ void hubp2_cursor_set_position(
|
||||
if (src_y_offset < 0)
|
||||
src_y_offset = 0;
|
||||
/* Save necessary cursor info x, y position. w, h is saved in attribute func. */
|
||||
hubp->cur_rect.x = src_x_offset + param->viewport.x;
|
||||
hubp->cur_rect.y = src_y_offset + param->viewport.y;
|
||||
if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
|
||||
param->rotation != ROTATION_ANGLE_0) {
|
||||
hubp->cur_rect.x = 0;
|
||||
hubp->cur_rect.y = 0;
|
||||
hubp->cur_rect.w = param->stream->timing.h_addressable;
|
||||
hubp->cur_rect.h = param->stream->timing.v_addressable;
|
||||
} else {
|
||||
hubp->cur_rect.x = src_x_offset + param->viewport.x;
|
||||
hubp->cur_rect.y = src_y_offset + param->viewport.y;
|
||||
}
|
||||
}
|
||||
|
||||
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
|
||||
|
@ -994,5 +994,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
|
||||
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
|
||||
|
||||
dcn20_prepare_bandwidth(dc, context);
|
||||
|
||||
dc_dmub_srv_p_state_delegate(dc,
|
||||
context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
|
||||
}
|
||||
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include "intel_global_state.h"
|
||||
#include "intel_hdcp.h"
|
||||
#include "intel_psr.h"
|
||||
#include "intel_fb.h"
|
||||
#include "skl_universal_plane.h"
|
||||
|
||||
/**
|
||||
@ -302,198 +303,6 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
|
||||
kfree(crtc_state);
|
||||
}
|
||||
|
||||
static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
|
||||
int num_scalers_need, struct intel_crtc *intel_crtc,
|
||||
const char *name, int idx,
|
||||
struct intel_plane_state *plane_state,
|
||||
int *scaler_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
|
||||
int j;
|
||||
u32 mode;
|
||||
|
||||
if (*scaler_id < 0) {
|
||||
/* find a free scaler */
|
||||
for (j = 0; j < intel_crtc->num_scalers; j++) {
|
||||
if (scaler_state->scalers[j].in_use)
|
||||
continue;
|
||||
|
||||
*scaler_id = j;
|
||||
scaler_state->scalers[*scaler_id].in_use = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
|
||||
"Cannot find scaler for %s:%d\n", name, idx))
|
||||
return;
|
||||
|
||||
/* set scaler mode */
|
||||
if (plane_state && plane_state->hw.fb &&
|
||||
plane_state->hw.fb->format->is_yuv &&
|
||||
plane_state->hw.fb->format->num_planes > 1) {
|
||||
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
|
||||
if (DISPLAY_VER(dev_priv) == 9) {
|
||||
mode = SKL_PS_SCALER_MODE_NV12;
|
||||
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
|
||||
/*
|
||||
* On gen11+'s HDR planes we only use the scaler for
|
||||
* scaling. They have a dedicated chroma upsampler, so
|
||||
* we don't need the scaler to upsample the UV plane.
|
||||
*/
|
||||
mode = PS_SCALER_MODE_NORMAL;
|
||||
} else {
|
||||
struct intel_plane *linked =
|
||||
plane_state->planar_linked_plane;
|
||||
|
||||
mode = PS_SCALER_MODE_PLANAR;
|
||||
|
||||
if (linked)
|
||||
mode |= PS_PLANE_Y_SEL(linked->id);
|
||||
}
|
||||
} else if (DISPLAY_VER(dev_priv) >= 10) {
|
||||
mode = PS_SCALER_MODE_NORMAL;
|
||||
} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
|
||||
/*
|
||||
* when only 1 scaler is in use on a pipe with 2 scalers
|
||||
* scaler 0 operates in high quality (HQ) mode.
|
||||
* In this case use scaler 0 to take advantage of HQ mode
|
||||
*/
|
||||
scaler_state->scalers[*scaler_id].in_use = 0;
|
||||
*scaler_id = 0;
|
||||
scaler_state->scalers[0].in_use = 1;
|
||||
mode = SKL_PS_SCALER_MODE_HQ;
|
||||
} else {
|
||||
mode = SKL_PS_SCALER_MODE_DYN;
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
|
||||
intel_crtc->pipe, *scaler_id, name, idx);
|
||||
scaler_state->scalers[*scaler_id].mode = mode;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
|
||||
* @dev_priv: i915 device
|
||||
* @intel_crtc: intel crtc
|
||||
* @crtc_state: incoming crtc_state to validate and setup scalers
|
||||
*
|
||||
* This function sets up scalers based on staged scaling requests for
|
||||
* a @crtc and its planes. It is called from crtc level check path. If request
|
||||
* is a supportable request, it attaches scalers to requested planes and crtc.
|
||||
*
|
||||
* This function takes into account the current scaler(s) in use by any planes
|
||||
* not being part of this atomic state
|
||||
*
|
||||
* Returns:
|
||||
* 0 - scalers were setup succesfully
|
||||
* error code - otherwise
|
||||
*/
|
||||
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc *intel_crtc,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_plane *plane = NULL;
|
||||
struct intel_plane *intel_plane;
|
||||
struct intel_plane_state *plane_state = NULL;
|
||||
struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc_state->scaler_state;
|
||||
struct drm_atomic_state *drm_state = crtc_state->uapi.state;
|
||||
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
|
||||
int num_scalers_need;
|
||||
int i;
|
||||
|
||||
num_scalers_need = hweight32(scaler_state->scaler_users);
|
||||
|
||||
/*
|
||||
* High level flow:
|
||||
* - staged scaler requests are already in scaler_state->scaler_users
|
||||
* - check whether staged scaling requests can be supported
|
||||
* - add planes using scalers that aren't in current transaction
|
||||
* - assign scalers to requested users
|
||||
* - as part of plane commit, scalers will be committed
|
||||
* (i.e., either attached or detached) to respective planes in hw
|
||||
* - as part of crtc_commit, scaler will be either attached or detached
|
||||
* to crtc in hw
|
||||
*/
|
||||
|
||||
/* fail if required scalers > available scalers */
|
||||
if (num_scalers_need > intel_crtc->num_scalers){
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Too many scaling requests %d > %d\n",
|
||||
num_scalers_need, intel_crtc->num_scalers);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* walkthrough scaler_users bits and start assigning scalers */
|
||||
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
|
||||
int *scaler_id;
|
||||
const char *name;
|
||||
int idx;
|
||||
|
||||
/* skip if scaler not required */
|
||||
if (!(scaler_state->scaler_users & (1 << i)))
|
||||
continue;
|
||||
|
||||
if (i == SKL_CRTC_INDEX) {
|
||||
name = "CRTC";
|
||||
idx = intel_crtc->base.base.id;
|
||||
|
||||
/* panel fitter case: assign as a crtc scaler */
|
||||
scaler_id = &scaler_state->scaler_id;
|
||||
} else {
|
||||
name = "PLANE";
|
||||
|
||||
/* plane scaler case: assign as a plane scaler */
|
||||
/* find the plane that set the bit as scaler_user */
|
||||
plane = drm_state->planes[i].ptr;
|
||||
|
||||
/*
|
||||
* to enable/disable hq mode, add planes that are using scaler
|
||||
* into this transaction
|
||||
*/
|
||||
if (!plane) {
|
||||
struct drm_plane_state *state;
|
||||
|
||||
/*
|
||||
* GLK+ scalers don't have a HQ mode so it
|
||||
* isn't necessary to change between HQ and dyn mode
|
||||
* on those platforms.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) >= 10)
|
||||
continue;
|
||||
|
||||
plane = drm_plane_from_index(&dev_priv->drm, i);
|
||||
state = drm_atomic_get_plane_state(drm_state, plane);
|
||||
if (IS_ERR(state)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Failed to add [PLANE:%d] to drm_state\n",
|
||||
plane->base.id);
|
||||
return PTR_ERR(state);
|
||||
}
|
||||
}
|
||||
|
||||
intel_plane = to_intel_plane(plane);
|
||||
idx = plane->base.id;
|
||||
|
||||
/* plane on different crtc cannot be a scaler user of this crtc */
|
||||
if (drm_WARN_ON(&dev_priv->drm,
|
||||
intel_plane->pipe != intel_crtc->pipe))
|
||||
continue;
|
||||
|
||||
plane_state = intel_atomic_get_new_plane_state(intel_state,
|
||||
intel_plane);
|
||||
scaler_id = &plane_state->scaler_id;
|
||||
}
|
||||
|
||||
intel_atomic_setup_scaler(scaler_state, num_scalers_need,
|
||||
intel_crtc, name, idx,
|
||||
plane_state, scaler_id);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct drm_atomic_state *
|
||||
intel_atomic_state_alloc(struct drm_device *dev)
|
||||
{
|
||||
|
@ -52,8 +52,4 @@ struct intel_crtc_state *
|
||||
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
|
||||
struct intel_crtc *crtc);
|
||||
|
||||
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc *intel_crtc,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
|
||||
#endif /* __INTEL_ATOMIC_H__ */
|
||||
|
@ -6481,6 +6481,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: Bigjoiner+async flip is busted currently.
|
||||
* Remove this check once the issues are fixed.
|
||||
*/
|
||||
if (new_crtc_state->bigjoiner_pipes) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
|
||||
crtc->base.base.id, crtc->base.name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
|
||||
new_plane_state, i) {
|
||||
if (plane->pipe != crtc->pipe)
|
||||
|
@ -1176,7 +1176,8 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(fb->base.dev);
|
||||
|
||||
return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR;
|
||||
return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
|
||||
intel_fb_uses_dpt(&fb->base);
|
||||
}
|
||||
|
||||
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
|
||||
@ -1312,9 +1313,11 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane,
|
||||
unsigned int tile_width,
|
||||
unsigned int src_stride_tiles, unsigned int dst_stride_tiles)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(fb->base.dev);
|
||||
unsigned int stride_tiles;
|
||||
|
||||
if (IS_ALDERLAKE_P(to_i915(fb->base.dev)))
|
||||
if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) &&
|
||||
src_stride_tiles < dst_stride_tiles)
|
||||
stride_tiles = src_stride_tiles;
|
||||
else
|
||||
stride_tiles = dst_stride_tiles;
|
||||
@ -1520,7 +1523,8 @@ static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_vi
|
||||
memset(view, 0, sizeof(*view));
|
||||
view->gtt.type = view_type;
|
||||
|
||||
if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
|
||||
if (view_type == I915_GTT_VIEW_REMAPPED &&
|
||||
(IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14))
|
||||
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
|
||||
}
|
||||
|
||||
|
@ -337,6 +337,263 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
|
||||
int num_scalers_need, struct intel_crtc *intel_crtc,
|
||||
const char *name, int idx,
|
||||
struct intel_plane_state *plane_state,
|
||||
int *scaler_id)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
|
||||
int j;
|
||||
u32 mode;
|
||||
|
||||
if (*scaler_id < 0) {
|
||||
/* find a free scaler */
|
||||
for (j = 0; j < intel_crtc->num_scalers; j++) {
|
||||
if (scaler_state->scalers[j].in_use)
|
||||
continue;
|
||||
|
||||
*scaler_id = j;
|
||||
scaler_state->scalers[*scaler_id].in_use = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
|
||||
"Cannot find scaler for %s:%d\n", name, idx))
|
||||
return -EINVAL;
|
||||
|
||||
/* set scaler mode */
|
||||
if (plane_state && plane_state->hw.fb &&
|
||||
plane_state->hw.fb->format->is_yuv &&
|
||||
plane_state->hw.fb->format->num_planes > 1) {
|
||||
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
|
||||
|
||||
if (DISPLAY_VER(dev_priv) == 9) {
|
||||
mode = SKL_PS_SCALER_MODE_NV12;
|
||||
} else if (icl_is_hdr_plane(dev_priv, plane->id)) {
|
||||
/*
|
||||
* On gen11+'s HDR planes we only use the scaler for
|
||||
* scaling. They have a dedicated chroma upsampler, so
|
||||
* we don't need the scaler to upsample the UV plane.
|
||||
*/
|
||||
mode = PS_SCALER_MODE_NORMAL;
|
||||
} else {
|
||||
struct intel_plane *linked =
|
||||
plane_state->planar_linked_plane;
|
||||
|
||||
mode = PS_SCALER_MODE_PLANAR;
|
||||
|
||||
if (linked)
|
||||
mode |= PS_PLANE_Y_SEL(linked->id);
|
||||
}
|
||||
} else if (DISPLAY_VER(dev_priv) >= 10) {
|
||||
mode = PS_SCALER_MODE_NORMAL;
|
||||
} else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
|
||||
/*
|
||||
* when only 1 scaler is in use on a pipe with 2 scalers
|
||||
* scaler 0 operates in high quality (HQ) mode.
|
||||
* In this case use scaler 0 to take advantage of HQ mode
|
||||
*/
|
||||
scaler_state->scalers[*scaler_id].in_use = 0;
|
||||
*scaler_id = 0;
|
||||
scaler_state->scalers[0].in_use = 1;
|
||||
mode = SKL_PS_SCALER_MODE_HQ;
|
||||
} else {
|
||||
mode = SKL_PS_SCALER_MODE_DYN;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: we should also check the scaler factors for pfit, so
|
||||
* this shouldn't be tied directly to planes.
|
||||
*/
|
||||
if (plane_state && plane_state->hw.fb) {
|
||||
const struct drm_framebuffer *fb = plane_state->hw.fb;
|
||||
const struct drm_rect *src = &plane_state->uapi.src;
|
||||
const struct drm_rect *dst = &plane_state->uapi.dst;
|
||||
int hscale, vscale, max_vscale, max_hscale;
|
||||
|
||||
/*
|
||||
* FIXME: When two scalers are needed, but only one of
|
||||
* them needs to downscale, we should make sure that
|
||||
* the one that needs downscaling support is assigned
|
||||
* as the first scaler, so we don't reject downscaling
|
||||
* unnecessarily.
|
||||
*/
|
||||
|
||||
if (DISPLAY_VER(dev_priv) >= 14) {
|
||||
/*
|
||||
* On versions 14 and up, only the first
|
||||
* scaler supports a vertical scaling factor
|
||||
* of more than 1.0, while a horizontal
|
||||
* scaling factor of 3.0 is supported.
|
||||
*/
|
||||
max_hscale = 0x30000 - 1;
|
||||
if (*scaler_id == 0)
|
||||
max_vscale = 0x30000 - 1;
|
||||
else
|
||||
max_vscale = 0x10000;
|
||||
|
||||
} else if (DISPLAY_VER(dev_priv) >= 10 ||
|
||||
!intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
|
||||
max_hscale = 0x30000 - 1;
|
||||
max_vscale = 0x30000 - 1;
|
||||
} else {
|
||||
max_hscale = 0x20000 - 1;
|
||||
max_vscale = 0x20000 - 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: We should change the if-else block above to
|
||||
* support HQ vs dynamic scaler properly.
|
||||
*/
|
||||
|
||||
/* Check if required scaling is within limits */
|
||||
hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
|
||||
vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
|
||||
|
||||
if (hscale < 0 || vscale < 0) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Scaler %d doesn't support required plane scaling\n",
|
||||
*scaler_id);
|
||||
drm_rect_debug_print("src: ", src, true);
|
||||
drm_rect_debug_print("dst: ", dst, false);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
|
||||
intel_crtc->pipe, *scaler_id, name, idx);
|
||||
scaler_state->scalers[*scaler_id].mode = mode;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
|
||||
* @dev_priv: i915 device
|
||||
* @intel_crtc: intel crtc
|
||||
* @crtc_state: incoming crtc_state to validate and setup scalers
|
||||
*
|
||||
* This function sets up scalers based on staged scaling requests for
|
||||
* a @crtc and its planes. It is called from crtc level check path. If request
|
||||
* is a supportable request, it attaches scalers to requested planes and crtc.
|
||||
*
|
||||
* This function takes into account the current scaler(s) in use by any planes
|
||||
* not being part of this atomic state
|
||||
*
|
||||
* Returns:
|
||||
* 0 - scalers were setup successfully
|
||||
* error code - otherwise
|
||||
*/
|
||||
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc *intel_crtc,
|
||||
struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_plane *plane = NULL;
|
||||
struct intel_plane *intel_plane;
|
||||
struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc_state->scaler_state;
|
||||
struct drm_atomic_state *drm_state = crtc_state->uapi.state;
|
||||
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
|
||||
int num_scalers_need;
|
||||
int i;
|
||||
|
||||
num_scalers_need = hweight32(scaler_state->scaler_users);
|
||||
|
||||
/*
|
||||
* High level flow:
|
||||
* - staged scaler requests are already in scaler_state->scaler_users
|
||||
* - check whether staged scaling requests can be supported
|
||||
* - add planes using scalers that aren't in current transaction
|
||||
* - assign scalers to requested users
|
||||
* - as part of plane commit, scalers will be committed
|
||||
* (i.e., either attached or detached) to respective planes in hw
|
||||
* - as part of crtc_commit, scaler will be either attached or detached
|
||||
* to crtc in hw
|
||||
*/
|
||||
|
||||
/* fail if required scalers > available scalers */
|
||||
if (num_scalers_need > intel_crtc->num_scalers) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Too many scaling requests %d > %d\n",
|
||||
num_scalers_need, intel_crtc->num_scalers);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* walkthrough scaler_users bits and start assigning scalers */
|
||||
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
|
||||
struct intel_plane_state *plane_state = NULL;
|
||||
int *scaler_id;
|
||||
const char *name;
|
||||
int idx, ret;
|
||||
|
||||
/* skip if scaler not required */
|
||||
if (!(scaler_state->scaler_users & (1 << i)))
|
||||
continue;
|
||||
|
||||
if (i == SKL_CRTC_INDEX) {
|
||||
name = "CRTC";
|
||||
idx = intel_crtc->base.base.id;
|
||||
|
||||
/* panel fitter case: assign as a crtc scaler */
|
||||
scaler_id = &scaler_state->scaler_id;
|
||||
} else {
|
||||
name = "PLANE";
|
||||
|
||||
/* plane scaler case: assign as a plane scaler */
|
||||
/* find the plane that set the bit as scaler_user */
|
||||
plane = drm_state->planes[i].ptr;
|
||||
|
||||
/*
|
||||
* to enable/disable hq mode, add planes that are using scaler
|
||||
* into this transaction
|
||||
*/
|
||||
if (!plane) {
|
||||
struct drm_plane_state *state;
|
||||
|
||||
/*
|
||||
* GLK+ scalers don't have a HQ mode so it
|
||||
* isn't necessary to change between HQ and dyn mode
|
||||
* on those platforms.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) >= 10)
|
||||
continue;
|
||||
|
||||
plane = drm_plane_from_index(&dev_priv->drm, i);
|
||||
state = drm_atomic_get_plane_state(drm_state, plane);
|
||||
if (IS_ERR(state)) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Failed to add [PLANE:%d] to drm_state\n",
|
||||
plane->base.id);
|
||||
return PTR_ERR(state);
|
||||
}
|
||||
}
|
||||
|
||||
intel_plane = to_intel_plane(plane);
|
||||
idx = plane->base.id;
|
||||
|
||||
/* plane on different crtc cannot be a scaler user of this crtc */
|
||||
if (drm_WARN_ON(&dev_priv->drm,
|
||||
intel_plane->pipe != intel_crtc->pipe))
|
||||
continue;
|
||||
|
||||
plane_state = intel_atomic_get_new_plane_state(intel_state,
|
||||
intel_plane);
|
||||
scaler_id = &plane_state->scaler_id;
|
||||
}
|
||||
|
||||
ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
|
||||
intel_crtc, name, idx,
|
||||
plane_state, scaler_id);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int glk_coef_tap(int i)
|
||||
{
|
||||
return i % 7;
|
||||
|
@ -8,17 +8,22 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
enum drm_scaling_filter;
|
||||
struct drm_i915_private;
|
||||
struct intel_crtc_state;
|
||||
struct intel_plane_state;
|
||||
struct intel_plane;
|
||||
enum pipe;
|
||||
struct drm_i915_private;
|
||||
struct intel_crtc;
|
||||
struct intel_crtc_state;
|
||||
struct intel_plane;
|
||||
struct intel_plane_state;
|
||||
|
||||
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
|
||||
|
||||
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
|
||||
struct intel_plane_state *plane_state);
|
||||
|
||||
int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
|
||||
struct intel_crtc *intel_crtc,
|
||||
struct intel_crtc_state *crtc_state);
|
||||
|
||||
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
|
||||
|
||||
void skl_program_plane_scaler(struct intel_plane *plane,
|
||||
@ -26,4 +31,5 @@ void skl_program_plane_scaler(struct intel_plane *plane,
|
||||
const struct intel_plane_state *plane_state);
|
||||
void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
|
||||
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
|
||||
|
||||
#endif
|
||||
|
@ -39,8 +39,13 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
|
||||
* The CHPN0001 ACPI device, which is used to describe the Chipone
|
||||
* ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
|
||||
*/
|
||||
{"CHPN0001", 0 },
|
||||
{ },
|
||||
{ "CHPN0001" },
|
||||
/*
|
||||
* The IDEA5002 ACPI device causes high interrupt usage and spurious
|
||||
* wakeups from suspend.
|
||||
*/
|
||||
{ "IDEA5002" },
|
||||
{ }
|
||||
};
|
||||
|
||||
/* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */
|
||||
@ -115,9 +120,9 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
|
||||
}
|
||||
|
||||
static const struct acpi_device_id i2c_hid_acpi_match[] = {
|
||||
{"ACPI0C50", 0 },
|
||||
{"PNP0C50", 0 },
|
||||
{ },
|
||||
{ "ACPI0C50" },
|
||||
{ "PNP0C50" },
|
||||
{ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
|
||||
|
||||
|
@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
|
||||
if (!slave)
|
||||
return 0;
|
||||
|
||||
command = readl(bus->base + ASPEED_I2C_CMD_REG);
|
||||
/*
|
||||
* Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
|
||||
* transfers with low enough latency between the nak/stop phase of the current
|
||||
* command and the start/address phase of the following command that the
|
||||
* interrupts are coalesced by the time we process them.
|
||||
*/
|
||||
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
|
||||
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
|
||||
}
|
||||
|
||||
/* Slave was requested, restart state machine. */
|
||||
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
|
||||
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
|
||||
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
|
||||
}
|
||||
|
||||
/* Propagate any stop conditions to the slave implementation. */
|
||||
if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
|
||||
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Now that we've dealt with any potentially coalesced stop conditions,
|
||||
* address any start conditions.
|
||||
*/
|
||||
if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
|
||||
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_START;
|
||||
}
|
||||
|
||||
/* Slave is not currently active, irq was for someone else. */
|
||||
/*
|
||||
* If the slave has been stopped and not started then slave interrupt
|
||||
* handling is complete.
|
||||
*/
|
||||
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
|
||||
return irq_handled;
|
||||
|
||||
command = readl(bus->base + ASPEED_I2C_CMD_REG);
|
||||
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
|
||||
irq_status, command);
|
||||
|
||||
@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
|
||||
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
|
||||
}
|
||||
|
||||
/* Slave was asked to stop. */
|
||||
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
|
||||
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
|
||||
}
|
||||
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
|
||||
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
|
||||
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
|
||||
}
|
||||
|
||||
switch (bus->slave_state) {
|
||||
case ASPEED_I2C_SLAVE_READ_REQUESTED:
|
||||
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
|
||||
@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
|
||||
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
|
||||
break;
|
||||
case ASPEED_I2C_SLAVE_STOP:
|
||||
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
|
||||
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
|
||||
/* Stop event handling is done early. Unreachable. */
|
||||
break;
|
||||
case ASPEED_I2C_SLAVE_START:
|
||||
/* Slave was just started. Waiting for the next event. */;
|
||||
|
@ -671,8 +671,10 @@ static int tiadc_probe(struct platform_device *pdev)
|
||||
platform_set_drvdata(pdev, indio_dev);
|
||||
|
||||
err = tiadc_request_dma(pdev, adc_dev);
|
||||
if (err && err == -EPROBE_DEFER)
|
||||
if (err && err != -ENODEV) {
|
||||
dev_err_probe(&pdev->dev, err, "DMA request failed\n");
|
||||
goto err_dma;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
|
||||
struct iio_buffer *buffer;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* iio_triggered_buffer_cleanup() assumes that the buffer allocated here
|
||||
* is assigned to indio_dev->buffer but this is only the case if this
|
||||
* function is the first caller to iio_device_attach_buffer(). If
|
||||
* indio_dev->buffer is already set then we can't proceed otherwise the
|
||||
* cleanup function will try to free a buffer that was not allocated here.
|
||||
*/
|
||||
if (indio_dev->buffer)
|
||||
return -EADDRINUSE;
|
||||
|
||||
buffer = iio_kfifo_allocate();
|
||||
if (!buffer) {
|
||||
ret = -ENOMEM;
|
||||
|
@ -15,8 +15,8 @@
|
||||
/* Conversion times in us */
|
||||
static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
|
||||
13000, 7000 };
|
||||
static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
|
||||
5000, 8000 };
|
||||
static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
|
||||
3000, 8000 };
|
||||
static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
|
||||
4100, 8220, 16440 };
|
||||
|
||||
|
@ -736,13 +736,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
|
||||
ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
|
||||
chan->channel2, val);
|
||||
mutex_unlock(&st->lock);
|
||||
return IIO_VAL_INT;
|
||||
return ret;
|
||||
case IIO_ACCEL:
|
||||
mutex_lock(&st->lock);
|
||||
ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
|
||||
chan->channel2, val);
|
||||
mutex_unlock(&st->lock);
|
||||
return IIO_VAL_INT;
|
||||
return ret;
|
||||
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
|
||||
keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
|
||||
keys->input->keycodesize * keys->input->keycodemax,
|
||||
GFP_KERNEL);
|
||||
if (!keys->codes)
|
||||
return -ENOMEM;
|
||||
|
||||
keys->input->keycode = keys->codes;
|
||||
|
||||
__set_bit(EV_KEY, keys->input->evbit);
|
||||
|
@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
|
||||
info->name = "power";
|
||||
info->event_code = KEY_POWER;
|
||||
info->wakeup = true;
|
||||
} else if (upage == 0x01 && usage == 0xc6) {
|
||||
info->name = "airplane mode switch";
|
||||
info->event_type = EV_SW;
|
||||
info->event_code = SW_RFKILL_ALL;
|
||||
info->active_low = false;
|
||||
} else if (upage == 0x01 && usage == 0xca) {
|
||||
info->name = "rotation lock switch";
|
||||
info->event_type = EV_SW;
|
||||
|
@ -381,6 +381,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
|
||||
}
|
||||
mutex_unlock(&icc_lock);
|
||||
|
||||
if (!node)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (IS_ERR(node))
|
||||
return ERR_CAST(node);
|
||||
|
||||
|
@ -551,6 +551,7 @@ static struct platform_driver qnoc_driver = {
|
||||
.driver = {
|
||||
.name = "qnoc-sm8250",
|
||||
.of_match_table = qnoc_of_match,
|
||||
.sync_state = icc_sync_state,
|
||||
},
|
||||
};
|
||||
module_platform_driver(qnoc_driver);
|
||||
|
@ -1923,6 +1923,13 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
|
||||
|
||||
void dm_bufio_client_reset(struct dm_bufio_client *c)
|
||||
{
|
||||
drop_buffers(c);
|
||||
flush_work(&c->shrink_work);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
|
||||
|
||||
void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
|
||||
{
|
||||
c->start = start;
|
||||
|
@@ -1763,11 +1763,12 @@ static void integrity_metadata(struct work_struct *w)
 		sectors_to_process = dio->range.n_sectors;
 
 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+			struct bio_vec bv_copy = bv;
 			unsigned int pos;
 			char *mem, *checksums_ptr;
 
 again:
-			mem = bvec_kmap_local(&bv);
+			mem = bvec_kmap_local(&bv_copy);
 			pos = 0;
 			checksums_ptr = checksums;
 			do {
@@ -1776,7 +1777,7 @@ static void integrity_metadata(struct work_struct *w)
 				sectors_to_process -= ic->sectors_per_block;
 				pos += ic->sectors_per_block << SECTOR_SHIFT;
 				sector += ic->sectors_per_block;
-			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
+			} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
 			kunmap_local(mem);
 
 			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
@@ -1801,9 +1802,9 @@ static void integrity_metadata(struct work_struct *w)
 				if (!sectors_to_process)
 					break;
 
-				if (unlikely(pos < bv.bv_len)) {
-					bv.bv_offset += pos;
-					bv.bv_len -= pos;
+				if (unlikely(pos < bv_copy.bv_len)) {
+					bv_copy.bv_offset += pos;
+					bv_copy.bv_len -= pos;
 					goto again;
 				}
 			}
@ -597,6 +597,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
|
||||
&pmd->tm, &pmd->metadata_sm);
|
||||
if (r < 0) {
|
||||
pmd->tm = NULL;
|
||||
pmd->metadata_sm = NULL;
|
||||
DMERR("tm_create_with_sm failed");
|
||||
return r;
|
||||
}
|
||||
@ -605,6 +607,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
if (IS_ERR(pmd->data_sm)) {
|
||||
DMERR("sm_disk_create failed");
|
||||
r = PTR_ERR(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
goto bad_cleanup_tm;
|
||||
}
|
||||
|
||||
@ -635,11 +638,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
|
||||
|
||||
bad_cleanup_nb_tm:
|
||||
dm_tm_destroy(pmd->nb_tm);
|
||||
pmd->nb_tm = NULL;
|
||||
bad_cleanup_data_sm:
|
||||
dm_sm_destroy(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
bad_cleanup_tm:
|
||||
dm_tm_destroy(pmd->tm);
|
||||
pmd->tm = NULL;
|
||||
dm_sm_destroy(pmd->metadata_sm);
|
||||
pmd->metadata_sm = NULL;
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -705,6 +712,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
|
||||
sizeof(disk_super->metadata_space_map_root),
|
||||
&pmd->tm, &pmd->metadata_sm);
|
||||
if (r < 0) {
|
||||
pmd->tm = NULL;
|
||||
pmd->metadata_sm = NULL;
|
||||
DMERR("tm_open_with_sm failed");
|
||||
goto bad_unlock_sblock;
|
||||
}
|
||||
@ -714,6 +723,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
|
||||
if (IS_ERR(pmd->data_sm)) {
|
||||
DMERR("sm_disk_open failed");
|
||||
r = PTR_ERR(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
goto bad_cleanup_tm;
|
||||
}
|
||||
|
||||
@ -740,9 +750,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
|
||||
|
||||
bad_cleanup_data_sm:
|
||||
dm_sm_destroy(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
bad_cleanup_tm:
|
||||
dm_tm_destroy(pmd->tm);
|
||||
pmd->tm = NULL;
|
||||
dm_sm_destroy(pmd->metadata_sm);
|
||||
pmd->metadata_sm = NULL;
|
||||
bad_unlock_sblock:
|
||||
dm_bm_unlock(sblock);
|
||||
|
||||
@ -789,9 +802,13 @@ static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
|
||||
bool destroy_bm)
|
||||
{
|
||||
dm_sm_destroy(pmd->data_sm);
|
||||
pmd->data_sm = NULL;
|
||||
dm_sm_destroy(pmd->metadata_sm);
|
||||
pmd->metadata_sm = NULL;
|
||||
dm_tm_destroy(pmd->nb_tm);
|
||||
pmd->nb_tm = NULL;
|
||||
dm_tm_destroy(pmd->tm);
|
||||
pmd->tm = NULL;
|
||||
if (destroy_bm)
|
||||
dm_block_manager_destroy(pmd->bm);
|
||||
}
|
||||
@ -999,8 +1016,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
|
||||
__func__, r);
|
||||
}
|
||||
pmd_write_unlock(pmd);
|
||||
if (!pmd->fail_io)
|
||||
__destroy_persistent_data_objects(pmd, true);
|
||||
__destroy_persistent_data_objects(pmd, true);
|
||||
|
||||
kfree(pmd);
|
||||
return 0;
|
||||
@ -1875,53 +1891,29 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
|
||||
int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
|
||||
{
|
||||
int r = -EINVAL;
|
||||
struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
|
||||
|
||||
/* fail_io is double-checked with pmd->root_lock held below */
|
||||
if (unlikely(pmd->fail_io))
|
||||
return r;
|
||||
|
||||
/*
|
||||
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
|
||||
* pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
|
||||
* shrinker associated with the block manager's bufio client vs pmd root_lock).
|
||||
* - must take shrinker_rwsem without holding pmd->root_lock
|
||||
*/
|
||||
new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
|
||||
THIN_MAX_CONCURRENT_LOCKS);
|
||||
|
||||
pmd_write_lock(pmd);
|
||||
if (pmd->fail_io) {
|
||||
pmd_write_unlock(pmd);
|
||||
goto out;
|
||||
return r;
|
||||
}
|
||||
|
||||
__set_abort_with_changes_flags(pmd);
|
||||
__destroy_persistent_data_objects(pmd, false);
|
||||
old_bm = pmd->bm;
|
||||
if (IS_ERR(new_bm)) {
|
||||
DMERR("could not create block manager during abort");
|
||||
pmd->bm = NULL;
|
||||
r = PTR_ERR(new_bm);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
pmd->bm = new_bm;
|
||||
/* destroy data_sm/metadata_sm/nb_tm/tm */
|
||||
__destroy_persistent_data_objects(pmd, false);
|
||||
|
||||
/* reset bm */
|
||||
dm_block_manager_reset(pmd->bm);
|
||||
|
||||
/* rebuild data_sm/metadata_sm/nb_tm/tm */
|
||||
r = __open_or_format_metadata(pmd, false);
|
||||
if (r) {
|
||||
pmd->bm = NULL;
|
||||
goto out_unlock;
|
||||
}
|
||||
new_bm = NULL;
|
||||
out_unlock:
|
||||
if (r)
|
||||
pmd->fail_io = true;
|
||||
pmd_write_unlock(pmd);
|
||||
dm_block_manager_destroy(old_bm);
|
||||
out:
|
||||
if (new_bm && !IS_ERR(new_bm))
|
||||
dm_block_manager_destroy(new_bm);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@@ -415,6 +415,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
 }
 EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
 
+void dm_block_manager_reset(struct dm_block_manager *bm)
+{
+	dm_bufio_client_reset(bm->bufio);
+}
+EXPORT_SYMBOL_GPL(dm_block_manager_reset);
+
 unsigned int dm_bm_block_size(struct dm_block_manager *bm)
 {
 	return dm_bufio_get_block_size(bm->bufio);
@@ -35,6 +35,7 @@ struct dm_block_manager *dm_block_manager_create(
 	struct block_device *bdev, unsigned int block_size,
 	unsigned int max_held_per_thread);
 void dm_block_manager_destroy(struct dm_block_manager *bm);
+void dm_block_manager_reset(struct dm_block_manager *bm);
 
 unsigned int dm_bm_block_size(struct dm_block_manager *bm);
 dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
@@ -76,7 +76,8 @@ struct dm_space_map {
 
 static inline void dm_sm_destroy(struct dm_space_map *sm)
 {
-	sm->destroy(sm);
+	if (sm)
+		sm->destroy(sm);
 }
 
 static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
@@ -197,6 +197,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
 
 void dm_tm_destroy(struct dm_transaction_manager *tm)
 {
+	if (!tm)
+		return;
+
 	if (!tm->is_clone)
 		wipe_shadow_table(tm);
 
@@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
 		netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
 			   offset, adapter->ring_size);
 		err = -1;
-		goto failed;
+		goto free_buffer;
 	}
 
 	return 0;
+free_buffer:
+	kfree(tx_ring->tx_buffer);
+	tx_ring->tx_buffer = NULL;
 failed:
 	if (adapter->ring_vir_addr != NULL) {
 		dma_free_coherent(&pdev->dev, adapter->ring_size,
@@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
 static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
 {
 	struct otx2_nic *pfvf = netdev_priv(dev);
+	u8 old_pfc_en;
 	int err;
 
-	/* Save PFC configuration to interface */
+	old_pfc_en = pfvf->pfc_en;
 	pfvf->pfc_en = pfc->pfc_en;
 
 	if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
@@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
 	 * supported by the tx queue configuration
 	 */
 	err = otx2_check_pfc_config(pfvf);
-	if (err)
+	if (err) {
+		pfvf->pfc_en = old_pfc_en;
 		return err;
+	}
 
 process_pfc:
 	err = otx2_config_priority_flow_ctrl(pfvf);
-	if (err)
+	if (err) {
+		pfvf->pfc_en = old_pfc_en;
 		return err;
+	}
 
 	/* Request Per channel Bpids */
 	if (pfc->pfc_en)
@@ -425,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
 
 	err = otx2_pfc_txschq_update(pfvf);
 	if (err) {
+		if (pfc->pfc_en)
+			otx2_nix_config_bp(pfvf, false);
+
+		otx2_pfc_txschq_stop(pfvf);
+		pfvf->pfc_en = old_pfc_en;
+		otx2_config_priority_flow_ctrl(pfvf);
 		dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
 		return err;
 	}
@ -48,6 +48,25 @@
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "diag/cmd_tracepoint.h"
|
||||
|
||||
struct mlx5_ifc_mbox_out_bits {
|
||||
u8 status[0x8];
|
||||
u8 reserved_at_8[0x18];
|
||||
|
||||
u8 syndrome[0x20];
|
||||
|
||||
u8 reserved_at_40[0x40];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_mbox_in_bits {
|
||||
u8 opcode[0x10];
|
||||
u8 uid[0x10];
|
||||
|
||||
u8 reserved_at_20[0x10];
|
||||
u8 op_mod[0x10];
|
||||
|
||||
u8 reserved_at_40[0x40];
|
||||
};
|
||||
|
||||
enum {
|
||||
CMD_IF_REV = 5,
|
||||
};
|
||||
@ -71,6 +90,26 @@ enum {
|
||||
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
|
||||
};
|
||||
|
||||
static u16 in_to_opcode(void *in)
|
||||
{
|
||||
return MLX5_GET(mbox_in, in, opcode);
|
||||
}
|
||||
|
||||
/* Returns true for opcodes that might be triggered very frequently and throttle
|
||||
* the command interface. Limit their command slots usage.
|
||||
*/
|
||||
static bool mlx5_cmd_is_throttle_opcode(u16 op)
|
||||
{
|
||||
switch (op) {
|
||||
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
|
||||
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
|
||||
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
|
||||
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct mlx5_cmd_work_ent *
|
||||
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
|
||||
struct mlx5_cmd_msg *out, void *uout, int uout_size,
|
||||
@ -92,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
|
||||
ent->context = context;
|
||||
ent->cmd = cmd;
|
||||
ent->page_queue = page_queue;
|
||||
ent->op = in_to_opcode(in->first.data);
|
||||
refcount_set(&ent->refcnt, 1);
|
||||
|
||||
return ent;
|
||||
@ -116,24 +156,27 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
|
||||
return token;
|
||||
}
|
||||
|
||||
static int cmd_alloc_index(struct mlx5_cmd *cmd)
|
||||
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&cmd->alloc_lock, flags);
|
||||
ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
|
||||
if (ret < cmd->max_reg_cmds)
|
||||
clear_bit(ret, &cmd->bitmask);
|
||||
ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
|
||||
if (ret < cmd->vars.max_reg_cmds) {
|
||||
clear_bit(ret, &cmd->vars.bitmask);
|
||||
ent->idx = ret;
|
||||
cmd->ent_arr[ent->idx] = ent;
|
||||
}
|
||||
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
|
||||
|
||||
return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
|
||||
return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
|
||||
}
|
||||
|
||||
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
|
||||
{
|
||||
lockdep_assert_held(&cmd->alloc_lock);
|
||||
set_bit(idx, &cmd->bitmask);
|
||||
set_bit(idx, &cmd->vars.bitmask);
|
||||
}
|
||||
|
||||
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
|
||||
@ -152,7 +195,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
|
||||
|
||||
if (ent->idx >= 0) {
|
||||
cmd_free_index(cmd, ent->idx);
|
||||
up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
|
||||
up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
|
||||
}
|
||||
|
||||
cmd_free_ent(ent);
|
||||
@ -162,7 +205,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
|
||||
|
||||
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
|
||||
{
|
||||
return cmd->cmd_buf + (idx << cmd->log_stride);
|
||||
return cmd->cmd_buf + (idx << cmd->vars.log_stride);
|
||||
}
|
||||
|
||||
static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
|
||||
@ -753,25 +796,6 @@ static int cmd_status_to_err(u8 status)
|
||||
}
|
||||
}
|
||||
|
||||
struct mlx5_ifc_mbox_out_bits {
|
||||
u8 status[0x8];
|
||||
u8 reserved_at_8[0x18];
|
||||
|
||||
u8 syndrome[0x20];
|
||||
|
||||
u8 reserved_at_40[0x40];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_mbox_in_bits {
|
||||
u8 opcode[0x10];
|
||||
u8 uid[0x10];
|
||||
|
||||
u8 reserved_at_20[0x10];
|
||||
u8 op_mod[0x10];
|
||||
|
||||
u8 reserved_at_40[0x40];
|
||||
};
|
||||
|
||||
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
|
||||
{
|
||||
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
|
||||
@ -789,7 +813,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
|
||||
u16 opcode, op_mod;
|
||||
u16 uid;
|
||||
|
||||
opcode = MLX5_GET(mbox_in, in, opcode);
|
||||
opcode = in_to_opcode(in);
|
||||
op_mod = MLX5_GET(mbox_in, in, op_mod);
|
||||
uid = MLX5_GET(mbox_in, in, uid);
|
||||
|
||||
@ -801,7 +825,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
|
||||
{
|
||||
/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
|
||||
if (err == -ENXIO) {
|
||||
u16 opcode = MLX5_GET(mbox_in, in, opcode);
|
||||
u16 opcode = in_to_opcode(in);
|
||||
u32 syndrome;
|
||||
u8 status;
|
||||
|
||||
@ -830,9 +854,9 @@ static void dump_command(struct mlx5_core_dev *dev,
|
||||
struct mlx5_cmd_work_ent *ent, int input)
|
||||
{
|
||||
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
|
||||
u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
|
||||
struct mlx5_cmd_mailbox *next = msg->next;
|
||||
int n = mlx5_calc_cmd_blocks(msg);
|
||||
u16 op = ent->op;
|
||||
int data_only;
|
||||
u32 offset = 0;
|
||||
int dump_len;
|
||||
@ -884,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev,
|
||||
mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
|
||||
}
|
||||
|
||||
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
|
||||
{
|
||||
return MLX5_GET(mbox_in, in->first.data, opcode);
|
||||
}
|
||||
|
||||
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
|
||||
|
||||
static void cb_timeout_handler(struct work_struct *work)
|
||||
@ -906,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work)
|
||||
/* Maybe got handled by eq recover ? */
|
||||
if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
|
||||
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
|
||||
mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
|
||||
mlx5_command_str(ent->op), ent->op);
|
||||
goto out; /* phew, already handled */
|
||||
}
|
||||
|
||||
ent->ret = -ETIMEDOUT;
|
||||
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
|
||||
ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
|
||||
ent->idx, mlx5_command_str(ent->op), ent->op);
|
||||
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
|
||||
|
||||
out:
|
||||
@ -955,10 +974,10 @@ static void cmd_work_handler(struct work_struct *work)
|
||||
cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
|
||||
|
||||
complete(&ent->handling);
|
||||
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
|
||||
sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
|
||||
down(sem);
|
||||
if (!ent->page_queue) {
|
||||
alloc_ret = cmd_alloc_index(cmd);
|
||||
alloc_ret = cmd_alloc_index(cmd, ent);
|
||||
if (alloc_ret < 0) {
|
||||
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
|
||||
if (ent->callback) {
|
||||
@ -973,20 +992,18 @@ static void cmd_work_handler(struct work_struct *work)
|
||||
up(sem);
|
||||
return;
|
||||
}
|
||||
ent->idx = alloc_ret;
|
||||
} else {
|
||||
ent->idx = cmd->max_reg_cmds;
|
||||
ent->idx = cmd->vars.max_reg_cmds;
|
||||
spin_lock_irqsave(&cmd->alloc_lock, flags);
|
||||
clear_bit(ent->idx, &cmd->bitmask);
|
||||
clear_bit(ent->idx, &cmd->vars.bitmask);
|
||||
cmd->ent_arr[ent->idx] = ent;
|
||||
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
|
||||
}
|
||||
|
||||
cmd->ent_arr[ent->idx] = ent;
|
||||
lay = get_inst(cmd, ent->idx);
|
||||
ent->lay = lay;
|
||||
memset(lay, 0, sizeof(*lay));
|
||||
memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
|
||||
ent->op = be32_to_cpu(lay->in[0]) >> 16;
|
||||
if (ent->in->next)
|
||||
lay->in_ptr = cpu_to_be64(ent->in->next->dma);
|
||||
lay->inlen = cpu_to_be32(ent->in->len);
|
||||
@ -1099,12 +1116,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
|
||||
*/
|
||||
if (wait_for_completion_timeout(&ent->done, timeout)) {
|
||||
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
|
||||
mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
|
||||
mlx5_command_str(ent->op), ent->op);
|
||||
return;
|
||||
}
|
||||
|
||||
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
|
||||
mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
|
||||
mlx5_command_str(ent->op), ent->op);
|
||||
|
||||
ent->ret = -ETIMEDOUT;
|
||||
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
|
||||
@ -1131,12 +1148,10 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
|
||||
|
||||
if (err == -ETIMEDOUT) {
|
||||
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
|
||||
mlx5_command_str(msg_to_opcode(ent->in)),
|
||||
msg_to_opcode(ent->in));
|
||||
mlx5_command_str(ent->op), ent->op);
|
||||
} else if (err == -ECANCELED) {
|
||||
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
|
||||
mlx5_command_str(msg_to_opcode(ent->in)),
|
||||
msg_to_opcode(ent->in));
|
||||
mlx5_command_str(ent->op), ent->op);
|
||||
}
|
||||
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
|
||||
err, deliv_status_to_str(ent->status), ent->status);
|
||||
@ -1170,7 +1185,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
|
||||
u8 status = 0;
|
||||
int err = 0;
|
||||
s64 ds;
|
||||
u16 op;
|
||||
|
||||
if (callback && page_queue)
|
||||
return -EINVAL;
|
||||
@ -1210,9 +1224,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
|
||||
goto out_free;
|
||||
|
||||
ds = ent->ts2 - ent->ts1;
|
||||
op = MLX5_GET(mbox_in, in->first.data, opcode);
|
||||
if (op < MLX5_CMD_OP_MAX) {
|
||||
stats = &cmd->stats[op];
|
||||
if (ent->op < MLX5_CMD_OP_MAX) {
|
||||
stats = &cmd->stats[ent->op];
|
||||
spin_lock_irq(&stats->lock);
|
||||
stats->sum += ds;
|
||||
++stats->n;
|
||||
@ -1220,7 +1233,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
|
||||
}
|
||||
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
|
||||
"fw exec time for %s is %lld nsec\n",
|
||||
mlx5_command_str(op), ds);
|
||||
mlx5_command_str(ent->op), ds);
|
||||
|
||||
out_free:
|
||||
status = ent->status;
|
||||
@ -1558,15 +1571,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
|
||||
struct mlx5_cmd *cmd = &dev->cmd;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
down(&cmd->sem);
|
||||
down(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
|
||||
down(&cmd->vars.sem);
|
||||
down(&cmd->vars.pages_sem);
|
||||
|
||||
cmd->allowed_opcode = opcode;
|
||||
|
||||
up(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
up(&cmd->sem);
|
||||
up(&cmd->vars.pages_sem);
|
||||
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
|
||||
up(&cmd->vars.sem);
|
||||
}
|
||||
|
||||
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
|
||||
@ -1574,15 +1587,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
|
||||
struct mlx5_cmd *cmd = &dev->cmd;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
down(&cmd->sem);
|
||||
down(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
|
||||
down(&cmd->vars.sem);
|
||||
down(&cmd->vars.pages_sem);
|
||||
|
||||
cmd->mode = mode;
|
||||
|
||||
up(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
up(&cmd->sem);
|
||||
up(&cmd->vars.pages_sem);
|
||||
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
|
||||
up(&cmd->vars.sem);
|
||||
}
|
||||
|
||||
static int cmd_comp_notifier(struct notifier_block *nb,
|
||||
@ -1641,7 +1654,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
|
||||
|
||||
/* there can be at most 32 command queues */
|
||||
vector = vec & 0xffffffff;
|
||||
for (i = 0; i < (1 << cmd->log_sz); i++) {
|
||||
for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
|
||||
if (test_bit(i, &vector)) {
|
||||
ent = cmd->ent_arr[i];
|
||||
|
||||
@ -1730,7 +1743,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
|
||||
/* wait for pending handlers to complete */
|
||||
mlx5_eq_synchronize_cmd_irq(dev);
|
||||
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
|
||||
vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
|
||||
vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
|
||||
if (!vector)
|
||||
goto no_trig;
|
||||
|
||||
@ -1739,14 +1752,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
|
||||
* to guarantee pending commands will not get freed in the meanwhile.
|
||||
* For that reason, it also has to be done inside the alloc_lock.
|
||||
*/
|
||||
for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
|
||||
for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
|
||||
cmd_ent_get(cmd->ent_arr[i]);
|
||||
vector |= MLX5_TRIGGERED_CMD_COMP;
|
||||
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
|
||||
|
||||
mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
|
||||
mlx5_cmd_comp_handler(dev, vector, true);
|
||||
for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
|
||||
for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
|
||||
cmd_ent_put(cmd->ent_arr[i]);
|
||||
return;
|
||||
|
||||
@ -1759,22 +1772,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
|
||||
struct mlx5_cmd *cmd = &dev->cmd;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++) {
|
||||
while (down_trylock(&cmd->sem)) {
|
||||
for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
|
||||
while (down_trylock(&cmd->vars.sem)) {
|
||||
mlx5_cmd_trigger_completions(dev);
|
||||
cond_resched();
|
||||
}
|
||||
}
|
||||
|
||||
while (down_trylock(&cmd->pages_sem)) {
|
||||
while (down_trylock(&cmd->vars.pages_sem)) {
|
||||
mlx5_cmd_trigger_completions(dev);
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/* Unlock cmdif */
|
||||
up(&cmd->pages_sem);
|
||||
for (i = 0; i < cmd->max_reg_cmds; i++)
|
||||
up(&cmd->sem);
|
||||
up(&cmd->vars.pages_sem);
|
||||
for (i = 0; i < cmd->vars.max_reg_cmds; i++)
|
||||
up(&cmd->vars.sem);
|
||||
}
|
||||
|
||||
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
|
||||
@ -1817,7 +1830,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
|
||||
|
||||
static int is_manage_pages(void *in)
|
||||
{
|
||||
return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
|
||||
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
|
||||
}
|
||||
|
||||
/* Notes:
|
||||
@ -1828,8 +1841,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
||||
int out_size, mlx5_cmd_cbk_t callback, void *context,
|
||||
bool force_polling)
|
||||
{
|
||||
u16 opcode = MLX5_GET(mbox_in, in, opcode);
|
||||
struct mlx5_cmd_msg *inb, *outb;
|
||||
u16 opcode = in_to_opcode(in);
|
||||
bool throttle_op;
|
||||
int pages_queue;
|
||||
gfp_t gfp;
|
||||
u8 token;
|
||||
@ -1838,13 +1852,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
||||
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
|
||||
return -ENXIO;
|
||||
|
||||
throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
|
||||
if (throttle_op) {
|
||||
/* atomic context may not sleep */
|
||||
if (callback)
|
||||
return -EINVAL;
|
||||
down(&dev->cmd.vars.throttle_sem);
|
||||
}
|
||||
|
||||
pages_queue = is_manage_pages(in);
|
||||
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
|
||||
|
||||
inb = alloc_msg(dev, in_size, gfp);
|
||||
if (IS_ERR(inb)) {
|
||||
err = PTR_ERR(inb);
|
||||
return err;
|
||||
goto out_up;
|
||||
}
|
||||
|
||||
token = alloc_token(&dev->cmd);
|
||||
@ -1878,6 +1900,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
|
||||
mlx5_free_cmd_msg(dev, outb);
|
||||
out_in:
|
||||
free_msg(dev, inb);
|
||||
out_up:
|
||||
if (throttle_op)
|
||||
up(&dev->cmd.vars.throttle_sem);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1952,8 +1977,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op
|
||||
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
|
||||
{
|
||||
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
|
||||
u16 opcode = MLX5_GET(mbox_in, in, opcode);
|
||||
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
|
||||
u16 opcode = in_to_opcode(in);
|
||||
|
||||
return cmd_status_err(dev, err, opcode, op_mod, out);
|
||||
}
|
||||
@ -1998,8 +2023,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
|
||||
void *out, int out_size)
|
||||
{
|
||||
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
|
||||
u16 opcode = MLX5_GET(mbox_in, in, opcode);
|
||||
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
|
||||
u16 opcode = in_to_opcode(in);
|
||||
|
||||
err = cmd_status_err(dev, err, opcode, op_mod, out);
|
||||
return mlx5_cmd_check(dev, err, in, out);
|
||||
@ -2051,7 +2076,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
|
||||
|
||||
work->ctx = ctx;
|
||||
work->user_callback = callback;
|
||||
work->opcode = MLX5_GET(mbox_in, in, opcode);
|
||||
work->opcode = in_to_opcode(in);
|
||||
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
|
||||
work->out = out;
|
||||
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
|
||||
@ -2187,16 +2212,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
|
||||
goto err_free_pool;
|
||||
|
||||
cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
|
||||
cmd->log_sz = cmd_l >> 4 & 0xf;
|
||||
cmd->log_stride = cmd_l & 0xf;
|
||||
if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
|
||||
cmd->vars.log_sz = cmd_l >> 4 & 0xf;
|
||||
cmd->vars.log_stride = cmd_l & 0xf;
|
||||
if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
|
||||
mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
|
||||
1 << cmd->log_sz);
|
||||
1 << cmd->vars.log_sz);
|
||||
err = -EINVAL;
|
||||
goto err_free_page;
|
||||
}
|
||||
|
||||
if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
|
||||
if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
|
||||
mlx5_core_err(dev, "command queue size overflow\n");
|
||||
err = -EINVAL;
|
||||
goto err_free_page;
|
||||
@ -2204,13 +2229,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
|
||||
|
||||
cmd->state = MLX5_CMDIF_STATE_DOWN;
|
||||
cmd->checksum_disabled = 1;
|
||||
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
|
||||
cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
|
||||
cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
|
||||
cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
|
||||
|
||||
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
|
||||
if (cmd->cmdif_rev > CMD_IF_REV) {
|
||||
cmd->vars.cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
|
||||
if (cmd->vars.cmdif_rev > CMD_IF_REV) {
|
||||
mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
|
||||
CMD_IF_REV, cmd->cmdif_rev);
|
||||
CMD_IF_REV, cmd->vars.cmdif_rev);
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_free_page;
|
||||
}
|
||||
@ -2220,8 +2245,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
|
||||
for (i = 0; i < MLX5_CMD_OP_MAX; i++)
|
||||
spin_lock_init(&cmd->stats[i].lock);
|
||||
|
||||
sema_init(&cmd->sem, cmd->max_reg_cmds);
|
||||
sema_init(&cmd->pages_sem, 1);
|
||||
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
|
||||
sema_init(&cmd->vars.pages_sem, 1);
|
||||
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
|
||||
|
||||
cmd_h = (u32)((u64)(cmd->dma) >> 32);
|
||||
cmd_l = (u32)(cmd->dma);
|
||||
|
@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
 	int ret;
 
 	cmd = filp->private_data;
-	weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
-	field = cmd->max_reg_cmds - weight;
+	weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
+	field = cmd->vars.max_reg_cmds - weight;
 	ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
 }
@@ -691,7 +691,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 
 	while (block_timestamp > tracer->last_timestamp) {
 		/* Check block override if it's not the first block */
-		if (!tracer->last_timestamp) {
+		if (tracer->last_timestamp) {
 			u64 *ts_event;
 			/* To avoid block override be the HW in case of buffer
 			 * wraparound, the time stamp of the previous block
@@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type type)
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in || !ft->g) {
 		kfree(ft->g);
+		ft->g = NULL;
 		kvfree(in);
 		return -ENOMEM;
 	}
@ -300,6 +300,9 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto destroy_neigh_entry;
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
@ -319,8 +322,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
|
||||
goto destroy_neigh_entry;
|
||||
}
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv4_put(&attr);
|
||||
@ -403,12 +404,16 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto free_encap;
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
* and not used before that.
|
||||
*/
|
||||
goto free_encap;
|
||||
goto release_neigh;
|
||||
}
|
||||
|
||||
memset(&reformat_params, 0, sizeof(reformat_params));
|
||||
@ -422,10 +427,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
|
||||
goto free_encap;
|
||||
}
|
||||
|
||||
e->encap_size = ipv4_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv4_put(&attr);
|
||||
@ -567,6 +568,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto destroy_neigh_entry;
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
@ -586,8 +590,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
|
||||
goto destroy_neigh_entry;
|
||||
}
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
e->encap_header = encap_header;
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv6_put(&attr);
|
||||
@ -669,12 +671,16 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
|
||||
if (err)
|
||||
goto free_encap;
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
if (!(nud_state & NUD_VALID)) {
|
||||
neigh_event_send(attr.n, NULL);
|
||||
/* the encap entry will be made valid on neigh update event
|
||||
* and not used before that.
|
||||
*/
|
||||
goto free_encap;
|
||||
goto release_neigh;
|
||||
}
|
||||
|
||||
memset(&reformat_params, 0, sizeof(reformat_params));
|
||||
@ -688,10 +694,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
|
||||
goto free_encap;
|
||||
}
|
||||
|
||||
e->encap_size = ipv6_encap_size;
|
||||
kfree(e->encap_header);
|
||||
e->encap_header = encap_header;
|
||||
|
||||
e->flags |= MLX5_ENCAP_ENTRY_VALID;
|
||||
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
|
||||
mlx5e_route_lookup_ipv6_put(&attr);
|
||||
|
@@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
 	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-	if (count == sizeof(drvinfo->fw_version))
+	if (count >= sizeof(drvinfo->fw_version))
 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev));
@@ -76,7 +76,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
 	count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-	if (count == sizeof(drvinfo->fw_version))
+	if (count >= sizeof(drvinfo->fw_version))
 		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
 			 "%d.%d.%04d", fw_rev_maj(mdev),
 			 fw_rev_min(mdev), fw_rev_sub(mdev));
@@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
 		req_list_size = max_list_size;
 	}
 
-	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) +
+	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
 			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
 
 	out = kvzalloc(out_sz, GFP_KERNEL);
@@ -350,6 +350,8 @@ union ks8851_tx_hdr {
  * @rxd: Space for receiving SPI data, in DMA-able space.
  * @txd: Space for transmitting SPI data, in DMA-able space.
  * @msg_enable: The message flags controlling driver output (see ethtool).
+ * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR).
+ * @queued_len: Space required in hardware TX buffer for queued packets in txq.
  * @fid: Incrementing frame id tag.
  * @rc_ier: Cached copy of KS_IER.
  * @rc_ccr: Cached copy of KS_CCR.
@@ -399,6 +401,7 @@ struct ks8851_net {
 	struct work_struct	rxctrl_work;
 
 	struct sk_buff_head	txq;
+	unsigned int		queued_len;
 
 	struct eeprom_93cx6	eeprom;
 	struct regulator	*vdd_reg;
@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
|
||||
handled |= IRQ_RXPSI;
|
||||
|
||||
if (status & IRQ_TXI) {
|
||||
handled |= IRQ_TXI;
|
||||
|
||||
/* no lock here, tx queue should have been stopped */
|
||||
|
||||
/* update our idea of how much tx space is available to the
|
||||
* system */
|
||||
ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
|
||||
unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
|
||||
|
||||
netif_dbg(ks, intr, ks->netdev,
|
||||
"%s: txspace %d\n", __func__, ks->tx_space);
|
||||
"%s: txspace %d\n", __func__, tx_space);
|
||||
|
||||
spin_lock(&ks->statelock);
|
||||
ks->tx_space = tx_space;
|
||||
if (netif_queue_stopped(ks->netdev))
|
||||
netif_wake_queue(ks->netdev);
|
||||
spin_unlock(&ks->statelock);
|
||||
|
||||
handled |= IRQ_TXI;
|
||||
}
|
||||
|
||||
if (status & IRQ_RXI)
|
||||
@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
|
||||
if (status & IRQ_LCI)
|
||||
mii_check_link(&ks->mii);
|
||||
|
||||
if (status & IRQ_TXI)
|
||||
netif_wake_queue(ks->netdev);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev)
|
||||
ks8851_wrreg16(ks, KS_ISR, ks->rc_ier);
|
||||
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
|
||||
|
||||
ks->queued_len = 0;
|
||||
netif_start_queue(ks->netdev);
|
||||
|
||||
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
|
||||
|
@ -286,6 +286,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp,
|
||||
netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
|
||||
}
|
||||
|
||||
/**
|
||||
* calc_txlen - calculate size of message to send packet
|
||||
* @len: Length of data
|
||||
*
|
||||
* Returns the size of the TXFIFO message needed to send
|
||||
* this packet.
|
||||
*/
|
||||
static unsigned int calc_txlen(unsigned int len)
|
||||
{
|
||||
return ALIGN(len + 4, 4);
|
||||
}
|
||||
|
||||
/**
|
||||
* ks8851_rx_skb_spi - receive skbuff
|
||||
* @ks: The device state
|
||||
@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
|
||||
*/
|
||||
static void ks8851_tx_work(struct work_struct *work)
|
||||
{
|
||||
unsigned int dequeued_len = 0;
|
||||
struct ks8851_net_spi *kss;
|
||||
unsigned short tx_space;
|
||||
struct ks8851_net *ks;
|
||||
unsigned long flags;
|
||||
struct sk_buff *txb;
|
||||
@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work)
|
||||
last = skb_queue_empty(&ks->txq);
|
||||
|
||||
if (txb) {
|
||||
dequeued_len += calc_txlen(txb->len);
|
||||
|
||||
ks8851_wrreg16_spi(ks, KS_RXQCR,
|
||||
ks->rc_rxqcr | RXQCR_SDA);
|
||||
ks8851_wrfifo_spi(ks, txb, last);
|
||||
@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work)
|
||||
}
|
||||
}
|
||||
|
||||
tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
|
||||
|
||||
spin_lock(&ks->statelock);
|
||||
ks->queued_len -= dequeued_len;
|
||||
ks->tx_space = tx_space;
|
||||
spin_unlock(&ks->statelock);
|
||||
|
||||
ks8851_unlock_spi(ks, &flags);
|
||||
}
|
||||
|
||||
@ -346,18 +369,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks)
|
||||
flush_work(&kss->tx_work);
|
||||
}
|
||||
|
||||
/**
|
||||
* calc_txlen - calculate size of message to send packet
|
||||
* @len: Length of data
|
||||
*
|
||||
* Returns the size of the TXFIFO message needed to send
|
||||
* this packet.
|
||||
*/
|
||||
static unsigned int calc_txlen(unsigned int len)
|
||||
{
|
||||
return ALIGN(len + 4, 4);
|
||||
}
|
||||
|
||||
/**
|
||||
* ks8851_start_xmit_spi - transmit packet using SPI
|
||||
* @skb: The buffer to transmit
|
||||
@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb,
|
||||
|
||||
spin_lock(&ks->statelock);
|
||||
|
||||
if (needed > ks->tx_space) {
|
||||
if (ks->queued_len + needed > ks->tx_space) {
|
||||
netif_stop_queue(dev);
|
||||
ret = NETDEV_TX_BUSY;
|
||||
} else {
|
||||
ks->tx_space -= needed;
|
||||
ks->queued_len += needed;
|
||||
skb_queue_tail(&ks->txq, skb);
|
||||
}
|
||||
|
||||
spin_unlock(&ks->statelock);
|
||||
schedule_work(&kss->tx_work);
|
||||
if (ret == NETDEV_TX_OK)
|
||||
schedule_work(&kss->tx_work);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -19,6 +19,7 @@ config MICROSOFT_MANA
 	tristate "Microsoft Azure Network Adapter (MANA) support"
 	depends on PCI_MSI && X86_64
 	depends on PCI_HYPERV
+	select PAGE_POOL
 	help
 	  This driver supports Microsoft Azure Network Adapter (MANA).
 	  So far, the driver is only supported on X86_64.
@@ -216,10 +216,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv)
 	rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
 	rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
 	rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
-	rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255];
-	rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511];
-	rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023];
-	rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
+	rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
+	rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
+	rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
+	rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
 }
 
 void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
@ -173,6 +173,7 @@ struct ax88179_data {
|
||||
u8 in_pm;
|
||||
u32 wol_supported;
|
||||
u32 wolopts;
|
||||
u8 disconnecting;
|
||||
};
|
||||
|
||||
struct ax88179_int_data {
|
||||
@ -208,6 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
|
||||
{
|
||||
int ret;
|
||||
int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
|
||||
struct ax88179_data *ax179_data = dev->driver_priv;
|
||||
|
||||
BUG_ON(!dev);
|
||||
|
||||
@ -219,7 +221,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
|
||||
ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
value, index, data, size);
|
||||
|
||||
if (unlikely(ret < 0))
|
||||
if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
|
||||
netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n",
|
||||
index, ret);
|
||||
|
||||
@ -231,6 +233,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
|
||||
{
|
||||
int ret;
|
||||
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
|
||||
struct ax88179_data *ax179_data = dev->driver_priv;
|
||||
|
||||
BUG_ON(!dev);
|
||||
|
||||
@ -242,7 +245,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
|
||||
ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
|
||||
value, index, data, size);
|
||||
|
||||
if (unlikely(ret < 0))
|
||||
if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting)))
|
||||
netdev_warn(dev->net, "Failed to write reg index 0x%04x: %d\n",
|
||||
index, ret);
|
||||
|
||||
@ -492,6 +495,20 @@ static int ax88179_resume(struct usb_interface *intf)
|
||||
return usbnet_resume(intf);
|
||||
}
|
||||
|
||||
static void ax88179_disconnect(struct usb_interface *intf)
|
||||
{
|
||||
struct usbnet *dev = usb_get_intfdata(intf);
|
||||
struct ax88179_data *ax179_data;
|
||||
|
||||
if (!dev)
|
||||
return;
|
||||
|
||||
ax179_data = dev->driver_priv;
|
||||
ax179_data->disconnecting = 1;
|
||||
|
||||
usbnet_disconnect(intf);
|
||||
}
|
||||
|
||||
static void
|
||||
ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
|
||||
{
|
||||
@ -1906,7 +1923,7 @@ static struct usb_driver ax88179_178a_driver = {
|
||||
.suspend = ax88179_suspend,
|
||||
.resume = ax88179_resume,
|
||||
.reset_resume = ax88179_resume,
|
||||
.disconnect = usbnet_disconnect,
|
||||
.disconnect = ax88179_disconnect,
|
||||
.supports_autosuspend = 1,
|
||||
.disable_hub_initiated_lpm = 1,
|
||||
};
|
||||
|
@@ -3034,7 +3034,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 	u32 i, r, j, rb_len = 0;
 
-	spin_lock(&rxq->lock);
+	spin_lock_bh(&rxq->lock);
 
 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 
@@ -3058,7 +3058,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 		*data = iwl_fw_error_next_data(*data);
 	}
 
-	spin_unlock(&rxq->lock);
+	spin_unlock_bh(&rxq->lock);
 
 	return rb_len;
 }
@@ -4835,6 +4835,8 @@ static void nvme_fw_act_work(struct work_struct *work)
 				struct nvme_ctrl, fw_act_work);
 	unsigned long fw_act_timeout;
 
+	nvme_auth_stop(ctrl);
+
 	if (ctrl->mtfa)
 		fw_act_timeout = jiffies +
 				msecs_to_jiffies(ctrl->mtfa * 100);
@@ -4890,7 +4892,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
 		 * firmware activation.
 		 */
 		if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
-			nvme_auth_stop(ctrl);
 			requeue = false;
 			queue_work(nvme_wq, &ctrl->fw_act_work);
 		}
@@ -1033,6 +1033,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
 	}
 };
 
+/*
+ * This lock class allows to tell lockdep that parent IRQ and children IRQ do
+ * not share the same class so it does not raise false positive
+ */
+static struct lock_class_key atmel_lock_key;
+static struct lock_class_key atmel_request_key;
+
 static int atmel_pinctrl_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -1185,6 +1192,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
 		irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip,
 					 handle_simple_irq);
 		irq_set_chip_data(irq, atmel_pioctrl);
+		irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key);
 		dev_dbg(dev,
 			"atmel gpio irq domain: hwirq: %d, linux irq: %d\n",
 			i, irq);
@@ -489,7 +489,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
 
 	nmaps = 0;
 	ngroups = 0;
-	for_each_child_of_node(np, child) {
+	for_each_available_child_of_node(np, child) {
 		int npinmux = of_property_count_u32_elems(child, "pinmux");
 		int npins   = of_property_count_u32_elems(child, "pins");
 
@@ -524,7 +524,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev,
 	nmaps = 0;
 	ngroups = 0;
 	mutex_lock(&sfp->mutex);
-	for_each_child_of_node(np, child) {
+	for_each_available_child_of_node(np, child) {
 		int npins;
 		int i;
 
@@ -807,6 +807,9 @@ static void __reset_control_put_internal(struct reset_control *rstc)
 {
 	lockdep_assert_held(&reset_list_mutex);
 
+	if (IS_ERR_OR_NULL(rstc))
+		return;
+
 	kref_put(&rstc->refcnt, __reset_control_release);
 }
 
@@ -1017,11 +1020,8 @@ EXPORT_SYMBOL_GPL(reset_control_put);
 void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
 {
 	mutex_lock(&reset_list_mutex);
-	while (num_rstcs--) {
-		if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
-			continue;
+	while (num_rstcs--)
 		__reset_control_put_internal(rstcs[num_rstcs].rstc);
-	}
 	mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_control_bulk_put);
@ -1678,7 +1678,6 @@ struct aac_dev
|
||||
u32 handle_pci_error;
|
||||
bool init_reset;
|
||||
u8 soft_reset_support;
|
||||
u8 use_map_queue;
|
||||
};
|
||||
|
||||
#define aac_adapter_interrupt(dev) \
|
||||
|
@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev)
|
||||
struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
|
||||
{
|
||||
struct fib *fibptr;
|
||||
u32 blk_tag;
|
||||
int i;
|
||||
|
||||
blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
|
||||
i = blk_mq_unique_tag_to_tag(blk_tag);
|
||||
fibptr = &dev->fibs[i];
|
||||
fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag];
|
||||
/*
|
||||
* Null out fields that depend on being zero at the start of
|
||||
* each I/O
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/blk-mq-pci.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
@ -506,15 +505,6 @@ static int aac_slave_configure(struct scsi_device *sdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void aac_map_queues(struct Scsi_Host *shost)
|
||||
{
|
||||
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
|
||||
|
||||
blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
|
||||
aac->pdev, 0);
|
||||
aac->use_map_queue = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* aac_change_queue_depth - alter queue depths
|
||||
* @sdev: SCSI device we are considering
|
||||
@ -1499,7 +1489,6 @@ static struct scsi_host_template aac_driver_template = {
|
||||
.bios_param = aac_biosparm,
|
||||
.shost_groups = aac_host_groups,
|
||||
.slave_configure = aac_slave_configure,
|
||||
.map_queues = aac_map_queues,
|
||||
.change_queue_depth = aac_change_queue_depth,
|
||||
.sdev_groups = aac_dev_groups,
|
||||
.eh_abort_handler = aac_eh_abort,
|
||||
@ -1787,8 +1776,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
shost->max_lun = AAC_MAX_LUN;
|
||||
|
||||
pci_set_drvdata(pdev, shost);
|
||||
shost->nr_hw_queues = aac->max_msix;
|
||||
shost->host_tagset = 1;
|
||||
|
||||
error = scsi_add_host(shost, &pdev->dev);
|
||||
if (error)
|
||||
@ -1921,7 +1908,6 @@ static void aac_remove_one(struct pci_dev *pdev)
|
||||
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
|
||||
|
||||
aac_cancel_rescan_worker(aac);
|
||||
aac->use_map_queue = false;
|
||||
scsi_remove_host(shost);
|
||||
|
||||
__aac_shutdown(aac);
|
||||
|
@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib)
|
||||
#endif
|
||||
|
||||
u16 vector_no;
|
||||
struct scsi_cmnd *scmd;
|
||||
u32 blk_tag;
|
||||
struct Scsi_Host *shost = dev->scsi_host_ptr;
|
||||
struct blk_mq_queue_map *qmap;
|
||||
|
||||
atomic_inc(&q->numpending);
|
||||
|
||||
@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib)
|
||||
if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
|
||||
&& dev->sa_firmware)
|
||||
vector_no = aac_get_vector(dev);
|
||||
else {
|
||||
if (!fib->vector_no || !fib->callback_data) {
|
||||
if (shost && dev->use_map_queue) {
|
||||
qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
|
||||
vector_no = qmap->mq_map[raw_smp_processor_id()];
|
||||
}
|
||||
/*
|
||||
* We hardcode the vector_no for
|
||||
* reserved commands as a valid shost is
|
||||
* absent during the init
|
||||
*/
|
||||
else
|
||||
vector_no = 0;
|
||||
} else {
|
||||
scmd = (struct scsi_cmnd *)fib->callback_data;
|
||||
blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
|
||||
vector_no = blk_mq_unique_tag_to_hwq(blk_tag);
|
||||
}
|
||||
}
|
||||
else
|
||||
vector_no = fib->vector_no;
|
||||
|
||||
if (native_hba) {
|
||||
if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
|
||||
|
@@ -429,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct fcoe_ctlr *ctlr;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_percpu_s *bg;
-	struct sk_buff *tmp_skb;
 
 	interface = container_of(ptype, struct bnx2fc_interface,
 				 fcoe_packet_type);
@@ -441,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto err;
 	}
 
-	tmp_skb = skb_share_check(skb, GFP_ATOMIC);
-	if (!tmp_skb)
-		goto err;
-
-	skb = tmp_skb;
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return -1;
 
 	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
 		printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
@@ -1118,6 +1118,7 @@ static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd,
 
 	scsi_log_send(scmd);
 	scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
+	scmd->flags |= SCMD_LAST;
 
 	/*
 	 * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can
@@ -2412,6 +2413,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
 	scsi_init_command(dev, scmd);
 
 	scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
+	scmd->flags |= SCMD_LAST;
 	memset(&scmd->sdb, 0, sizeof(scmd->sdb));
 
 	scmd->cmd_len			= 0;
@@ -943,7 +943,7 @@ static void margining_port_remove(struct tb_port *port)
 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
 	if (parent)
-		debugfs_remove_recursive(debugfs_lookup("margining", parent));
+		debugfs_lookup_and_remove("margining", parent);
 
 	kfree(port->usb4->margining);
 	port->usb4->margining = NULL;
@@ -1033,9 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
 	{ USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) },
 	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) },
 	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) },
-	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) },
-	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) },
-	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) },
+	{ USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) },
 	{ USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) },
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) },
 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) },
@@ -1568,9 +1568,9 @@
 #define ACTISENSE_USG_PID	0xD9A9 /* USG USB Serial Adapter */
 #define ACTISENSE_NGT_PID	0xD9AA /* NGT NMEA2000 Interface */
 #define ACTISENSE_NGW_PID	0xD9AB /* NGW NMEA2000 Gateway */
-#define ACTISENSE_D9AC_PID	0xD9AC /* Actisense Reserved */
-#define ACTISENSE_D9AD_PID	0xD9AD /* Actisense Reserved */
-#define ACTISENSE_D9AE_PID	0xD9AE /* Actisense Reserved */
+#define ACTISENSE_UID_PID	0xD9AC /* USB Isolating Device */
+#define ACTISENSE_USA_PID	0xD9AD /* USB to Serial Adapter */
+#define ACTISENSE_NGX_PID	0xD9AE /* NGX NMEA2000 Gateway */
 #define ACTISENSE_D9AF_PID	0xD9AF /* Actisense Reserved */
 #define CHETCO_SEAGAUGE_PID	0xA548 /* SeaGauge USB Adapter */
 #define CHETCO_SEASWITCH_PID	0xA549 /* SeaSwitch USB Adapter */
@ -272,6 +272,7 @@ static void option_instat_callback(struct urb *urb);
|
||||
#define QUECTEL_PRODUCT_RM500Q 0x0800
|
||||
#define QUECTEL_PRODUCT_RM520N 0x0801
|
||||
#define QUECTEL_PRODUCT_EC200U 0x0901
|
||||
#define QUECTEL_PRODUCT_EG912Y 0x6001
|
||||
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
|
||||
#define QUECTEL_PRODUCT_EC200A 0x6005
|
||||
#define QUECTEL_PRODUCT_EM061K_LWW 0x6008
|
||||
@ -1232,6 +1233,7 @@ static const struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */
|
||||
.driver_info = RSVD(3) | ZLP },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
|
||||
.driver_info = ZLP },
|
||||
@ -1244,6 +1246,7 @@ static const struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
|
||||
|
||||
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
|
||||
@ -2242,6 +2245,8 @@ static const struct usb_device_id option_ids[] = {
|
||||
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
|
||||
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
|
||||
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */
|
||||
.driver_info = RSVD(3) | RSVD(5) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
|
||||
.driver_info = RSVD(3) },
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
|
||||
|
@@ -1305,6 +1305,17 @@ UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_INITIAL_READ10 ),
 
+/*
+ * Patch by Tasos Sahanidis <tasos@tasossah.com>
+ * This flash drive always shows up with write protect enabled
+ * during the first mode sense.
+ */
+UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100,
+		"Kingston",
+		"DT Ultimate G3",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_WP_DETECT),
+
 /*
  * This Pentax still camera is not conformant
  * to the USB storage specification: -
@@ -409,10 +409,12 @@ static int afs_update_cell(struct afs_cell *cell)
if (ret == -ENOMEM)
goto out_wake;

ret = -ENOMEM;
vllist = afs_alloc_vlserver_list(0);
if (!vllist)
if (!vllist) {
if (ret >= 0)
ret = -ENOMEM;
goto out_wake;
}

switch (ret) {
case -ENODATA:
@@ -114,6 +114,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
struct afs_net *net = afs_d2net(dentry);
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
char *result = NULL;
int ret;

/* Names prefixed with a dot are R/W mounts. */
@@ -131,9 +132,22 @@ static int afs_probe_cell_name(struct dentry *dentry)
}

ret = dns_query(net->net, "afsdb", name, len, "srv=1",
NULL, NULL, false);
if (ret == -ENODATA || ret == -ENOKEY)
&result, NULL, false);
if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
ret = -ENOENT;
if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
struct dns_server_list_v1_header *v1 = (void *)result;

if (v1->hdr.zero == 0 &&
v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
v1->hdr.version == 1 &&
(v1->status != DNS_LOOKUP_GOOD &&
v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
return -ENOENT;

}

kfree(result);
return ret;
}

@@ -252,20 +266,9 @@ static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
return 1;
}

/*
* Allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
* sleep)
* - called from dput() when d_count is going to 0.
* - return 1 to request dentry be unhashed, 0 otherwise
*/
static int afs_dynroot_d_delete(const struct dentry *dentry)
{
return d_really_is_positive(dentry);
}

const struct dentry_operations afs_dynroot_dentry_operations = {
.d_revalidate = afs_dynroot_d_revalidate,
.d_delete = afs_dynroot_d_delete,
.d_delete = always_delete_dentry,
.d_release = afs_d_release,
.d_automount = afs_d_automount,
};
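The dynroot hunk above only trusts a DNS answer when the returned V1 server-list header is well formed and the lookup status is good. A minimal userspace sketch of that validation pattern follows; the demo_ struct and constant values are assumptions for illustration, not the kernel's dns_server_list_v1_header definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical mirror of the header fields referenced in the hunk above. */
struct demo_dns_hdr { uint8_t zero, content, version; };
struct demo_server_list_v1 { struct demo_dns_hdr hdr; uint8_t status; };

enum {
	DEMO_PAYLOAD_IS_SERVER_LIST = 0,	/* made-up values */
	DEMO_LOOKUP_GOOD = 1,
	DEMO_LOOKUP_GOOD_WITH_BAD = 2,
};

/* Accept the payload only if it is large enough and the header checks out. */
static bool demo_payload_usable(const void *buf, size_t len)
{
	struct demo_server_list_v1 v1;

	if (len < sizeof(v1))
		return false;
	memcpy(&v1, buf, sizeof(v1));
	if (v1.hdr.zero != 0 || v1.hdr.content != DEMO_PAYLOAD_IS_SERVER_LIST ||
	    v1.hdr.version != 1)
		return false;
	return v1.status == DEMO_LOOKUP_GOOD || v1.status == DEMO_LOOKUP_GOOD_WITH_BAD;
}

int main(void)
{
	struct demo_server_list_v1 ok = { { 0, DEMO_PAYLOAD_IS_SERVER_LIST, 1 }, DEMO_LOOKUP_GOOD };

	printf("usable: %d\n", demo_payload_usable(&ok, sizeof(ok)));
	return 0;
}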
@@ -585,6 +585,7 @@ struct afs_volume {
#define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
#define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
#define AFS_VOLUME_RM_TREE 7 /* - Set if volume removed from cell->volumes */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_volume *cache; /* Caching cookie */
#endif
@@ -1517,6 +1518,7 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
extern int afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason);
extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
@@ -32,8 +32,13 @@ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
} else if (p->vid > volume->vid) {
pp = &(*pp)->rb_right;
} else {
volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
goto found;
if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) {
volume = p;
goto found;
}

set_bit(AFS_VOLUME_RM_TREE, &volume->flags);
rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes);
}
}

@@ -56,7 +61,8 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
rb_erase(&volume->cell_node, &cell->volumes);
if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags))
rb_erase(&volume->cell_node, &cell->volumes);
write_sequnlock(&cell->volume_lock);
}
}
@@ -235,6 +241,20 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
_leave(" [destroyed]");
}

/*
* Try to get a reference on a volume record.
*/
bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
{
int r;

if (__refcount_inc_not_zero(&volume->ref, &r)) {
trace_afs_volume(volume->vid, r + 1, reason);
return true;
}
return false;
}

/*
* Get a reference on a volume record.
*/
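The new afs_try_get_volume() above is the "increment the refcount only if it is not already zero" idiom, which keeps a racing lookup from resurrecting a volume whose last reference is being dropped while it still sits in the cell tree. A minimal userspace sketch of that idiom, using C11 atomics in place of the kernel's refcount_t (illustrative only, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_obj {
	atomic_int ref;	/* 0 means the object is already being torn down */
};

/* Take a reference only if at least one is still held. */
static bool demo_try_get(struct demo_obj *obj)
{
	int old = atomic_load(&obj->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->ref, &old, old + 1))
			return true;	/* won the race, reference taken */
	}
	return false;		/* object is dying, caller must allocate a fresh one */
}

int main(void)
{
	struct demo_obj live = { 1 }, dying = { 0 };

	printf("live: %d dying: %d\n", demo_try_get(&live), demo_try_get(&dying));
	return 0;
}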
@@ -2685,13 +2685,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);

spin_lock(&ctl->tree_lock);
/* Count initial region as zone_unusable until it gets activated. */
if (!used)
to_free = size;
else if (initial &&
test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
(block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
to_free = 0;
else if (initial)
to_free = block_group->zone_capacity;
else if (offset >= block_group->alloc_offset)
@@ -2719,8 +2714,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
reclaimable_unusable = block_group->zone_unusable -
(block_group->length - block_group->zone_capacity);
/* All the region is now unusable. Mark it as unused and reclaim */
if (block_group->zone_unusable == block_group->length &&
block_group->alloc_offset) {
if (block_group->zone_unusable == block_group->length) {
btrfs_mark_bg_unused(block_group);
} else if (bg_reclaim_threshold &&
reclaimable_unusable >=
@ -1574,19 +1574,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
|
||||
return;
|
||||
|
||||
WARN_ON(cache->bytes_super != 0);
|
||||
|
||||
/* Check for block groups never get activated */
|
||||
if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
|
||||
cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
|
||||
!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
|
||||
cache->alloc_offset == 0) {
|
||||
unusable = cache->length;
|
||||
free = 0;
|
||||
} else {
|
||||
unusable = (cache->alloc_offset - cache->used) +
|
||||
(cache->length - cache->zone_capacity);
|
||||
free = cache->zone_capacity - cache->alloc_offset;
|
||||
}
|
||||
unusable = (cache->alloc_offset - cache->used) +
|
||||
(cache->length - cache->zone_capacity);
|
||||
free = cache->zone_capacity - cache->alloc_offset;
|
||||
|
||||
/* We only need ->free_space in ALLOC_SEQ block groups */
|
||||
cache->cached = BTRFS_CACHE_FINISHED;
|
||||
@ -1882,7 +1872,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
|
||||
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = block_group->fs_info;
|
||||
struct btrfs_space_info *space_info = block_group->space_info;
|
||||
struct map_lookup *map;
|
||||
struct btrfs_device *device;
|
||||
u64 physical;
|
||||
@ -1894,7 +1883,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
|
||||
|
||||
map = block_group->physical_map;
|
||||
|
||||
spin_lock(&space_info->lock);
|
||||
spin_lock(&block_group->lock);
|
||||
if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
|
||||
ret = true;
|
||||
@ -1923,14 +1911,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
|
||||
|
||||
/* Successfully activated all the zones */
|
||||
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
|
||||
WARN_ON(block_group->alloc_offset != 0);
|
||||
if (block_group->zone_unusable == block_group->length) {
|
||||
block_group->zone_unusable = block_group->length - block_group->zone_capacity;
|
||||
space_info->bytes_zone_unusable -= block_group->zone_capacity;
|
||||
}
|
||||
spin_unlock(&block_group->lock);
|
||||
btrfs_try_granting_tickets(fs_info, space_info);
|
||||
spin_unlock(&space_info->lock);
|
||||
|
||||
/* For the active block group list */
|
||||
btrfs_get_block_group(block_group);
|
||||
@ -1943,7 +1924,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
|
||||
|
||||
out_unlock:
|
||||
spin_unlock(&block_group->lock);
|
||||
spin_unlock(&space_info->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
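The zoned-btrfs hunks above keep the accounting visible in the code: once a block group is treated as activated, zone_unusable = (alloc_offset - used) + (length - zone_capacity) and free = zone_capacity - alloc_offset. A stand-alone sketch with made-up numbers, just to show that arithmetic (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical block-group numbers, in bytes. */
struct demo_bg {
	uint64_t length;	/* total zone size on disk */
	uint64_t zone_capacity;	/* writable portion of the zone */
	uint64_t alloc_offset;	/* bytes already allocated */
	uint64_t used;		/* bytes still referenced */
};

int main(void)
{
	struct demo_bg bg = { .length = 256 << 20, .zone_capacity = 192 << 20,
			      .alloc_offset = 128 << 20, .used = 96 << 20 };
	/* Freed-but-unreusable space plus the never-writable tail of the zone. */
	uint64_t unusable = (bg.alloc_offset - bg.used) + (bg.length - bg.zone_capacity);
	/* Space still available for sequential allocation. */
	uint64_t free_bytes = bg.zone_capacity - bg.alloc_offset;

	printf("zone_unusable=%llu MiB free=%llu MiB\n",
	       (unsigned long long)(unusable >> 20), (unsigned long long)(free_bytes >> 20));
	return 0;
}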
@@ -107,6 +107,19 @@ static inline void get_fuse_backing_path(const struct dentry *d,
}
#endif

/* Submount lookup tracking */
struct fuse_submount_lookup {
/** Refcount */
refcount_t count;

/** Unique ID, which identifies the inode between userspace
* and kernel */
u64 nodeid;

/** The request used for sending the FORGET message */
struct fuse_forget_link *forget;
};

/** FUSE inode */
struct fuse_inode {
/** Inode data */
@@ -213,6 +226,8 @@ struct fuse_inode {
*/
struct fuse_inode_dax *dax;
#endif
/** Submount specific lookup tracking */
struct fuse_submount_lookup *submount_lookup;
};

/** FUSE inode state bits */
@ -68,6 +68,24 @@ struct fuse_forget_link *fuse_alloc_forget(void)
|
||||
return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT);
|
||||
}
|
||||
|
||||
static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void)
|
||||
{
|
||||
struct fuse_submount_lookup *sl;
|
||||
|
||||
sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT);
|
||||
if (!sl)
|
||||
return NULL;
|
||||
sl->forget = fuse_alloc_forget();
|
||||
if (!sl->forget)
|
||||
goto out_free;
|
||||
|
||||
return sl;
|
||||
|
||||
out_free:
|
||||
kfree(sl);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct inode *fuse_alloc_inode(struct super_block *sb)
|
||||
{
|
||||
struct fuse_inode *fi;
|
||||
@ -87,6 +105,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
|
||||
fi->attr_version = 0;
|
||||
fi->orig_ino = 0;
|
||||
fi->state = 0;
|
||||
fi->submount_lookup = NULL;
|
||||
mutex_init(&fi->mutex);
|
||||
spin_lock_init(&fi->lock);
|
||||
fi->forget = fuse_alloc_forget();
|
||||
@ -121,6 +140,17 @@ static void fuse_free_inode(struct inode *inode)
|
||||
kmem_cache_free(fuse_inode_cachep, fi);
|
||||
}
|
||||
|
||||
static void fuse_cleanup_submount_lookup(struct fuse_conn *fc,
|
||||
struct fuse_submount_lookup *sl)
|
||||
{
|
||||
if (!refcount_dec_and_test(&sl->count))
|
||||
return;
|
||||
|
||||
fuse_queue_forget(fc, sl->forget, sl->nodeid, 1);
|
||||
sl->forget = NULL;
|
||||
kfree(sl);
|
||||
}
|
||||
|
||||
static void fuse_evict_inode(struct inode *inode)
|
||||
{
|
||||
struct fuse_inode *fi = get_fuse_inode(inode);
|
||||
@ -139,6 +169,11 @@ static void fuse_evict_inode(struct inode *inode)
|
||||
fi->nlookup);
|
||||
fi->forget = NULL;
|
||||
}
|
||||
|
||||
if (fi->submount_lookup) {
|
||||
fuse_cleanup_submount_lookup(fc, fi->submount_lookup);
|
||||
fi->submount_lookup = NULL;
|
||||
}
|
||||
}
|
||||
if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
|
||||
WARN_ON(!list_empty(&fi->write_files));
|
||||
@ -349,6 +384,13 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
|
||||
fuse_dax_dontcache(inode, attr->flags);
|
||||
}
|
||||
|
||||
static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl,
|
||||
u64 nodeid)
|
||||
{
|
||||
sl->nodeid = nodeid;
|
||||
refcount_set(&sl->count, 1);
|
||||
}
|
||||
|
||||
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
|
||||
{
|
||||
inode->i_mode = attr->mode & S_IFMT;
|
||||
@ -485,12 +527,22 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
|
||||
*/
|
||||
if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) &&
|
||||
S_ISDIR(attr->mode)) {
|
||||
struct fuse_inode *fi;
|
||||
|
||||
inode = new_inode(sb);
|
||||
if (!inode)
|
||||
return NULL;
|
||||
|
||||
fuse_init_inode(inode, attr);
|
||||
get_fuse_inode(inode)->nodeid = nodeid;
|
||||
fi = get_fuse_inode(inode);
|
||||
fi->nodeid = nodeid;
|
||||
fi->submount_lookup = fuse_alloc_submount_lookup();
|
||||
if (!fi->submount_lookup) {
|
||||
iput(inode);
|
||||
return NULL;
|
||||
}
|
||||
/* Sets nlookup = 1 on fi->submount_lookup->nlookup */
|
||||
fuse_init_submount_lookup(fi->submount_lookup, nodeid);
|
||||
inode->i_flags |= S_AUTOMOUNT;
|
||||
goto done;
|
||||
}
|
||||
@ -513,11 +565,11 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
|
||||
iput(inode);
|
||||
goto retry;
|
||||
}
|
||||
done:
|
||||
fi = get_fuse_inode(inode);
|
||||
spin_lock(&fi->lock);
|
||||
fi->nlookup++;
|
||||
spin_unlock(&fi->lock);
|
||||
done:
|
||||
fuse_change_attributes(inode, attr, attr_valid, attr_version);
|
||||
|
||||
return inode;
|
||||
@ -1610,6 +1662,8 @@ static int fuse_fill_super_submount(struct super_block *sb,
|
||||
struct super_block *parent_sb = parent_fi->inode.i_sb;
|
||||
struct fuse_attr root_attr;
|
||||
struct inode *root;
|
||||
struct fuse_submount_lookup *sl;
|
||||
struct fuse_inode *fi;
|
||||
|
||||
fuse_sb_defaults(sb);
|
||||
fm->sb = sb;
|
||||
@ -1632,12 +1686,27 @@ static int fuse_fill_super_submount(struct super_block *sb,
|
||||
* its nlookup should not be incremented. fuse_iget() does
|
||||
* that, though, so undo it here.
|
||||
*/
|
||||
get_fuse_inode(root)->nlookup--;
|
||||
fi = get_fuse_inode(root);
|
||||
fi->nlookup--;
|
||||
|
||||
sb->s_d_op = &fuse_dentry_operations;
|
||||
sb->s_root = d_make_root(root);
|
||||
if (!sb->s_root)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Grab the parent's submount_lookup pointer and take a
|
||||
* reference on the shared nlookup from the parent. This is to
|
||||
* prevent the last forget for this nodeid from getting
|
||||
* triggered until all users have finished with it.
|
||||
*/
|
||||
sl = parent_fi->submount_lookup;
|
||||
WARN_ON(!sl);
|
||||
if (sl) {
|
||||
refcount_inc(&sl->count);
|
||||
fi->submount_lookup = sl;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
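The fuse hunks above make a submount root share one refcounted fuse_submount_lookup with its parent, so exactly one FORGET is sent for the nodeid, and only after the last user is gone. A minimal userspace sketch of that shared-lookup lifetime, with hypothetical demo_ types and a plain counter instead of refcount_t (illustrative only):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the shared lookup state: one count, one id. */
struct demo_lookup {
	unsigned int count;
	unsigned long long nodeid;
};

static struct demo_lookup *demo_lookup_alloc(unsigned long long nodeid)
{
	struct demo_lookup *sl = calloc(1, sizeof(*sl));

	if (!sl)
		return NULL;
	sl->nodeid = nodeid;
	sl->count = 1;		/* the inode that created it holds the first ref */
	return sl;
}

/* A submount root shares the parent's lookup instead of allocating its own. */
static void demo_lookup_share(struct demo_lookup *sl) { sl->count++; }

/* Only the last user sends the single FORGET and frees the state. */
static void demo_lookup_put(struct demo_lookup *sl)
{
	if (--sl->count)
		return;
	printf("FORGET nodeid=%llu nlookup=1\n", sl->nodeid);
	free(sl);
}

int main(void)
{
	struct demo_lookup *sl = demo_lookup_alloc(42);

	demo_lookup_share(sl);	/* submount root grabs a reference */
	demo_lookup_put(sl);	/* parent inode evicted: nothing sent yet */
	demo_lookup_put(sl);	/* last user gone: one FORGET goes out */
	return 0;
}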
@@ -38,11 +38,13 @@ void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DEBUG2
struct smb_hdr *smb = buf;

cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
cifs_dbg(VFS, "smb buf %p len %u\n", smb,
server->ops->calc_smb_size(smb));
cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n",
smb->Command, smb->Status.CifsError, smb->Flags,
smb->Flags2, smb->Mid, smb->Pid, smb->WordCount);
if (!server->ops->check_message(buf, server->total_read, server)) {
cifs_dbg(VFS, "smb buf %p len %u\n", smb,
server->ops->calc_smb_size(smb));
}
#endif /* CONFIG_CIFS_DEBUG2 */
}
@@ -513,7 +513,8 @@ struct smb_version_operations {
struct mid_q_entry **, char **, int *);
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
int (*next_header)(char *);
int (*next_header)(struct TCP_Server_Info *server, char *buf,
unsigned int *noff);
/* ioctl passthrough for query_info */
int (*ioctl_query_info)(const unsigned int xid,
struct cifs_tcon *tcon,
@@ -1225,7 +1225,12 @@ cifs_demultiplex_thread(void *p)
server->total_read += length;

if (server->ops->next_header) {
next_offset = server->ops->next_header(buf);
if (server->ops->next_header(server, buf, &next_offset)) {
cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
__func__, next_offset);
cifs_reconnect(server, true);
continue;
}
if (next_offset)
server->pdu_size = next_offset;
}
@@ -350,6 +350,10 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
cifs_dbg(VFS, "Length less than smb header size\n");
}
return -EIO;
} else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
__func__, smb->WordCount);
return -EIO;
}

/* otherwise, there is enough to get to the BCC */
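The extra checkSMB() branch above is the usual "does the advertised variable-length part actually fit in the bytes read" test: a frame whose WordCount promises more parameter words than were received is rejected before anything dereferences past the buffer. A stand-alone sketch of the same bounds check against a made-up header layout (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Made-up wire format: one length byte, then word_count 16-bit words. */
static int demo_check(const uint8_t *buf, size_t total_read)
{
	if (total_read < 1)
		return -1;			/* no header at all */
	uint8_t word_count = buf[0];		/* length field taken from the frame */
	if (total_read < 1 + 2u * (size_t)word_count)
		return -1;			/* field promises more than we received */
	return 0;
}

int main(void)
{
	uint8_t frame[8] = { 3 };	/* claims 3 words = 6 bytes of parameters */

	printf("full frame: %d truncated: %d\n",
	       demo_check(frame, sizeof(frame)), demo_check(frame, 4));
	return 0;
}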
@@ -5196,17 +5196,22 @@ smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
NULL, 0, 0, false);
}

static int
smb2_next_header(char *buf)
static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
unsigned int *noff)
{
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;

if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
return sizeof(struct smb2_transform_hdr) +
le32_to_cpu(t_hdr->OriginalMessageSize);

return le32_to_cpu(hdr->NextCommand);
if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
*noff = le32_to_cpu(t_hdr->OriginalMessageSize);
if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
return -EINVAL;
} else {
*noff = le32_to_cpu(hdr->NextCommand);
}
if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
return -EINVAL;
return 0;
}

static int
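check_add_overflow() in the new smb2_next_header() is what keeps the compound-chain offset sane: the fixed transform-header size is added to a peer-controlled 32-bit length and the result is rejected if the addition wraps. A userspace sketch of the same guard using the GCC/Clang builtin that the kernel helper is built on; the header size below is a made-up number:

#include <stdint.h>
#include <stdio.h>

#define DEMO_TRANSFORM_HDR_SIZE 52u	/* hypothetical fixed header size */

/* Return -1 instead of a wrapped offset when the addition overflows. */
static int demo_next_offset(uint32_t original_message_size, uint32_t *noff)
{
	if (__builtin_add_overflow(original_message_size, DEMO_TRANSFORM_HDR_SIZE, noff))
		return -1;
	return 0;
}

int main(void)
{
	uint32_t off;

	printf("sane: %d\n", demo_next_offset(4096, &off));		/* ok, off = 4148 */
	printf("evil: %d\n", demo_next_offset(UINT32_MAX - 8, &off));	/* rejected */
	return 0;
}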
@ -372,10 +372,15 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
|
||||
void **request_buf, unsigned int *total_len)
|
||||
{
|
||||
/* BB eventually switch this to SMB2 specific small buf size */
|
||||
if (smb2_command == SMB2_SET_INFO)
|
||||
switch (smb2_command) {
|
||||
case SMB2_SET_INFO:
|
||||
case SMB2_QUERY_INFO:
|
||||
*request_buf = cifs_buf_get();
|
||||
else
|
||||
break;
|
||||
default:
|
||||
*request_buf = cifs_small_buf_get();
|
||||
break;
|
||||
}
|
||||
if (*request_buf == NULL) {
|
||||
/* BB should we add a retry in here if not a writepage? */
|
||||
return -ENOMEM;
|
||||
@ -3523,8 +3528,13 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
|
||||
struct smb2_query_info_req *req;
|
||||
struct kvec *iov = rqst->rq_iov;
|
||||
unsigned int total_len;
|
||||
size_t len;
|
||||
int rc;
|
||||
|
||||
if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
|
||||
len > CIFSMaxBufSize))
|
||||
return -EINVAL;
|
||||
|
||||
rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
|
||||
(void **) &req, &total_len);
|
||||
if (rc)
|
||||
@ -3546,7 +3556,7 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
|
||||
|
||||
iov[0].iov_base = (char *)req;
|
||||
/* 1 for Buffer */
|
||||
iov[0].iov_len = total_len - 1 + input_len;
|
||||
iov[0].iov_len = len;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3554,7 +3564,7 @@ void
|
||||
SMB2_query_info_free(struct smb_rqst *rqst)
|
||||
{
|
||||
if (rqst && rqst->rq_iov)
|
||||
cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
|
||||
cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
|
||||
}
|
||||
|
||||
static int
|
||||
@ -5439,6 +5449,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void free_qfs_info_req(struct kvec *iov)
|
||||
{
|
||||
cifs_buf_release(iov->iov_base);
|
||||
}
|
||||
|
||||
int
|
||||
SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
|
||||
@ -5470,7 +5485,7 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
|
||||
rc = cifs_send_recv(xid, ses, server,
|
||||
&rqst, &resp_buftype, flags, &rsp_iov);
|
||||
cifs_small_buf_release(iov.iov_base);
|
||||
free_qfs_info_req(&iov);
|
||||
if (rc) {
|
||||
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
|
||||
goto posix_qfsinf_exit;
|
||||
@ -5521,7 +5536,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
|
||||
rc = cifs_send_recv(xid, ses, server,
|
||||
&rqst, &resp_buftype, flags, &rsp_iov);
|
||||
cifs_small_buf_release(iov.iov_base);
|
||||
free_qfs_info_req(&iov);
|
||||
if (rc) {
|
||||
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
|
||||
goto qfsinf_exit;
|
||||
@ -5588,7 +5603,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
|
||||
rc = cifs_send_recv(xid, ses, server,
|
||||
&rqst, &resp_buftype, flags, &rsp_iov);
|
||||
cifs_small_buf_release(iov.iov_base);
|
||||
free_qfs_info_req(&iov);
|
||||
if (rc) {
|
||||
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
|
||||
goto qfsattr_exit;
|
||||
|
@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
else {
ubifs_err(c, "old idx added twice!");
kfree(old_idx);
return;
}
}
rb_link_node(&old_idx->rb, parent, p);
@@ -8,6 +8,7 @@
#ifndef _DAMON_H_
#define _DAMON_H_

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/time64.h>
#include <linux/types.h>
@@ -457,6 +458,8 @@ struct damon_ctx {
/* private: internal use only */
struct timespec64 last_aggregation;
struct timespec64 last_ops_update;
/* for waiting until the execution of the kdamond_fn is started */
struct completion kdamond_started;

/* public: */
struct task_struct *kdamond;
@@ -37,6 +37,8 @@ dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c);

void dm_bufio_client_reset(struct dm_bufio_client *c);

/*
* Set the sector range.
* When this function is called, there must be no I/O in progress on the bufio
@@ -475,10 +475,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN */
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN */
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */
@@ -74,6 +74,7 @@ struct key_type {

unsigned int flags;
#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */

/* vet a description */
int (*vet_description)(const char *description);
@ -282,18 +282,23 @@ struct mlx5_cmd_stats {
|
||||
struct mlx5_cmd {
|
||||
struct mlx5_nb nb;
|
||||
|
||||
/* members which needs to be queried or reinitialized each reload */
|
||||
struct {
|
||||
u16 cmdif_rev;
|
||||
u8 log_sz;
|
||||
u8 log_stride;
|
||||
int max_reg_cmds;
|
||||
unsigned long bitmask;
|
||||
struct semaphore sem;
|
||||
struct semaphore pages_sem;
|
||||
struct semaphore throttle_sem;
|
||||
} vars;
|
||||
enum mlx5_cmdif_state state;
|
||||
void *cmd_alloc_buf;
|
||||
dma_addr_t alloc_dma;
|
||||
int alloc_size;
|
||||
void *cmd_buf;
|
||||
dma_addr_t dma;
|
||||
u16 cmdif_rev;
|
||||
u8 log_sz;
|
||||
u8 log_stride;
|
||||
int max_reg_cmds;
|
||||
int events;
|
||||
u32 __iomem *vector;
|
||||
|
||||
/* protect command queue allocations
|
||||
*/
|
||||
@ -303,11 +308,8 @@ struct mlx5_cmd {
|
||||
*/
|
||||
spinlock_t token_lock;
|
||||
u8 token;
|
||||
unsigned long bitmask;
|
||||
char wq_name[MLX5_CMD_WQ_MAX_NAME];
|
||||
struct workqueue_struct *wq;
|
||||
struct semaphore sem;
|
||||
struct semaphore pages_sem;
|
||||
int mode;
|
||||
u16 allowed_opcode;
|
||||
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
|
||||
|
@ -188,6 +188,7 @@ struct blocked_key {
|
||||
struct smp_csrk {
|
||||
bdaddr_t bdaddr;
|
||||
u8 bdaddr_type;
|
||||
u8 link_type;
|
||||
u8 type;
|
||||
u8 val[16];
|
||||
};
|
||||
@ -197,6 +198,7 @@ struct smp_ltk {
|
||||
struct rcu_head rcu;
|
||||
bdaddr_t bdaddr;
|
||||
u8 bdaddr_type;
|
||||
u8 link_type;
|
||||
u8 authenticated;
|
||||
u8 type;
|
||||
u8 enc_size;
|
||||
@ -211,6 +213,7 @@ struct smp_irk {
|
||||
bdaddr_t rpa;
|
||||
bdaddr_t bdaddr;
|
||||
u8 addr_type;
|
||||
u8 link_type;
|
||||
u8 val[16];
|
||||
};
|
||||
|
||||
@ -218,6 +221,8 @@ struct link_key {
|
||||
struct list_head list;
|
||||
struct rcu_head rcu;
|
||||
bdaddr_t bdaddr;
|
||||
u8 bdaddr_type;
|
||||
u8 link_type;
|
||||
u8 type;
|
||||
u8 val[HCI_LINK_KEY_SIZE];
|
||||
u8 pin_len;
|
||||
|
@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
__array( unsigned char, line, P9_PROTO_DUMP_SZ )
__dynamic_array(unsigned char, line,
min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
),

TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
memcpy(__get_dynamic_array(line), pdu->sdata,
__get_dynamic_array_len(line));
),
TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
__entry->tag, 0, __entry->line, 16, __entry->line + 16)
__entry->tag, __get_dynamic_array_len(line),
__get_dynamic_array(line))
);
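The 9p tracepoint now sizes its record with min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ) and copies exactly that many bytes, so a short PDU can no longer be over-read when the protocol dump fires. A stand-alone sketch of that bounded-dump pattern (illustrative only; the dump size below is a made-up constant):

#include <stdio.h>
#include <string.h>

#define DEMO_DUMP_SZ 32u	/* fixed upper bound on how much we ever dump */

/* Copy at most DEMO_DUMP_SZ bytes, but never more than the buffer holds. */
static size_t demo_dump(char *dst, const char *src, size_t capacity)
{
	size_t n = capacity < DEMO_DUMP_SZ ? capacity : DEMO_DUMP_SZ;

	memcpy(dst, src, n);
	return n;
}

int main(void)
{
	char small[8] = "9p data", out[DEMO_DUMP_SZ];

	printf("dumped %zu bytes\n", demo_dump(out, small, sizeof(small)));
	return 0;
}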