This is the 5.4.263 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmVyyWIACgkQONu9yGCS
aT6Y8A//QJPg7pguCawsJGrem3a5dvhi9scNMmfuhKZOKS73JEmt4yudB9IOUjIX
1c1aBcJo5yYMZq5L9mhXnlgkgqENxE9fI45FtMdwoKiriEQ0w9OBLlfZuKN9lwzC
tyIigaGE5DD3SqL8e/04LNmMPPdolM38lJ368fYaD3T4d7LfwK0qHJFL8dSg4OFQ
VaePViMFgbodjtSXoERNjVLaNtSlQDQytiWHMiQX2uf6CIIRbm+zFHn2Se1mUgh3
WGT9JfXZ+achPw6OLhSIjwL+7vowhn3eRETq4zGkkNSK+rmB6W7zjPhou4SYsmc+
FAYXvalmhQWWjlmIyZzO7GIVtgx19VuEYB8h5KLvp6DXQ0h0wCBOGgsfIT4icbgW
wO0R+toWYY3Y79OLRGiMjiL9b60njJYnrm7JrheRD+BIm2jva+Tb7UxhC6QDMfH6
a8fya8iJDNZWggwpx67JUANdMO8e+2rS4ttNxW0gTZSHhyEjo1HXctKBEmmtXk4s
HGNV5xUniPnzrP8rduNqePG5B6c3wqOHUwj45L4scGmeC0DzW7E8EBgkHfRcU6CG
ik9z5nQeDikREfK7cp8OSFtLaEBWSIX57XwHWDTMVPDGTN8EQ6eI7vTnQH3xOhA8
VWFfwcU6avROM/ih7eJ+X4JvuDKcAGTPeD6oF3II0MLPK2m7ZmE=
=p/ty
-----END PGP SIGNATURE-----

Merge 5.4.263 into android11-5.4-lts

Changes in 5.4.263
	driver core: Release all resources during unbind before updating device links
	RDMA/irdma: Prevent zero-length STAG registration
	PCI: keystone: Drop __init from ks_pcie_add_pcie_{ep,port}()
	afs: Make error on cell lookup failure consistent with OpenAFS
	drm/panel: simple: Fix Innolux G101ICE-L01 bus flags
	drm/panel: simple: Fix Innolux G101ICE-L01 timings
	ata: pata_isapnp: Add missing error check for devm_ioport_map()
	drm/rockchip: vop: Fix color for RGB888/BGR888 format on VOP full
	HID: core: store the unique system identifier in hid_device
	HID: fix HID device resource race between HID core and debugging support
	ipv4: Correct/silence an endian warning in __ip_do_redirect
	net: usb: ax88179_178a: fix failed operations during ax88179_reset
	arm/xen: fix xen_vcpu_info allocation alignment
	amd-xgbe: handle corner-case during sfp hotplug
	amd-xgbe: handle the corner-case during tx completion
	amd-xgbe: propagate the correct speed and duplex status
	net: axienet: Fix check for partial TX checksum
	afs: Return ENOENT if no cell DNS record can be found
	afs: Fix file locking on R/O volumes to operate in local mode
	nvmet: remove unnecessary ctrl parameter
	nvmet: nul-terminate the NQNs passed in the connect command
	MIPS: KVM: Fix a build warning about variable set but not used
	ext4: add a new helper to check if es must be kept
	ext4: factor out __es_alloc_extent() and __es_free_extent()
	ext4: use pre-allocated es in __es_insert_extent()
	ext4: use pre-allocated es in __es_remove_extent()
	ext4: using nofail preallocation in ext4_es_remove_extent()
	ext4: using nofail preallocation in ext4_es_insert_delayed_block()
	ext4: using nofail preallocation in ext4_es_insert_extent()
	ext4: fix slab-use-after-free in ext4_es_insert_extent()
	ext4: make sure allocate pending entry not fail
	arm64: cpufeature: Extract capped perfmon fields
	KVM: arm64: limit PMU version to PMUv3 for ARMv8.1
	ACPI: resource: Skip IRQ override on ASUS ExpertBook B1402CVA
	bcache: replace a mistaken IS_ERR() by IS_ERR_OR_NULL() in btree_gc_coalesce()
	s390/dasd: protect device queue against concurrent access
	USB: serial: option: add Luat Air72*U series products
	hv_netvsc: Fix race of register_netdevice_notifier and VF register
	hv_netvsc: Mark VF as slave before exposing it to user-mode
	dm-delay: fix a race between delay_presuspend and delay_bio
	bcache: check return value from btree_node_alloc_replacement()
	bcache: prevent potential division by zero error
	USB: serial: option: add Fibocom L7xx modules
	USB: serial: option: fix FM101R-GL defines
	USB: serial: option: don't claim interface 4 for ZTE MF290
	USB: dwc2: write HCINT with INTMASK applied
	usb: dwc3: set the dma max_seg_size
	USB: dwc3: qcom: fix resource leaks on probe deferral
	USB: dwc3: qcom: fix wakeup after probe deferral
	io_uring: fix off-by one bvec index
	pinctrl: avoid reload of p state in list iteration
	firewire: core: fix possible memory leak in create_units()
	mmc: block: Do not lose cache flush during CQE error recovery
	ALSA: hda: Disable power-save on KONTRON SinglePC
	ALSA: hda/realtek: Headset Mic VREF to 100%
	ALSA: hda/realtek: Add supported ALC257 for ChromeOS
	dm-verity: align struct dm_verity_fec_io properly
	dm verity: don't perform FEC for failed readahead IO
	bcache: revert replacing IS_ERR_OR_NULL with IS_ERR
	powerpc: Don't clobber f0/vs0 during fp|altivec register save
	btrfs: add dmesg output for first mount and last unmount of a filesystem
	btrfs: fix off-by-one when checking chunk map includes logical address
	btrfs: send: ensure send_fd is writable
	btrfs: make error messages more clear when getting a chunk map
	Input: xpad - add HyperX Clutch Gladiate Support
	ipv4: igmp: fix refcnt uaf issue when receiving igmp query packet
	net: stmmac: xgmac: Disable FPE MMC interrupts
	ravb: Fix races between ravb_tx_timeout_work() and net related ops
	net: ravb: Use pm_runtime_resume_and_get()
	net: ravb: Start TX queues after HW initialization succeeded
	smb3: fix touch -h of symlink
	s390/mm: fix phys vs virt confusion in mark_kernel_pXd() functions family
	s390/cmma: fix detection of DAT pages
	mtd: cfi_cmdset_0001: Support the absence of protection registers
	mtd: cfi_cmdset_0001: Byte swap OTP info
	fbdev: stifb: Make the STI next font pointer a 32-bit signed offset
	ima: annotate iint mutex to avoid lockdep false positive warnings
	ovl: skip overlayfs superblocks at global sync
	ima: detect changes to the backing overlay file
	scsi: qla2xxx: Simplify the code for aborting SCSI commands
	scsi: core: Introduce the scsi_cmd_to_rq() function
	scsi: qla2xxx: Use scsi_cmd_to_rq() instead of scsi_cmnd.request
	scsi: qla2xxx: Fix system crash due to bad pointer access
	cpufreq: imx6q: don't warn for disabling a non-existing frequency
	cpufreq: imx6q: Don't disable 792 Mhz OPP unnecessarily
	mmc: cqhci: Increase recovery halt timeout
	mmc: cqhci: Warn of halt or task clear failure
	mmc: cqhci: Fix task clearing in CQE error recovery
	mmc: core: convert comma to semicolon
	mmc: block: Retry commands in CQE error recovery
	Linux 5.4.263

Change-Id: I5187b50207d7ed37d7448664448409ed75106ea1
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 262
+SUBLEVEL = 263
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -362,7 +362,8 @@ static int __init xen_guest_init(void)
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
-	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+				       1 << fls(sizeof(struct vcpu_info) - 1));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

@@ -449,6 +449,29 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
 }

+/*
+ * Fields that identify the version of the Performance Monitors Extension do
+ * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
+ * "Alternative ID scheme used for the Performance Monitors Extension version".
+ */
+static inline u64 __attribute_const__
+cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
+{
+	u64 val = cpuid_feature_extract_unsigned_field(features, field);
+	u64 mask = GENMASK_ULL(field + 3, field);
+
+	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
+	if (val == 0xf)
+		val = 0;
+
+	if (val > cap) {
+		features &= ~mask;
+		features |= (cap << field) & mask;
+	}
+
+	return features;
+}
+
 static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
 {
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);

@@ -735,6 +735,12 @@
 #define ID_AA64DFR0_TRACEVER_SHIFT	4
 #define ID_AA64DFR0_DEBUGVER_SHIFT	0

+#define ID_AA64DFR0_PMUVER_8_1		0x4
+
+#define ID_DFR0_PERFMON_SHIFT		24
+
+#define ID_DFR0_PERFMON_8_1		0x4
+
 #define ID_ISAR5_RDM_SHIFT		24
 #define ID_ISAR5_CRC32_SHIFT		16
 #define ID_ISAR5_SHA2_SHIFT		12

@@ -1105,6 +1105,16 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
+	} else if (id == SYS_ID_AA64DFR0_EL1) {
+		/* Limit guests to PMUv3 for ARMv8.1 */
+		val = cpuid_feature_cap_perfmon_field(val,
+						      ID_AA64DFR0_PMUVER_SHIFT,
+						      ID_AA64DFR0_PMUVER_8_1);
+	} else if (id == SYS_ID_DFR0_EL1) {
+		/* Limit guests to PMUv3 for ARMv8.1 */
+		val = cpuid_feature_cap_perfmon_field(val,
+						      ID_DFR0_PERFMON_SHIFT,
+						      ID_DFR0_PERFMON_8_1);
	}

	return val;

@@ -693,7 +693,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int srcu_idx, err;
	kvm_pfn_t pfn;
-	pte_t *ptep, entry, old_pte;
+	pte_t *ptep, entry;
	bool writeable;
	unsigned long prot_bits;
	unsigned long mmu_seq;

@@ -766,7 +766,6 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
	entry = pfn_pte(pfn, __pgprot(prot_bits));

	/* Write the PTE */
-	old_pte = *ptep;
	set_pte(ptep, entry);

	err = 0;

@@ -24,6 +24,15 @@
 #include <asm/feature-fixups.h>

 #ifdef CONFIG_VSX
+#define __REST_1FPVSR(n,c,base) \
+BEGIN_FTR_SECTION \
+	b	2f; \
+END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
+	REST_FPR(n,base); \
+	b	3f; \
+2:	REST_VSR(n,c,base); \
+3:
+
 #define __REST_32FPVSRS(n,c,base) \
 BEGIN_FTR_SECTION \
	b	2f; \

@@ -42,9 +51,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
 2:	SAVE_32VSRS(n,c,base); \
 3:
 #else
+#define __REST_1FPVSR(n,b,base)		REST_FPR(n, base)
 #define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
 #define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
 #endif
+#define REST_1FPVSR(n,c,base)   __REST_1FPVSR(n,__REG_##c,__REG_##base)
 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)

@@ -68,6 +79,7 @@ _GLOBAL(store_fp_state)
	SAVE_32FPVSRS(0, R4, R3)
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r3)
+	REST_1FPVSR(0, R4, R3)
	blr
 EXPORT_SYMBOL(store_fp_state)

@@ -132,6 +144,7 @@ _GLOBAL(save_fpu)
 2:	SAVE_32FPVSRS(0, R4, R6)
	mffs	fr0
	stfd	fr0,FPSTATE_FPSCR(r6)
+	REST_1FPVSR(0, R4, R6)
	blr

 /*

@@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
+	lvx	v0, 0, r3
	blr
 EXPORT_SYMBOL(store_vr_state)

@@ -102,6 +103,7 @@ _GLOBAL(save_altivec)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
+	lvx	v0,0,r7
	blr

 #ifdef CONFIG_VSX

@@ -112,7 +112,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
-		page = virt_to_page(pmd_val(*pmd));
+		page = phys_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
 }

@@ -130,8 +130,8 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
-			page = virt_to_page(pud_val(*pud));
-			for (i = 0; i < 3; i++)
+			page = phys_to_page(pud_val(*pud));
+			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);

@@ -151,8 +151,8 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
-			page = virt_to_page(p4d_val(*p4d));
-			for (i = 0; i < 3; i++)
+			page = phys_to_page(p4d_val(*p4d));
+			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);

@@ -173,8 +173,8 @@ static void mark_kernel_pgd(void)
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
-			page = virt_to_page(pgd_val(*pgd));
-			for (i = 0; i < 3; i++)
+			page = phys_to_page(pgd_val(*pgd));
+			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);

@@ -448,6 +448,13 @@ static const struct dmi_system_id asus_laptop[] = {
			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
		},
	},
+	{
+		/* Asus ExpertBook B1402CVA */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+		},
+	},
	{
		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
		.matches = {

@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
	if (pnp_port_valid(idev, 1)) {
		ctl_addr = devm_ioport_map(&idev->dev,
					   pnp_port_start(idev, 1), 1);
+		if (!ctl_addr)
+			return -ENOMEM;
+
		ap->ioaddr.altstatus_addr = ctl_addr;
		ap->ioaddr.ctl_addr = ctl_addr;
		ap->ops = &isapnp_port_ops;

@@ -1188,8 +1188,6 @@ static void __device_release_driver(struct device *dev, struct device *parent)
		else if (drv->remove)
			drv->remove(dev);

-		device_links_driver_cleanup(dev);
-
		devres_release_all(dev);
		arch_teardown_dma_ops(dev);
		dev->driver = NULL;

@@ -1199,6 +1197,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
		pm_runtime_reinit(dev);
		dev_pm_set_driver_flags(dev, 0);

+		device_links_driver_cleanup(dev);
+
		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		if (dev->bus)

@@ -210,6 +210,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
	.suspend = cpufreq_generic_suspend,
 };

+static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
+{
+	int ret = dev_pm_opp_disable(dev, freq);
+
+	if (ret < 0 && ret != -ENODEV)
+		dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
+}
+
 #define OCOTP_CFG3			0x440
 #define OCOTP_CFG3_SPEED_SHIFT		16
 #define OCOTP_CFG3_SPEED_1P2GHZ		0x3

@@ -245,17 +253,15 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
	val &= 0x3;

	if (val < OCOTP_CFG3_SPEED_996MHZ)
-		if (dev_pm_opp_disable(dev, 996000000))
-			dev_warn(dev, "failed to disable 996MHz OPP\n");
+		imx6x_disable_freq_in_opp(dev, 996000000);

	if (of_machine_is_compatible("fsl,imx6q") ||
	    of_machine_is_compatible("fsl,imx6qp")) {
		if (val != OCOTP_CFG3_SPEED_852MHZ)
-			if (dev_pm_opp_disable(dev, 852000000))
-				dev_warn(dev, "failed to disable 852MHz OPP\n");
+			imx6x_disable_freq_in_opp(dev, 852000000);

		if (val != OCOTP_CFG3_SPEED_1P2GHZ)
-			if (dev_pm_opp_disable(dev, 1200000000))
-				dev_warn(dev, "failed to disable 1.2GHz OPP\n");
+			imx6x_disable_freq_in_opp(dev, 1200000000);
	}
	iounmap(base);
 put_node:

@@ -308,20 +314,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
	val >>= OCOTP_CFG3_SPEED_SHIFT;
	val &= 0x3;

-	if (of_machine_is_compatible("fsl,imx6ul")) {
+	if (of_machine_is_compatible("fsl,imx6ul"))
		if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
-			if (dev_pm_opp_disable(dev, 696000000))
-				dev_warn(dev, "failed to disable 696MHz OPP\n");
-	}
+			imx6x_disable_freq_in_opp(dev, 696000000);

	if (of_machine_is_compatible("fsl,imx6ull")) {
-		if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
-			if (dev_pm_opp_disable(dev, 792000000))
-				dev_warn(dev, "failed to disable 792MHz OPP\n");
+		if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
+			imx6x_disable_freq_in_opp(dev, 792000000);

		if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
-			if (dev_pm_opp_disable(dev, 900000000))
-				dev_warn(dev, "failed to disable 900MHz OPP\n");
+			imx6x_disable_freq_in_opp(dev, 900000000);
	}

	return ret;

@@ -719,14 +719,11 @@ static void create_units(struct fw_device *device)
				fw_unit_attributes,
				&unit->attribute_group);

-		if (device_register(&unit->device) < 0)
-			goto skip_unit;
-
		fw_device_get(device);
+		if (device_register(&unit->device) < 0) {
+			put_device(&unit->device);
+			continue;
+		}
-
-	skip_unit:
-		kfree(unit);
	}
 }

@@ -1636,13 +1636,13 @@ static const struct panel_desc innolux_g070y2_l01 = {
 static const struct display_timing innolux_g101ice_l01_timing = {
	.pixelclock = { 60400000, 71100000, 74700000 },
	.hactive = { 1280, 1280, 1280 },
-	.hfront_porch = { 41, 80, 100 },
-	.hback_porch = { 40, 79, 99 },
-	.hsync_len = { 1, 1, 1 },
+	.hfront_porch = { 30, 60, 70 },
+	.hback_porch = { 30, 60, 70 },
+	.hsync_len = { 22, 40, 60 },
	.vactive = { 800, 800, 800 },
-	.vfront_porch = { 5, 11, 14 },
-	.vback_porch = { 4, 11, 14 },
-	.vsync_len = { 1, 1, 1 },
+	.vfront_porch = { 3, 8, 14 },
+	.vback_porch = { 3, 8, 14 },
+	.vsync_len = { 4, 7, 12 },
+	.flags = DISPLAY_FLAGS_DE_HIGH,
 };

@@ -1659,6 +1659,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
		.disable = 200,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
 };

 static const struct display_timing innolux_g121i1_l01_timing = {
@@ -234,14 +234,22 @@ static inline void vop_cfg_done(struct vop *vop)
	VOP_REG_SET(vop, common, cfg_done, 1);
 }

-static bool has_rb_swapped(uint32_t format)
+static bool has_rb_swapped(uint32_t version, uint32_t format)
 {
	switch (format) {
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_BGR565:
		return true;
+	/*
+	 * full framework (IP version 3.x) only need rb swapped for RGB888 and
+	 * little framework (IP version 2.x) only need rb swapped for BGR888,
+	 * check for 3.x to also only rb swap BGR888 for unknown vop version
+	 */
+	case DRM_FORMAT_RGB888:
+		return VOP_MAJOR(version) == 3;
+	case DRM_FORMAT_BGR888:
+		return VOP_MAJOR(version) != 3;
	default:
		return false;
	}

@@ -886,7 +894,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
	VOP_WIN_SET(vop, win, dsp_st, dsp_st);

-	rb_swap = has_rb_swapped(fb->format->format);
+	rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
	VOP_WIN_SET(vop, win, rb_swap, rb_swap);

	/*

@@ -703,15 +703,22 @@ static void hid_close_report(struct hid_device *device)
  * Free a device structure, all reports, and all fields.
  */

-static void hid_device_release(struct device *dev)
+void hiddev_free(struct kref *ref)
 {
-	struct hid_device *hid = to_hid_device(dev);
+	struct hid_device *hid = container_of(ref, struct hid_device, ref);

	hid_close_report(hid);
	kfree(hid->dev_rdesc);
	kfree(hid);
 }

+static void hid_device_release(struct device *dev)
+{
+	struct hid_device *hid = to_hid_device(dev);
+
+	kref_put(&hid->ref, hiddev_free);
+}
+
 /*
  * Fetch a report description item from the data stream. We support long
  * items, though they are not used yet.

@@ -2441,10 +2448,12 @@ int hid_add_device(struct hid_device *hdev)
		hid_warn(hdev, "bad device descriptor (%d)\n", ret);
	}

+	hdev->id = atomic_inc_return(&id);
+
	/* XXX hack, any other cleaner solution after the driver core
	 * is converted to allow more than 20 bytes as the device name? */
	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
-		     hdev->vendor, hdev->product, atomic_inc_return(&id));
+		     hdev->vendor, hdev->product, hdev->id);

	hid_debug_register(hdev, dev_name(&hdev->dev));
	ret = device_add(&hdev->dev);

@@ -2487,6 +2496,7 @@ struct hid_device *hid_allocate_device(void)
	spin_lock_init(&hdev->debug_list_lock);
	sema_init(&hdev->driver_input_lock, 1);
	mutex_init(&hdev->ll_open_lock);
+	kref_init(&hdev->ref);

	return hdev;
 }

@@ -1082,6 +1082,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
		goto out;
	}
	list->hdev = (struct hid_device *) inode->i_private;
+	kref_get(&list->hdev->ref);
	file->private_data = list;
	mutex_init(&list->read_mutex);

@@ -1174,6 +1175,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
	list_del(&list->node);
	spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
	kfifo_free(&list->hid_debug_fifo);
+
+	kref_put(&list->hdev->ref, hiddev_free);
	kfree(list);

	return 0;

@@ -2945,6 +2945,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
	u64 header;
	enum i40iw_page_size page_size;

+	if (!info->total_len && !info->all_memory)
+		return -EINVAL;
+
	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);

@@ -3003,6 +3006,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
	u8 addr_type;
	enum i40iw_page_size page_size;

+	if (!info->total_len && !info->all_memory)
+		return -EINVAL;
+
	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))

@@ -779,6 +779,7 @@ struct i40iw_allocate_stag_info {
	bool use_hmc_fcn_index;
	u8 hmc_fcn_index;
	bool use_pf_rid;
+	bool all_memory;
 };

 struct i40iw_reg_ns_stag_info {

@@ -797,6 +798,7 @@ struct i40iw_reg_ns_stag_info {
	bool use_hmc_fcn_index;
	u8 hmc_fcn_index;
	bool use_pf_rid;
+	bool all_memory;
 };

 struct i40iw_fast_reg_stag_info {

@@ -1500,7 +1500,8 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
 static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
 {
	struct i40iw_allocate_stag_info *info;
-	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+	struct ib_pd *pd = iwmr->ibmr.pd;
+	struct i40iw_pd *iwpd = to_iwpd(pd);
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;

@@ -1517,6 +1518,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->total_len = iwmr->length;
+	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
	info->remote_access = true;
	cqp_info->cqp_cmd = OP_ALLOC_STAG;
	cqp_info->post_sq = 1;

@@ -1570,6 +1572,8 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
	iwmr->type = IW_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
+	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+	iwmr->length = max_num_sg * PAGE_SIZE;
	mutex_lock(&iwdev->pbl_mutex);
	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
	mutex_unlock(&iwdev->pbl_mutex);

@@ -1666,7 +1670,8 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
 {
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_reg_ns_stag_info *stag_info;
-	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+	struct ib_pd *pd = iwmr->ibmr.pd;
+	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	enum i40iw_status_code status;
	int err = 0;

@@ -1686,6 +1691,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
	stag_info->total_len = iwmr->length;
	stag_info->access_rights = access;
	stag_info->pd_id = iwpd->sc_pd.pd_id;
+	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
	stag_info->page_size = iwmr->page_size;

@@ -119,6 +119,7 @@ static const struct xpad_device {
	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
	{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
	{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
+	{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
	{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
	{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
	{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },

@@ -431,6 +432,7 @@ static const struct usb_device_id xpad_table[] = {
	XPAD_XBOX360_VENDOR(0x0079),	/* GPD Win 2 Controller */
	XPAD_XBOX360_VENDOR(0x03eb),	/* Wooting Keyboards (Legacy) */
	XPAD_XBOX360_VENDOR(0x044f),	/* Thrustmaster X-Box 360 controllers */
+	XPAD_XBOXONE_VENDOR(0x03f0),	/* HP HyperX Xbox One Controllers */
	XPAD_XBOX360_VENDOR(0x045e),	/* Microsoft X-Box 360 controllers */
	XPAD_XBOXONE_VENDOR(0x045e),	/* Microsoft X-Box One controllers */
	XPAD_XBOX360_VENDOR(0x046d),	/* Logitech X-Box 360 style controllers */

@@ -1391,7 +1391,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

-	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
+	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

@@ -1538,7 +1538,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
	bch_keylist_free(&keylist);

	for (i = 0; i < nodes; i++)
-		if (!IS_ERR(new_nodes[i])) {
+		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}

@@ -1555,6 +1555,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);
+	if (IS_ERR(n))
+		return 0;

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {

@@ -1057,7 +1057,7 @@ SHOW(__bch_cache)
			sum += INITIAL_PRIO - cached[i];

		if (n)
-			do_div(sum, n);
+			sum = div64_u64(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /

@@ -30,7 +30,7 @@ struct delay_c {
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
-	atomic_t may_delay;
+	bool may_delay;

	struct delay_class read;
	struct delay_class write;

@@ -191,7 +191,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
	INIT_LIST_HEAD(&dc->delayed_bios);
	mutex_init(&dc->timer_lock);
-	atomic_set(&dc->may_delay, 1);
+	dc->may_delay = true;
	dc->argc = argc;

	ret = delay_class_ctr(ti, &dc->read, argv);

@@ -245,7 +245,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

-	if (!c->delay || !atomic_read(&dc->may_delay))
+	if (!c->delay)
		return DM_MAPIO_REMAPPED;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

@@ -254,6 +254,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

	mutex_lock(&delayed_bios_lock);
+	if (unlikely(!dc->may_delay)) {
+		mutex_unlock(&delayed_bios_lock);
+		return DM_MAPIO_REMAPPED;
+	}
	c->ops++;
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);

@@ -267,7 +271,10 @@ static void delay_presuspend(struct dm_target *ti)
 {
	struct delay_c *dc = ti->private;

-	atomic_set(&dc->may_delay, 0);
+	mutex_lock(&delayed_bios_lock);
+	dc->may_delay = false;
+	mutex_unlock(&delayed_bios_lock);

	del_timer_sync(&dc->delay_timer);
	flush_bios(flush_delayed_bios(dc, 1));
 }

@@ -276,7 +283,7 @@ static void delay_resume(struct dm_target *ti)
 {
	struct delay_c *dc = ti->private;

-	atomic_set(&dc->may_delay, 1);
+	dc->may_delay = true;
 }

 static int delay_map(struct dm_target *ti, struct bio *bio)

@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
  */
 static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
 {
-	return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+	return (struct dm_verity_fec_io *)
+		((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
 }

 /*

@@ -579,7 +579,9 @@ static void verity_end_io(struct bio *bio)
	struct dm_verity_io *io = bio->bi_private;

	if (bio->bi_status &&
-	    (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
+	    (!verity_fec_is_enabled(io->v) ||
+	     verity_is_system_shutting_down() ||
+	     (bio->bi_opf & REQ_RAHEAD))) {
		verity_finish_io(io, bio->bi_status);
		return;
	}

@@ -110,12 +110,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
	return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
 }

-static inline u8 *verity_io_digest_end(struct dm_verity *v,
-				       struct dm_verity_io *io)
-{
-	return verity_io_want_digest(v, io) + v->digest_size;
-}
-
 extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
			       struct bvec_iter *iter,
			       int (*process)(struct dm_verity *v,

@@ -1471,6 +1471,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
+	} else if (mq->in_recovery) {
+		blk_mq_requeue_request(req, true);
	} else {
		blk_mq_end_request(req, BLK_STS_OK);
	}

@@ -564,22 +564,25 @@ int mmc_cqe_recovery(struct mmc_host *host)
	host->cqe_ops->cqe_recovery_start(host);

	memset(&cmd, 0, sizeof(cmd));
-	cmd.opcode = MMC_STOP_TRANSMISSION,
-	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+	cmd.opcode = MMC_STOP_TRANSMISSION;
+	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
-	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
-	mmc_wait_for_cmd(host, &cmd, 0);
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+	mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MMC_CMDQ_TASK_MGMT;
	cmd.arg = 1; /* Discard entire queue */
	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
-	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
-	err = mmc_wait_for_cmd(host, &cmd, 0);
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);

	host->cqe_ops->cqe_recovery_finish(host);

+	if (err)
+		err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
	mmc_retune_release(host);

	return err;

@@ -884,7 +884,7 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
-		pr_debug("%s: cqhci: Failed to clear tasks\n",
+		pr_warn("%s: cqhci: Failed to clear tasks\n",
			mmc_hostname(mmc));

	return ret;

@@ -918,7 +918,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
	ret = cqhci_halted(cq_host);

	if (!ret)
-		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
 }

@@ -926,10 +926,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 /*
  * After halting we expect to be able to use the command line. We interpret the
  * failure to halt to mean the data lines might still be in use (and the upper
- * layers will need to send a STOP command), so we set the timeout based on a
- * generous command timeout.
+ * layers will need to send a STOP command), however failing to halt complicates
+ * the recovery, so set a timeout that would reasonably allow I/O to complete.
  */
-#define CQHCI_START_HALT_TIMEOUT	5
+#define CQHCI_START_HALT_TIMEOUT	500

 static void cqhci_recovery_start(struct mmc_host *mmc)
 {

@@ -1017,28 +1017,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

-	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-		ok = false;
-
	/*
	 * The specification contradicts itself, by saying that tasks cannot be
	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
	 * be disabled/re-enabled, but not to disable before clearing tasks.
	 * Have a go anyway.
	 */
-	if (!ok) {
-		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+		ok = false;

+	/* Disable to make sure tasks really are cleared */
	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-		/* Be sure that there are no tasks */
-		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
-		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-			ok = false;
-		WARN_ON(!ok);
-	}
+
+	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+	if (!ok)
+		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);

	cqhci_recover_mrqs(cq_host);
@ -420,8 +420,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
|
||||
extra_size = 0;
|
||||
|
||||
/* Protection Register info */
|
||||
if (extp->NumProtectionFields) {
|
||||
struct cfi_intelext_otpinfo *otp =
|
||||
(struct cfi_intelext_otpinfo *)&extp->extra[0];
|
||||
|
||||
extra_size += (extp->NumProtectionFields - 1) *
|
||||
sizeof(struct cfi_intelext_otpinfo);
|
||||
|
||||
if (extp_size >= sizeof(*extp) + extra_size) {
|
||||
int i;
|
||||
|
||||
/* Do some byteswapping if necessary */
|
||||
for (i = 0; i < extp->NumProtectionFields - 1; i++) {
|
||||
otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
|
||||
otp->FactGroups = le16_to_cpu(otp->FactGroups);
|
||||
otp->UserGroups = le16_to_cpu(otp->UserGroups);
|
||||
otp++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (extp->MinorVersion >= '1') {
|
||||
@ -695,12 +712,14 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
|
||||
*/
|
||||
if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
|
||||
&& extp->FeatureSupport & (1 << 9)) {
|
||||
int offs = 0;
|
||||
struct cfi_private *newcfi;
|
||||
struct flchip *chip;
|
||||
struct flchip_shared *shared;
|
||||
int offs, numregions, numparts, partshift, numvirtchips, i, j;
|
||||
int numregions, numparts, partshift, numvirtchips, i, j;
|
||||
|
||||
/* Protection Register info */
|
||||
if (extp->NumProtectionFields)
|
||||
offs = (extp->NumProtectionFields - 1) *
|
||||
sizeof(struct cfi_intelext_otpinfo);
|
||||
|
||||
|
@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
|
||||
static void xgbe_service_timer(struct timer_list *t)
|
||||
{
|
||||
struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
|
||||
struct xgbe_channel *channel;
|
||||
unsigned int i;
|
||||
|
||||
queue_work(pdata->dev_workqueue, &pdata->service_work);
|
||||
|
||||
mod_timer(&pdata->service_timer, jiffies + HZ);
|
||||
|
||||
if (!pdata->tx_usecs)
|
||||
return;
|
||||
|
||||
for (i = 0; i < pdata->channel_count; i++) {
|
||||
channel = pdata->channel[i];
|
||||
if (!channel->tx_ring || channel->tx_timer_active)
|
||||
break;
|
||||
channel->tx_timer_active = 1;
|
||||
mod_timer(&channel->tx_timer,
|
||||
jiffies + usecs_to_jiffies(pdata->tx_usecs));
|
||||
}
|
||||
}
|
||||
|
||||
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
|
||||
|
@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
|
||||
|
||||
cmd->base.phy_address = pdata->phy.address;
|
||||
|
||||
cmd->base.autoneg = pdata->phy.autoneg;
|
||||
if (netif_carrier_ok(netdev)) {
|
||||
cmd->base.speed = pdata->phy.speed;
|
||||
cmd->base.duplex = pdata->phy.duplex;
|
||||
} else {
|
||||
cmd->base.speed = SPEED_UNKNOWN;
|
||||
cmd->base.duplex = DUPLEX_UNKNOWN;
|
||||
}
|
||||
|
||||
cmd->base.autoneg = pdata->phy.autoneg;
|
||||
cmd->base.port = PORT_NONE;
|
||||
|
||||
XGBE_LM_COPY(cmd, supported, lks, supported);
|
||||
|
@ -1178,6 +1178,18 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
|
||||
if (pdata->phy.duplex != DUPLEX_FULL)
|
||||
return -EINVAL;
|
||||
|
||||
/* Force the mode change for SFI in Fixed PHY config.
|
||||
* Fixed PHY configs needs PLL to be enabled while doing mode set.
|
||||
* When the SFP module isn't connected during boot, driver assumes
|
||||
* AN is ON and attempts autonegotiation. However, if the connected
|
||||
* SFP comes up in Fixed PHY config, the link will not come up as
|
||||
* PLL isn't enabled while the initial mode set command is issued.
|
||||
* So, force the mode change for SFI in Fixed PHY configuration to
|
||||
* fix link issues.
|
||||
*/
|
||||
if (mode == XGBE_MODE_SFI)
|
||||
xgbe_change_mode(pdata, mode);
|
||||
else
|
||||
xgbe_set_mode(pdata, mode);
|
||||
|
||||
return 0;
|
||||
|
@ -1380,13 +1380,13 @@ static int ravb_open(struct net_device *ndev)
|
||||
if (priv->chip_id == RCAR_GEN2)
|
||||
ravb_ptp_init(ndev, priv->pdev);
|
||||
|
||||
netif_tx_start_all_queues(ndev);
|
||||
|
||||
/* PHY control start */
|
||||
error = ravb_phy_start(ndev);
|
||||
if (error)
|
||||
goto out_ptp_stop;
|
||||
|
||||
netif_tx_start_all_queues(ndev);
|
||||
|
||||
return 0;
|
||||
|
||||
out_ptp_stop:
|
||||
@ -1435,6 +1435,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
|
||||
struct net_device *ndev = priv->ndev;
|
||||
int error;
|
||||
|
||||
if (!rtnl_trylock()) {
|
||||
usleep_range(1000, 2000);
|
||||
schedule_work(&priv->work);
|
||||
return;
|
||||
}
|
||||
|
||||
netif_tx_stop_all_queues(ndev);
|
||||
|
||||
/* Stop PTP Clock driver */
|
||||
@ -1467,7 +1473,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
|
||||
*/
|
||||
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
|
||||
__func__, error);
|
||||
return;
|
||||
goto out_unlock;
|
||||
}
|
||||
ravb_emac_init(ndev);
|
||||
|
||||
@ -1477,6 +1483,9 @@ static void ravb_tx_timeout_work(struct work_struct *work)
|
||||
ravb_ptp_init(ndev, priv->pdev);
|
||||
|
||||
netif_tx_start_all_queues(ndev);
|
||||
|
||||
out_unlock:
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
/* Packet transmit function for Ethernet AVB */
|
||||
@ -2030,7 +2039,9 @@ static int ravb_probe(struct platform_device *pdev)
|
||||
ndev->hw_features = NETIF_F_RXCSUM;
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
error = pm_runtime_resume_and_get(&pdev->dev);
|
||||
if (error < 0)
|
||||
goto out_rpm_disable;
|
||||
|
||||
/* The Ether-specific entries in the device structure. */
|
||||
ndev->base_addr = res->start;
|
||||
@ -2201,6 +2212,7 @@ static int ravb_probe(struct platform_device *pdev)
|
||||
free_netdev(ndev);
|
||||
|
||||
pm_runtime_put(&pdev->dev);
|
||||
out_rpm_disable:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
return error;
|
||||
}
|
||||
|
@ -170,8 +170,10 @@
|
||||
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
|
||||
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
|
||||
|
||||
#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
|
||||
#define MMC_XGMAC_TX_FPE_FRAG 0x208
|
||||
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
|
||||
#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
|
||||
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
|
||||
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
|
||||
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
|
||||
@ -336,6 +338,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
|
||||
{
|
||||
writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
|
||||
writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
|
||||
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
|
||||
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
|
||||
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
|
||||
}
|
||||
|
||||
|
@ -656,7 +656,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
|
||||
/* Tx Full Checksum Offload Enabled */
|
||||
cur_p->app0 |= 2;
|
||||
} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
|
||||
} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
|
||||
csum_start_off = skb_transport_offset(skb);
|
||||
csum_index_off = csum_start_off + skb->csum_offset;
|
||||
/* Tx Partial Checksum Offload Enabled */
|
||||
|
@ -2060,9 +2060,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
|
||||
goto upper_link_failed;
|
||||
}
|
||||
|
||||
/* set slave flag before open to prevent IPv6 addrconf */
|
||||
vf_netdev->flags |= IFF_SLAVE;
|
||||
|
||||
schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
|
||||
|
||||
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
|
||||
@ -2160,23 +2157,38 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
|
||||
return hv_get_drvdata(ndev_ctx->device_ctx);
|
||||
}
|
||||
|
||||
/* Fallback path to check synthetic vf with
|
||||
* help of mac addr
|
||||
/* Fallback path to check synthetic vf with help of mac addr.
|
||||
* Because this function can be called before vf_netdev is
|
||||
* initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
|
||||
* from dev_addr, also try to match to its dev_addr.
|
||||
* Note: On Hyper-V and Azure, it's not possible to set a MAC address
|
||||
* on a VF that matches to the MAC of a unrelated NETVSC device.
|
||||
*/
|
||||
list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
|
||||
ndev = hv_get_drvdata(ndev_ctx->device_ctx);
|
||||
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
|
||||
netdev_notice(vf_netdev,
|
||||
"falling back to mac addr based matching\n");
|
||||
if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
|
||||
ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
|
||||
return ndev;
|
||||
}
|
||||
}
|
||||
|
||||
netdev_notice(vf_netdev,
|
||||
"no netdev found for vf serial:%u\n", serial);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int netvsc_prepare_bonding(struct net_device *vf_netdev)
|
||||
{
|
||||
struct net_device *ndev;
|
||||
|
||||
ndev = get_netvsc_byslot(vf_netdev);
|
||||
if (!ndev)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/* set slave flag before open to prevent IPv6 addrconf */
|
||||
vf_netdev->flags |= IFF_SLAVE;
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static int netvsc_register_vf(struct net_device *vf_netdev)
|
||||
{
|
||||
struct net_device_context *net_device_ctx;
|
||||
@ -2495,6 +2507,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
|
||||
return NOTIFY_DONE;
|
||||
|
||||
switch (event) {
|
||||
case NETDEV_POST_INIT:
|
||||
return netvsc_prepare_bonding(event_dev);
|
||||
case NETDEV_REGISTER:
|
||||
return netvsc_register_vf(event_dev);
|
||||
case NETDEV_UNREGISTER:
|
||||
@ -2528,12 +2542,17 @@ static int __init netvsc_drv_init(void)
|
||||
}
|
||||
netvsc_ring_bytes = ring_size * PAGE_SIZE;
|
||||
|
||||
register_netdevice_notifier(&netvsc_netdev_notifier);
|
||||
|
||||
ret = vmbus_driver_register(&netvsc_drv);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err_vmbus_reg;
|
||||
|
||||
register_netdevice_notifier(&netvsc_netdev_notifier);
|
||||
return 0;
|
||||
|
||||
err_vmbus_reg:
|
||||
unregister_netdevice_notifier(&netvsc_netdev_notifier);
|
||||
return ret;
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -1594,11 +1594,11 @@ static int ax88179_reset(struct usbnet *dev)
|
||||
|
||||
*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
|
||||
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
|
||||
msleep(200);
|
||||
msleep(500);
|
||||
|
||||
*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
|
||||
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
|
||||
msleep(100);
|
||||
msleep(200);
|
||||
|
||||
/* Ethernet PHY Auto Detach*/
|
||||
ax88179_auto_detach(dev, 0);
|
||||
|
@ -1107,19 +1107,19 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
|
||||
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
|
||||
}
|
||||
|
||||
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
|
||||
struct nvmet_req *req, struct nvmet_ctrl **ret)
|
||||
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
|
||||
const char *hostnqn, u16 cntlid,
|
||||
struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_ctrl *ctrl = NULL;
|
||||
struct nvmet_subsys *subsys;
|
||||
struct nvmet_ctrl *ctrl;
|
||||
u16 status = 0;
|
||||
|
||||
subsys = nvmet_find_get_subsys(req->port, subsysnqn);
|
||||
if (!subsys) {
|
||||
pr_warn("connect request for invalid subsystem %s!\n",
|
||||
subsysnqn);
|
||||
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
|
||||
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
@ -1132,20 +1132,21 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
|
||||
if (!kref_get_unless_zero(&ctrl->ref))
|
||||
continue;
|
||||
|
||||
*ret = ctrl;
|
||||
goto out;
|
||||
/* ctrl found */
|
||||
goto found;
|
||||
}
|
||||
}
|
||||
|
||||
ctrl = NULL; /* ctrl not found */
|
||||
pr_warn("could not find controller %d for subsys %s / host %s\n",
|
||||
cntlid, subsysnqn, hostnqn);
|
||||
req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
|
||||
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
|
||||
|
||||
out:
|
||||
found:
|
||||
mutex_unlock(&subsys->lock);
|
||||
nvmet_subsys_put(subsys);
|
||||
return status;
|
||||
out:
|
||||
return ctrl;
|
||||
}
|
||||
|
||||
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
|
||||
|
@ -182,6 +182,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
|
||||
goto out;
|
||||
}
|
||||
|
||||
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
|
||||
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
|
||||
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
|
||||
le32_to_cpu(c->kato), &ctrl);
|
||||
if (status) {
|
||||
@ -213,7 +215,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmf_connect_command *c = &req->cmd->connect;
|
||||
struct nvmf_connect_data *d;
|
||||
struct nvmet_ctrl *ctrl = NULL;
|
||||
struct nvmet_ctrl *ctrl;
|
||||
u16 qid = le16_to_cpu(c->qid);
|
||||
u16 status = 0;
|
||||
|
||||
@ -237,11 +239,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
|
||||
goto out;
|
||||
}
|
||||
|
||||
status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
|
||||
le16_to_cpu(d->cntlid),
|
||||
req, &ctrl);
|
||||
if (status)
|
||||
d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
|
||||
d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
|
||||
ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
|
||||
le16_to_cpu(d->cntlid), req);
|
||||
if (!ctrl) {
|
||||
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (unlikely(qid > ctrl->subsys->max_qid)) {
|
||||
pr_warn("invalid queue id (%d)\n", qid);
|
||||
|
@ -394,8 +394,9 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
|
||||
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
|
||||
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
||||
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
|
||||
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
|
||||
struct nvmet_req *req, struct nvmet_ctrl **ret);
|
||||
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
|
||||
const char *hostnqn, u16 cntlid,
|
||||
struct nvmet_req *req);
|
||||
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
|
||||
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);
|
||||
|
||||
|
@ -861,7 +861,7 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
|
||||
return ks_pcie_handle_error_irq(ks_pcie);
|
||||
}
|
||||
|
||||
static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
|
||||
static int ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
struct dw_pcie *pci = ks_pcie->pci;
|
||||
@ -992,7 +992,7 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
|
||||
.get_features = &ks_pcie_am654_get_features,
|
||||
};
|
||||
|
||||
static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
|
||||
static int ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
|
||||
struct platform_device *pdev)
|
||||
{
|
||||
int ret;
|
||||
|
@ -1238,17 +1238,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
|
||||
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
|
||||
{
|
||||
struct pinctrl_setting *setting, *setting2;
|
||||
struct pinctrl_state *old_state = p->state;
|
||||
struct pinctrl_state *old_state = READ_ONCE(p->state);
|
||||
int ret;
|
||||
|
||||
if (p->state) {
|
||||
if (old_state) {
|
||||
/*
|
||||
* For each pinmux setting in the old state, forget SW's record
|
||||
* of mux owner for that pingroup. Any pingroups which are
|
||||
* still owned by the new state will be re-acquired by the call
|
||||
* to pinmux_enable_setting() in the loop below.
|
||||
*/
|
||||
list_for_each_entry(setting, &p->state->settings, node) {
|
||||
list_for_each_entry(setting, &old_state->settings, node) {
|
||||
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
|
||||
continue;
|
||||
pinmux_disable_setting(setting);
|
||||
|
@ -737,18 +737,20 @@ static void dasd_profile_start(struct dasd_block *block,
|
||||
* we count each request only once.
|
||||
*/
|
||||
device = cqr->startdev;
|
||||
if (device->profile.data) {
|
||||
if (!device->profile.data)
|
||||
return;
|
||||
|
||||
spin_lock(get_ccwdev_lock(device->cdev));
|
||||
counter = 1; /* request is not yet queued on the start device */
|
||||
list_for_each(l, &device->ccw_queue)
|
||||
if (++counter >= 31)
|
||||
break;
|
||||
}
|
||||
spin_unlock(get_ccwdev_lock(device->cdev));
|
||||
|
||||
spin_lock(&device->profile.lock);
|
||||
if (device->profile.data) {
|
||||
device->profile.data->dasd_io_nr_req[counter]++;
|
||||
if (rq_data_dir(req) == READ)
|
||||
device->profile.data->dasd_read_nr_req[counter]++;
|
||||
}
|
||||
spin_unlock(&device->profile.lock);
|
||||
}
|
||||
|
||||
|
@ -596,9 +596,6 @@ typedef struct srb {
|
||||
struct fc_port *fcport;
|
||||
struct scsi_qla_host *vha;
|
||||
unsigned int start_timer:1;
|
||||
unsigned int abort:1;
|
||||
unsigned int aborted:1;
|
||||
unsigned int completed:1;
|
||||
|
||||
uint32_t handle;
|
||||
uint16_t flags;
|
||||
|
@ -2479,11 +2479,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
|
||||
return;
|
||||
}
|
||||
|
||||
if (sp->abort)
|
||||
sp->aborted = 1;
|
||||
else
|
||||
sp->completed = 1;
|
||||
|
||||
if (sp->cmd_type != TYPE_SRB) {
|
||||
req->outstanding_cmds[handle] = NULL;
|
||||
ql_dbg(ql_dbg_io, vha, 0x3015,
|
||||
|
@ -814,7 +814,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
uint16_t hwq;
|
||||
struct qla_qpair *qpair = NULL;
|
||||
|
||||
tag = blk_mq_unique_tag(cmd->request);
|
||||
tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
|
||||
hwq = blk_mq_unique_tag_to_hwq(tag);
|
||||
qpair = ha->queue_pair_map[hwq];
|
||||
|
||||
@ -1243,17 +1243,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
|
||||
return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
|
||||
|
||||
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
|
||||
if (sp->completed) {
|
||||
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
if (sp->abort || sp->aborted) {
|
||||
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
sp->abort = 1;
|
||||
sp->comp = ∁
|
||||
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
|
||||
|
||||
@ -1661,6 +1650,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
|
||||
return QLA_SUCCESS;
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller must ensure that no completion interrupts will happen
|
||||
* while this function is in progress.
|
||||
*/
|
||||
static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
|
||||
unsigned long *flags)
|
||||
__releases(qp->qp_lock_ptr)
|
||||
@ -1669,6 +1662,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
|
||||
DECLARE_COMPLETION_ONSTACK(comp);
|
||||
scsi_qla_host_t *vha = qp->vha;
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
|
||||
int rval;
|
||||
bool ret_cmd;
|
||||
uint32_t ratov_j;
|
||||
@ -1688,7 +1682,6 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
|
||||
}
|
||||
|
||||
sp->comp = ∁
|
||||
sp->abort = 1;
|
||||
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
|
||||
|
||||
rval = ha->isp_ops->abort_command(sp);
|
||||
@ -1712,13 +1705,25 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
|
||||
}
|
||||
|
||||
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
|
||||
if (ret_cmd && (!sp->completed || !sp->aborted))
|
||||
switch (sp->type) {
|
||||
case SRB_SCSI_CMD:
|
||||
if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
|
||||
sp->done(sp, res);
|
||||
break;
|
||||
default:
|
||||
if (ret_cmd)
|
||||
sp->done(sp, res);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
sp->done(sp, res);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller must ensure that no completion interrupts will happen
|
||||
* while this function is in progress.
|
||||
*/
|
||||
static void
|
||||
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
|
||||
{
|
||||
@ -1776,6 +1781,10 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
|
||||
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* The caller must ensure that no completion interrupts will happen
|
||||
* while this function is in progress.
|
||||
*/
|
||||
void
|
||||
qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
|
||||
{
|
||||
|
@@ -2045,15 +2045,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 {
 	struct dwc2_qtd *qtd;
 	struct dwc2_host_chan *chan;
-	u32 hcint, hcintmsk;
+	u32 hcint, hcintraw, hcintmsk;
 
 	chan = hsotg->hc_ptr_array[chnum];
 
-	hcint = dwc2_readl(hsotg, HCINT(chnum));
+	hcintraw = dwc2_readl(hsotg, HCINT(chnum));
 	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+	hcint = hcintraw & hcintmsk;
+	dwc2_writel(hsotg, hcint, HCINT(chnum));
 
 	if (!chan) {
 		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
-		dwc2_writel(hsotg, hcint, HCINT(chnum));
 		return;
 	}
 
@@ -2062,11 +2064,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 			 chnum);
 		dev_vdbg(hsotg->dev,
 			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
-			 hcint, hcintmsk, hcint & hcintmsk);
+			 hcintraw, hcintmsk, hcint);
 	}
 
-	dwc2_writel(hsotg, hcint, HCINT(chnum));
-
 	/*
 	 * If we got an interrupt after someone called
 	 * dwc2_hcd_endpoint_disable() we don't want to crash below
@@ -2076,8 +2076,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 		return;
 	}
 
-	chan->hcint = hcint;
-	hcint &= hcintmsk;
+	chan->hcint = hcintraw;
 
 	/*
 	 * If the channel was halted due to a dequeue, the qtd list might
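
The dwc2 change above follows a common interrupt-handling pattern: read the raw status, AND it with the enabled mask, and acknowledge only the masked bits, so that status bits the driver is not currently interested in stay latched instead of being silently cleared. A minimal userspace C sketch of the pattern, with the write-1-to-clear register mocked by plain variables (names are illustrative, not from the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* Mocked interrupt-status and mask "registers". */
	static uint32_t reg_status = 0x0000000f; /* raw pending bits   */
	static uint32_t reg_mask   = 0x00000003; /* bits we care about */

	int main(void)
	{
		uint32_t raw = reg_status;         /* read raw status        */
		uint32_t pending = raw & reg_mask; /* keep only enabled bits */

		/* Ack only what we will handle; the masked-out bits
		 * (0x0c here) remain pending for later. */
		reg_status &= ~pending;

		printf("handling 0x%08x, still latched 0x%08x\n",
		       pending, reg_status);
		return 0;
	}
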
@@ -1546,6 +1546,8 @@ static int dwc3_probe(struct platform_device *pdev)
 
 	pm_runtime_put(dev);
 
+	dma_set_max_seg_size(dev, UINT_MAX);
+
 	return 0;
 
 err5:
@@ -361,7 +361,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
 					qcom_dwc3_resume_irq,
-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					IRQF_ONESHOT,
 					"qcom_dwc3 HS", qcom);
 		if (ret) {
 			dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
@@ -376,7 +376,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
 					qcom_dwc3_resume_irq,
-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					IRQF_ONESHOT,
 					"qcom_dwc3 DP_HS", qcom);
 		if (ret) {
 			dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
@@ -391,7 +391,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
 					qcom_dwc3_resume_irq,
-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					IRQF_ONESHOT,
 					"qcom_dwc3 DM_HS", qcom);
 		if (ret) {
 			dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
@@ -406,7 +406,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
 		irq_set_status_flags(irq, IRQ_NOAUTOEN);
 		ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
 					qcom_dwc3_resume_irq,
-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					IRQF_ONESHOT,
 					"qcom_dwc3 SS", qcom);
 		if (ret) {
 			dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
@@ -553,6 +553,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
 	if (!qcom->dwc3) {
 		ret = -ENODEV;
 		dev_err(dev, "failed to get dwc3 platform device\n");
+		of_platform_depopulate(dev);
 	}
 
 node_put:
@@ -666,7 +667,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
 
 	if (ret) {
 		dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
-		goto depopulate;
+		goto clk_disable;
 	}
 
 	qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
@@ -692,7 +693,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
 	if (np)
 		of_platform_depopulate(&pdev->dev);
 	else
-		platform_device_put(pdev);
+		platform_device_del(qcom->dwc3);
+	platform_device_put(qcom->dwc3);
 clk_disable:
 	for (i = qcom->num_clocks - 1; i >= 0; i--) {
 		clk_disable_unprepare(qcom->clks[i]);
@@ -714,7 +716,8 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
 	if (np)
 		of_platform_depopulate(&pdev->dev);
 	else
-		platform_device_put(pdev);
+		platform_device_del(qcom->dwc3);
+	platform_device_put(qcom->dwc3);
 
 	for (i = qcom->num_clocks - 1; i >= 0; i--) {
 		clk_disable_unprepare(qcom->clks[i]);
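
Two patterns are at work in the dwc3-qcom hunks: dropping IRQF_TRIGGER_HIGH lets the trigger type come from the firmware description (DT/ACPI) instead of being hard-coded, and the error/remove paths now pair platform_device_del() with platform_device_put() for the child device they added. A hedged, minimal sketch of the second rule, not the dwc3-qcom code itself ("demo-child" is a made-up name; error handling trimmed):

	#include <linux/platform_device.h>

	/* Sketch: create a child platform device, then tear it down correctly. */
	static int demo_create_child(struct device *parent)
	{
		struct platform_device *child;
		int ret;

		child = platform_device_alloc("demo-child", PLATFORM_DEVID_NONE);
		if (!child)
			return -ENOMEM;
		child->dev.parent = parent;

		ret = platform_device_add(child);
		if (ret) {
			/* Never add()ed: a bare put() frees it. */
			platform_device_put(child);
			return ret;
		}

		/* ... later, on error or removal: */
		platform_device_del(child);	/* undo platform_device_add() */
		platform_device_put(child);	/* drop the last reference    */
		return 0;
	}

A put() without the del() leaves the device registered; a del() without the put() leaks the final reference, which is what the probe and remove paths above are fixing.
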
@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
 #define DELL_PRODUCT_5829E_ESIM			0x81e4
 #define DELL_PRODUCT_5829E			0x81e6
 
-#define DELL_PRODUCT_FM101R			0x8213
-#define DELL_PRODUCT_FM101R_ESIM		0x8215
+#define DELL_PRODUCT_FM101R_ESIM		0x8213
+#define DELL_PRODUCT_FM101R			0x8215
 
 #define KYOCERA_VENDOR_ID			0x0c88
 #define KYOCERA_PRODUCT_KPC650			0x17da
@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
 #define UNISOC_VENDOR_ID			0x1782
 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
 #define TOZED_PRODUCT_LT70C			0x4055
+/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+#define LUAT_PRODUCT_AIR720U			0x4e00
 
 /* Device flags */

@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
 	  .driver_info = RSVD(4) },
-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
 	{ USB_DEVICE(0x1782, 0x4d10) },						/* Fibocom L610 (AT mode) */
 	{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) },			/* Fibocom L610 (ECM/RNDIS mode) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) },	/* Fibocom L716-EU (ECM/RNDIS mode) */
 	{ USB_DEVICE(0x2cb7, 0x0104),						/* Fibocom NL678 series */
 	  .driver_info = RSVD(4) | RSVD(5) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),			/* Fibocom NL678 series */
@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -237,7 +237,7 @@ struct sti_rom_font {
 	 u8 height;
 	 u8 font_type;		/* language type */
 	 u8 bytes_per_char;
-	u32 next_font;
+	s32 next_font;		/* note: signed int */
	 u8 underline_height;
	 u8 underline_pos;
	 u8 res008[2];
@@ -38,8 +38,8 @@ static int afs_probe_cell_name(struct dentry *dentry)
 
 	ret = dns_query(net->net, "afsdb", name, len, "srv=1",
 			NULL, NULL, false);
-	if (ret == -ENODATA)
-		ret = -EDESTADDRREQ;
+	if (ret == -ENODATA || ret == -ENOKEY)
+		ret = -ENOENT;
 	return ret;
 }

@@ -391,6 +391,8 @@ static int afs_validate_fc(struct fs_context *fc)
 			return PTR_ERR(volume);
 
 		ctx->volume = volume;
+		if (volume->type != AFSVL_RWVOL)
+			ctx->flock_mode = afs_flock_mode_local;
 	}
 
 	return 0;
@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
 	}
 
 	/* Status load is ordered after lookup counter load */
+	if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
+		pr_warn("No record of cell %s\n", cell->name);
+		vc->error = -ENOENT;
+		return false;
+	}
+
 	if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
 		vc->error = -EDESTADDRREQ;
 		return false;
@@ -276,6 +282,7 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
  */
 static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
 {
+	struct afs_cell *cell = vc->cell;
 	static int count;
 	int i;
 
@@ -285,6 +292,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
 
 	rcu_read_lock();
 	pr_notice("EDESTADDR occurred\n");
+	pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
+	pr_notice("DNS: src=%u st=%u lc=%x\n",
+		  cell->dns_source, cell->dns_status, cell->dns_lookup_count);
 	pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
 		  vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);

@@ -2829,6 +2829,7 @@ int open_ctree(struct super_block *sb,
 		goto fail_alloc;
 	}
 
+	btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
 	/*
 	 * Verify the type first, if that or the checksum value are
 	 * corrupted, we'll find out
@@ -7381,7 +7381,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
 	sctx->flags = arg->flags;
 
 	sctx->send_filp = fget(arg->send_fd);
-	if (!sctx->send_filp) {
+	if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
 		ret = -EBADF;
 		goto out;
 	}
@@ -291,7 +291,10 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 
 static void btrfs_put_super(struct super_block *sb)
 {
-	close_ctree(btrfs_sb(sb));
+	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+	btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
+	close_ctree(fs_info);
 }
 
 enum {
@@ -3090,15 +3090,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+		btrfs_crit(fs_info,
+			   "unable to find chunk map for logical %llu length %llu",
 			   logical, length);
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (em->start > logical || em->start + em->len < logical) {
+	if (em->start > logical || em->start + em->len <= logical) {
 		btrfs_crit(fs_info,
-			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
-			   logical, length, em->start, em->start + em->len);
+			   "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
+			   logical, logical + length, em->start, em->start + em->len);
 		free_extent_map(em);
 		return ERR_PTR(-EINVAL);
 	}
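
The btrfs off-by-one fix above is the usual half-open interval test: a chunk map covering [start, start + len) contains a logical address only when start <= logical && logical < start + len, so logical == start + len must now be rejected. A tiny self-contained C check of the boundary (values are illustrative only):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Does the half-open range [start, start + len) contain pos? */
	static bool range_contains(uint64_t start, uint64_t len, uint64_t pos)
	{
		return start <= pos && pos < start + len;
	}

	int main(void)
	{
		/* A map covering [4096, 8192) ... */
		assert(range_contains(4096, 4096, 4096));   /* first byte: inside */
		assert(range_contains(4096, 4096, 8191));   /* last byte: inside  */
		assert(!range_contains(4096, 4096, 8192));  /* one past the end:
		                                             * the old "<" test
		                                             * wrongly accepted it */
		return 0;
	}
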
@@ -1062,6 +1062,7 @@ const struct inode_operations cifs_file_inode_ops = {
 
 const struct inode_operations cifs_symlink_inode_ops = {
 	.get_link = cifs_get_link,
+	.setattr = cifs_setattr,
 	.permission = cifs_permission,
 	.listxattr = cifs_listxattr,
 };
@@ -144,14 +144,17 @@
 static struct kmem_cache *ext4_es_cachep;
 static struct kmem_cache *ext4_pending_cachep;
 
-static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+			      struct extent_status *prealloc);
 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
-			      ext4_lblk_t end, int *reserved);
+			      ext4_lblk_t end, int *reserved,
+			      struct extent_status *prealloc);
 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 		       struct ext4_inode_info *locked_ei);
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-			     ext4_lblk_t len);
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+			    ext4_lblk_t len,
+			    struct pending_reservation **prealloc);
 
 int __init ext4_init_es(void)
 {
@@ -439,22 +442,49 @@ static void ext4_es_list_del(struct inode *inode)
 	spin_unlock(&sbi->s_es_lock);
 }
 
-static struct extent_status *
-ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
-		     ext4_fsblk_t pblk)
+static inline struct pending_reservation *__alloc_pending(bool nofail)
+{
+	if (!nofail)
+		return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+
+	return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static inline void __free_pending(struct pending_reservation *pr)
+{
+	kmem_cache_free(ext4_pending_cachep, pr);
+}
+
+/*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+ */
+static inline bool ext4_es_must_keep(struct extent_status *es)
+{
+	/* fiemap, bigalloc, and seek_data/hole need to use it. */
+	if (ext4_es_is_delayed(es))
+		return true;
+
+	return false;
+}
+
+static inline struct extent_status *__es_alloc_extent(bool nofail)
+{
+	if (!nofail)
+		return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+
+	return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
+		ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
 {
-	struct extent_status *es;
-	es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
-	if (es == NULL)
-		return NULL;
 	es->es_lblk = lblk;
 	es->es_len = len;
 	es->es_pblk = pblk;
 
-	/*
-	 * We don't count delayed extent because we never try to reclaim them
-	 */
-	if (!ext4_es_is_delayed(es)) {
+	/* We never try to reclaim a must kept extent, so we don't count it. */
+	if (!ext4_es_must_keep(es)) {
 		if (!EXT4_I(inode)->i_es_shk_nr++)
 			ext4_es_list_add(inode);
 		percpu_counter_inc(&EXT4_SB(inode->i_sb)->
@@ -463,8 +493,11 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
 
 	EXT4_I(inode)->i_es_all_nr++;
 	percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+}
 
-	return es;
+static inline void __es_free_extent(struct extent_status *es)
+{
+	kmem_cache_free(ext4_es_cachep, es);
 }
 
 static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
@@ -472,8 +505,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 	EXT4_I(inode)->i_es_all_nr--;
 	percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
 
-	/* Decrease the shrink counter when this es is not delayed */
-	if (!ext4_es_is_delayed(es)) {
+	/* Decrease the shrink counter when we can reclaim the extent. */
+	if (!ext4_es_must_keep(es)) {
 		BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
 		if (!--EXT4_I(inode)->i_es_shk_nr)
 			ext4_es_list_del(inode);
@@ -481,7 +514,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 				s_es_stats.es_stats_shk_cnt);
 	}
 
-	kmem_cache_free(ext4_es_cachep, es);
+	__es_free_extent(es);
 }
 
 /*
@@ -743,7 +776,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
 }
 #endif
 
-static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+			      struct extent_status *prealloc)
 {
 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
 	struct rb_node **p = &tree->root.rb_node;
@@ -783,10 +817,15 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
 		}
 	}
 
-	es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
-				  newes->es_pblk);
+	if (prealloc)
+		es = prealloc;
+	else
+		es = __es_alloc_extent(false);
 	if (!es)
 		return -ENOMEM;
+	ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
+			    newes->es_pblk);
 
 	rb_link_node(&es->rb_node, parent, p);
 	rb_insert_color(&es->rb_node, &tree->root);
 
@@ -807,8 +846,12 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
 {
 	struct extent_status newes;
 	ext4_lblk_t end = lblk + len - 1;
-	int err = 0;
+	int err1 = 0, err2 = 0, err3 = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct extent_status *es1 = NULL;
+	struct extent_status *es2 = NULL;
+	struct pending_reservation *pr = NULL;
+	bool revise_pending = false;
 
 	es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
 		 lblk, len, pblk, status, inode->i_ino);
@@ -833,29 +876,57 @@
 
 	ext4_es_insert_extent_check(inode, &newes);
 
-	write_lock(&EXT4_I(inode)->i_es_lock);
-	err = __es_remove_extent(inode, lblk, end, NULL);
-	if (err != 0)
-		goto error;
+	revise_pending = sbi->s_cluster_ratio > 1 &&
+			 test_opt(inode->i_sb, DELALLOC) &&
+			 (status & (EXTENT_STATUS_WRITTEN |
+				    EXTENT_STATUS_UNWRITTEN));
 retry:
-	err = __es_insert_extent(inode, &newes);
-	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
-					  128, EXT4_I(inode)))
-		goto retry;
-	if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
-		err = 0;
+	if (err1 && !es1)
+		es1 = __es_alloc_extent(true);
+	if ((err1 || err2) && !es2)
+		es2 = __es_alloc_extent(true);
+	if ((err1 || err2 || err3) && revise_pending && !pr)
+		pr = __alloc_pending(true);
+	write_lock(&EXT4_I(inode)->i_es_lock);
 
-	if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
-	    (status & EXTENT_STATUS_WRITTEN ||
-	     status & EXTENT_STATUS_UNWRITTEN))
-		__revise_pending(inode, lblk, len);
+	err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+	if (err1 != 0)
+		goto error;
+	/* Free preallocated extent if it didn't get used. */
+	if (es1) {
+		if (!es1->es_len)
+			__es_free_extent(es1);
+		es1 = NULL;
+	}
+
+	err2 = __es_insert_extent(inode, &newes, es2);
+	if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
+		err2 = 0;
+	if (err2 != 0)
+		goto error;
+	/* Free preallocated extent if it didn't get used. */
+	if (es2) {
+		if (!es2->es_len)
+			__es_free_extent(es2);
+		es2 = NULL;
+	}
+
+	if (revise_pending) {
+		err3 = __revise_pending(inode, lblk, len, &pr);
+		if (err3 != 0)
+			goto error;
+		if (pr) {
+			__free_pending(pr);
+			pr = NULL;
+		}
+	}
 error:
 	write_unlock(&EXT4_I(inode)->i_es_lock);
+	if (err1 || err2 || err3)
+		goto retry;
 
 	ext4_es_print_tree(inode);
 
-	return err;
+	return 0;
 }
 
 /*
@@ -885,7 +956,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
 
 	es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
 	if (!es || es->es_lblk > end)
-		__es_insert_extent(inode, &newes);
+		__es_insert_extent(inode, &newes, NULL);
 	write_unlock(&EXT4_I(inode)->i_es_lock);
 }
 
@@ -1253,7 +1324,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 				rc->ndelonly--;
 				node = rb_next(&pr->rb_node);
 				rb_erase(&pr->rb_node, &tree->root);
-				kmem_cache_free(ext4_pending_cachep, pr);
+				__free_pending(pr);
 				if (!node)
 					break;
 				pr = rb_entry(node, struct pending_reservation,
@@ -1272,6 +1343,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
  * @lblk - first block in range
  * @end - last block in range
  * @reserved - number of cluster reservations released
+ * @prealloc - pre-allocated es to avoid memory allocation failures
  *
  * If @reserved is not NULL and delayed allocation is enabled, counts
  * block/cluster reservations freed by removing range and if bigalloc
@@ -1279,7 +1351,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
 * error code on failure.
 */
 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
-			      ext4_lblk_t end, int *reserved)
+			      ext4_lblk_t end, int *reserved,
+			      struct extent_status *prealloc)
 {
 	struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
 	struct rb_node *node;
@@ -1287,14 +1360,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 	struct extent_status orig_es;
 	ext4_lblk_t len1, len2;
 	ext4_fsblk_t block;
-	int err;
+	int err = 0;
 	bool count_reserved = true;
 	struct rsvd_count rc;
 
 	if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
 		count_reserved = false;
-retry:
-	err = 0;
 
 	es = __es_tree_search(&tree->root, lblk);
 	if (!es)
@@ -1328,14 +1399,13 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 					orig_es.es_len - len2;
 			ext4_es_store_pblock_status(&newes, block,
 						    ext4_es_status(&orig_es));
-			err = __es_insert_extent(inode, &newes);
+			err = __es_insert_extent(inode, &newes, prealloc);
 			if (err) {
+				if (!ext4_es_must_keep(&newes))
+					return 0;
+
 				es->es_lblk = orig_es.es_lblk;
 				es->es_len = orig_es.es_len;
-				if ((err == -ENOMEM) &&
-				    __es_shrink(EXT4_SB(inode->i_sb),
-							128, EXT4_I(inode)))
-					goto retry;
 				goto out;
 			}
 		} else {
@@ -1415,6 +1485,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 	ext4_lblk_t end;
 	int err = 0;
 	int reserved = 0;
+	struct extent_status *es = NULL;
 
 	trace_ext4_es_remove_extent(inode, lblk, len);
 	es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
@@ -1426,17 +1497,29 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 	end = lblk + len - 1;
 	BUG_ON(end < lblk);
 
+retry:
+	if (err && !es)
+		es = __es_alloc_extent(true);
 	/*
 	 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
 	 * so that we are sure __es_shrink() is done with the inode before it
 	 * is reclaimed.
 	 */
 	write_lock(&EXT4_I(inode)->i_es_lock);
-	err = __es_remove_extent(inode, lblk, end, &reserved);
+	err = __es_remove_extent(inode, lblk, end, &reserved, es);
+	/* Free preallocated extent if it didn't get used. */
+	if (es) {
+		if (!es->es_len)
+			__es_free_extent(es);
+		es = NULL;
+	}
 	write_unlock(&EXT4_I(inode)->i_es_lock);
+	if (err)
+		goto retry;
 
 	ext4_es_print_tree(inode);
 	ext4_da_release_space(inode, reserved);
-	return err;
+	return 0;
 }
 
 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
@@ -1683,11 +1766,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
 
 		(*nr_to_scan)--;
 		node = rb_next(&es->rb_node);
-		/*
-		 * We can't reclaim delayed extent from status tree because
-		 * fiemap, bigalloc, and seek_data/hole need to use it.
-		 */
-		if (ext4_es_is_delayed(es))
+
+		if (ext4_es_must_keep(es))
 			goto next;
 		if (ext4_es_is_referenced(es)) {
 			ext4_es_clear_referenced(es);
@@ -1751,7 +1831,7 @@ void ext4_clear_inode_es(struct inode *inode)
 	while (node) {
 		es = rb_entry(node, struct extent_status, rb_node);
 		node = rb_next(node);
-		if (!ext4_es_is_delayed(es)) {
+		if (!ext4_es_must_keep(es)) {
 			rb_erase(&es->rb_node, &tree->root);
 			ext4_es_free_extent(inode, es);
 		}
@@ -1838,11 +1918,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
  *
 * @inode - file containing the cluster
 * @lblk - logical block in the cluster to be added
+ * @prealloc - preallocated pending entry
 *
 * Returns 0 on successful insertion and -ENOMEM on failure.  If the
 * pending reservation is already in the set, returns successfully.
 */
-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
+			    struct pending_reservation **prealloc)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
@@ -1868,11 +1950,16 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
 		}
 	}
 
-	pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
-	if (pr == NULL) {
-		ret = -ENOMEM;
-		goto out;
+	if (likely(*prealloc == NULL)) {
+		pr = __alloc_pending(false);
+		if (!pr) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		pr = *prealloc;
+		*prealloc = NULL;
 	}
 	pr->lclu = lclu;
 
 	rb_link_node(&pr->rb_node, parent, p);
@@ -1901,7 +1988,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
 	if (pr != NULL) {
 		tree = &EXT4_I(inode)->i_pending_tree;
 		rb_erase(&pr->rb_node, &tree->root);
-		kmem_cache_free(ext4_pending_cachep, pr);
+		__free_pending(pr);
 	}
 }
 
@@ -1962,7 +2049,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
 				 bool allocated)
 {
 	struct extent_status newes;
-	int err = 0;
+	int err1 = 0, err2 = 0, err3 = 0;
+	struct extent_status *es1 = NULL;
+	struct extent_status *es2 = NULL;
+	struct pending_reservation *pr = NULL;
 
 	es_debug("add [%u/1) delayed to extent status tree of inode %lu\n",
 		 lblk, inode->i_ino);
@@ -1974,29 +2064,52 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
 
 	ext4_es_insert_extent_check(inode, &newes);
 
+retry:
+	if (err1 && !es1)
+		es1 = __es_alloc_extent(true);
+	if ((err1 || err2) && !es2)
+		es2 = __es_alloc_extent(true);
+	if ((err1 || err2 || err3) && allocated && !pr)
+		pr = __alloc_pending(true);
 	write_lock(&EXT4_I(inode)->i_es_lock);
 
-	err = __es_remove_extent(inode, lblk, lblk, NULL);
-	if (err != 0)
+	err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+	if (err1 != 0)
 		goto error;
-retry:
-	err = __es_insert_extent(inode, &newes);
-	if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
-					  128, EXT4_I(inode)))
-		goto retry;
-	if (err != 0)
+	/* Free preallocated extent if it didn't get used. */
+	if (es1) {
+		if (!es1->es_len)
+			__es_free_extent(es1);
+		es1 = NULL;
+	}
+
+	err2 = __es_insert_extent(inode, &newes, es2);
+	if (err2 != 0)
 		goto error;
+	/* Free preallocated extent if it didn't get used. */
+	if (es2) {
+		if (!es2->es_len)
+			__es_free_extent(es2);
+		es2 = NULL;
+	}
 
-	if (allocated)
-		__insert_pending(inode, lblk);
+	if (allocated) {
+		err3 = __insert_pending(inode, lblk, &pr);
+		if (err3 != 0)
+			goto error;
+		if (pr) {
+			__free_pending(pr);
+			pr = NULL;
+		}
+	}
 error:
 	write_unlock(&EXT4_I(inode)->i_es_lock);
+	if (err1 || err2 || err3)
+		goto retry;
 
 	ext4_es_print_tree(inode);
 	ext4_print_pending_tree(inode);
 
-	return err;
+	return 0;
 }
 
 /*
@@ -2097,21 +2210,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
 * @inode - file containing the range
 * @lblk - logical block defining the start of range
 * @len  - length of range in blocks
+ * @prealloc - preallocated pending entry
 *
 * Used after a newly allocated extent is added to the extents status tree.
 * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
 */
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-			     ext4_lblk_t len)
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+			    ext4_lblk_t len,
+			    struct pending_reservation **prealloc)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	ext4_lblk_t end = lblk + len - 1;
 	ext4_lblk_t first, last;
 	bool f_del = false, l_del = false;
+	int ret = 0;
 
 	if (len == 0)
-		return;
+		return 0;
 
 	/*
 	 * Two cases - block range within single cluster and block range
@@ -2132,7 +2248,9 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
 			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
 						first, lblk - 1);
 		if (f_del) {
-			__insert_pending(inode, first);
+			ret = __insert_pending(inode, first, prealloc);
+			if (ret < 0)
+				goto out;
 		} else {
 			last = EXT4_LBLK_CMASK(sbi, end) +
 			       sbi->s_cluster_ratio - 1;
@@ -2140,9 +2258,11 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
 			if (last != end)
 				l_del = __es_scan_range(inode,
 							&ext4_es_is_delonly,
 							end + 1, last);
-			if (l_del)
-				__insert_pending(inode, last);
-			else
+			if (l_del) {
+				ret = __insert_pending(inode, last, prealloc);
+				if (ret < 0)
+					goto out;
+			} else
 				__remove_pending(inode, last);
 		}
 	} else {
@@ -2150,18 +2270,24 @@ static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
 		if (first != lblk)
 			f_del = __es_scan_range(inode, &ext4_es_is_delonly,
 						first, lblk - 1);
-		if (f_del)
-			__insert_pending(inode, first);
-		else
+		if (f_del) {
+			ret = __insert_pending(inode, first, prealloc);
+			if (ret < 0)
+				goto out;
+		} else
 			__remove_pending(inode, first);
 
 		last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
 		if (last != end)
 			l_del = __es_scan_range(inode, &ext4_es_is_delonly,
 						end + 1, last);
-		if (l_del)
-			__insert_pending(inode, last);
-		else
+		if (l_del) {
+			ret = __insert_pending(inode, last, prealloc);
+			if (ret < 0)
+				goto out;
+		} else
 			__remove_pending(inode, last);
 	}
+out:
+	return ret;
 }
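
The ext4 series above converges on one allocation pattern: try the cheap GFP_ATOMIC allocation under i_es_lock first; if anything failed, drop the lock, preallocate with GFP_KERNEL | __GFP_NOFAIL (which may sleep but cannot fail), retry, and free any preallocation that went unused. A rough userspace analogue of that control flow, with hypothetical names, malloc standing in for kmem_cache_alloc and a pthread mutex for i_es_lock:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
	static int attempts;

	/* Stand-in for kmem_cache_alloc(..., GFP_ATOMIC): may fail. */
	static void *alloc_atomic(void)
	{
		return (++attempts > 1) ? malloc(64) : NULL; /* fail once */
	}

	int insert_with_prealloc(void)
	{
		void *prealloc = NULL;
		void *node;
		int err = 0;

	retry:
		if (err && !prealloc)
			prealloc = malloc(64);	/* kernel: __GFP_NOFAIL, taken
						 * outside the lock so it may
						 * block without deadlocking */

		pthread_mutex_lock(&tree_lock);
		node = prealloc ? prealloc : alloc_atomic();
		err = node ? 0 : -1;		/* -ENOMEM in the kernel */
		if (!err) {
			/* ... link node into the tree here ... */
			prealloc = NULL;	/* consumed */
		}
		pthread_mutex_unlock(&tree_lock);

		if (err)
			goto retry;

		free(prealloc);		/* preallocation that went unused */
		free(node);		/* demo only: a real tree keeps it */
		return 0;
	}

	int main(void)
	{
		return insert_with_prealloc();
	}

This is why ext4_es_insert_extent() and ext4_es_insert_delayed_block() can now return 0 unconditionally: with __GFP_NOFAIL preallocation on the retry path, the insert can no longer fail with ENOMEM, which closes the use-after-free window the old "undo half of the split" fallback left open.
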
@@ -1256,7 +1256,7 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
 		 */
 		const struct bio_vec *bvec = imu->bvec;
 
-		if (offset <= bvec->bv_len) {
+		if (offset < bvec->bv_len) {
 			iov_iter_advance(iter, offset);
 		} else {
 			unsigned long seg_skip;
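
Same boundary idea as the btrfs hunk: an offset exactly equal to bv_len lands at the start of the next segment, so the single-segment fast path must use a strict comparison. A small C illustration of resolving a byte offset against fixed-size segments (hypothetical segment table; offset must be less than the total length):

	#include <stdio.h>

	struct seg { unsigned int len; };

	/* Find the segment index and intra-segment offset for a byte offset. */
	static void locate(const struct seg *segs, unsigned long offset,
			   unsigned int *idx, unsigned long *off)
	{
		unsigned int i = 0;

		while (offset >= segs[i].len) {	/* ">=": offset == len moves on */
			offset -= segs[i].len;
			i++;
		}
		*idx = i;
		*off = offset;
	}

	int main(void)
	{
		const struct seg segs[] = { { 4096 }, { 4096 }, { 4096 } };
		unsigned int idx;
		unsigned long off;

		locate(segs, 4096, &idx, &off);	/* exactly one full segment */
		printf("segment %u, offset %lu\n", idx, off); /* 1, 0 */
		return 0;
	}
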
@@ -268,8 +268,8 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
 		return 0;
 
 	/*
-	 * If this is a sync(2) call or an emergency sync, all the super blocks
-	 * will be iterated, including upper_sb, so no need to do anything.
+	 * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
+	 * All the super blocks will be iterated, including upper_sb.
 	 *
 	 * If this is a syncfs(2) call, then we do need to call
 	 * sync_filesystem() on upper_sb, but enough if we do it when being
@@ -1733,6 +1733,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_xattr = ovl_xattr_handlers;
 	sb->s_fs_info = ofs;
 	sb->s_flags |= SB_POSIXACL;
+	sb->s_iflags |= SB_I_SKIP_SYNC;
 
 	err = -ENOMEM;
 	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
@@ -76,7 +76,7 @@ static void sync_inodes_one_sb(struct super_block *sb, void *arg)
 
 static void sync_fs_one_sb(struct super_block *sb, void *arg)
 {
-	if (!sb_rdonly(sb) && sb->s_op->sync_fs)
+	if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
+	    sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, *(int *)arg);
 }

@@ -1454,6 +1454,8 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
 #define SB_I_UNTRUSTED_MOUNTER		0x00000040
 
+#define SB_I_SKIP_SYNC	0x00000100	/* Skip superblock at global sync */
+
 /* Possible states of 'frozen' field */
 enum {
 	SB_UNFROZEN = 0,		/* FS is unfrozen */
@@ -623,11 +623,16 @@ struct hid_device {			/* device report descriptor */
 	struct list_head debug_list;
 	spinlock_t  debug_list_lock;
 	wait_queue_head_t debug_wait;
+	struct kref			ref;
+
+	unsigned int id;		/* system unique id */
 
 	ANDROID_KABI_RESERVE(1);
 	ANDROID_KABI_RESERVE(2);
 };
 
+void hiddev_free(struct kref *ref);
+
 #define to_hid_device(pdev) \
 	container_of(pdev, struct hid_device, dev)

@@ -149,6 +149,12 @@ struct scsi_cmnd {
 	ANDROID_KABI_RESERVE(4);
 };
 
+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+	return blk_mq_rq_from_pdu(scmd);
+}
+
 /*
 * Return the driver private allocation behind the command.
 * Only works if cmd_size is set in the host template.
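
With this helper in place, drivers stop reaching into scsi_cmnd.request directly; the qla2xxx hunks earlier in this release are its first user here. A hedged usage sketch of the tag-to-hardware-queue mapping (kernel context assumed; demo_cmd_to_hwq is a made-up name):

	#include <linux/blk-mq.h>
	#include <scsi/scsi_cmnd.h>

	/* Map a SCSI command to its hardware queue, as qla2xxx now does. */
	static unsigned int demo_cmd_to_hwq(struct scsi_cmnd *cmd)
	{
		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));

		return blk_mq_unique_tag_to_hwq(tag); /* index into queue_pair_map */
	}
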
@@ -218,8 +218,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 	int tv = prandom_u32() % max_delay;
 
 	im->tm_running = 1;
-	if (!mod_timer(&im->timer, jiffies+tv+2))
-		refcount_inc(&im->refcnt);
+	if (refcount_inc_not_zero(&im->refcnt)) {
+		if (mod_timer(&im->timer, jiffies + tv + 2))
+			ip_ma_put(im);
+	}
 }
 
 static void igmp_gq_start_timer(struct in_device *in_dev)
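
The igmp fix reverses the order of operations: take a reference first, and only if the object is still live, then arm the timer; if the timer was already pending, drop the extra reference again. Taking the reference after mod_timer() left a window where the timer could fire and free the object before refcount_inc() ran. A compressed userspace sketch of the "increment only if not zero" half of the rule, using C11 atomics (hypothetical object; the timer itself is elided):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj { atomic_int refcnt; };

	/* Take a reference only if the object is still alive (refcnt > 0),
	 * mirroring refcount_inc_not_zero(). */
	static bool get_not_zero(struct obj *o)
	{
		int c = atomic_load(&o->refcnt);

		while (c != 0)
			if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
				return true;
		return false;	/* already being freed: do not touch it */
	}

	static bool arm_timer_for(struct obj *o)
	{
		if (!get_not_zero(o))	/* 1) pin the object first */
			return false;
		/* 2) only now hand it to an asynchronous user (timer,
		 *    work item, ...); if that user was already armed and
		 *    holds its own reference, drop the extra one. */
		return true;
	}
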
@@ -799,7 +799,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 			goto reject_redirect;
 	}
 
-	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
+	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
 	if (!n)
 		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
 	if (!IS_ERR(n)) {
@@ -66,9 +66,32 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
 	return iint;
 }
 
-static void iint_free(struct integrity_iint_cache *iint)
+#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH+1)
+
+/*
+ * It is not clear that IMA should be nested at all, but as long is it measures
+ * files both on overlayfs and on underlying fs, we need to annotate the iint
+ * mutex to avoid lockdep false positives related to IMA + overlayfs.
+ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
+ */
+static inline void iint_lockdep_annotate(struct integrity_iint_cache *iint,
+					 struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+	static struct lock_class_key iint_mutex_key[IMA_MAX_NESTING];
+
+	int depth = inode->i_sb->s_stack_depth;
+
+	if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
+		depth = 0;
+
+	lockdep_set_class(&iint->mutex, &iint_mutex_key[depth]);
+#endif
+}
+
+static void iint_init_always(struct integrity_iint_cache *iint,
+			     struct inode *inode)
 {
-	kfree(iint->ima_hash);
 	iint->ima_hash = NULL;
 	iint->version = 0;
 	iint->flags = 0UL;
@@ -80,6 +103,14 @@ static void iint_free(struct integrity_iint_cache *iint)
 	iint->ima_creds_status = INTEGRITY_UNKNOWN;
 	iint->evm_status = INTEGRITY_UNKNOWN;
 	iint->measured_pcrs = 0;
+	mutex_init(&iint->mutex);
+	iint_lockdep_annotate(iint, inode);
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+	kfree(iint->ima_hash);
+	mutex_destroy(&iint->mutex);
 	kmem_cache_free(iint_cache, iint);
 }
 
@@ -112,6 +143,8 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
 	if (!iint)
 		return NULL;
 
+	iint_init_always(iint, inode);
+
 	write_lock(&integrity_iint_lock);
 
 	p = &integrity_iint_tree.rb_node;
@@ -161,25 +194,18 @@ void integrity_inode_free(struct inode *inode)
 	iint_free(iint);
 }
 
-static void init_once(void *foo)
+static void iint_init_once(void *foo)
 {
 	struct integrity_iint_cache *iint = foo;
 
 	memset(iint, 0, sizeof(*iint));
-	iint->ima_file_status = INTEGRITY_UNKNOWN;
-	iint->ima_mmap_status = INTEGRITY_UNKNOWN;
-	iint->ima_bprm_status = INTEGRITY_UNKNOWN;
-	iint->ima_read_status = INTEGRITY_UNKNOWN;
-	iint->ima_creds_status = INTEGRITY_UNKNOWN;
-	iint->evm_status = INTEGRITY_UNKNOWN;
-	mutex_init(&iint->mutex);
 }
 
 static int __init integrity_iintcache_init(void)
 {
 	iint_cache =
 		kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
-				  0, SLAB_PANIC, init_once);
+				  0, SLAB_PANIC, iint_init_once);
 	return 0;
 }
 DEFINE_LSM(integrity) = {
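
The lockdep annotation above gives each filesystem stacking depth its own lock class, so taking the iint mutex for an overlayfs inode while already holding the one for the underlying inode is not flagged as recursive locking. A minimal kernel-style sketch of the idiom, hypothetical structure names, the generic pattern rather than the IMA code:

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	#define DEMO_MAX_NESTING 3

	struct demo_obj {
		struct mutex lock;
		int depth;	/* how deeply this object is stacked */
	};

	static void demo_annotate(struct demo_obj *obj)
	{
	#ifdef CONFIG_LOCKDEP
		/* One lock class per nesting level, not one per object. */
		static struct lock_class_key keys[DEMO_MAX_NESTING];

		lockdep_set_class(&obj->lock, &keys[obj->depth]);
	#endif
	}

	static void demo_init(struct demo_obj *obj, int depth)
	{
		if (depth < 0 || depth >= DEMO_MAX_NESTING)
			depth = 0;	/* clamp, as the IMA helper warns */
		obj->depth = depth;
		mutex_init(&obj->lock);
		demo_annotate(obj);	/* deeper levels may now nest inside
					 * shallower ones without warnings */
	}
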
@@ -209,6 +209,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
 {
 	const char *audit_cause = "failed";
 	struct inode *inode = file_inode(file);
+	struct inode *real_inode = d_real_inode(file_dentry(file));
 	const char *filename = file->f_path.dentry->d_name.name;
 	int result = 0;
 	int length;
@@ -259,6 +260,10 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
 	iint->ima_hash = tmpbuf;
 	memcpy(iint->ima_hash, &hash, length);
 	iint->version = i_version;
+	if (real_inode != inode) {
+		iint->real_ino = real_inode->i_ino;
+		iint->real_dev = real_inode->i_sb->s_dev;
+	}
 
 	/* Possibly temporary failure due to type of read (eg. O_DIRECT) */
 	if (!result)
@@ -27,6 +27,7 @@
 #include <linux/ima.h>
 #include <linux/iversion.h>
 #include <linux/fs.h>
+#include <linux/iversion.h>
 
 #include "ima.h"
 
@@ -193,7 +194,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
 			       u32 secid, char *buf, loff_t size, int mask,
 			       enum ima_hooks func)
 {
-	struct inode *inode = file_inode(file);
+	struct inode *backing_inode, *inode = file_inode(file);
 	struct integrity_iint_cache *iint = NULL;
 	struct ima_template_desc *template_desc = NULL;
 	char *pathbuf = NULL;
@@ -267,6 +268,19 @@ static int process_measurement(struct file *file, const struct cred *cred,
 		iint->measured_pcrs = 0;
 	}
 
+	/* Detect and re-evaluate changes made to the backing file. */
+	backing_inode = d_real_inode(file_dentry(file));
+	if (backing_inode != inode &&
+	    (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
+		if (!IS_I_VERSION(backing_inode) ||
+		    backing_inode->i_sb->s_dev != iint->real_dev ||
+		    backing_inode->i_ino != iint->real_ino ||
+		    !inode_eq_iversion(backing_inode, iint->version)) {
+			iint->flags &= ~IMA_DONE_MASK;
+			iint->measured_pcrs = 0;
+		}
+	}
+
 	/* Determine if already appraised/measured based on bitmask
 	 * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
 	 *  IMA_AUDIT, IMA_AUDITED)
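
The re-measurement test above reduces to a cache-validity check on a (device, inode, i_version) triple: if any of the three no longer matches what was recorded at measurement time, the cached verdict is discarded and the file is measured again. A small self-contained C sketch of the idea (hypothetical types standing in for the iint and the backing inode):

	#include <stdbool.h>
	#include <stdint.h>

	struct cached_verdict {
		uint64_t dev, ino, version;	/* identity at measurement time */
		bool done;			/* verdict still valid?         */
	};

	struct file_id {
		uint64_t dev, ino, version;	/* identity of the backing file */
	};

	static void revalidate(struct cached_verdict *v, const struct file_id *id)
	{
		if (v->done &&
		    (id->dev != v->dev || id->ino != v->ino ||
		     id->version != v->version))
			v->done = false;	/* force a fresh measurement */
	}
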
@@ -124,6 +124,8 @@ struct integrity_iint_cache {
 	unsigned long flags;
 	unsigned long measured_pcrs;
 	unsigned long atomic_flags;
+	unsigned long real_ino;
+	dev_t real_dev;
 	enum integrity_status ima_file_status:4;
 	enum integrity_status ima_mmap_status:4;
 	enum integrity_status ima_bprm_status:4;
@@ -2244,6 +2244,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
 	SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
 	/* https://bugs.launchpad.net/bugs/1821663 */
 	SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
+	/* KONTRON SinglePC may cause a stall at runtime resume */
+	SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0),
 	{}
 };
 #endif /* CONFIG_PM */
@@ -1935,6 +1935,7 @@ enum {
 	ALC887_FIXUP_ASUS_AUDIO,
 	ALC887_FIXUP_ASUS_HMIC,
 	ALCS1200A_FIXUP_MIC_VREF,
+	ALC888VD_FIXUP_MIC_100VREF,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2488,6 +2489,13 @@ static const struct hda_fixup alc882_fixups[] = {
 			{}
 		}
 	},
+	[ALC888VD_FIXUP_MIC_100VREF] = {
+		.type = HDA_FIXUP_PINCTLS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x18, PIN_VREF100 }, /* headset mic */
+			{}
+		}
+	},
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2557,6 +2565,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+	SND_PCI_QUIRK(0x10ec, 0x12d8, "iBase Elo Touch", ALC888VD_FIXUP_MIC_100VREF),
 	SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
@@ -3198,6 +3207,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x10ec0257:
 	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x48, 0x0);
 		alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
@@ -3227,6 +3237,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x10ec0257:
 	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x48, 0xd011);
 		alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
@@ -6176,6 +6187,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
 	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
+	case 0x10ec0257:
 	case 0x19e58326:
 		alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
 		alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);