Merge 5.10.203 into android12-5.10-lts

Changes in 5.10.203
	RDMA/irdma: Prevent zero-length STAG registration
	PCI: keystone: Drop __init from ks_pcie_add_pcie_{ep,port}()
	afs: Fix afs_server_list to be cleaned up with RCU
	afs: Make error on cell lookup failure consistent with OpenAFS
	drm/panel: boe-tv101wum-nl6: Fine tune the panel power sequence
	drm/panel: auo,b101uan08.3: Fine tune the panel power sequence
	drm/panel: simple: Fix Innolux G101ICE-L01 bus flags
	drm/panel: simple: Fix Innolux G101ICE-L01 timings
	wireguard: use DEV_STATS_INC()
	ata: pata_isapnp: Add missing error check for devm_ioport_map()
	drm/rockchip: vop: Fix color for RGB888/BGR888 format on VOP full
	HID: core: store the unique system identifier in hid_device
	HID: fix HID device resource race between HID core and debugging support
	ipv4: Correct/silence an endian warning in __ip_do_redirect
	net: usb: ax88179_178a: fix failed operations during ax88179_reset
	net/smc: avoid data corruption caused by decline
	arm/xen: fix xen_vcpu_info allocation alignment
	amd-xgbe: handle corner-case during sfp hotplug
	amd-xgbe: handle the corner-case during tx completion
	amd-xgbe: propagate the correct speed and duplex status
	net: axienet: Fix check for partial TX checksum
	afs: Return ENOENT if no cell DNS record can be found
	afs: Fix file locking on R/O volumes to operate in local mode
	nvmet: remove unnecessary ctrl parameter
	nvmet: nul-terminate the NQNs passed in the connect command
	USB: dwc3: qcom: fix resource leaks on probe deferral
	USB: dwc3: qcom: fix ACPI platform device leak
	lockdep: Fix block chain corruption
	media: ccs: Correctly initialise try compose rectangle
	MIPS: KVM: Fix a build warning about variable set but not used
	ext4: add a new helper to check if es must be kept
	ext4: factor out __es_alloc_extent() and __es_free_extent()
	ext4: use pre-allocated es in __es_insert_extent()
	ext4: use pre-allocated es in __es_remove_extent()
	ext4: using nofail preallocation in ext4_es_remove_extent()
	ext4: using nofail preallocation in ext4_es_insert_delayed_block()
	ext4: using nofail preallocation in ext4_es_insert_extent()
	ext4: fix slab-use-after-free in ext4_es_insert_extent()
	ext4: make sure allocate pending entry not fail
	nfsd: lock_rename() needs both directories to live on the same fs
	ASoC: simple-card: fixup asoc_simple_probe() error handling
	ACPI: resource: Skip IRQ override on ASUS ExpertBook B1402CVA
	swiotlb-xen: provide the "max_mapping_size" method
	bcache: replace a mistaken IS_ERR() by IS_ERR_OR_NULL() in btree_gc_coalesce()
	bcache: fixup multi-threaded bch_sectors_dirty_init() wake-up race
	s390/dasd: protect device queue against concurrent access
	USB: serial: option: add Luat Air72*U series products
	hv_netvsc: Fix race of register_netdevice_notifier and VF register
	hv_netvsc: Mark VF as slave before exposing it to user-mode
	dm-delay: fix a race between delay_presuspend and delay_bio
	bcache: check return value from btree_node_alloc_replacement()
	bcache: prevent potential division by zero error
	bcache: fixup init dirty data errors
	bcache: fixup lock c->root error
	USB: serial: option: add Fibocom L7xx modules
	USB: serial: option: fix FM101R-GL defines
	USB: serial: option: don't claim interface 4 for ZTE MF290
	USB: dwc2: write HCINT with INTMASK applied
	usb: dwc3: Fix default mode initialization
	usb: dwc3: set the dma max_seg_size
	USB: dwc3: qcom: fix wakeup after probe deferral
	io_uring: fix off-by one bvec index
	pinctrl: avoid reload of p state in list iteration
	firewire: core: fix possible memory leak in create_units()
	mmc: block: Do not lose cache flush during CQE error recovery
	ALSA: hda: Disable power-save on KONTRON SinglePC
	ALSA: hda/realtek: Headset Mic VREF to 100%
	ALSA: hda/realtek: Add supported ALC257 for ChromeOS
	dm-verity: align struct dm_verity_fec_io properly
	dm verity: don't perform FEC for failed readahead IO
	bcache: revert replacing IS_ERR_OR_NULL with IS_ERR
	iommu/vt-d: Add MTL to quirk list to skip TE disabling
	powerpc: Don't clobber f0/vs0 during fp|altivec register save
	parisc: Drop the HP-UX ENOSYM and EREMOTERELEASE error codes
	btrfs: add dmesg output for first mount and last unmount of a filesystem
	btrfs: ref-verify: fix memory leaks in btrfs_ref_tree_mod()
	btrfs: fix off-by-one when checking chunk map includes logical address
	btrfs: send: ensure send_fd is writable
	btrfs: make error messages more clear when getting a chunk map
	Input: xpad - add HyperX Clutch Gladiate Support
	hv_netvsc: fix race of netvsc and VF register_netdevice
	USB: core: Change configuration warnings to notices
	usb: config: fix iteration issue in 'usb_get_bos_descriptor()'
	ipv4: igmp: fix refcnt uaf issue when receiving igmp query packet
	dpaa2-eth: increase the needed headroom to account for alignment
	selftests/net: ipsec: fix constant out of range
	selftests/net: mptcp: fix uninitialized variable warnings
	net: stmmac: xgmac: Disable FPE MMC interrupts
	octeontx2-pf: Fix adding mbox work queue entry when num_vfs > 64
	Revert "workqueue: remove unused cancel_work()"
	r8169: prevent potential deadlock in rtl8169_close
	ravb: Fix races between ravb_tx_timeout_work() and net related ops
	net: ravb: Use pm_runtime_resume_and_get()
	net: ravb: Start TX queues after HW initialization succeeded
	smb3: fix touch -h of symlink
	ASoC: Intel: Move soc_intel_is_foo() helpers to a generic header
	ASoC: SOF: sof-pci-dev: use community key on all Up boards
	ASoC: SOF: sof-pci-dev: add parameter to override topology filename
	ASoC: SOF: sof-pci-dev: don't use the community key on APL Chromebooks
	ASoC: SOF: sof-pci-dev: Fix community key quirk detection
	s390/mm: fix phys vs virt confusion in mark_kernel_pXd() functions family
	s390/cmma: fix detection of DAT pages
	misc: pci_endpoint_test: Add deviceID for AM64 and J7200
	misc: pci_endpoint_test: Add deviceID for J721S2 PCIe EP device support
	fbdev: stifb: Make the STI next font pointer a 32-bit signed offset
	ima: annotate iint mutex to avoid lockdep false positive warnings
	driver core: Move the "removable" attribute from USB to core
	drm/amdgpu: don't use ATRM for external devices
	fs: add ctime accessors infrastructure
	smb3: fix caching of ctime on setxattr
	scsi: core: Introduce the scsi_cmd_to_rq() function
	scsi: qla2xxx: Use scsi_cmd_to_rq() instead of scsi_cmnd.request
	scsi: qla2xxx: Fix system crash due to bad pointer access
	cpufreq: imx6q: don't warn for disabling a non-existing frequency
	cpufreq: imx6q: Don't disable 792 Mhz OPP unnecessarily
	mmc: cqhci: Increase recovery halt timeout
	mmc: cqhci: Warn of halt or task clear failure
	mmc: cqhci: Fix task clearing in CQE error recovery
	mmc: core: convert comma to semicolon
	mmc: block: Retry commands in CQE error recovery
	mmc: core: add helpers mmc_regulator_enable/disable_vqmmc
	mmc: sdhci-sprd: Fix vqmmc not shutting down after the card was pulled
	r8169: disable ASPM in case of tx timeout
	r8169: fix deadlock on RTL8125 in jumbo mtu mode
	driver core: Release all resources during unbind before updating device links
	Linux 5.10.203

Change-Id: I7feccd8526f0286020be24411be0e6113129ff65
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit bdd8d64f36 (Greg Kroah-Hartman, 2023-12-08 16:27:19 +00:00)
111 changed files with 1200 additions and 516 deletions

--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -154,17 +154,6 @@ Description:
 		files hold a string value (enable or disable) indicating whether
 		or not USB3 hardware LPM U1 or U2 is enabled for the device.
 
-What:		/sys/bus/usb/devices/.../removable
-Date:		February 2012
-Contact:	Matthew Garrett <mjg@redhat.com>
-Description:
-		Some information about whether a given USB device is
-		physically fixed to the platform can be inferred from a
-		combination of hub descriptor bits and platform-specific data
-		such as ACPI. This file will read either "removable" or
-		"fixed" if the information is available, and "unknown"
-		otherwise.
-
 What:		/sys/bus/usb/devices/.../ltm_capable
 Date:		July 2012
 Contact:	Sarah Sharp <sarah.a.sharp@linux.intel.com>

--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-removable
@@ -0,0 +1,17 @@
+What:		/sys/devices/.../removable
+Date:		May 2021
+Contact:	Rajat Jain <rajatxjain@gmail.com>
+Description:
+		Information about whether a given device can be removed from the
+		platform by the user. This is determined by its subsystem in a
+		bus / platform-specific way. This attribute is only present for
+		devices that can support determining such information:
+
+		"removable": device can be removed from the platform by the user
+		"fixed":     device is fixed to the platform / cannot be removed
+			     by the user.
+		"unknown":   The information is unavailable / cannot be deduced.
+
+		Currently this is only supported by USB (which infers the
+		information from a combination of hub descriptor bits and
+		platform-specific data such as ACPI).
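
Note: subsystems opt in by setting dev->removable before device_add(); left at
its default, the attribute is not created at all (see the dev_removable_is_valid()
check in the drivers/base/core.c hunk below). A minimal sketch of the intended
pattern, assuming the dev_set_removable() helper and enum device_removable values
this series adds to include/linux/device.h (the bus code and function name here
are illustrative, not part of this patch):

	#include <linux/device.h>

	/* hypothetical bus enumeration code */
	static void mybus_mark_removable(struct device *dev, bool user_removable)
	{
		/*
		 * Any value other than the default
		 * DEVICE_REMOVABLE_NOT_SUPPORTED makes the "removable"
		 * sysfs file appear for this device.
		 */
		dev_set_removable(dev, user_removable ? DEVICE_REMOVABLE
						      : DEVICE_FIXED);
	}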

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 202
+SUBLEVEL = 203
 EXTRAVERSION =
 NAME = Dare mighty things

--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -362,7 +362,8 @@ static int __init xen_guest_init(void)
 	 * for secondary CPUs as they are brought up.
 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
 	 */
-	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+				       1 << fls(sizeof(struct vcpu_info) - 1));
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;
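
The second argument of __alloc_percpu() is the required alignment;
1 << fls(sizeof(struct vcpu_info) - 1) rounds the structure size up to the
next power of two, so a vcpu_info can never straddle such a boundary. A worked
example with an illustrative size (not taken from any particular build):

	/* fls() returns the 1-based index of the highest set bit */
	size_t size  = 48;                 /* pretend sizeof(struct vcpu_info) */
	size_t align = 1 << fls(size - 1); /* fls(47) == 6, so align == 64 */
	/* 48 -> 64, 64 -> 64, 65 -> 128: always the next power of two >= size */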

--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -667,7 +667,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	int srcu_idx, err;
 	kvm_pfn_t pfn;
-	pte_t *ptep, entry, old_pte;
+	pte_t *ptep, entry;
 	bool writeable;
 	unsigned long prot_bits;
 	unsigned long mmu_seq;
@@ -739,7 +739,6 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	entry = pfn_pte(pfn, __pgprot(prot_bits));
 
 	/* Write the PTE */
-	old_pte = *ptep;
 	set_pte(ptep, entry);
 	err = 0;

--- a/arch/parisc/include/uapi/asm/errno.h
+++ b/arch/parisc/include/uapi/asm/errno.h
@@ -75,7 +75,6 @@
 
 /* We now return you to your regularly scheduled HPUX. */
-#define ENOSYM		215	/* symbol does not exist in executable */
 #define ENOTSOCK	216	/* Socket operation on non-socket */
 #define EDESTADDRREQ	217	/* Destination address required */
 #define EMSGSIZE	218	/* Message too long */
@@ -101,7 +100,6 @@
 #define ETIMEDOUT	238	/* Connection timed out */
 #define ECONNREFUSED	239	/* Connection refused */
 #define EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
-#define EREMOTERELEASE	240	/* Remote peer released connection */
 #define EHOSTDOWN	241	/* Host is down */
 #define EHOSTUNREACH	242	/* No route to host */

--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,15 @@
 #include <asm/feature-fixups.h>
 
 #ifdef CONFIG_VSX
+#define __REST_1FPVSR(n,c,base)						\
+BEGIN_FTR_SECTION							\
+	b	2f;							\
+END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
+	REST_FPR(n,base);						\
+	b	3f;							\
+2:	REST_VSR(n,c,base);						\
+3:
+
 #define __REST_32FPVSRS(n,c,base)					\
 BEGIN_FTR_SECTION							\
 	b	2f;							\
@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
 2:	SAVE_32VSRS(n,c,base);						\
 3:
 #else
+#define __REST_1FPVSR(n,b,base)		REST_FPR(n, base)
 #define __REST_32FPVSRS(n,b,base)	REST_32FPRS(n, base)
 #define __SAVE_32FPVSRS(n,b,base)	SAVE_32FPRS(n, base)
 #endif
+#define REST_1FPVSR(n,c,base)   __REST_1FPVSR(n,__REG_##c,__REG_##base)
 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
 	SAVE_32FPVSRS(0, R4, R3)
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r3)
+	REST_1FPVSR(0, R4, R3)
 	blr
 EXPORT_SYMBOL(store_fp_state)
@@ -132,4 +144,5 @@ _GLOBAL(save_fpu)
 2:	SAVE_32FPVSRS(0, R4, R6)
 	mffs	fr0
 	stfd	fr0,FPSTATE_FPSCR(r6)
+	REST_1FPVSR(0, R4, R6)
 	blr

--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -32,6 +32,7 @@ _GLOBAL(store_vr_state)
 	mfvscr	v0
 	li	r4, VRSTATE_VSCR
 	stvx	v0, r4, r3
+	lvx	v0, 0, r3
 	blr
 EXPORT_SYMBOL(store_vr_state)
@@ -104,6 +105,7 @@ _GLOBAL(save_altivec)
 	mfvscr	v0
 	li	r4,VRSTATE_VSCR
 	stvx	v0,r4,r7
+	lvx	v0,0,r7
 	blr
 
 #ifdef CONFIG_VSX

--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -112,7 +112,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
 		next = pmd_addr_end(addr, end);
 		if (pmd_none(*pmd) || pmd_large(*pmd))
 			continue;
-		page = virt_to_page(pmd_val(*pmd));
+		page = phys_to_page(pmd_val(*pmd));
 		set_bit(PG_arch_1, &page->flags);
 	} while (pmd++, addr = next, addr != end);
 }
@@ -130,8 +130,8 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
 		if (pud_none(*pud) || pud_large(*pud))
 			continue;
 		if (!pud_folded(*pud)) {
-			page = virt_to_page(pud_val(*pud));
-			for (i = 0; i < 3; i++)
+			page = phys_to_page(pud_val(*pud));
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_pmd(pud, addr, next);
@@ -151,8 +151,8 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
 		if (p4d_none(*p4d))
 			continue;
 		if (!p4d_folded(*p4d)) {
-			page = virt_to_page(p4d_val(*p4d));
-			for (i = 0; i < 3; i++)
+			page = phys_to_page(p4d_val(*p4d));
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_pud(p4d, addr, next);
@@ -173,8 +173,8 @@ static void mark_kernel_pgd(void)
 		if (pgd_none(*pgd))
 			continue;
 		if (!pgd_folded(*pgd)) {
-			page = virt_to_page(pgd_val(*pgd));
-			for (i = 0; i < 3; i++)
+			page = phys_to_page(pgd_val(*pgd));
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_p4d(pgd, addr, next);

--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -448,6 +448,13 @@ static const struct dmi_system_id asus_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
 		},
 	},
+	{
+		/* Asus ExpertBook B1402CVA */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+		},
+	},
 	{
 		/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
 		.matches = {

--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -82,6 +82,9 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
 	if (pnp_port_valid(idev, 1)) {
 		ctl_addr = devm_ioport_map(&idev->dev,
 					   pnp_port_start(idev, 1), 1);
+		if (!ctl_addr)
+			return -ENOMEM;
+
 		ap->ioaddr.altstatus_addr = ctl_addr;
 		ap->ioaddr.ctl_addr = ctl_addr;
 		ap->ops = &isapnp_port_ops;

--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2338,6 +2338,25 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(online);
 
+static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	const char *loc;
+
+	switch (dev->removable) {
+	case DEVICE_REMOVABLE:
+		loc = "removable";
+		break;
+	case DEVICE_FIXED:
+		loc = "fixed";
+		break;
+	default:
+		loc = "unknown";
+	}
+	return sysfs_emit(buf, "%s\n", loc);
+}
+static DEVICE_ATTR_RO(removable);
+
 int device_add_groups(struct device *dev, const struct attribute_group **groups)
 {
 	return sysfs_create_groups(&dev->kobj, groups);
@@ -2515,8 +2534,16 @@ static int device_add_attrs(struct device *dev)
 			goto err_remove_dev_online;
 	}
 
+	if (dev_removable_is_valid(dev)) {
+		error = device_create_file(dev, &dev_attr_removable);
+		if (error)
+			goto err_remove_dev_waiting_for_supplier;
+	}
+
 	return 0;
 
+ err_remove_dev_waiting_for_supplier:
+	device_remove_file(dev, &dev_attr_waiting_for_supplier);
  err_remove_dev_online:
 	device_remove_file(dev, &dev_attr_online);
  err_remove_dev_groups:
@@ -2536,6 +2563,7 @@ static void device_remove_attrs(struct device *dev)
 	struct class *class = dev->class;
 	const struct device_type *type = dev->type;
 
+	device_remove_file(dev, &dev_attr_removable);
 	device_remove_file(dev, &dev_attr_waiting_for_supplier);
 	device_remove_file(dev, &dev_attr_online);
 	device_remove_groups(dev, dev->groups);

--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -1187,8 +1187,6 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 		else if (drv->remove)
 			drv->remove(dev);
 
-		device_links_driver_cleanup(dev);
-
 		devres_release_all(dev);
 		arch_teardown_dma_ops(dev);
 		kfree(dev->dma_range_map);
@@ -1200,6 +1198,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 		pm_runtime_reinit(dev);
 		dev_pm_set_driver_flags(dev, 0);
 
+		device_links_driver_cleanup(dev);
+
 		klist_remove(&dev->p->knode_driver);
 		device_pm_check_callbacks(dev);
 		if (dev->bus)

--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -209,6 +209,14 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
 	.suspend = cpufreq_generic_suspend,
 };
 
+static void imx6x_disable_freq_in_opp(struct device *dev, unsigned long freq)
+{
+	int ret = dev_pm_opp_disable(dev, freq);
+
+	if (ret < 0 && ret != -ENODEV)
+		dev_warn(dev, "failed to disable %ldMHz OPP\n", freq / 1000000);
+}
+
 #define OCOTP_CFG3			0x440
 #define OCOTP_CFG3_SPEED_SHIFT		16
 #define OCOTP_CFG3_SPEED_1P2GHZ		0x3
@@ -254,17 +262,15 @@ static int imx6q_opp_check_speed_grading(struct device *dev)
 	val &= 0x3;
 
 	if (val < OCOTP_CFG3_SPEED_996MHZ)
-		if (dev_pm_opp_disable(dev, 996000000))
-			dev_warn(dev, "failed to disable 996MHz OPP\n");
+		imx6x_disable_freq_in_opp(dev, 996000000);
 
 	if (of_machine_is_compatible("fsl,imx6q") ||
 	    of_machine_is_compatible("fsl,imx6qp")) {
 		if (val != OCOTP_CFG3_SPEED_852MHZ)
-			if (dev_pm_opp_disable(dev, 852000000))
-				dev_warn(dev, "failed to disable 852MHz OPP\n");
+			imx6x_disable_freq_in_opp(dev, 852000000);
+
 		if (val != OCOTP_CFG3_SPEED_1P2GHZ)
-			if (dev_pm_opp_disable(dev, 1200000000))
-				dev_warn(dev, "failed to disable 1.2GHz OPP\n");
+			imx6x_disable_freq_in_opp(dev, 1200000000);
 	}
 
 	return 0;
@@ -316,20 +322,16 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
 	val >>= OCOTP_CFG3_SPEED_SHIFT;
 	val &= 0x3;
 
-	if (of_machine_is_compatible("fsl,imx6ul")) {
+	if (of_machine_is_compatible("fsl,imx6ul"))
 		if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
-			if (dev_pm_opp_disable(dev, 696000000))
-				dev_warn(dev, "failed to disable 696MHz OPP\n");
-	}
+			imx6x_disable_freq_in_opp(dev, 696000000);
 
 	if (of_machine_is_compatible("fsl,imx6ull")) {
-		if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
-			if (dev_pm_opp_disable(dev, 792000000))
-				dev_warn(dev, "failed to disable 792MHz OPP\n");
+		if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
+			imx6x_disable_freq_in_opp(dev, 792000000);
 
 		if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
-			if (dev_pm_opp_disable(dev, 900000000))
-				dev_warn(dev, "failed to disable 900MHz OPP\n");
+			imx6x_disable_freq_in_opp(dev, 900000000);
 	}
 
 	return ret;

--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -719,14 +719,11 @@ static void create_units(struct fw_device *device)
 					fw_unit_attributes,
 					&unit->attribute_group);
 
-		if (device_register(&unit->device) < 0)
-			goto skip_unit;
-
 		fw_device_get(device);
-		continue;
-
-	skip_unit:
-		kfree(unit);
+		if (device_register(&unit->device) < 0) {
+			put_device(&unit->device);
+			continue;
+		}
 	}
 }

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
@@ -285,6 +286,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return false;
 
+	/* ATRM is for on-platform devices only */
+	if (dev_is_removable(&adev->pdev->dev))
+		return false;
+
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
 		dhandle = ACPI_HANDLE(&pdev->dev);
 		if (!dhandle)

--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -36,6 +36,7 @@ struct panel_desc {
 	const struct panel_init_cmd *init_cmds;
 	unsigned int lanes;
 	bool discharge_on_disable;
+	bool lp11_before_reset;
 };
 
 struct boe_panel {
@@ -551,6 +552,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
 
 	usleep_range(5000, 10000);
 
+	if (boe->desc->lp11_before_reset) {
+		mipi_dsi_dcs_nop(boe->dsi);
+		usleep_range(1000, 2000);
+	}
 	gpiod_set_value(boe->enable_gpio, 1);
 	usleep_range(1000, 2000);
 	gpiod_set_value(boe->enable_gpio, 0);
@@ -692,6 +697,7 @@ static const struct panel_desc auo_b101uan08_3_desc = {
 	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
 		      MIPI_DSI_MODE_LPM,
 	.init_cmds = auo_b101uan08_3_init_cmd,
+	.lp11_before_reset = true,
 };
 
 static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
@@ -719,6 +725,7 @@ static const struct panel_desc boe_tv105wum_nw0_desc = {
 	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
 		      MIPI_DSI_MODE_LPM,
 	.init_cmds = boe_init_cmd,
+	.lp11_before_reset = true,
 };
 
 static int boe_panel_get_modes(struct drm_panel *panel,

--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2154,13 +2154,13 @@ static const struct panel_desc innolux_g070y2_l01 = {
 static const struct display_timing innolux_g101ice_l01_timing = {
 	.pixelclock = { 60400000, 71100000, 74700000 },
 	.hactive = { 1280, 1280, 1280 },
-	.hfront_porch = { 41, 80, 100 },
-	.hback_porch = { 40, 79, 99 },
-	.hsync_len = { 1, 1, 1 },
+	.hfront_porch = { 30, 60, 70 },
+	.hback_porch = { 30, 60, 70 },
+	.hsync_len = { 22, 40, 60 },
 	.vactive = { 800, 800, 800 },
-	.vfront_porch = { 5, 11, 14 },
-	.vback_porch = { 4, 11, 14 },
-	.vsync_len = { 1, 1, 1 },
+	.vfront_porch = { 3, 8, 14 },
+	.vback_porch = { 3, 8, 14 },
+	.vsync_len = { 4, 7, 12 },
 	.flags = DISPLAY_FLAGS_DE_HIGH,
 };
@@ -2177,6 +2177,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
 		.disable = 200,
 	},
 	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+	.bus_flags = DRM_BUS_FLAG_DE_HIGH,
 	.connector_type = DRM_MODE_CONNECTOR_LVDS,
 };

--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -248,14 +248,22 @@ static inline void vop_cfg_done(struct vop *vop)
 	VOP_REG_SET(vop, common, cfg_done, 1);
 }
 
-static bool has_rb_swapped(uint32_t format)
+static bool has_rb_swapped(uint32_t version, uint32_t format)
 {
 	switch (format) {
 	case DRM_FORMAT_XBGR8888:
 	case DRM_FORMAT_ABGR8888:
-	case DRM_FORMAT_BGR888:
 	case DRM_FORMAT_BGR565:
 		return true;
+	/*
+	 * full framework (IP version 3.x) only need rb swapped for RGB888 and
+	 * little framework (IP version 2.x) only need rb swapped for BGR888,
+	 * check for 3.x to also only rb swap BGR888 for unknown vop version
+	 */
+	case DRM_FORMAT_RGB888:
+		return VOP_MAJOR(version) == 3;
+	case DRM_FORMAT_BGR888:
+		return VOP_MAJOR(version) != 3;
 	default:
 		return false;
 	}
@@ -988,7 +996,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	VOP_WIN_SET(vop, win, dsp_info, dsp_info);
 	VOP_WIN_SET(vop, win, dsp_st, dsp_st);
 
-	rb_swap = has_rb_swapped(fb->format->format);
+	rb_swap = has_rb_swapped(vop->data->version, fb->format->format);
 	VOP_WIN_SET(vop, win, rb_swap, rb_swap);
 
 	/*

--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -703,15 +703,22 @@
  * Free a device structure, all reports, and all fields.
  */
 
-static void hid_device_release(struct device *dev)
+void hiddev_free(struct kref *ref)
 {
-	struct hid_device *hid = to_hid_device(dev);
+	struct hid_device *hid = container_of(ref, struct hid_device, ref);
 
 	hid_close_report(hid);
 	kfree(hid->dev_rdesc);
 	kfree(hid);
 }
 
+static void hid_device_release(struct device *dev)
+{
+	struct hid_device *hid = to_hid_device(dev);
+
+	kref_put(&hid->ref, hiddev_free);
+}
+
 /*
  * Fetch a report description item from the data stream. We support long
  * items, though they are not used yet.
@@ -2445,10 +2452,12 @@ int hid_add_device(struct hid_device *hdev)
 		hid_warn(hdev, "bad device descriptor (%d)\n", ret);
 	}
 
+	hdev->id = atomic_inc_return(&id);
+
 	/* XXX hack, any other cleaner solution after the driver core
 	 * is converted to allow more than 20 bytes as the device name? */
 	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
-		     hdev->vendor, hdev->product, atomic_inc_return(&id));
+		     hdev->vendor, hdev->product, hdev->id);
 
 	hid_debug_register(hdev, dev_name(&hdev->dev));
 	ret = device_add(&hdev->dev);
@@ -2491,6 +2500,7 @@ struct hid_device *hid_allocate_device(void)
 	spin_lock_init(&hdev->debug_list_lock);
 	sema_init(&hdev->driver_input_lock, 1);
 	mutex_init(&hdev->ll_open_lock);
+	kref_init(&hdev->ref);
 
 	return hdev;
 }

--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1082,6 +1082,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
 		goto out;
 	}
 	list->hdev = (struct hid_device *) inode->i_private;
+	kref_get(&list->hdev->ref);
 	file->private_data = list;
 	mutex_init(&list->read_mutex);
@@ -1174,6 +1175,8 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
 	list_del(&list->node);
 	spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
 	kfifo_free(&list->hid_debug_fifo);
+
+	kref_put(&list->hdev->ref, hiddev_free);
 	kfree(list);
 
 	return 0;
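
Taken together, the hid-core and hid-debug hunks move the hid_device lifetime
onto a kref instead of tying it to device release; the resulting pairing,
summarized from the hunks above:

	/* hid_allocate_device()      -> kref_init(&hdev->ref)  (refcount 1)
	 * hid_debug_events_open()    -> kref_get(&hdev->ref)   (+1 per reader)
	 * hid_device_release()       -> kref_put(&hdev->ref, hiddev_free)
	 * hid_debug_events_release() -> kref_put(&hdev->ref, hiddev_free)
	 *
	 * hiddev_free() (hid_close_report() + kfree()) now runs only when the
	 * last reference drops, so a debugfs reader that outlives device
	 * removal no longer touches freed memory.
	 */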

--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -3033,6 +3033,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
 	u64 header;
 	enum i40iw_page_size page_size;
 
+	if (!info->total_len && !info->all_memory)
+		return -EINVAL;
+
 	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
 	cqp = dev->cqp;
 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -3091,6 +3094,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
 	u8 addr_type;
 	enum i40iw_page_size page_size;
 
+	if (!info->total_len && !info->all_memory)
+		return -EINVAL;
+
 	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
 	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
 				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))

--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -786,6 +786,7 @@ struct i40iw_allocate_stag_info {
 	bool use_hmc_fcn_index;
 	u8 hmc_fcn_index;
 	bool use_pf_rid;
+	bool all_memory;
 };
 
 struct i40iw_reg_ns_stag_info {
@@ -804,6 +805,7 @@ struct i40iw_reg_ns_stag_info {
 	bool use_hmc_fcn_index;
 	u8 hmc_fcn_index;
 	bool use_pf_rid;
+	bool all_memory;
 };
 
 struct i40iw_fast_reg_stag_info {

--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1494,7 +1494,8 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
 static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
 {
 	struct i40iw_allocate_stag_info *info;
-	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+	struct ib_pd *pd = iwmr->ibmr.pd;
+	struct i40iw_pd *iwpd = to_iwpd(pd);
 	enum i40iw_status_code status;
 	int err = 0;
 	struct i40iw_cqp_request *cqp_request;
@@ -1511,6 +1512,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
 	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
 	info->pd_id = iwpd->sc_pd.pd_id;
 	info->total_len = iwmr->length;
+	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
 	info->remote_access = true;
 	cqp_info->cqp_cmd = OP_ALLOC_STAG;
 	cqp_info->post_sq = 1;
@@ -1563,6 +1565,8 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 	iwmr->type = IW_MEMREG_TYPE_MEM;
 	palloc = &iwpbl->pble_alloc;
 	iwmr->page_cnt = max_num_sg;
+	/* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+	iwmr->length = max_num_sg * PAGE_SIZE;
 	mutex_lock(&iwdev->pbl_mutex);
 	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
 	mutex_unlock(&iwdev->pbl_mutex);
@@ -1659,7 +1663,8 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
 {
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
 	struct i40iw_reg_ns_stag_info *stag_info;
-	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+	struct ib_pd *pd = iwmr->ibmr.pd;
+	struct i40iw_pd *iwpd = to_iwpd(pd);
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	enum i40iw_status_code status;
 	int err = 0;
@@ -1679,6 +1684,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
 	stag_info->total_len = iwmr->length;
 	stag_info->access_rights = access;
 	stag_info->pd_id = iwpd->sc_pd.pd_id;
+	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
 	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
 	stag_info->page_size = iwmr->page_size;

--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -119,6 +119,7 @@ static const struct xpad_device {
 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
 	{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
 	{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
+	{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
 	{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
 	{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
 	{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
@@ -431,6 +432,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x0079),		/* GPD Win 2 Controller */
 	XPAD_XBOX360_VENDOR(0x03eb),		/* Wooting Keyboards (Legacy) */
 	XPAD_XBOX360_VENDOR(0x044f),		/* Thrustmaster X-Box 360 controllers */
+	XPAD_XBOXONE_VENDOR(0x03f0),		/* HP HyperX Xbox One Controllers */
 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft X-Box 360 controllers */
 	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x046d),		/* Logitech X-Box 360 style controllers */

--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -6325,7 +6325,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
 	ver = (dev->device >> 8) & 0xff;
 	if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
 	    ver != 0x4e && ver != 0x8a && ver != 0x98 &&
-	    ver != 0x9a && ver != 0xa7)
+	    ver != 0x9a && ver != 0xa7 && ver != 0x7d)
 		return;
 
 	if (risky_device(dev))

--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1342,7 +1342,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	memset(new_nodes, 0, sizeof(new_nodes));
 	closure_init_stack(&cl);
 
-	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
+	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
 		keys += r[nodes++].keys;
 
 	blocks = btree_default_blocks(b->c) * 2 / 3;
@@ -1489,7 +1489,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 	bch_keylist_free(&keylist);
 
 	for (i = 0; i < nodes; i++)
-		if (!IS_ERR(new_nodes[i])) {
+		if (!IS_ERR_OR_NULL(new_nodes[i])) {
 			btree_node_free(new_nodes[i]);
 			rw_unlock(true, new_nodes[i]);
 		}
@@ -1506,6 +1506,8 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
 		return 0;
 
 	n = btree_node_alloc_replacement(replace, NULL);
+	if (IS_ERR(n))
+		return 0;
 
 	/* recheck reserve after allocating replacement node */
 	if (btree_check_reserve(b, NULL)) {

--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1078,7 +1078,7 @@ SHOW(__bch_cache)
 			sum += INITIAL_PRIO - cached[i];
 
 		if (n)
-			do_div(sum, n);
+			sum = div64_u64(sum, n);
 
 		for (i = 0; i < ARRAY_SIZE(q); i++)
 			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
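
The do_div() to div64_u64() switch matters because the two helpers have
different contracts; a minimal illustration (generic kernel code, not bcache):

	#include <linux/math64.h>

	u64 sum = 1000;
	u32 rem;

	rem = do_div(sum, 3);     /* macro: divides sum in place by a 32-bit
	                           * divisor, returns the remainder */
	sum = div64_u64(sum, 3);  /* function: full u64/u64 division, the
	                           * quotient is the return value */

do_div() truncates its divisor to 32 bits, so a divisor wider than that (as n
can be here) may silently truncate, even down to zero; div64_u64() takes the
full 64-bit divisor.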

--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -921,24 +921,35 @@ static int bch_btre_dirty_init_thread_nr(void)
 void bch_sectors_dirty_init(struct bcache_device *d)
 {
 	int i;
+	struct btree *b = NULL;
 	struct bkey *k = NULL;
 	struct btree_iter iter;
 	struct sectors_dirty_init op;
 	struct cache_set *c = d->c;
 	struct bch_dirty_init_state state;
 
+retry_lock:
+	b = c->root;
+	rw_lock(0, b, b->level);
+	if (b != c->root) {
+		rw_unlock(0, b);
+		goto retry_lock;
+	}
+
 	/* Just count root keys if no leaf node */
-	rw_lock(0, c->root, c->root->level);
 	if (c->root->level == 0) {
 		bch_btree_op_init(&op.op, -1);
 		op.inode = d->id;
 		op.count = 0;
 
 		for_each_key_filter(&c->root->keys,
-				    k, &iter, bch_ptr_invalid)
+				    k, &iter, bch_ptr_invalid) {
+			if (KEY_INODE(k) != op.inode)
+				continue;
 			sectors_dirty_init_fn(&op.op, c->root, k);
+		}
 
-		rw_unlock(0, c->root);
+		rw_unlock(0, b);
 		return;
 	}
@@ -958,23 +969,24 @@ void bch_sectors_dirty_init(struct bcache_device *d)
 		if (atomic_read(&state.enough))
 			break;
 
+		atomic_inc(&state.started);
 		state.infos[i].state = &state;
 		state.infos[i].thread =
 			kthread_run(bch_dirty_init_thread, &state.infos[i],
 				    "bch_dirtcnt[%d]", i);
 		if (IS_ERR(state.infos[i].thread)) {
 			pr_err("fails to run thread bch_dirty_init[%d]\n", i);
+			atomic_dec(&state.started);
 			for (--i; i >= 0; i--)
 				kthread_stop(state.infos[i].thread);
 			goto out;
 		}
-		atomic_inc(&state.started);
 	}
 
 out:
 	/* Must wait for all threads to stop. */
 	wait_event(state.wait, atomic_read(&state.started) == 0);
-	rw_unlock(0, c->root);
+	rw_unlock(0, b);
 }
 
 void bch_cached_dev_writeback_init(struct cached_dev *dc)

--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -30,7 +30,7 @@ struct delay_c {
 	struct workqueue_struct *kdelayd_wq;
 	struct work_struct flush_expired_bios;
 	struct list_head delayed_bios;
-	atomic_t may_delay;
+	bool may_delay;
 
 	struct delay_class read;
 	struct delay_class write;
@@ -191,7 +191,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
 	INIT_LIST_HEAD(&dc->delayed_bios);
 	mutex_init(&dc->timer_lock);
-	atomic_set(&dc->may_delay, 1);
+	dc->may_delay = true;
 
 	dc->argc = argc;
 	ret = delay_class_ctr(ti, &dc->read, argv);
@@ -245,7 +245,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
 	struct dm_delay_info *delayed;
 	unsigned long expires = 0;
 
-	if (!c->delay || !atomic_read(&dc->may_delay))
+	if (!c->delay)
 		return DM_MAPIO_REMAPPED;
 
 	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@@ -254,6 +254,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
 	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
 
 	mutex_lock(&delayed_bios_lock);
+	if (unlikely(!dc->may_delay)) {
+		mutex_unlock(&delayed_bios_lock);
+		return DM_MAPIO_REMAPPED;
+	}
 	c->ops++;
 	list_add_tail(&delayed->list, &dc->delayed_bios);
 	mutex_unlock(&delayed_bios_lock);
@@ -267,7 +271,10 @@ static void delay_presuspend(struct dm_target *ti)
 {
 	struct delay_c *dc = ti->private;
 
-	atomic_set(&dc->may_delay, 0);
+	mutex_lock(&delayed_bios_lock);
+	dc->may_delay = false;
+	mutex_unlock(&delayed_bios_lock);
+
 	del_timer_sync(&dc->delay_timer);
 	flush_bios(flush_delayed_bios(dc, 1));
 }
@@ -276,7 +283,7 @@ static void delay_resume(struct dm_target *ti)
 {
 	struct delay_c *dc = ti->private;
 
-	atomic_set(&dc->may_delay, 1);
+	dc->may_delay = true;
 }
 
 static int delay_map(struct dm_target *ti, struct bio *bio)

--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
  */
 static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
 {
-	return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+	return (struct dm_verity_fec_io *)
+		((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
 }
 
 /*

--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -583,7 +583,9 @@ static void verity_end_io(struct bio *bio)
 	struct dm_verity_io *io = bio->bi_private;
 
 	if (bio->bi_status &&
-	    (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
+	    (!verity_fec_is_enabled(io->v) ||
+	     verity_is_system_shutting_down() ||
+	     (bio->bi_opf & REQ_RAHEAD))) {
 		verity_finish_io(io, bio->bi_status);
 		return;
 	}

--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -111,12 +111,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
 	return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
 }
 
-static inline u8 *verity_io_digest_end(struct dm_verity *v,
-				       struct dm_verity_io *io)
-{
-	return verity_io_want_digest(v, io) + v->digest_size;
-}
-
 extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
 			       struct bvec_iter *iter,
 			       int (*process)(struct dm_verity *v,

--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2647,7 +2647,7 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 		try_fmt->code = sensor->internal_csi_format->code;
 		try_fmt->field = V4L2_FIELD_NONE;
 
-		if (ssd != sensor->pixel_array)
+		if (ssd == sensor->pixel_array)
 			continue;
 
 		try_comp = v4l2_subdev_get_try_compose(sd, fh->pad, i);

--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -70,6 +70,9 @@
 #define PCI_DEVICE_ID_TI_J721E		0xb00d
 #define PCI_DEVICE_ID_TI_AM654		0xb00c
+#define PCI_DEVICE_ID_TI_J7200		0xb00f
+#define PCI_DEVICE_ID_TI_AM64		0xb010
+#define PCI_DEVICE_ID_TI_J721S2		0xb013
 #define PCI_DEVICE_ID_LS1088A		0x80c0
 
 #define is_am654_pci_dev(pdev)		\
@@ -1000,6 +1003,15 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
 	  .driver_data = (kernel_ulong_t)&j721e_data,
 	},
+	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
+	  .driver_data = (kernel_ulong_t)&j721e_data,
+	},
+	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
+	  .driver_data = (kernel_ulong_t)&j721e_data,
+	},
+	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
+	  .driver_data = (kernel_ulong_t)&j721e_data,
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1428,6 +1428,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 			blk_mq_requeue_request(req, true);
 		else
 			__blk_mq_end_request(req, BLK_STS_OK);
+	} else if (mq->in_recovery) {
+		blk_mq_requeue_request(req, true);
 	} else {
 		blk_mq_end_request(req, BLK_STS_OK);
 	}

--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -548,22 +548,25 @@ int mmc_cqe_recovery(struct mmc_host *host)
 	host->cqe_ops->cqe_recovery_start(host);
 
 	memset(&cmd, 0, sizeof(cmd));
-	cmd.opcode       = MMC_STOP_TRANSMISSION,
-	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC,
+	cmd.opcode       = MMC_STOP_TRANSMISSION;
+	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
-	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
-	mmc_wait_for_cmd(host, &cmd, 0);
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+	mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.opcode       = MMC_CMDQ_TASK_MGMT;
 	cmd.arg          = 1; /* Discard entire queue */
 	cmd.flags        = MMC_RSP_R1B | MMC_CMD_AC;
 	cmd.flags       &= ~MMC_RSP_CRC; /* Ignore CRC */
-	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
-	err = mmc_wait_for_cmd(host, &cmd, 0);
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 
 	host->cqe_ops->cqe_recovery_finish(host);
 
+	if (err)
+		err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
 	mmc_retune_release(host);
 
 	return err;

--- a/drivers/mmc/core/regulator.c
+++ b/drivers/mmc/core/regulator.c
@@ -271,3 +271,44 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
+
+/**
+ * mmc_regulator_enable_vqmmc - enable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. Enables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc)
+{
+	int ret = 0;
+
+	if (!IS_ERR(mmc->supply.vqmmc) && !mmc->vqmmc_enabled) {
+		ret = regulator_enable(mmc->supply.vqmmc);
+		if (ret < 0)
+			dev_err(mmc_dev(mmc), "enabling vqmmc regulator failed\n");
+		else
+			mmc->vqmmc_enabled = true;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_enable_vqmmc);
+
+/**
+ * mmc_regulator_disable_vqmmc - disable VQMMC regulator for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. Disables the regulator for vqmmc.
+ * Keeps track of the enable status for ensuring that calls to
+ * regulator_enable/disable are balanced.
+ */
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc)
+{
+	if (!IS_ERR(mmc->supply.vqmmc) && mmc->vqmmc_enabled) {
+		regulator_disable(mmc->supply.vqmmc);
+		mmc->vqmmc_enabled = false;
+	}
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_disable_vqmmc);
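
Both helpers pivot on the new mmc->vqmmc_enabled flag in struct mmc_host, which
is what keeps regulator_enable()/regulator_disable() balanced across repeated
calls. A sketch of the intended call pattern in a host driver's power handling
(the sdhci-sprd hunk further below is the real in-tree user; this fragment and
its function name are illustrative only):

	static void myhost_set_power(struct mmc_host *mmc, unsigned char mode)
	{
		switch (mode) {
		case MMC_POWER_ON:
			mmc_regulator_enable_vqmmc(mmc);  /* no-op if already on */
			break;
		case MMC_POWER_OFF:
			mmc_regulator_disable_vqmmc(mmc); /* no-op if never enabled */
			break;
		}
	}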

--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -930,8 +930,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
 	ret = cqhci_tasks_cleared(cq_host);
 
 	if (!ret)
-		pr_debug("%s: cqhci: Failed to clear tasks\n",
-			 mmc_hostname(mmc));
+		pr_warn("%s: cqhci: Failed to clear tasks\n",
+			mmc_hostname(mmc));
 
 	return ret;
 }
@@ -964,7 +964,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 	ret = cqhci_halted(cq_host);
 
 	if (!ret)
-		pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
 
 	return ret;
 }
@@ -972,10 +972,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
 /*
  * After halting we expect to be able to use the command line. We interpret the
  * failure to halt to mean the data lines might still be in use (and the upper
- * layers will need to send a STOP command), so we set the timeout based on a
- * generous command timeout.
+ * layers will need to send a STOP command), however failing to halt complicates
+ * the recovery, so set a timeout that would reasonably allow I/O to complete.
  */
-#define CQHCI_START_HALT_TIMEOUT	5
+#define CQHCI_START_HALT_TIMEOUT	500
 
 static void cqhci_recovery_start(struct mmc_host *mmc)
 {
@@ -1063,28 +1063,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
 
 	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
 
-	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-		ok = false;
-
 	/*
 	 * The specification contradicts itself, by saying that tasks cannot be
 	 * cleared if CQHCI does not halt, but if CQHCI does not halt, it should
 	 * be disabled/re-enabled, but not to disable before clearing tasks.
 	 * Have a go anyway.
 	 */
-	if (!ok) {
-		pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
-		cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
-		cqcfg &= ~CQHCI_ENABLE;
-		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-		cqcfg |= CQHCI_ENABLE;
-		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
-		/* Be sure that there are no tasks */
-		ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
-		if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
-			ok = false;
-		WARN_ON(!ok);
-	}
+	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+		ok = false;
+
+	/* Disable to make sure tasks really are cleared */
+	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+	cqcfg &= ~CQHCI_ENABLE;
+	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+	cqcfg |= CQHCI_ENABLE;
+	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+	if (!ok)
+		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
 
 	cqhci_recover_mrqs(cq_host);

--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -392,12 +392,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
 	mmc_request_done(host->mmc, mrq);
 }
 
+static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
+				 unsigned short vdd)
+{
+	struct mmc_host *mmc = host->mmc;
+
+	switch (mode) {
+	case MMC_POWER_OFF:
+		mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
+
+		mmc_regulator_disable_vqmmc(mmc);
+		break;
+	case MMC_POWER_ON:
+		mmc_regulator_enable_vqmmc(mmc);
+		break;
+	case MMC_POWER_UP:
+		mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
+		break;
+	}
+}
+
 static struct sdhci_ops sdhci_sprd_ops = {
 	.read_l = sdhci_sprd_readl,
 	.write_l = sdhci_sprd_writel,
 	.write_w = sdhci_sprd_writew,
 	.write_b = sdhci_sprd_writeb,
 	.set_clock = sdhci_sprd_set_clock,
+	.set_power = sdhci_sprd_set_power,
 	.get_max_clock = sdhci_sprd_get_max_clock,
 	.get_min_clock = sdhci_sprd_get_min_clock,
 	.set_bus_width = sdhci_set_bus_width,
@@ -663,6 +684,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
 	host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
 			 SDHCI_SUPPORT_DDR50);
 
+	ret = mmc_regulator_get_supply(host->mmc);
+	if (ret)
+		goto pm_runtime_disable;
+
 	ret = sdhci_setup_host(host);
 	if (ret)
 		goto pm_runtime_disable;

--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -682,10 +682,24 @@ static void xgbe_service(struct work_struct *work)
 static void xgbe_service_timer(struct timer_list *t)
 {
 	struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+	struct xgbe_channel *channel;
+	unsigned int i;
 
 	queue_work(pdata->dev_workqueue, &pdata->service_work);
 
 	mod_timer(&pdata->service_timer, jiffies + HZ);
+
+	if (!pdata->tx_usecs)
+		return;
+
+	for (i = 0; i < pdata->channel_count; i++) {
+		channel = pdata->channel[i];
+		if (!channel->tx_ring || channel->tx_timer_active)
+			break;
+		channel->tx_timer_active = 1;
+		mod_timer(&channel->tx_timer,
+			  jiffies + usecs_to_jiffies(pdata->tx_usecs));
+	}
 }
 
 static void xgbe_init_timers(struct xgbe_prv_data *pdata)

--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -314,10 +314,15 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
 	cmd->base.phy_address = pdata->phy.address;
 
-	cmd->base.autoneg = pdata->phy.autoneg;
-	cmd->base.speed = pdata->phy.speed;
-	cmd->base.duplex = pdata->phy.duplex;
+	if (netif_carrier_ok(netdev)) {
+		cmd->base.speed = pdata->phy.speed;
+		cmd->base.duplex = pdata->phy.duplex;
+	} else {
+		cmd->base.speed = SPEED_UNKNOWN;
+		cmd->base.duplex = DUPLEX_UNKNOWN;
+	}
 
+	cmd->base.autoneg = pdata->phy.autoneg;
 	cmd->base.port = PORT_NONE;
 
 	XGBE_LM_COPY(cmd, supported, lks, supported);

--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1178,7 +1178,19 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
 	if (pdata->phy.duplex != DUPLEX_FULL)
 		return -EINVAL;
 
-	xgbe_set_mode(pdata, mode);
+	/* Force the mode change for SFI in Fixed PHY config.
+	 * Fixed PHY configs needs PLL to be enabled while doing mode set.
+	 * When the SFP module isn't connected during boot, driver assumes
+	 * AN is ON and attempts autonegotiation. However, if the connected
+	 * SFP comes up in Fixed PHY config, the link will not come up as
+	 * PLL isn't enabled while the initial mode set command is issued.
+	 * So, force the mode change for SFI in Fixed PHY configuration to
+	 * fix link issues.
+	 */
+	if (mode == XGBE_MODE_SFI)
+		xgbe_change_mode(pdata, mode);
+	else
+		xgbe_set_mode(pdata, mode);
 
 	return 0;
 }

@@ -928,14 +928,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
 	dma_addr_t addr;
 
 	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
-
-	/* If there's enough room to align the FD address, do it.
-	 * It will help hardware optimize accesses.
-	 */
 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
 				  DPAA2_ETH_TX_BUF_ALIGN);
 	if (aligned_start >= skb->head)
 		buffer_start = aligned_start;
+	else
+		return -ENOMEM;
 
 	/* Store a backpointer to the skb at the beginning of the buffer
 	 * (in the private data area) such that we can release it
@@ -4337,6 +4335,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
 	if (err)
 		goto err_dl_port_add;
 
+	net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
 	err = register_netdev(net_dev);
 	if (err < 0) {
 		dev_err(dev, "register_netdev() failed\n");

@@ -664,7 +664,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
 static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
 {
-	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+	unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
 
 	/* If we don't have an skb (e.g. XDP buffer), we only need space for
 	 * the software annotation area

@@ -556,7 +556,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
 		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
 				TYPE_PFVF);
-		vfs -= 64;
+		if (intr)
+			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+		vfs = 64;
 	}
 
 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
@@ -564,7 +566,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
 	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
 
-	trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+	if (intr)
+		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
 
 	return IRQ_HANDLED;
 }

@@ -582,6 +582,8 @@ struct rtl8169_tc_offsets {
 enum rtl_flag {
 	RTL_FLAG_TASK_ENABLED = 0,
 	RTL_FLAG_TASK_RESET_PENDING,
+	RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
+	RTL_FLAG_TASK_TX_TIMEOUT,
 	RTL_FLAG_MAX
 };
 
@@ -4036,7 +4038,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+	rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
 }
 
 static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
@@ -4656,6 +4658,7 @@ static void rtl_task(struct work_struct *work)
 {
 	struct rtl8169_private *tp =
 		container_of(work, struct rtl8169_private, wk.work);
+	int ret;
 
 	rtnl_lock();
 
@@ -4663,9 +4666,21 @@ static void rtl_task(struct work_struct *work)
 	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
 		goto out_unlock;
 
+	if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
+		/* ASPM compatibility issues are a typical reason for tx timeouts */
+		ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
+						PCIE_LINK_STATE_L0S);
+		if (!ret)
+			netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
+		goto reset;
+	}
+
 	if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
+reset:
 		rtl_reset_work(tp);
 		netif_wake_queue(tp->dev);
+	} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
+		rtl_reset_work(tp);
 	}
 
 out_unlock:
 	rtnl_unlock();
@@ -4699,7 +4714,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
 	} else {
 		/* In few cases rx is broken after link-down otherwise */
 		if (rtl_is_8125(tp))
-			rtl_reset_work(tp);
+			rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
 		pm_runtime_idle(d);
 	}
 
@@ -4769,7 +4784,7 @@ static int rtl8169_close(struct net_device *dev)
 	rtl8169_down(tp);
 	rtl8169_rx_clear(tp);
 
-	cancel_work_sync(&tp->wk.work);
+	cancel_work(&tp->wk.work);
 
 	free_irq(pci_irq_vector(pdev, 0), tp);
 
@@ -5035,6 +5050,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_get_noresume(&pdev->dev);
 
+	cancel_work_sync(&tp->wk.work);
+
 	unregister_netdev(tp->dev);
 
 	if (r8168_check_dash(tp))

@@ -1383,13 +1383,13 @@ static int ravb_open(struct net_device *ndev)
 	if (priv->chip_id == RCAR_GEN2)
 		ravb_ptp_init(ndev, priv->pdev);
 
-	netif_tx_start_all_queues(ndev);
-
 	/* PHY control start */
 	error = ravb_phy_start(ndev);
 	if (error)
 		goto out_ptp_stop;
 
+	netif_tx_start_all_queues(ndev);
+
 	return 0;
 
 out_ptp_stop:
@@ -1438,6 +1438,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
 	struct net_device *ndev = priv->ndev;
 	int error;
 
+	if (!rtnl_trylock()) {
+		usleep_range(1000, 2000);
+		schedule_work(&priv->work);
+		return;
+	}
+
 	netif_tx_stop_all_queues(ndev);
 
 	/* Stop PTP Clock driver */
@@ -1470,7 +1476,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
 			 */
 			netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
 				   __func__, error);
-			return;
+			goto out_unlock;
 		}
 		ravb_emac_init(ndev);
 
@@ -1480,6 +1486,9 @@ static void ravb_tx_timeout_work(struct work_struct *work)
 		ravb_ptp_init(ndev, priv->pdev);
 
 	netif_tx_start_all_queues(ndev);
+
+out_unlock:
+	rtnl_unlock();
 }
 
 /* Packet transmit function for Ethernet AVB */
@@ -2063,7 +2072,9 @@ static int ravb_probe(struct platform_device *pdev)
 	ndev->hw_features = NETIF_F_RXCSUM;
 
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
+	error = pm_runtime_resume_and_get(&pdev->dev);
+	if (error < 0)
+		goto out_rpm_disable;
 
 	/* The Ether-specific entries in the device structure. */
 	ndev->base_addr = res->start;
@@ -2238,6 +2249,7 @@ static int ravb_probe(struct platform_device *pdev)
 	free_netdev(ndev);
 
 	pm_runtime_put(&pdev->dev);
+out_rpm_disable:
 	pm_runtime_disable(&pdev->dev);
 	return error;
 }

@@ -177,8 +177,10 @@
 #define MMC_XGMAC_RX_DISCARD_OCT_GB	0x1b4
 #define MMC_XGMAC_RX_ALIGN_ERR_PKT	0x1bc
 
+#define MMC_XGMAC_TX_FPE_INTR_MASK	0x204
 #define MMC_XGMAC_TX_FPE_FRAG		0x208
 #define MMC_XGMAC_TX_HOLD_REQ		0x20c
+#define MMC_XGMAC_RX_FPE_INTR_MASK	0x224
 #define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR	0x228
 #define MMC_XGMAC_RX_PKT_SMD_ERR	0x22c
 #define MMC_XGMAC_RX_PKT_ASSEMBLY_OK	0x230
@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
 {
 	writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
 	writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
 	writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
 }

@@ -763,7 +763,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
 			/* Tx Full Checksum Offload Enabled */
 			cur_p->app0 |= 2;
-		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
+		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
 			csum_start_off = skb_transport_offset(skb);
 			csum_index_off = csum_start_off + skb->csum_offset;
 			/* Tx Partial Checksum Offload Enabled */

@@ -2217,9 +2217,6 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
 		goto upper_link_failed;
 	}
 
-	/* set slave flag before open to prevent IPv6 addrconf */
-	vf_netdev->flags |= IFF_SLAVE;
-
 	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
 
 	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2317,16 +2314,18 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
 			return hv_get_drvdata(ndev_ctx->device_ctx);
 	}
 
-	/* Fallback path to check synthetic vf with
-	 * help of mac addr
+	/* Fallback path to check synthetic vf with help of mac addr.
+	 * Because this function can be called before vf_netdev is
+	 * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
+	 * from dev_addr, also try to match to its dev_addr.
+	 * Note: On Hyper-V and Azure, it's not possible to set a MAC address
+	 * on a VF that matches to the MAC of a unrelated NETVSC device.
 	 */
 	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
 		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
-		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
-			netdev_notice(vf_netdev,
-				      "falling back to mac addr based matching\n");
+		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
+		    ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
 			return ndev;
-		}
 	}
 
 	netdev_notice(vf_netdev,
@@ -2334,6 +2333,19 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
 	return NULL;
 }
 
+static int netvsc_prepare_bonding(struct net_device *vf_netdev)
+{
+	struct net_device *ndev;
+
+	ndev = get_netvsc_byslot(vf_netdev);
+	if (!ndev)
+		return NOTIFY_DONE;
+
+	/* set slave flag before open to prevent IPv6 addrconf */
+	vf_netdev->flags |= IFF_SLAVE;
+	return NOTIFY_DONE;
+}
+
 static int netvsc_register_vf(struct net_device *vf_netdev)
 {
 	struct net_device_context *net_device_ctx;
@@ -2516,6 +2528,21 @@ static int netvsc_probe(struct hv_device *dev,
 		goto devinfo_failed;
 	}
 
+	/* We must get rtnl lock before scheduling nvdev->subchan_work,
+	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+	 * all subchannels to show up, but that may not happen because
+	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+	 * -> ... -> device_add() -> ... -> __device_attach() can't get
+	 * the device lock, so all the subchannels can't be processed --
+	 * finally netvsc_subchan_work() hangs forever.
+	 *
+	 * The rtnl lock also needs to be held before rndis_filter_device_add()
+	 * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
+	 * VF NIC offering and registering. If VF NIC finished register_netdev()
+	 * earlier it may cause name based config failure.
+	 */
+	rtnl_lock();
+
 	nvdev = rndis_filter_device_add(dev, device_info);
 	if (IS_ERR(nvdev)) {
 		ret = PTR_ERR(nvdev);
@@ -2525,16 +2552,6 @@ static int netvsc_probe(struct hv_device *dev,
 
 	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
 
-	/* We must get rtnl lock before scheduling nvdev->subchan_work,
-	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
-	 * all subchannels to show up, but that may not happen because
-	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
-	 * -> ... -> device_add() -> ... -> __device_attach() can't get
-	 * the device lock, so all the subchannels can't be processed --
-	 * finally netvsc_subchan_work() hangs forever.
-	 */
-	rtnl_lock();
-
 	if (nvdev->num_chn > 1)
 		schedule_work(&nvdev->subchan_work);
 
@@ -2568,9 +2585,9 @@ static int netvsc_probe(struct hv_device *dev,
 	return 0;
 
 register_failed:
-	rtnl_unlock();
 	rndis_filter_device_remove(dev, nvdev);
 rndis_failed:
+	rtnl_unlock();
 	netvsc_devinfo_put(device_info);
 devinfo_failed:
 	free_percpu(net_device_ctx->vf_stats);
@@ -2737,6 +2754,8 @@ static int netvsc_netdev_event(struct notifier_block *this,
 		return NOTIFY_DONE;
 
 	switch (event) {
+	case NETDEV_POST_INIT:
+		return netvsc_prepare_bonding(event_dev);
 	case NETDEV_REGISTER:
 		return netvsc_register_vf(event_dev);
 	case NETDEV_UNREGISTER:
@@ -2771,12 +2790,17 @@ static int __init netvsc_drv_init(void)
 	}
 	netvsc_ring_bytes = ring_size * PAGE_SIZE;
 
+	register_netdevice_notifier(&netvsc_netdev_notifier);
+
 	ret = vmbus_driver_register(&netvsc_drv);
 	if (ret)
-		return ret;
+		goto err_vmbus_reg;
 
-	register_netdevice_notifier(&netvsc_netdev_notifier);
 	return 0;
+
+err_vmbus_reg:
+	unregister_netdevice_notifier(&netvsc_netdev_notifier);
+	return ret;
 }
 
 MODULE_LICENSE("GPL");

@@ -1700,11 +1700,11 @@ static int ax88179_reset(struct usbnet *dev)
 	*tmp16 = AX_PHYPWR_RSTCTL_IPRL;
 	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_PHYPWR_RSTCTL, 2, 2, tmp16);
-	msleep(200);
+	msleep(500);
 
 	*tmp = AX_CLK_SELECT_ACS | AX_CLK_SELECT_BCS;
 	ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_CLK_SELECT, 1, 1, tmp);
-	msleep(100);
+	msleep(200);
 
 	/* Ethernet PHY Auto Detach*/
 	ax88179_auto_detach(dev, 0);

@@ -193,7 +193,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 		 */
 		while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
 			dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
-			++dev->stats.tx_dropped;
+			DEV_STATS_INC(dev, tx_dropped);
 		}
 		skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
 		spin_unlock_bh(&peer->staged_packet_queue.lock);
@@ -211,7 +211,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 	else if (skb->protocol == htons(ETH_P_IPV6))
 		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
 err:
-	++dev->stats.tx_errors;
+	DEV_STATS_INC(dev, tx_errors);
 	kfree_skb(skb);
 	return ret;
 }

@@ -423,20 +423,20 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
 	net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
 				dev->name, skb, peer->internal_id,
 				&peer->endpoint.addr);
-	++dev->stats.rx_errors;
-	++dev->stats.rx_frame_errors;
+	DEV_STATS_INC(dev, rx_errors);
+	DEV_STATS_INC(dev, rx_frame_errors);
 	goto packet_processed;
 dishonest_packet_type:
 	net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
 			    dev->name, peer->internal_id, &peer->endpoint.addr);
-	++dev->stats.rx_errors;
-	++dev->stats.rx_frame_errors;
+	DEV_STATS_INC(dev, rx_errors);
+	DEV_STATS_INC(dev, rx_frame_errors);
 	goto packet_processed;
 dishonest_packet_size:
 	net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
 			    dev->name, peer->internal_id, &peer->endpoint.addr);
-	++dev->stats.rx_errors;
-	++dev->stats.rx_length_errors;
+	DEV_STATS_INC(dev, rx_errors);
+	DEV_STATS_INC(dev, rx_length_errors);
 	goto packet_processed;
 packet_processed:
 	dev_kfree_skb(skb);

@@ -333,7 +333,8 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
 void wg_packet_purge_staged_packets(struct wg_peer *peer)
 {
 	spin_lock_bh(&peer->staged_packet_queue.lock);
-	peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+	DEV_STATS_ADD(peer->device->dev, tx_dropped,
+		      peer->staged_packet_queue.qlen);
 	__skb_queue_purge(&peer->staged_packet_queue);
 	spin_unlock_bh(&peer->staged_packet_queue.lock);
 }
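
A side note on the helper these three WireGuard hunks switch to: DEV_STATS_INC() and DEV_STATS_ADD() bump the same struct net_device counters through atomic_long_t aliases, so increments from concurrent contexts cannot be lost the way the plain ++ and += could. A sketch of the definitions, quoted from memory of include/linux/netdevice.h and therefore illustrative rather than authoritative for this branch:

	/* Each net_device_stats field has an atomic_long_t alias __<field>;
	 * these helpers update it locklessly from any context. */
	#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
	#define DEV_STATS_ADD(DEV, FIELD, VAL) \
			atomic_long_add((VAL), &(DEV)->stats.__##FIELD)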

@@ -1189,19 +1189,19 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
 	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
 }
 
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
-		struct nvmet_req *req, struct nvmet_ctrl **ret)
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+				       const char *hostnqn, u16 cntlid,
+				       struct nvmet_req *req)
 {
+	struct nvmet_ctrl *ctrl = NULL;
 	struct nvmet_subsys *subsys;
-	struct nvmet_ctrl *ctrl;
-	u16 status = 0;
 
 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
-		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+		goto out;
 	}
 
 	mutex_lock(&subsys->lock);
@@ -1214,20 +1214,21 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 			if (!kref_get_unless_zero(&ctrl->ref))
 				continue;
 
-			*ret = ctrl;
-			goto out;
+			/* ctrl found */
+			goto found;
 		}
 	}
 
+	ctrl = NULL; /* ctrl not found */
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
-	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
-out:
+found:
 	mutex_unlock(&subsys->lock);
 	nvmet_subsys_put(subsys);
-	return status;
+out:
+	return ctrl;
 }
 
 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)

@@ -189,6 +189,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		goto out;
 	}
 
+	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
 	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
 				  le32_to_cpu(c->kato), &ctrl);
 	if (status) {
@@ -223,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 {
 	struct nvmf_connect_command *c = &req->cmd->connect;
 	struct nvmf_connect_data *d;
-	struct nvmet_ctrl *ctrl = NULL;
+	struct nvmet_ctrl *ctrl;
 	u16 qid = le16_to_cpu(c->qid);
 	u16 status = 0;
 
@@ -250,11 +252,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 		goto out;
 	}
 
-	status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
-				     le16_to_cpu(d->cntlid),
-				     req, &ctrl);
-	if (status)
+	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
+	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+				   le16_to_cpu(d->cntlid), req);
+	if (!ctrl) {
+		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 		goto out;
+	}
 
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);

@@ -430,8 +430,9 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
-u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
-		struct nvmet_req *req, struct nvmet_ctrl **ret);
+struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
+				       const char *hostnqn, u16 cntlid,
+				       struct nvmet_req *req);
 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

@@ -865,8 +865,8 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
 	return ks_pcie_handle_error_irq(ks_pcie);
 }
 
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
-					struct platform_device *pdev)
+static int ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+				 struct platform_device *pdev)
 {
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -978,8 +978,8 @@ static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
 	.get_features = &ks_pcie_am654_get_features,
 };
 
-static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
-				      struct platform_device *pdev)
+static int ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie,
+			       struct platform_device *pdev)
 {
 	int ret;
 	struct dw_pcie_ep *ep;

@@ -1239,17 +1239,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
 static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
 {
 	struct pinctrl_setting *setting, *setting2;
-	struct pinctrl_state *old_state = p->state;
+	struct pinctrl_state *old_state = READ_ONCE(p->state);
 	int ret;
 
-	if (p->state) {
+	if (old_state) {
 		/*
 		 * For each pinmux setting in the old state, forget SW's record
 		 * of mux owner for that pingroup. Any pingroups which are
 		 * still owned by the new state will be re-acquired by the call
 		 * to pinmux_enable_setting() in the loop below.
 		 */
-		list_for_each_entry(setting, &p->state->settings, node) {
+		list_for_each_entry(setting, &old_state->settings, node) {
 			if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
 				continue;
 			pinmux_disable_setting(setting);
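
For readers unfamiliar with the idiom in the pinctrl hunk above: READ_ONCE() forces exactly one load of p->state, and every later use goes through the local old_state, so a concurrent writer can no longer make the NULL check and the list walk observe two different pointers. A minimal self-contained sketch of the pattern; the demo_* names are invented for illustration:

	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <linux/list.h>

	struct demo_state { struct list_head settings; };
	struct demo_ctx { struct demo_state *state; };

	static size_t demo_count_settings(struct demo_ctx *c)
	{
		/* Single load: the check and the walk use the same snapshot,
		 * even if another thread updates c->state meanwhile. */
		struct demo_state *old_state = READ_ONCE(c->state);
		struct list_head *pos;
		size_t n = 0;

		if (!old_state)
			return 0;
		list_for_each(pos, &old_state->settings)
			n++;
		return n;
	}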

@@ -737,18 +737,20 @@ static void dasd_profile_start(struct dasd_block *block,
 	 * we count each request only once.
 	 */
 	device = cqr->startdev;
-	if (device->profile.data) {
-		counter = 1; /* request is not yet queued on the start device */
-		list_for_each(l, &device->ccw_queue)
-			if (++counter >= 31)
-				break;
-	}
+	if (!device->profile.data)
+		return;
+
+	spin_lock(get_ccwdev_lock(device->cdev));
+	counter = 1; /* request is not yet queued on the start device */
+	list_for_each(l, &device->ccw_queue)
+		if (++counter >= 31)
+			break;
+	spin_unlock(get_ccwdev_lock(device->cdev));
+
 	spin_lock(&device->profile.lock);
-	if (device->profile.data) {
-		device->profile.data->dasd_io_nr_req[counter]++;
-		if (rq_data_dir(req) == READ)
-			device->profile.data->dasd_read_nr_req[counter]++;
-	}
+	device->profile.data->dasd_io_nr_req[counter]++;
+	if (rq_data_dir(req) == READ)
+		device->profile.data->dasd_read_nr_req[counter]++;
 	spin_unlock(&device->profile.lock);
 }

@@ -839,7 +839,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		uint16_t hwq;
 		struct qla_qpair *qpair = NULL;
 
-		tag = blk_mq_unique_tag(cmd->request);
+		tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
 		hwq = blk_mq_unique_tag_to_hwq(tag);
 		qpair = ha->queue_pair_map[hwq];
 
@@ -1714,8 +1714,16 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
 		}
 
 		spin_lock_irqsave(qp->qp_lock_ptr, *flags);
-		if (ret_cmd && blk_mq_request_started(cmd->request))
-			sp->done(sp, res);
+		switch (sp->type) {
+		case SRB_SCSI_CMD:
+			if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
+				sp->done(sp, res);
+			break;
+		default:
+			if (ret_cmd)
+				sp->done(sp, res);
+			break;
+		}
 	} else {
 		sp->done(sp, res);
 	}
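
A note on the accessor the qla2xxx hunks adopt: scsi_cmd_to_rq() hides the command-to-request mapping behind a helper instead of dereferencing cmd->request directly. Upstream it is defined as below; the 5.10 stable backport may instead be a thin wrapper that still returns scmd->request, so treat the body as a sketch rather than this branch's exact code:

	/* include/scsi/scsi_cmnd.h (upstream form): the request and the SCSI
	 * command are allocated together, so the conversion is pointer math. */
	static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
	{
		return blk_mq_rq_from_pdu(scmd);
	}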

@@ -61,7 +61,7 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
 	desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
 	if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
 	    size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
-		dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
+		dev_notice(ddev, "Invalid SuperSpeedPlus isoc endpoint companion"
 			 "for config %d interface %d altsetting %d ep %d.\n",
 			 cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		return;
@@ -83,7 +83,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
 	    size < USB_DT_SS_EP_COMP_SIZE) {
-		dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
+		dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
 			 " interface %d altsetting %d ep %d: "
 			 "using minimum values\n",
 			 cfgno, inum, asnum, ep->desc.bEndpointAddress);
@@ -109,13 +109,13 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	/* Check the various values */
 	if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) {
-		dev_warn(ddev, "Control endpoint with bMaxBurst = %d in "
+		dev_notice(ddev, "Control endpoint with bMaxBurst = %d in "
 			 "config %d interface %d altsetting %d ep %d: "
 			 "setting to zero\n", desc->bMaxBurst,
 			 cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bMaxBurst = 0;
 	} else if (desc->bMaxBurst > 15) {
-		dev_warn(ddev, "Endpoint with bMaxBurst = %d in "
+		dev_notice(ddev, "Endpoint with bMaxBurst = %d in "
 			 "config %d interface %d altsetting %d ep %d: "
 			 "setting to 15\n", desc->bMaxBurst,
 			 cfgno, inum, asnum, ep->desc.bEndpointAddress);
@@ -125,7 +125,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	if ((usb_endpoint_xfer_control(&ep->desc) ||
 	     usb_endpoint_xfer_int(&ep->desc)) &&
 	    desc->bmAttributes != 0) {
-		dev_warn(ddev, "%s endpoint with bmAttributes = %d in "
+		dev_notice(ddev, "%s endpoint with bmAttributes = %d in "
 			 "config %d interface %d altsetting %d ep %d: "
 			 "setting to zero\n",
 			 usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk",
@@ -134,7 +134,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 		ep->ss_ep_comp.bmAttributes = 0;
 	} else if (usb_endpoint_xfer_bulk(&ep->desc) &&
 		   desc->bmAttributes > 16) {
-		dev_warn(ddev, "Bulk endpoint with more than 65536 streams in "
+		dev_notice(ddev, "Bulk endpoint with more than 65536 streams in "
 			 "config %d interface %d altsetting %d ep %d: "
 			 "setting to max\n",
 			 cfgno, inum, asnum, ep->desc.bEndpointAddress);
@@ -142,7 +142,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
 		   !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) &&
 		   USB_SS_MULT(desc->bmAttributes) > 3) {
-		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+		dev_notice(ddev, "Isoc endpoint has Mult of %d in "
 			 "config %d interface %d altsetting %d ep %d: "
 			 "setting to 3\n",
 			 USB_SS_MULT(desc->bmAttributes),
@@ -160,7 +160,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	else
 		max_tx = 999999;
 	if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
-		dev_warn(ddev, "%s endpoint with wBytesPerInterval of %d in "
+		dev_notice(ddev, "%s endpoint with wBytesPerInterval of %d in "
 			 "config %d interface %d altsetting %d ep %d: "
 			 "setting to %d\n",
 			 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
@@ -273,7 +273,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	else if (d->bLength >= USB_DT_ENDPOINT_SIZE)
 		n = USB_DT_ENDPOINT_SIZE;
 	else {
-		dev_warn(ddev, "config %d interface %d altsetting %d has an "
+		dev_notice(ddev, "config %d interface %d altsetting %d has an "
 			 "invalid endpoint descriptor of length %d, skipping\n",
 			 cfgno, inum, asnum, d->bLength);
 		goto skip_to_next_endpoint_or_interface_descriptor;
@@ -281,7 +281,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	i = d->bEndpointAddress & ~USB_ENDPOINT_DIR_MASK;
 	if (i >= 16 || i == 0) {
-		dev_warn(ddev, "config %d interface %d altsetting %d has an "
+		dev_notice(ddev, "config %d interface %d altsetting %d has an "
 			 "invalid endpoint with address 0x%X, skipping\n",
 			 cfgno, inum, asnum, d->bEndpointAddress);
 		goto skip_to_next_endpoint_or_interface_descriptor;
@@ -293,7 +293,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	/* Check for duplicate endpoint addresses */
 	if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
-		dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+		dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
 			 cfgno, inum, asnum, d->bEndpointAddress);
 		goto skip_to_next_endpoint_or_interface_descriptor;
 	}
@@ -301,7 +301,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	/* Ignore some endpoints */
 	if (udev->quirks & USB_QUIRK_ENDPOINT_IGNORE) {
 		if (usb_endpoint_is_ignored(udev, ifp, d)) {
-			dev_warn(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
+			dev_notice(ddev, "config %d interface %d altsetting %d has an ignored endpoint with address 0x%X, skipping\n",
 				 cfgno, inum, asnum,
 				 d->bEndpointAddress);
 			goto skip_to_next_endpoint_or_interface_descriptor;
@@ -378,7 +378,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 		}
 	}
 	if (d->bInterval < i || d->bInterval > j) {
-		dev_warn(ddev, "config %d interface %d altsetting %d "
+		dev_notice(ddev, "config %d interface %d altsetting %d "
 			 "endpoint 0x%X has an invalid bInterval %d, "
 			 "changing to %d\n",
 			 cfgno, inum, asnum,
@@ -391,7 +391,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	 * them usable, we will try treating them as Interrupt endpoints.
 	 */
 	if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) {
-		dev_warn(ddev, "config %d interface %d altsetting %d "
+		dev_notice(ddev, "config %d interface %d altsetting %d "
 			 "endpoint 0x%X is Bulk; changing to Interrupt\n",
 			 cfgno, inum, asnum, d->bEndpointAddress);
 		endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT;
@@ -408,7 +408,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	 */
 	maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
 	if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
-		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
+		dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
 			 cfgno, inum, asnum, d->bEndpointAddress);
 	}
@@ -439,7 +439,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)];
 	if (maxp > j) {
-		dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
+		dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n",
 			 cfgno, inum, asnum, d->bEndpointAddress, maxp, j);
 		maxp = j;
 		endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp);
@@ -452,7 +452,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	 */
 	if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) {
 		if (maxp != 512)
-			dev_warn(ddev, "config %d interface %d altsetting %d "
+			dev_notice(ddev, "config %d interface %d altsetting %d "
 				 "bulk endpoint 0x%X has invalid maxpacket %d\n",
 				 cfgno, inum, asnum, d->bEndpointAddress,
 				 maxp);
@@ -533,7 +533,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
 	     i < intfc->num_altsetting;
 	     (++i, ++alt)) {
 		if (alt->desc.bAlternateSetting == asnum) {
-			dev_warn(ddev, "Duplicate descriptor for config %d "
+			dev_notice(ddev, "Duplicate descriptor for config %d "
 				 "interface %d altsetting %d, skipping\n",
 				 cfgno, inum, asnum);
 			goto skip_to_next_interface_descriptor;
@@ -559,7 +559,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
 	num_ep = num_ep_orig = alt->desc.bNumEndpoints;
 	alt->desc.bNumEndpoints = 0;	/* Use as a counter */
 	if (num_ep > USB_MAXENDPOINTS) {
-		dev_warn(ddev, "too many endpoints for config %d interface %d "
+		dev_notice(ddev, "too many endpoints for config %d interface %d "
 			 "altsetting %d: %d, using maximum allowed: %d\n",
 			 cfgno, inum, asnum, num_ep, USB_MAXENDPOINTS);
 		num_ep = USB_MAXENDPOINTS;
@@ -590,7 +590,7 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
 	}
 
 	if (n != num_ep_orig)
-		dev_warn(ddev, "config %d interface %d altsetting %d has %d "
+		dev_notice(ddev, "config %d interface %d altsetting %d has %d "
 			 "endpoint descriptor%s, different from the interface "
 			 "descriptor's value: %d\n",
 			 cfgno, inum, asnum, n, plural(n), num_ep_orig);
@@ -625,7 +625,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
 	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
 	    config->desc.bLength > size) {
-		dev_err(ddev, "invalid descriptor for config index %d: "
+		dev_notice(ddev, "invalid descriptor for config index %d: "
 			"type = 0x%X, length = %d\n", cfgidx,
 			config->desc.bDescriptorType, config->desc.bLength);
 		return -EINVAL;
@@ -636,7 +636,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	size -= config->desc.bLength;
 
 	if (nintf > USB_MAXINTERFACES) {
-		dev_warn(ddev, "config %d has too many interfaces: %d, "
+		dev_notice(ddev, "config %d has too many interfaces: %d, "
 			 "using maximum allowed: %d\n",
 			 cfgno, nintf, USB_MAXINTERFACES);
 		nintf = USB_MAXINTERFACES;
@@ -650,7 +650,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	     (buffer2 += header->bLength, size2 -= header->bLength)) {
 
 		if (size2 < sizeof(struct usb_descriptor_header)) {
-			dev_warn(ddev, "config %d descriptor has %d excess "
+			dev_notice(ddev, "config %d descriptor has %d excess "
 				 "byte%s, ignoring\n",
 				 cfgno, size2, plural(size2));
 			break;
@@ -658,7 +658,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 		header = (struct usb_descriptor_header *) buffer2;
 		if ((header->bLength > size2) || (header->bLength < 2)) {
-			dev_warn(ddev, "config %d has an invalid descriptor "
+			dev_notice(ddev, "config %d has an invalid descriptor "
 				 "of length %d, skipping remainder of the config\n",
 				 cfgno, header->bLength);
 			break;
@@ -670,7 +670,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 			d = (struct usb_interface_descriptor *) header;
 			if (d->bLength < USB_DT_INTERFACE_SIZE) {
-				dev_warn(ddev, "config %d has an invalid "
+				dev_notice(ddev, "config %d has an invalid "
 					 "interface descriptor of length %d, "
 					 "skipping\n", cfgno, d->bLength);
 				continue;
@@ -680,7 +680,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 			if ((dev->quirks & USB_QUIRK_HONOR_BNUMINTERFACES) &&
 			    n >= nintf_orig) {
-				dev_warn(ddev, "config %d has more interface "
+				dev_notice(ddev, "config %d has more interface "
 					 "descriptors, than it declares in "
 					 "bNumInterfaces, ignoring interface "
 					 "number: %d\n", cfgno, inum);
@@ -688,7 +688,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 			}
 
 			if (inum >= nintf_orig)
-				dev_warn(ddev, "config %d has an invalid "
+				dev_notice(ddev, "config %d has an invalid "
 					 "interface number: %d but max is %d\n",
 					 cfgno, inum, nintf_orig - 1);
@@ -713,14 +713,14 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 			d = (struct usb_interface_assoc_descriptor *)header;
 			if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
-				dev_warn(ddev,
+				dev_notice(ddev,
 					 "config %d has an invalid interface association descriptor of length %d, skipping\n",
 					 cfgno, d->bLength);
 				continue;
 			}
 
 			if (iad_num == USB_MAXIADS) {
-				dev_warn(ddev, "found more Interface "
+				dev_notice(ddev, "found more Interface "
 					 "Association Descriptors "
 					 "than allocated for in "
 					 "configuration %d\n", cfgno);
@@ -731,7 +731,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 		} else if (header->bDescriptorType == USB_DT_DEVICE ||
 			   header->bDescriptorType == USB_DT_CONFIG)
-			dev_warn(ddev, "config %d contains an unexpected "
+			dev_notice(ddev, "config %d contains an unexpected "
 				 "descriptor of type 0x%X, skipping\n",
 				 cfgno, header->bDescriptorType);
 
@@ -740,11 +740,11 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0);
 
 	if (n != nintf)
-		dev_warn(ddev, "config %d has %d interface%s, different from "
+		dev_notice(ddev, "config %d has %d interface%s, different from "
 			 "the descriptor's value: %d\n",
 			 cfgno, n, plural(n), nintf_orig);
 	else if (n == 0)
-		dev_warn(ddev, "config %d has no interfaces?\n", cfgno);
+		dev_notice(ddev, "config %d has no interfaces?\n", cfgno);
 	config->desc.bNumInterfaces = nintf = n;
 
 	/* Check for missing interface numbers */
@@ -754,7 +754,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 			break;
 		}
 		if (j >= nintf)
-			dev_warn(ddev, "config %d has no interface number "
+			dev_notice(ddev, "config %d has no interface number "
 				 "%d\n", cfgno, i);
 	}
 
@@ -762,7 +762,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	for (i = 0; i < nintf; ++i) {
 		j = nalts[i];
 		if (j > USB_MAXALTSETTING) {
-			dev_warn(ddev, "too many alternate settings for "
+			dev_notice(ddev, "too many alternate settings for "
 				 "config %d interface %d: %d, "
 				 "using maximum allowed: %d\n",
 				 cfgno, inums[i], j, USB_MAXALTSETTING);
@@ -811,7 +811,7 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 				break;
 			}
 			if (n >= intfc->num_altsetting)
-				dev_warn(ddev, "config %d interface %d has no "
+				dev_notice(ddev, "config %d interface %d has no "
 					 "altsetting %d\n", cfgno, inums[i], j);
 		}
 	}
@@ -868,7 +868,7 @@ int usb_get_configuration(struct usb_device *dev)
 	int result;
 
 	if (ncfg > USB_MAXCONFIG) {
-		dev_warn(ddev, "too many configurations: %d, "
+		dev_notice(ddev, "too many configurations: %d, "
 			 "using maximum allowed: %d\n", ncfg, USB_MAXCONFIG);
 		dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
 	}
@@ -902,7 +902,7 @@ int usb_get_configuration(struct usb_device *dev)
 				"descriptor/%s: %d\n", cfgno, "start", result);
 			if (result != -EPIPE)
 				goto err;
-			dev_err(ddev, "chopping to %d config(s)\n", cfgno);
+			dev_notice(ddev, "chopping to %d config(s)\n", cfgno);
 			dev->descriptor.bNumConfigurations = cfgno;
 			break;
 		} else if (result < 4) {
@@ -934,7 +934,7 @@ int usb_get_configuration(struct usb_device *dev)
 		goto err;
 	}
 
 	if (result < length) {
-		dev_warn(ddev, "config index %d descriptor too short "
+		dev_notice(ddev, "config index %d descriptor too short "
 			 "(expected %i, got %i)\n", cfgno, length, result);
 		length = result;
 	}
@@ -993,7 +993,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 	/* Get BOS descriptor */
 	ret = usb_get_descriptor(dev, USB_DT_BOS, 0, bos, USB_DT_BOS_SIZE);
 	if (ret < USB_DT_BOS_SIZE || bos->bLength < USB_DT_BOS_SIZE) {
-		dev_err(ddev, "unable to get BOS descriptor or descriptor too short\n");
+		dev_notice(ddev, "unable to get BOS descriptor or descriptor too short\n");
 		if (ret >= 0)
 			ret = -ENOMSG;
 		kfree(bos);
@@ -1021,7 +1021,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 	ret = usb_get_descriptor(dev, USB_DT_BOS, 0, buffer, total_len);
 	if (ret < total_len) {
-		dev_err(ddev, "unable to get BOS descriptor set\n");
+		dev_notice(ddev, "unable to get BOS descriptor set\n");
 		if (ret >= 0)
 			ret = -ENOMSG;
 		goto err;
@@ -1046,8 +1046,8 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 		}
 
 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
-			dev_warn(ddev, "descriptor type invalid, skip\n");
-			continue;
+			dev_notice(ddev, "descriptor type invalid, skip\n");
+			goto skip_to_next_descriptor;
 		}
 
 		switch (cap_type) {
@@ -1080,6 +1080,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 			break;
 		}
 
+skip_to_next_descriptor:
 		total_len -= length;
 		buffer += length;
 	}

@@ -2449,6 +2449,8 @@ static void set_usb_port_removable(struct usb_device *udev)
 	u16 wHubCharacteristics;
 	bool removable = true;
 
+	dev_set_removable(&udev->dev, DEVICE_REMOVABLE_UNKNOWN);
+
 	if (!hdev)
 		return;
 
@@ -2460,11 +2462,11 @@ static void set_usb_port_removable(struct usb_device *udev)
 	 */
 	switch (hub->ports[udev->portnum - 1]->connect_type) {
 	case USB_PORT_CONNECT_TYPE_HOT_PLUG:
-		udev->removable = USB_DEVICE_REMOVABLE;
+		dev_set_removable(&udev->dev, DEVICE_REMOVABLE);
 		return;
 	case USB_PORT_CONNECT_TYPE_HARD_WIRED:
 	case USB_PORT_NOT_USED:
-		udev->removable = USB_DEVICE_FIXED;
+		dev_set_removable(&udev->dev, DEVICE_FIXED);
 		return;
 	default:
 		break;
@@ -2489,9 +2491,9 @@ static void set_usb_port_removable(struct usb_device *udev)
 	}
 
 	if (removable)
-		udev->removable = USB_DEVICE_REMOVABLE;
+		dev_set_removable(&udev->dev, DEVICE_REMOVABLE);
 	else
-		udev->removable = USB_DEVICE_FIXED;
+		dev_set_removable(&udev->dev, DEVICE_FIXED);
 }
 
@@ -2563,8 +2565,7 @@ int usb_new_device(struct usb_device *udev)
 	device_enable_async_suspend(&udev->dev);
 
 	/* check whether the hub or firmware marks this port as non-removable */
-	if (udev->parent)
-		set_usb_port_removable(udev);
+	set_usb_port_removable(udev);
 
 	/* Register the device. The device driver is responsible
 	 * for configuring the device and invoking the add-device
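
The hub.c hunk above and the sysfs.c hunk below are two halves of the same backport: USB stops tracking removability in its private udev->removable field and instead reports it to the driver core, which exposes a generic "removable" sysfs attribute for all buses. The core helper is tiny; approximately (from include/linux/device.h, quoted from memory, so treat as a sketch):

	static inline void dev_set_removable(struct device *dev,
					     enum device_removable removable)
	{
		dev->removable = removable;
	}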

@@ -298,29 +298,6 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RO(urbnum);
 
-static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
-			      char *buf)
-{
-	struct usb_device *udev;
-	char *state;
-
-	udev = to_usb_device(dev);
-
-	switch (udev->removable) {
-	case USB_DEVICE_REMOVABLE:
-		state = "removable";
-		break;
-	case USB_DEVICE_FIXED:
-		state = "fixed";
-		break;
-	default:
-		state = "unknown";
-	}
-	return sprintf(buf, "%s\n", state);
-}
-static DEVICE_ATTR_RO(removable);
-
 static ssize_t ltm_capable_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -825,7 +802,6 @@ static struct attribute *dev_attrs[] = {
 	&dev_attr_avoid_reset_quirk.attr,
 	&dev_attr_authorized.attr,
 	&dev_attr_remove.attr,
-	&dev_attr_removable.attr,
 	&dev_attr_ltm_capable.attr,
 #ifdef CONFIG_OF
 	&dev_attr_devspec.attr,

@@ -2045,15 +2045,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 {
 	struct dwc2_qtd *qtd;
 	struct dwc2_host_chan *chan;
-	u32 hcint, hcintmsk;
+	u32 hcint, hcintraw, hcintmsk;
 
 	chan = hsotg->hc_ptr_array[chnum];
 
-	hcint = dwc2_readl(hsotg, HCINT(chnum));
+	hcintraw = dwc2_readl(hsotg, HCINT(chnum));
 	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+	hcint = hcintraw & hcintmsk;
+	dwc2_writel(hsotg, hcint, HCINT(chnum));
+
 	if (!chan) {
 		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
-		dwc2_writel(hsotg, hcint, HCINT(chnum));
 		return;
 	}
 
@@ -2062,11 +2064,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 			 chnum);
 		dev_vdbg(hsotg->dev,
 			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
-			 hcint, hcintmsk, hcint & hcintmsk);
+			 hcintraw, hcintmsk, hcint);
 	}
 
-	dwc2_writel(hsotg, hcint, HCINT(chnum));
-
 	/*
 	 * If we got an interrupt after someone called
 	 * dwc2_hcd_endpoint_disable() we don't want to crash below
@@ -2076,8 +2076,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 		return;
 	}
 
-	chan->hcint = hcint;
-	hcint &= hcintmsk;
+	chan->hcint = hcintraw;
 
 	/*
 	 * If the channel was halted due to a dequeue, the qtd list might

@@ -1686,6 +1686,8 @@ static int dwc3_probe(struct platform_device *pdev)
 
 	pm_runtime_put(dev);
 
+	dma_set_max_seg_size(dev, UINT_MAX);
+
 	return 0;
 
 err5:
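
Context for the added call above, hedged since the body is quoted from memory: dma_set_max_seg_size() records the device's scatter-gather segment limit in dev->dma_parms, which the DMA mapping core consults when drivers map scatterlists; passing UINT_MAX declares that dwc3 imposes no segment-size limit, avoiding spurious "mapping sg segment longer than device claims to support" warnings. The include/linux/dma-mapping.h helper is approximately:

	static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
	{
		if (dev->dma_parms) {
			dev->dma_parms->max_segment_size = size;
			return 0;
		}
		return -EIO;
	}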

@@ -554,6 +554,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
 		dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
 		mode = DWC3_GCTL_PRTCAP_DEVICE;
 	}
+	dwc3_set_mode(dwc, mode);
 
 	dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
 	dwc3_role_switch.set = dwc3_usb_role_switch_set;
@@ -563,7 +564,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
 	if (IS_ERR(dwc->role_sw))
 		return PTR_ERR(dwc->role_sw);
 
-	dwc3_set_mode(dwc, mode);
 	return 0;
 }
 #else

diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c

@@ -492,7 +492,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
                 irq_set_status_flags(irq, IRQ_NOAUTOEN);
                 ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
                                                 qcom_dwc3_resume_irq,
-                                                IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                                IRQF_ONESHOT,
                                                 "qcom_dwc3 HS", qcom);
                 if (ret) {
                         dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
@@ -507,7 +507,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
                 irq_set_status_flags(irq, IRQ_NOAUTOEN);
                 ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
                                                 qcom_dwc3_resume_irq,
-                                                IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                                IRQF_ONESHOT,
                                                 "qcom_dwc3 DP_HS", qcom);
                 if (ret) {
                         dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
@@ -522,7 +522,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
                 irq_set_status_flags(irq, IRQ_NOAUTOEN);
                 ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
                                                 qcom_dwc3_resume_irq,
-                                                IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                                IRQF_ONESHOT,
                                                 "qcom_dwc3 DM_HS", qcom);
                 if (ret) {
                         dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
@@ -537,7 +537,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
                 irq_set_status_flags(irq, IRQ_NOAUTOEN);
                 ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
                                                 qcom_dwc3_resume_irq,
-                                                IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                                IRQF_ONESHOT,
                                                 "qcom_dwc3 SS", qcom);
                 if (ret) {
                         dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
@@ -690,6 +690,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
         if (!qcom->dwc3) {
                 ret = -ENODEV;
                 dev_err(dev, "failed to get dwc3 platform device\n");
+                of_platform_depopulate(dev);
         }

 node_put:
@@ -698,9 +699,9 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
         return ret;
 }

-static struct platform_device *
-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
 {
+        struct platform_device *urs_usb = NULL;
         struct fwnode_handle *fwh;
         struct acpi_device *adev;
         char name[8];
@@ -720,9 +721,26 @@ static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
         adev = to_acpi_device_node(fwh);
         if (!adev)
-                return NULL;
+                goto err_put_handle;

-        return acpi_create_platform_device(adev, NULL);
+        urs_usb = acpi_create_platform_device(adev, NULL);
+        if (IS_ERR_OR_NULL(urs_usb))
+                goto err_put_handle;
+
+        return urs_usb;
+
+err_put_handle:
+        fwnode_handle_put(fwh);
+
+        return urs_usb;
+}
+
+static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
+{
+        struct fwnode_handle *fwh = urs_usb->dev.fwnode;
+
+        platform_device_unregister(urs_usb);
+        fwnode_handle_put(fwh);
 }

 static int dwc3_qcom_probe(struct platform_device *pdev)
@@ -807,13 +825,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
         if (IS_ERR(qcom->qscratch_base)) {
                 dev_err(dev, "failed to map qscratch, err=%d\n", ret);
                 ret = PTR_ERR(qcom->qscratch_base);
-                goto clk_disable;
+                goto free_urs;
         }

         ret = dwc3_qcom_setup_irq(pdev);
         if (ret) {
                 dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
-                goto clk_disable;
+                goto free_urs;
         }

         /*
@@ -832,7 +850,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
         if (ret) {
                 dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
-                goto depopulate;
+                goto free_urs;
         }

         ret = dwc3_qcom_interconnect_init(qcom);
@@ -864,7 +882,11 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
         if (np)
                 of_platform_depopulate(&pdev->dev);
         else
-                platform_device_put(pdev);
+                platform_device_del(qcom->dwc3);
+        platform_device_put(qcom->dwc3);
+free_urs:
+        if (qcom->urs_usb)
+                dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
 clk_disable:
         for (i = qcom->num_clocks - 1; i >= 0; i--) {
                 clk_disable_unprepare(qcom->clks[i]);
@@ -886,7 +908,11 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
         if (np)
                 of_platform_depopulate(&pdev->dev);
         else
-                platform_device_put(pdev);
+                platform_device_del(qcom->dwc3);
+        platform_device_put(qcom->dwc3);
+
+        if (qcom->urs_usb)
+                dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);

         for (i = qcom->num_clocks - 1; i >= 0; i--) {
                 clk_disable_unprepare(qcom->clks[i]);
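
The probe/remove changes above follow the usual goto-unwind ladder: each acquired resource gets a cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal compilable sketch of the idiom, with made-up resources:

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_late)
{
        char *a, *b;
        int ret = -1;

        a = malloc(16);                 /* first resource */
        if (!a)
                goto out;

        b = malloc(16);                 /* second resource */
        if (!b)
                goto free_a;            /* unwind only what exists so far */

        if (fail_late)
                goto free_b;            /* later step failed: release b, then a */

        /* success path would keep both and return 0 */
        free(b);
        free(a);
        return 0;

free_b:
        free(b);
free_a:
        free(a);
out:
        return ret;
}

int main(void)
{
        printf("probe() = %d\n", probe(1));
        return 0;
}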

diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c

@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
 #define DELL_PRODUCT_5829E_ESIM                 0x81e4
 #define DELL_PRODUCT_5829E                      0x81e6
-#define DELL_PRODUCT_FM101R                     0x8213
-#define DELL_PRODUCT_FM101R_ESIM                0x8215
+#define DELL_PRODUCT_FM101R_ESIM                0x8213
+#define DELL_PRODUCT_FM101R                     0x8215

 #define KYOCERA_VENDOR_ID                       0x0c88
 #define KYOCERA_PRODUCT_KPC650                  0x17da
@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
 #define UNISOC_VENDOR_ID                        0x1782
 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
 #define TOZED_PRODUCT_LT70C                     0x4055
+/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+#define LUAT_PRODUCT_AIR720U                    0x4e00

 /* Device flags */
@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
         { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
         { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
           .driver_info = RSVD(4) },
-        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
+          .driver_info = RSVD(4) },
         { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
           .driver_info = RSVD(4) },
         { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
           .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
         { USB_DEVICE(0x1782, 0x4d10) },                         /* Fibocom L610 (AT mode) */
         { USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) },   /* Fibocom L610 (ECM/RNDIS mode) */
+        { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) },   /* Fibocom L716-EU (ECM/RNDIS mode) */
         { USB_DEVICE(0x2cb7, 0x0104),                           /* Fibocom NL678 series */
           .driver_info = RSVD(4) | RSVD(5) },
         { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),     /* Fibocom NL678 series */
@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
         { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
         { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
         { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+        { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
         { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
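
New modems are supported simply by appending entries ahead of the `{ }` terminator of option_ids[]. A toy model of that sentinel-terminated ID-table lookup (hypothetical IDs and struct, not the real usb_device_id machinery):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dev_id { uint16_t vid, pid; bool valid; };

static const struct dev_id ids[] = {
        { 0x1782, 0x4055, true },   /* a UNISOC-based device */
        { 0x1782, 0x4e00, true },   /* another UNISOC-based device */
        { 0 }                       /* terminating entry */
};

static bool match(uint16_t vid, uint16_t pid)
{
        for (const struct dev_id *id = ids; id->valid; id++)
                if (id->vid == vid && id->pid == pid)
                        return true;
        return false;
}

int main(void)
{
        printf("%d\n", match(0x1782, 0x4e00));  /* prints 1 */
        return 0;
}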

diff --git a/drivers/video/console/sticore.h b/drivers/video/console/sticore.h

@@ -231,7 +231,7 @@ struct sti_rom_font {
         u8 height;
         u8 font_type;           /* language type */
         u8 bytes_per_char;
-        u32 next_font;
+        s32 next_font;          /* note: signed int */
         u8 underline_height;
         u8 underline_pos;
         u8 res008[2];

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c

@@ -579,4 +579,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
         .get_sgtable = dma_common_get_sgtable,
         .alloc_pages = dma_common_alloc_pages,
         .free_pages = dma_common_free_pages,
+        .max_mapping_size = swiotlb_max_mapping_size,
 };
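
dma_map_ops is a table of function pointers in which .max_mapping_size is optional; callers fall back to "no limit" when it is unset. The optional-callback idiom, reduced to plain C (hypothetical ops table, not the DMA API):

#include <stddef.h>
#include <stdio.h>

struct ops {
        size_t (*max_mapping_size)(void);  /* optional method */
};

static size_t my_max(void) { return 4096; }

static size_t max_mapping_size(const struct ops *ops)
{
        /* Fall back to "unlimited" when the method is not provided. */
        return ops->max_mapping_size ? ops->max_mapping_size() : (size_t)-1;
}

int main(void)
{
        struct ops with = { .max_mapping_size = my_max };
        struct ops without = { 0 };

        printf("%zu\n", max_mapping_size(&with));     /* 4096 */
        printf("%zu\n", max_mapping_size(&without));  /* SIZE_MAX */
        return 0;
}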

diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c

@@ -131,8 +131,8 @@ static int afs_probe_cell_name(struct dentry *dentry)

         ret = dns_query(net->net, "afsdb", name, len, "srv=1",
                         NULL, NULL, false);
-        if (ret == -ENODATA)
-                ret = -EDESTADDRREQ;
+        if (ret == -ENODATA || ret == -ENOKEY)
+                ret = -ENOENT;
         return ret;
 }

diff --git a/fs/afs/internal.h b/fs/afs/internal.h

@@ -546,6 +546,7 @@ struct afs_server_entry {
 };

 struct afs_server_list {
+        struct rcu_head rcu;
         afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
         refcount_t usage;
         unsigned char nr_servers;

diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c

@@ -17,7 +17,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
                 for (i = 0; i < slist->nr_servers; i++)
                         afs_unuse_server(net, slist->servers[i].server,
                                          afs_server_trace_put_slist);
-                kfree(slist);
+                kfree_rcu(slist, rcu);
         }
 }

diff --git a/fs/afs/super.c b/fs/afs/super.c

@@ -406,6 +406,8 @@ static int afs_validate_fc(struct fs_context *fc)
                         return PTR_ERR(volume);

                 ctx->volume = volume;
+                if (volume->type != AFSVL_RWVOL)
+                        ctx->flock_mode = afs_flock_mode_local;
         }

         return 0;

diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c

@@ -58,6 +58,12 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
         }

         /* Status load is ordered after lookup counter load */
+        if (cell->dns_status == DNS_LOOKUP_GOT_NOT_FOUND) {
+                pr_warn("No record of cell %s\n", cell->name);
+                vc->error = -ENOENT;
+                return false;
+        }
+
         if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
                 vc->error = -EDESTADDRREQ;
                 return false;
@@ -285,6 +291,7 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
  */
 static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
 {
+        struct afs_cell *cell = vc->cell;
         static int count;
         int i;

@@ -294,6 +301,9 @@ static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
         rcu_read_lock();
         pr_notice("EDESTADDR occurred\n");
+        pr_notice("CELL: %s err=%d\n", cell->name, cell->error);
+        pr_notice("DNS: src=%u st=%u lc=%x\n",
+                  cell->dns_source, cell->dns_status, cell->dns_lookup_count);
         pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
                   vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c

@@ -2972,6 +2972,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
                 goto fail_alloc;
         }

+        btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid);
         /*
          * Verify the type first, if that or the checksum value are
          * corrupted, we'll find out

diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c

@@ -804,6 +804,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
                         dump_ref_action(fs_info, ra);
                         kfree(ref);
                         kfree(ra);
+                        kfree(re);
                         goto out_unlock;
                 } else if (be->num_refs == 0) {
                         btrfs_err(fs_info,
@@ -813,6 +814,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
                         dump_ref_action(fs_info, ra);
                         kfree(ref);
                         kfree(ra);
+                        kfree(re);
                         goto out_unlock;
                 }

diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c

@@ -7303,7 +7303,7 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)

         sctx->flags = arg->flags;

         sctx->send_filp = fget(arg->send_fd);
-        if (!sctx->send_filp) {
+        if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
                 ret = -EBADF;
                 goto out;
         }

diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c

@@ -318,7 +318,10 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,

 static void btrfs_put_super(struct super_block *sb)
 {
-        close_ctree(btrfs_sb(sb));
+        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+        btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid);
+        close_ctree(fs_info);
 }

 enum {

diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c

@@ -2992,15 +2992,16 @@ struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
         read_unlock(&em_tree->lock);

         if (!em) {
-                btrfs_crit(fs_info, "unable to find logical %llu length %llu",
+                btrfs_crit(fs_info,
+                           "unable to find chunk map for logical %llu length %llu",
                            logical, length);
                 return ERR_PTR(-EINVAL);
         }

-        if (em->start > logical || em->start + em->len < logical) {
+        if (em->start > logical || em->start + em->len <= logical) {
                 btrfs_crit(fs_info,
-                           "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
-                           logical, length, em->start, em->start + em->len);
+                           "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
+                           logical, logical + length, em->start, em->start + em->len);
                 free_extent_map(em);
                 return ERR_PTR(-EINVAL);
         }
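
The corrected comparison treats a chunk map as the half-open range [start, start + len): an address equal to start + len is already outside the chunk, so the test must use `<=`. A standalone demonstration of that boundary:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does [start, start + len) contain addr? */
static bool contains(uint64_t start, uint64_t len, uint64_t addr)
{
        return !(start > addr || start + len <= addr);
}

int main(void)
{
        printf("%d\n", contains(100, 50, 100)); /* 1: first byte inside */
        printf("%d\n", contains(100, 50, 149)); /* 1: last byte inside  */
        printf("%d\n", contains(100, 50, 150)); /* 0: one past the end  */
        return 0;
}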

diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c

@@ -1076,6 +1076,7 @@ const struct inode_operations cifs_file_inode_ops = {

 const struct inode_operations cifs_symlink_inode_ops = {
         .get_link = cifs_get_link,
+        .setattr = cifs_setattr,
         .permission = cifs_permission,
         .listxattr = cifs_listxattr,
 };

diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c

@@ -157,10 +157,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
                         goto out;

-                if (pTcon->ses->server->ops->set_EA)
+                if (pTcon->ses->server->ops->set_EA) {
                         rc = pTcon->ses->server->ops->set_EA(xid, pTcon,
                                 full_path, name, value, (__u16)size,
                                 cifs_sb->local_nls, cifs_sb);
+                        if (rc == 0)
+                                inode_set_ctime_current(inode);
+                }
                 break;

         case XATTR_CIFS_ACL:
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c

@@ -144,14 +144,17 @@
 static struct kmem_cache *ext4_es_cachep;
 static struct kmem_cache *ext4_pending_cachep;

-static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+                              struct extent_status *prealloc);
 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
-                              ext4_lblk_t end, int *reserved);
+                              ext4_lblk_t end, int *reserved,
+                              struct extent_status *prealloc);
 static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
                        struct ext4_inode_info *locked_ei);
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-                             ext4_lblk_t len);
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+                            ext4_lblk_t len,
+                            struct pending_reservation **prealloc);

 int __init ext4_init_es(void)
 {
@@ -448,22 +451,49 @@ static void ext4_es_list_del(struct inode *inode)
         spin_unlock(&sbi->s_es_lock);
 }

-static struct extent_status *
-ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
-                     ext4_fsblk_t pblk)
+static inline struct pending_reservation *__alloc_pending(bool nofail)
+{
+        if (!nofail)
+                return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
+
+        return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static inline void __free_pending(struct pending_reservation *pr)
+{
+        kmem_cache_free(ext4_pending_cachep, pr);
+}
+
+/*
+ * Returns true if we cannot fail to allocate memory for this extent_status
+ * entry and cannot reclaim it until its status changes.
+ */
+static inline bool ext4_es_must_keep(struct extent_status *es)
+{
+        /* fiemap, bigalloc, and seek_data/hole need to use it. */
+        if (ext4_es_is_delayed(es))
+                return true;
+
+        return false;
+}
+
+static inline struct extent_status *__es_alloc_extent(bool nofail)
+{
+        if (!nofail)
+                return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+
+        return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL);
+}
+
+static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
+                                ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk)
 {
-        struct extent_status *es;
-        es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
-        if (es == NULL)
-                return NULL;
         es->es_lblk = lblk;
         es->es_len = len;
         es->es_pblk = pblk;

-        /*
-         * We don't count delayed extent because we never try to reclaim them
-         */
-        if (!ext4_es_is_delayed(es)) {
+        /* We never try to reclaim a must kept extent, so we don't count it. */
+        if (!ext4_es_must_keep(es)) {
                 if (!EXT4_I(inode)->i_es_shk_nr++)
                         ext4_es_list_add(inode);
                 percpu_counter_inc(&EXT4_SB(inode->i_sb)->
@@ -472,8 +502,11 @@ static void ext4_es_init_extent(struct inode *inode, struct extent_status *es,
         EXT4_I(inode)->i_es_all_nr++;
         percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
+}

-        return es;
+static inline void __es_free_extent(struct extent_status *es)
+{
+        kmem_cache_free(ext4_es_cachep, es);
 }

 static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
@@ -481,8 +514,8 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
         EXT4_I(inode)->i_es_all_nr--;
         percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);

-        /* Decrease the shrink counter when this es is not delayed */
-        if (!ext4_es_is_delayed(es)) {
+        /* Decrease the shrink counter when we can reclaim the extent. */
+        if (!ext4_es_must_keep(es)) {
                 BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
                 if (!--EXT4_I(inode)->i_es_shk_nr)
                         ext4_es_list_del(inode);
@@ -490,7 +523,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
                                 s_es_stats.es_stats_shk_cnt);
         }

-        kmem_cache_free(ext4_es_cachep, es);
+        __es_free_extent(es);
 }

 /*
@@ -752,7 +785,8 @@ static inline void ext4_es_insert_extent_check(struct inode *inode,
 }
 #endif

-static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
+static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
+                              struct extent_status *prealloc)
 {
         struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
         struct rb_node **p = &tree->root.rb_node;
@@ -792,10 +826,15 @@ static int __es_insert_extent(struct inode *inode, struct extent_status *newes,
                 }
         }

-        es = ext4_es_alloc_extent(inode, newes->es_lblk, newes->es_len,
-                                  newes->es_pblk);
+        if (prealloc)
+                es = prealloc;
+        else
+                es = __es_alloc_extent(false);
         if (!es)
                 return -ENOMEM;
+
+        ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len,
+                            newes->es_pblk);
+
         rb_link_node(&es->rb_node, parent, p);
         rb_insert_color(&es->rb_node, &tree->root);

@@ -816,8 +855,12 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
 {
         struct extent_status newes;
         ext4_lblk_t end = lblk + len - 1;
-        int err = 0;
+        int err1 = 0, err2 = 0, err3 = 0;
         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+        struct extent_status *es1 = NULL;
+        struct extent_status *es2 = NULL;
+        struct pending_reservation *pr = NULL;
+        bool revise_pending = false;

         if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                 return 0;
@@ -845,29 +888,57 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,

         ext4_es_insert_extent_check(inode, &newes);

-        write_lock(&EXT4_I(inode)->i_es_lock);
-        err = __es_remove_extent(inode, lblk, end, NULL);
-        if (err != 0)
-                goto error;
+        revise_pending = sbi->s_cluster_ratio > 1 &&
+                         test_opt(inode->i_sb, DELALLOC) &&
+                         (status & (EXTENT_STATUS_WRITTEN |
+                                    EXTENT_STATUS_UNWRITTEN));
 retry:
-        err = __es_insert_extent(inode, &newes);
-        if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
-                                          128, EXT4_I(inode)))
-                goto retry;
-        if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
-                err = 0;
+        if (err1 && !es1)
+                es1 = __es_alloc_extent(true);
+        if ((err1 || err2) && !es2)
+                es2 = __es_alloc_extent(true);
+        if ((err1 || err2 || err3) && revise_pending && !pr)
+                pr = __alloc_pending(true);
+        write_lock(&EXT4_I(inode)->i_es_lock);

-        if (sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) &&
-            (status & EXTENT_STATUS_WRITTEN ||
-             status & EXTENT_STATUS_UNWRITTEN))
-                __revise_pending(inode, lblk, len);
+        err1 = __es_remove_extent(inode, lblk, end, NULL, es1);
+        if (err1 != 0)
+                goto error;
+        /* Free preallocated extent if it didn't get used. */
+        if (es1) {
+                if (!es1->es_len)
+                        __es_free_extent(es1);
+                es1 = NULL;
+        }
+
+        err2 = __es_insert_extent(inode, &newes, es2);
+        if (err2 == -ENOMEM && !ext4_es_must_keep(&newes))
+                err2 = 0;
+        if (err2 != 0)
+                goto error;
+        /* Free preallocated extent if it didn't get used. */
+        if (es2) {
+                if (!es2->es_len)
+                        __es_free_extent(es2);
+                es2 = NULL;
+        }
+
+        if (revise_pending) {
+                err3 = __revise_pending(inode, lblk, len, &pr);
+                if (err3 != 0)
+                        goto error;
+                if (pr) {
+                        __free_pending(pr);
+                        pr = NULL;
+                }
+        }
 error:
         write_unlock(&EXT4_I(inode)->i_es_lock);
+        if (err1 || err2 || err3)
+                goto retry;

         ext4_es_print_tree(inode);
-
-        return err;
+        return 0;
 }

 /*
@@ -900,7 +971,7 @@ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,

         es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
         if (!es || es->es_lblk > end)
-                __es_insert_extent(inode, &newes);
+                __es_insert_extent(inode, &newes, NULL);
         write_unlock(&EXT4_I(inode)->i_es_lock);
 }

@@ -1271,7 +1342,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
                                 rc->ndelonly--;
                                 node = rb_next(&pr->rb_node);
                                 rb_erase(&pr->rb_node, &tree->root);
-                                kmem_cache_free(ext4_pending_cachep, pr);
+                                __free_pending(pr);
                                 if (!node)
                                         break;
                                 pr = rb_entry(node, struct pending_reservation,
@@ -1290,6 +1361,7 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
  * @lblk - first block in range
  * @end - last block in range
  * @reserved - number of cluster reservations released
+ * @prealloc - pre-allocated es to avoid memory allocation failures
  *
  * If @reserved is not NULL and delayed allocation is enabled, counts
  * block/cluster reservations freed by removing range and if bigalloc
@@ -1297,7 +1369,8 @@ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end,
  * error code on failure.
  */
 static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
-                              ext4_lblk_t end, int *reserved)
+                              ext4_lblk_t end, int *reserved,
+                              struct extent_status *prealloc)
 {
         struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
         struct rb_node *node;
@@ -1305,14 +1378,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
         struct extent_status orig_es;
         ext4_lblk_t len1, len2;
         ext4_fsblk_t block;
-        int err;
+        int err = 0;
         bool count_reserved = true;
         struct rsvd_count rc;

         if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC))
                 count_reserved = false;
-retry:
-        err = 0;

         es = __es_tree_search(&tree->root, lblk);
         if (!es)
@@ -1346,14 +1417,13 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
                                         orig_es.es_len - len2;
                         ext4_es_store_pblock_status(&newes, block,
                                                     ext4_es_status(&orig_es));
-                        err = __es_insert_extent(inode, &newes);
+                        err = __es_insert_extent(inode, &newes, prealloc);
                         if (err) {
+                                if (!ext4_es_must_keep(&newes))
+                                        return 0;
+
                                 es->es_lblk = orig_es.es_lblk;
                                 es->es_len = orig_es.es_len;
-                                if ((err == -ENOMEM) &&
-                                    __es_shrink(EXT4_SB(inode->i_sb),
-                                                128, EXT4_I(inode)))
-                                        goto retry;
                                 goto out;
                         }
                 } else {
@@ -1433,6 +1503,7 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
         ext4_lblk_t end;
         int err = 0;
         int reserved = 0;
+        struct extent_status *es = NULL;

         if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                 return 0;
@@ -1447,17 +1518,29 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
         end = lblk + len - 1;
         BUG_ON(end < lblk);

+retry:
+        if (err && !es)
+                es = __es_alloc_extent(true);
         /*
          * ext4_clear_inode() depends on us taking i_es_lock unconditionally
          * so that we are sure __es_shrink() is done with the inode before it
          * is reclaimed.
          */
         write_lock(&EXT4_I(inode)->i_es_lock);
-        err = __es_remove_extent(inode, lblk, end, &reserved);
+        err = __es_remove_extent(inode, lblk, end, &reserved, es);
+        /* Free preallocated extent if it didn't get used. */
+        if (es) {
+                if (!es->es_len)
+                        __es_free_extent(es);
+                es = NULL;
+        }
         write_unlock(&EXT4_I(inode)->i_es_lock);
+        if (err)
+                goto retry;

         ext4_es_print_tree(inode);
         ext4_da_release_space(inode, reserved);
-
-        return err;
+        return 0;
 }

 static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
@@ -1704,11 +1787,8 @@ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
                 (*nr_to_scan)--;
                 node = rb_next(&es->rb_node);
-                /*
-                 * We can't reclaim delayed extent from status tree because
-                 * fiemap, bigalloc, and seek_data/hole need to use it.
-                 */
-                if (ext4_es_is_delayed(es))
+
+                if (ext4_es_must_keep(es))
                         goto next;
                 if (ext4_es_is_referenced(es)) {
                         ext4_es_clear_referenced(es);
@@ -1772,7 +1852,7 @@ void ext4_clear_inode_es(struct inode *inode)
         while (node) {
                 es = rb_entry(node, struct extent_status, rb_node);
                 node = rb_next(node);
-                if (!ext4_es_is_delayed(es)) {
+                if (!ext4_es_must_keep(es)) {
                         rb_erase(&es->rb_node, &tree->root);
                         ext4_es_free_extent(inode, es);
                 }
@@ -1859,11 +1939,13 @@ static struct pending_reservation *__get_pending(struct inode *inode,
  *
  * @inode - file containing the cluster
  * @lblk - logical block in the cluster to be added
+ * @prealloc - preallocated pending entry
  *
  * Returns 0 on successful insertion and -ENOMEM on failure.  If the
  * pending reservation is already in the set, returns successfully.
  */
-static int __insert_pending(struct inode *inode, ext4_lblk_t lblk)
+static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
+                            struct pending_reservation **prealloc)
 {
         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
         struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree;
@@ -1889,10 +1971,15 @@ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk,
                 }
         }

-        pr = kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC);
-        if (pr == NULL) {
-                ret = -ENOMEM;
-                goto out;
+        if (likely(*prealloc == NULL)) {
+                pr = __alloc_pending(false);
+                if (!pr) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+        } else {
+                pr = *prealloc;
+                *prealloc = NULL;
         }
         pr->lclu = lclu;

@@ -1922,7 +2009,7 @@ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk)
         if (pr != NULL) {
                 tree = &EXT4_I(inode)->i_pending_tree;
                 rb_erase(&pr->rb_node, &tree->root);
-                kmem_cache_free(ext4_pending_cachep, pr);
+                __free_pending(pr);
         }
 }

@@ -1983,7 +2070,10 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,
                                  bool allocated)
 {
         struct extent_status newes;
-        int err = 0;
+        int err1 = 0, err2 = 0, err3 = 0;
+        struct extent_status *es1 = NULL;
+        struct extent_status *es2 = NULL;
+        struct pending_reservation *pr = NULL;

         if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
                 return 0;
@@ -1998,29 +2088,52 @@ int ext4_es_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk,

         ext4_es_insert_extent_check(inode, &newes);

+retry:
+        if (err1 && !es1)
+                es1 = __es_alloc_extent(true);
+        if ((err1 || err2) && !es2)
+                es2 = __es_alloc_extent(true);
+        if ((err1 || err2 || err3) && allocated && !pr)
+                pr = __alloc_pending(true);
         write_lock(&EXT4_I(inode)->i_es_lock);

-        err = __es_remove_extent(inode, lblk, lblk, NULL);
-        if (err != 0)
-                goto error;
-retry:
-        err = __es_insert_extent(inode, &newes);
-        if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
-                                          128, EXT4_I(inode)))
-                goto retry;
-        if (err != 0)
+        err1 = __es_remove_extent(inode, lblk, lblk, NULL, es1);
+        if (err1 != 0)
+                goto error;
+        /* Free preallocated extent if it didn't get used. */
+        if (es1) {
+                if (!es1->es_len)
+                        __es_free_extent(es1);
+                es1 = NULL;
+        }
+
+        err2 = __es_insert_extent(inode, &newes, es2);
+        if (err2 != 0)
                 goto error;
+        /* Free preallocated extent if it didn't get used. */
+        if (es2) {
+                if (!es2->es_len)
+                        __es_free_extent(es2);
+                es2 = NULL;
+        }

-        if (allocated)
-                __insert_pending(inode, lblk);
+        if (allocated) {
+                err3 = __insert_pending(inode, lblk, &pr);
+                if (err3 != 0)
+                        goto error;
+                if (pr) {
+                        __free_pending(pr);
+                        pr = NULL;
+                }
+        }
 error:
         write_unlock(&EXT4_I(inode)->i_es_lock);
+        if (err1 || err2 || err3)
+                goto retry;

         ext4_es_print_tree(inode);
         ext4_print_pending_tree(inode);
-
-        return err;
+        return 0;
 }

 /*
@@ -2121,21 +2234,24 @@ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk,
  * @inode - file containing the range
  * @lblk - logical block defining the start of range
  * @len  - length of range in blocks
+ * @prealloc - preallocated pending entry
  *
  * Used after a newly allocated extent is added to the extents status tree.
  * Requires that the extents in the range have either written or unwritten
 * status.  Must be called while holding i_es_lock.
  */
-static void __revise_pending(struct inode *inode, ext4_lblk_t lblk,
-                             ext4_lblk_t len)
+static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
+                            ext4_lblk_t len,
+                            struct pending_reservation **prealloc)
 {
         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
         ext4_lblk_t end = lblk + len - 1;
         ext4_lblk_t first, last;
         bool f_del = false, l_del = false;
+        int ret = 0;

         if (len == 0)
-                return;
+                return 0;

         /*
          * Two cases - block range within single cluster and block range
@@ -2156,7 +2272,9 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
                         f_del = __es_scan_range(inode, &ext4_es_is_delonly,
                                                 first, lblk - 1);
                 if (f_del) {
-                        __insert_pending(inode, first);
+                        ret = __insert_pending(inode, first, prealloc);
+                        if (ret < 0)
+                                goto out;
                 } else {
                         last = EXT4_LBLK_CMASK(sbi, end) +
                                sbi->s_cluster_ratio - 1;
@@ -2164,9 +2282,11 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
                                 l_del = __es_scan_range(inode,
                                                         &ext4_es_is_delonly,
                                                         end + 1, last);
-                        if (l_del)
-                                __insert_pending(inode, last);
-                        else
+                        if (l_del) {
+                                ret = __insert_pending(inode, last, prealloc);
+                                if (ret < 0)
+                                        goto out;
+                        } else
                                 __remove_pending(inode, last);
                 }
         } else {
@@ -2174,18 +2294,24 @@ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk,
                 if (first != lblk)
                         f_del = __es_scan_range(inode, &ext4_es_is_delonly,
                                                 first, lblk - 1);
-                if (f_del)
-                        __insert_pending(inode, first);
-                else
+                if (f_del) {
+                        ret = __insert_pending(inode, first, prealloc);
+                        if (ret < 0)
+                                goto out;
+                } else
                         __remove_pending(inode, first);

                 last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1;
                 if (last != end)
                         l_del = __es_scan_range(inode, &ext4_es_is_delonly,
                                                 end + 1, last);
-                if (l_del)
-                        __insert_pending(inode, last);
-                else
+                if (l_del) {
+                        ret = __insert_pending(inode, last, prealloc);
+                        if (ret < 0)
+                                goto out;
+                } else
                         __remove_pending(inode, last);
         }
+out:
+        return ret;
 }
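
All of the reworked ext4 paths share one pattern: try the operation with a cheap atomic allocation under the lock, and if that fails, drop the lock, preallocate with a cannot-fail allocation, retry, and free the preallocation when the retry did not consume it. A coarse userspace analogue of that retry loop (heavily simplified; none of the ext4 types are real here):

#include <stdlib.h>
#include <stdio.h>

struct node { int len; };
static struct node *tree;   /* stand-in for the rbtree */

/* The operation done under the lock: consumes *prealloc when given
 * one, and may fail (as a GFP_ATOMIC allocation can) without one. */
static int op_locked(struct node **prealloc, int attempt)
{
        if (*prealloc) {
                tree = *prealloc;   /* node is now owned by the tree */
                *prealloc = NULL;
                return 0;
        }
        return attempt == 0 ? -1 : 0;
}

int main(void)
{
        struct node *pre = NULL;
        int err, attempt = 0;

retry:
        if (attempt && !pre)
                pre = calloc(1, sizeof(*pre));  /* "cannot-fail" preallocation */
        /* lock would be taken here */
        err = op_locked(&pre, attempt);
        /* lock would be dropped here */
        free(pre);                              /* free it if it went unused */
        pre = NULL;
        if (err) {
                attempt++;
                goto retry;
        }
        printf("done after %d retries\n", attempt);
        free(tree);
        return 0;
}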

diff --git a/fs/inode.c b/fs/inode.c

@@ -2392,6 +2392,22 @@ int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
 }
 EXPORT_SYMBOL(vfs_ioc_fssetxattr_check);

+/**
+ * inode_set_ctime_current - set the ctime to current_time
+ * @inode: inode
+ *
+ * Set the inode->i_ctime to the current value for the inode. Returns
+ * the current value that was assigned to i_ctime.
+ */
+struct timespec64 inode_set_ctime_current(struct inode *inode)
+{
+        struct timespec64 now = current_time(inode);
+
+        inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
+        return now;
+}
+EXPORT_SYMBOL(inode_set_ctime_current);
+
 /**
  * in_group_or_capable - check whether caller is CAP_FSETID privileged
  * @inode: inode to check
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c

@@ -1758,6 +1758,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
         if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
                 goto out;

+        err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+        if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+                goto out;
+        if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+                goto out;
+
 retry:
         host_err = fh_want_write(ffhp);
         if (host_err) {
@@ -1792,12 +1798,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
         if (ndentry == trap)
                 goto out_dput_new;

-        host_err = -EXDEV;
-        if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
-                goto out_dput_new;
-        if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
-                goto out_dput_new;
-
         if (nfsd_has_cached_files(ndentry)) {
                 has_cached = true;
                 goto out_dput_old;

diff --git a/include/linux/device.h b/include/linux/device.h

@@ -351,6 +351,22 @@ enum dl_dev_state {
         DL_DEV_UNBINDING,
 };

+/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable is determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ *                                  device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is Unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+        DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+        DEVICE_REMOVABLE_UNKNOWN,
+        DEVICE_FIXED,
+        DEVICE_REMOVABLE,
+};
+
 /**
  * struct dev_links_info - Device data related to device links.
  * @suppliers: List of links to supplier devices.
@@ -432,6 +448,9 @@ struct dev_links_info {
  *              device (i.e. the bus driver that discovered the device).
  * @iommu_group: IOMMU group the device belongs to.
  * @iommu:      Per device generic IOMMU runtime data
+ * @removable:  Whether the device can be removed from the system. This
+ *              should be set by the subsystem / bus driver that discovered
+ *              the device.
  *
  * @offline_disabled: If set, the device is permanently online.
  * @offline:    Set after successful invocation of bus type's .offline().
@@ -542,6 +561,8 @@ struct device {
         struct iommu_group      *iommu_group;
         struct dev_iommu        *iommu;

+        enum device_removable   removable;
+
         bool                    offline_disabled:1;
         bool                    offline:1;
         bool                    of_node_reused:1;
@@ -791,6 +812,22 @@ static inline bool dev_has_sync_state(struct device *dev)
         return false;
 }

+static inline void dev_set_removable(struct device *dev,
+                                     enum device_removable removable)
+{
+        dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+        return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+        return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
+
 /*
  * High level routines for use by the bus drivers
  */

diff --git a/include/linux/fs.h b/include/linux/fs.h

@@ -1628,7 +1628,50 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
         inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
 }

-extern struct timespec64 current_time(struct inode *inode);
+struct timespec64 current_time(struct inode *inode);
+struct timespec64 inode_set_ctime_current(struct inode *inode);
+
+/**
+ * inode_get_ctime - fetch the current ctime from the inode
+ * @inode: inode from which to fetch ctime
+ *
+ * Grab the current ctime from the inode and return it.
+ */
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+{
+        return inode->i_ctime;
+}
+
+/**
+ * inode_set_ctime_to_ts - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @ts: value to set in the ctime field
+ *
+ * Set the ctime in @inode to @ts
+ */
+static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
+                                                      struct timespec64 ts)
+{
+        inode->i_ctime = ts;
+        return ts;
+}
+
+/**
+ * inode_set_ctime - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @sec: tv_sec value to set
+ * @nsec: tv_nsec value to set
+ *
+ * Set the ctime in @inode to { @sec, @nsec }
+ */
+static inline struct timespec64 inode_set_ctime(struct inode *inode,
+                                                time64_t sec, long nsec)
+{
+        struct timespec64 ts = { .tv_sec  = sec,
+                                 .tv_nsec = nsec };
+
+        return inode_set_ctime_to_ts(inode, ts);
+}

 /*
  * Snapshotting support.
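
These are plain accessor wrappers around i_ctime, added so later kernels can change the field's representation without touching every caller; the setter returns the stored value so call sites can chain it. The shape of the pattern in standalone C (hypothetical struct, not the real inode):

#include <stdio.h>
#include <time.h>

struct myinode { struct timespec ctime; };  /* hypothetical stand-in */

static inline struct timespec demo_get_ctime(const struct myinode *i)
{
        return i->ctime;
}

static inline struct timespec demo_set_ctime(struct myinode *i,
                                             time_t sec, long nsec)
{
        struct timespec ts = { .tv_sec = sec, .tv_nsec = nsec };

        i->ctime = ts;
        return ts;      /* returning the stored value lets callers chain it */
}

int main(void)
{
        struct myinode i;

        demo_set_ctime(&i, 1700000000, 0);
        printf("%lld\n", (long long)demo_get_ctime(&i).tv_sec);
        return 0;
}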

diff --git a/include/linux/hid.h b/include/linux/hid.h

@@ -624,8 +624,13 @@ struct hid_device {                     /* device report descriptor */
         struct list_head debug_list;
         spinlock_t  debug_list_lock;
         wait_queue_head_t debug_wait;
+        struct kref ref;
+
+        unsigned int id;                /* system unique id */
 };

+void hiddev_free(struct kref *ref);
+
 #define to_hid_device(pdev) \
         container_of(pdev, struct hid_device, dev)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h

@@ -421,6 +421,7 @@ struct mmc_host {
         unsigned int            use_blk_mq:1;   /* use blk-mq */
         unsigned int            retune_crc_disable:1; /* don't trigger retune upon crc */
         unsigned int            can_dma_map_merge:1; /* merging can be used */
+        unsigned int            vqmmc_enabled:1; /* vqmmc regulator is enabled */

         int                     rescan_disable; /* disable card detection */
         int                     rescan_entered; /* used with nonremovable devices */
@@ -572,6 +573,8 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
 #endif

 int mmc_regulator_get_supply(struct mmc_host *mmc);
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);

 static inline int mmc_card_is_removable(struct mmc_host *host)
 {
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h

@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers for Intel SoC model detection
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_DATA_X86_SOC_H
+#define __PLATFORM_DATA_X86_SOC_H
+
+#if IS_ENABLED(CONFIG_X86)
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define SOC_INTEL_IS_CPU(soc, type)                             \
+static inline bool soc_intel_is_##soc(void)                     \
+{                                                               \
+        static const struct x86_cpu_id soc##_cpu_ids[] = {      \
+                X86_MATCH_INTEL_FAM6_MODEL(type, NULL),         \
+                {}                                              \
+        };                                                      \
+        const struct x86_cpu_id *id;                            \
+                                                                \
+        id = x86_match_cpu(soc##_cpu_ids);                      \
+        if (id)                                                 \
+                return true;                                    \
+        return false;                                           \
+}
+
+SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+
+#else /* IS_ENABLED(CONFIG_X86) */
+
+static inline bool soc_intel_is_byt(void)
+{
+        return false;
+}
+
+static inline bool soc_intel_is_cht(void)
+{
+        return false;
+}
+
+static inline bool soc_intel_is_apl(void)
+{
+        return false;
+}
+
+static inline bool soc_intel_is_glk(void)
+{
+        return false;
+}
+
+static inline bool soc_intel_is_cml(void)
+{
+        return false;
+}
+
+#endif /* IS_ENABLED(CONFIG_X86) */
+
+#endif /* __PLATFORM_DATA_X86_SOC_H */

diff --git a/include/linux/usb.h b/include/linux/usb.h

@@ -494,12 +494,6 @@ struct usb_dev_state;
 struct usb_tt;

-enum usb_device_removable {
-        USB_DEVICE_REMOVABLE_UNKNOWN = 0,
-        USB_DEVICE_REMOVABLE,
-        USB_DEVICE_FIXED,
-};
-
 enum usb_port_connect_type {
         USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
         USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -722,7 +716,6 @@ struct usb_device {
 #endif
         struct wusb_dev *wusb_dev;
         int slot_id;
-        enum usb_device_removable removable;
         struct usb2_lpm_parameters l1_params;
         struct usb3_lpm_parameters u1_params;
         struct usb3_lpm_parameters u2_params;

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h

@@ -466,6 +466,7 @@ extern int schedule_on_each_cpu(work_func_t func);
 int execute_in_process_context(work_func_t fn, struct execute_work *);

 extern bool flush_work(struct work_struct *work);
+extern bool cancel_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);

 extern bool flush_delayed_work(struct delayed_work *dwork);

diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h

@@ -150,6 +150,12 @@ struct scsi_cmnd {
         ANDROID_KABI_RESERVE(4);
 };

+/* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */
+static inline struct request *scsi_cmd_to_rq(struct scsi_cmnd *scmd)
+{
+        return blk_mq_rq_from_pdu(scmd);
+}
+
 /*
  * Return the driver private allocation behind the command.
  * Only works if cmd_size is set in the host template.
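
blk-mq allocates the driver's per-command payload (the PDU) immediately after the struct request in a single allocation, so either pointer can be derived from the other with fixed-offset arithmetic. A self-contained model of that layout (hypothetical types; the kernel uses blk_mq_rq_from_pdu() and friends):

#include <stdio.h>
#include <stdlib.h>

struct request { int tag; };
struct pdu     { int data; };

/* One allocation: the PDU lives immediately after the request. */
static struct pdu *rq_to_pdu(struct request *rq) { return (struct pdu *)(rq + 1); }
static struct request *pdu_to_rq(struct pdu *p)  { return (struct request *)p - 1; }

int main(void)
{
        struct request *rq = malloc(sizeof(struct request) + sizeof(struct pdu));

        rq->tag = 42;
        printf("tag via pdu: %d\n", pdu_to_rq(rq_to_pdu(rq))->tag);  /* 42 */

        free(rq);
        return 0;
}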

diff --git a/fs/io_uring.c b/fs/io_uring.c

@@ -3159,7 +3159,7 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
                  */
                 const struct bio_vec *bvec = imu->bvec;

-                if (offset <= bvec->bv_len) {
+                if (offset < bvec->bv_len) {
                         iov_iter_advance(iter, offset);
                 } else {
                         unsigned long seg_skip;
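
With `<=`, an offset exactly equal to the first segment's length is treated as still inside that segment, leaving the iterator at a zero-byte tail instead of the start of the next bvec; `<` puts the boundary in the following segment. A standalone model of segment addressing with half-open ranges:

#include <stdio.h>
#include <stddef.h>

struct seg { size_t len; };

/* Return the segment index and in-segment offset for a linear offset. */
static size_t locate(const struct seg *v, size_t n, size_t off, size_t *in_seg)
{
        size_t i = 0;

        while (i < n && off >= v[i].len) {  /* '>=': a segment's end belongs to the next */
                off -= v[i].len;
                i++;
        }
        *in_seg = off;
        return i;
}

int main(void)
{
        struct seg v[] = { { 4096 }, { 4096 } };
        size_t in;
        size_t i = locate(v, 2, 4096, &in);

        printf("segment %zu, offset %zu\n", i, in);  /* segment 1, offset 0 */
        return 0;
}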

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c

@@ -3357,7 +3357,8 @@ static int alloc_chain_hlocks(int req)
                 size = chain_block_size(curr);
                 if (likely(size >= req)) {
                         del_chain_block(0, size, chain_block_next(curr));
-                        add_chain_block(curr + req, size - req);
+                        if (size > req)
+                                add_chain_block(curr + req, size - req);
                         return curr;
                 }
         }
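
When the found block is exactly the requested size there is no remainder, and re-adding a zero-size block corrupts the chain-block lists; the fix re-adds the tail only when one exists. The split rule in isolation:

#include <stdio.h>

/* Split a free block of 'size' entries to satisfy 'req' entries.
 * Returns the leftover to put back on the free lists, or 0 for none. */
static int split_block(int size, int req)
{
        return size > req ? size - req : 0;  /* never hand back a 0-size block */
}

int main(void)
{
        printf("%d\n", split_block(10, 8));  /* 2: leftover is re-added */
        printf("%d\n", split_block(8, 8));   /* 0: nothing to re-add    */
        return 0;
}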

diff --git a/kernel/workqueue.c b/kernel/workqueue.c

@@ -3257,6 +3257,15 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
         return ret;
 }

+/*
+ * See cancel_delayed_work()
+ */
+bool cancel_work(struct work_struct *work)
+{
+        return __cancel_work(work, false);
+}
+EXPORT_SYMBOL(cancel_work);
+
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel

diff --git a/lib/errname.c b/lib/errname.c

@@ -110,9 +110,6 @@ static const char *names_0[] = {
         E(ENOSPC),
         E(ENOSR),
         E(ENOSTR),
-#ifdef ENOSYM
-        E(ENOSYM),
-#endif
         E(ENOSYS),
         E(ENOTBLK),
         E(ENOTCONN),
@@ -143,9 +140,6 @@ static const char *names_0[] = {
 #endif
         E(EREMOTE),
         E(EREMOTEIO),
-#ifdef EREMOTERELEASE
-        E(EREMOTERELEASE),
-#endif
         E(ERESTART),
         E(ERFKILL),
         E(EROFS),

diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c

@@ -216,8 +216,10 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
         int tv = prandom_u32() % max_delay;

         im->tm_running = 1;
-        if (!mod_timer(&im->timer, jiffies+tv+2))
-                refcount_inc(&im->refcnt);
+        if (refcount_inc_not_zero(&im->refcnt)) {
+                if (mod_timer(&im->timer, jiffies + tv + 2))
+                        ip_ma_put(im);
+        }
 }

 static void igmp_gq_start_timer(struct in_device *in_dev)
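
The fix takes the reference before arming the timer, and only if the object is still alive; if mod_timer() reports that a timer was already pending (that timer already holds a reference), the extra reference is dropped again. A compilable sketch of the inc-not-zero idiom using C11 atomics (not the kernel's refcount_t):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int refs = 1;

/* Take a reference only if the object is still alive (refcount > 0). */
static bool get_ref_not_zero(void)
{
        int old = atomic_load(&refs);

        while (old != 0)
                if (atomic_compare_exchange_weak(&refs, &old, old + 1))
                        return true;
        return false;
}

static void put_ref(void)
{
        if (atomic_fetch_sub(&refs, 1) == 1)
                printf("last reference dropped, object freed\n");
}

int main(void)
{
        if (get_ref_not_zero()) {
                bool timer_was_pending = true;  /* pretend mod_timer() returned 1 */

                if (timer_was_pending)
                        put_ref();  /* the pending timer already held a reference */
        }
        put_ref();  /* initial reference */
        return 0;
}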

Some files were not shown because too many files have changed in this diff.