This is the 5.10.102 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmIWE/gACgkQONu9yGCS
aT74Cw//awQY9EgXHMRNNU5+qun0ESkj918YvfAIKGDRutaOXK00YUdkqjuxEP6V
gOso0rbXe1LiravgrbPSaYbgSvhGVPOPXHLhb7QQkiodxMZ0oaEEzST2VzM4a19k
xfhne2BeqCCl4oUUA84NZPcCpohaLhRjZesMWR23zWNbVMEIt7uV8eeqRcC0S4YA
r7cA4PURBMUiTlWWssBwlrEVy5JZrlcTYCvljxcpd8ws1Xs0KnmxX4gIDpQzNoFj
Dfwg3lSFcl+ar/Xrg14qL3q3iO8zpaJ2msguZ4vq+OLiYWoTfoFcBF625eQJn6/B
eWrzpNUqzFv5Eu31vMnv4Wm/iN8/1Yd+lacSqKwxejdmZzwX79VRqv9kD2naT/eP
C/tkI1b93Ig7+teIjm+aqMAFDoAeR75oVK0KwKEkDIT19mbX1CSex9VnJtaSHZTM
ON7N8NBUyIPLmhc3jencrsFNIY11W7xUedcN/+JmrZoi4Xy4Yr0mh3H4fZVs/vAH
I6uYZgylTZiX0esCqJeiHrasETL0IStDw57+wbrjLlBrATgkMbhZQ+VTBxio/mxV
yok4TiIB+EHaNMOgIAuT6uaZrXwDwHekR1EX1s4p2cPTz1kstp1ZIC2Gdv910xKB
gprzPa/ocwqk71QODgwhogwntR/a4edbbYvCVHKLXQH9Pd81jkM=
=TZTu
-----END PGP SIGNATURE-----

Merge 5.10.102 into android12-5.10-lts

Changes in 5.10.102
    drm/nouveau/pmu/gm200-: use alternate falcon reset sequence
    mm: memcg: synchronize objcg lists with a dedicated spinlock
    rcu: Do not report strict GPs for outgoing CPUs
    fget: clarify and improve __fget_files() implementation
    fs/proc: task_mmu.c: don't read mapcount for migration entry
    can: isotp: prevent race between isotp_bind() and isotp_setsockopt()
    can: isotp: add SF_BROADCAST support for functional addressing
    scsi: lpfc: Fix mailbox command failure during driver initialization
    HID:Add support for UGTABLET WP5540
    Revert "svm: Add warning message for AVIC IPI invalid target"
    serial: parisc: GSC: fix build when IOSAPIC is not set
    parisc: Drop __init from map_pages declaration
    parisc: Fix data TLB miss in sba_unmap_sg
    parisc: Fix sglist access in ccio-dma.c
    mmc: block: fix read single on recovery logic
    mm: don't try to NUMA-migrate COW pages that have other uses
    PCI: hv: Fix NUMA node assignment when kernel boots with custom NUMA topology
    parisc: Add ioread64_lo_hi() and iowrite64_lo_hi()
    btrfs: send: in case of IO error log it
    platform/x86: touchscreen_dmi: Add info for the RWC NANOTE P8 AY07J 2-in-1
    platform/x86: ISST: Fix possible circular locking dependency detected
    selftests: rtc: Increase test timeout so that all tests run
    kselftest: signal all child processes
    net: ieee802154: at86rf230: Stop leaking skb's
    selftests/zram: Skip max_comp_streams interface on newer kernel
    selftests/zram01.sh: Fix compression ratio calculation
    selftests/zram: Adapt the situation that /dev/zram0 is being used
    selftests: openat2: Print also errno in failure messages
    selftests: openat2: Add missing dependency in Makefile
    selftests: openat2: Skip testcases that fail with EOPNOTSUPP
    selftests: skip mincore.check_file_mmap when fs lacks needed support
    ax25: improve the incomplete fix to avoid UAF and NPD bugs
    vfs: make freeze_super abort when sync_filesystem returns error
    quota: make dquot_quota_sync return errors from ->sync_fs
    scsi: pm8001: Fix use-after-free for aborted TMF sas_task
    scsi: pm8001: Fix use-after-free for aborted SSP/STP sas_task
    nvme: fix a possible use-after-free in controller reset during load
    nvme-tcp: fix possible use-after-free in transport error_recovery work
    nvme-rdma: fix possible use-after-free in transport error_recovery work
    drm/amdgpu: fix logic inversion in check
    x86/Xen: streamline (and fix) PV CPU enumeration
    Revert "module, async: async_synchronize_full() on module init iff async is used"
    gcc-plugins/stackleak: Use noinstr in favor of notrace
    random: wake up /dev/random writers after zap
    kbuild: lto: merge module sections
    kbuild: lto: Merge module sections if and only if CONFIG_LTO_CLANG is enabled
    iwlwifi: fix use-after-free
    drm/radeon: Fix backlight control on iMac 12,1
    drm/i915/opregion: check port number bounds for SWSCI display power state
    vsock: remove vsock from connected table when connect is interrupted by a signal
    drm/i915/gvt: Make DRM_I915_GVT depend on X86
    iwlwifi: pcie: fix locking when "HW not ready"
    iwlwifi: pcie: gen2: fix locking when "HW not ready"
    selftests: netfilter: fix exit value for nft_concat_range
    netfilter: nft_synproxy: unregister hooks on init error path
    ipv6: per-netns exclusive flowlabel checks
    net: dsa: lan9303: fix reset on probe
    net: dsa: lantiq_gswip: fix use after free in gswip_remove()
    net: ieee802154: ca8210: Fix lifs/sifs periods
    ping: fix the dif and sdif check in ping_lookup
    bonding: force carrier update when releasing slave
    drop_monitor: fix data-race in dropmon_net_event / trace_napi_poll_hit
    net_sched: add __rcu annotation to netdev->qdisc
    bonding: fix data-races around agg_select_timer
    libsubcmd: Fix use-after-free for realloc(..., 0)
    dpaa2-eth: Initialize mutex used in one step timestamping path
    perf bpf: Defer freeing string after possible strlen() on it
    selftests/exec: Add non-regular to TEST_GEN_PROGS
    ALSA: hda/realtek: Add quirk for Legion Y9000X 2019
    ALSA: hda/realtek: Fix deadlock by COEF mutex
    ALSA: hda: Fix regression on forced probe mask option
    ALSA: hda: Fix missing codec probe on Shenker Dock 15
    ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw()
    ASoC: ops: Fix stereo change notifications in snd_soc_put_volsw_range()
    powerpc/lib/sstep: fix 'ptesync' build error
    mtd: rawnand: gpmi: don't leak PM reference in error path
    KVM: SVM: Never reject emulation due to SMAP errata for !SEV guests
    ASoC: tas2770: Insert post reset delay
    block/wbt: fix negative inflight counter when remove scsi device
    NFS: LOOKUP_DIRECTORY is also ok with symlinks
    NFS: Do not report writeback errors in nfs_getattr()
    tty: n_tty: do not look ahead for EOL character past the end of the buffer
    mtd: rawnand: qcom: Fix clock sequencing in qcom_nandc_probe()
    mtd: rawnand: brcmnand: Fixed incorrect sub-page ECC status
    Drivers: hv: vmbus: Fix memory leak in vmbus_add_channel_kobj
    KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
    KVM: x86/pmu: Don't truncate the PerfEvtSeln MSR when creating a perf event
    KVM: x86/pmu: Use AMD64_RAW_EVENT_MASK for PERF_TYPE_RAW
    NFS: Don't set NFS_INO_INVALID_XATTR if there is no xattr cache
    ARM: OMAP2+: hwmod: Add of_node_put() before break
    ARM: OMAP2+: adjust the location of put_device() call in omapdss_init_of
    phy: usb: Leave some clocks running during suspend
    irqchip/sifive-plic: Add missing thead,c900-plic match string
    netfilter: conntrack: don't refresh sctp entries in closed state
    arm64: dts: meson-gx: add ATF BL32 reserved-memory region
    arm64: dts: meson-g12: add ATF BL32 reserved-memory region
    arm64: dts: meson-g12: drop BL32 region from SEI510/SEI610
    pidfd: fix test failure due to stack overflow on some arches
    selftests: fixup build warnings in pidfd / clone3 tests
    kconfig: let 'shell' return enough output for deep path names
    lib/iov_iter: initialize "flags" in new pipe_buffer
    ata: libata-core: Disable TRIM on M88V29
    soc: aspeed: lpc-ctrl: Block error printing on probe defer cases
    xprtrdma: fix pointer derefs in error cases of rpcrdma_ep_create
    drm/rockchip: dw_hdmi: Do not leave clock enabled in error case
    tracing: Fix tp_printk option related with tp_printk_stop_on_boot
    net: usb: qmi_wwan: Add support for Dell DW5829e
    net: macb: Align the dma and coherent dma masks
    kconfig: fix failing to generate auto.conf
    scsi: lpfc: Fix pt2pt NVMe PRLI reject LOGO loop
    EDAC: Fix calculation of returned address and next offset in edac_align_ptr()
    net: sched: limit TC_ACT_REPEAT loops
    dmaengine: sh: rcar-dmac: Check for error num after setting mask
    dmaengine: stm32-dmamux: Fix PM disable depth imbalance in stm32_dmamux_probe
    dmaengine: sh: rcar-dmac: Check for error num after dma_set_max_seg_size
    i2c: qcom-cci: don't delete an unregistered adapter
    i2c: qcom-cci: don't put a device tree node before i2c_add_adapter()
    copy_process(): Move fd_install() out of sighand->siglock critical section
    i2c: brcmstb: fix support for DSL and CM variants
    lockdep: Correct lock_classes index mapping
    Linux 5.10.102

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ief12c0d77b23f9796b81a8fc3b79ac6589e81dc9
commit 79553fad5c
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 101
+SUBLEVEL = 102
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
     }

     r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+    put_device(&pdev->dev);
     if (r) {
         pr_err("Unable to populate DSS submodule devices\n");
-        put_device(&pdev->dev);
         return r;
     }
@@ -749,9 +749,11 @@ static int __init _init_clkctrl_providers(void)

     for_each_matching_node(np, ti_clkctrl_match_table) {
         ret = _setup_clkctrl_provider(np);
-        if (ret)
+        if (ret) {
+            of_node_put(np);
             break;
+        }
     }

     return ret;
 }
@@ -101,6 +101,12 @@ secmon_reserved: secmon@5000000 {
             no-map;
         };

+        /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+        secmon_reserved_bl32: secmon@5300000 {
+            reg = <0x0 0x05300000 0x0 0x2000000>;
+            no-map;
+        };
+
         linux,cma {
             compatible = "shared-dma-pool";
             reusable;
@@ -157,14 +157,6 @@ vddio_ao1v8: regulator-vddio_ao1v8 {
         regulator-always-on;
     };

-    reserved-memory {
-        /* TEE Reserved Memory */
-        bl32_reserved: bl32@5000000 {
-            reg = <0x0 0x05300000 0x0 0x2000000>;
-            no-map;
-        };
-    };
-
     sdio_pwrseq: sdio-pwrseq {
         compatible = "mmc-pwrseq-simple";
         reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
|
@ -43,6 +43,12 @@ secmon_reserved_alt: secmon@5000000 {
|
||||
no-map;
|
||||
};
|
||||
|
||||
/* 32 MiB reserved for ARM Trusted Firmware (BL32) */
|
||||
secmon_reserved_bl32: secmon@5300000 {
|
||||
reg = <0x0 0x05300000 0x0 0x2000000>;
|
||||
no-map;
|
||||
};
|
||||
|
||||
linux,cma {
|
||||
compatible = "shared-dma-pool";
|
||||
reusable;
|
||||
|
@@ -203,14 +203,6 @@ vddio_ao1v8: regulator-vddio_ao1v8 {
         regulator-always-on;
     };

-    reserved-memory {
-        /* TEE Reserved Memory */
-        bl32_reserved: bl32@5000000 {
-            reg = <0x0 0x05300000 0x0 0x2000000>;
-            no-map;
-        };
-    };
-
     sdio_pwrseq: sdio-pwrseq {
         compatible = "mmc-pwrseq-simple";
         reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
     return *((u64 *)addr);
 }

+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+    u32 low, high;
+
+    low = ioread32(addr);
+    high = ioread32(addr + sizeof(u32));
+
+    return low + ((u64)high << 32);
+}
+
 u64 ioread64_hi_lo(const void __iomem *addr)
 {
     u32 low, high;
@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
     }
 }

+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+    iowrite32(val, addr);
+    iowrite32(val >> 32, addr + sizeof(u32));
+}
+
 void iowrite64_hi_lo(u64 val, void __iomem *addr)
 {
     iowrite32(val >> 32, addr + sizeof(u32));
@@ -527,6 +543,7 @@ EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 EXPORT_SYMBOL(ioread64);
 EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
 EXPORT_SYMBOL(ioread64_hi_lo);
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -535,6 +552,7 @@ EXPORT_SYMBOL(iowrite32);
 EXPORT_SYMBOL(iowrite32be);
 EXPORT_SYMBOL(iowrite64);
 EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
 EXPORT_SYMBOL(iowrite64_hi_lo);
 EXPORT_SYMBOL(ioread8_rep);
 EXPORT_SYMBOL(ioread16_rep);
@@ -346,7 +346,7 @@ static void __init setup_bootmem(void)

 static bool kernel_set_to_readonly;

-static void __init map_pages(unsigned long start_vaddr,
+static void __ref map_pages(unsigned long start_vaddr,
             unsigned long start_paddr, unsigned long size,
             pgprot_t pgprot, int force)
 {
@@ -458,7 +458,7 @@ void __init set_kernel_text_rw(int enable_read_write)
     flush_tlb_all();
 }

-void __ref free_initmem(void)
+void free_initmem(void)
 {
     unsigned long init_begin = (unsigned long)__init_begin;
     unsigned long init_end = (unsigned long)__init_end;
@@ -472,7 +472,6 @@ void free_initmem(void)
     /* The init text pages are marked R-X. We have to
      * flush the icache and mark them RW-
      *
-     * This is tricky, because map_pages is in the init section.
      * Do a dummy remap of the data section first (the data
      * section is already PAGE_KERNEL) to pull in the TLB entries
      * for map_kernel */
@@ -3062,12 +3062,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
         case BARRIER_EIEIO:
             eieio();
             break;
+#ifdef CONFIG_PPC64
         case BARRIER_LWSYNC:
             asm volatile("lwsync" : : : "memory");
             break;
         case BARRIER_PTESYNC:
             asm volatile("ptesync" : : : "memory");
             break;
+#endif
         }
         break;
@@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
 }

 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-                  unsigned config, bool exclude_user,
+                  u64 config, bool exclude_user,
                   bool exclude_kernel, bool intr,
                   bool in_tx, bool in_tx_cp)
 {
@@ -170,8 +170,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)

 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-    unsigned config, type = PERF_TYPE_RAW;
-    u8 event_select, unit_mask;
+    u64 config;
+    u32 type = PERF_TYPE_RAW;
     struct kvm *kvm = pmc->vcpu->kvm;
     struct kvm_pmu_event_filter *filter;
     int i;
@@ -203,23 +203,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
     if (!allow_event)
         return;

-    event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-    unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
     if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
               ARCH_PERFMON_EVENTSEL_INV |
               ARCH_PERFMON_EVENTSEL_CMASK |
               HSW_IN_TX |
               HSW_IN_TX_CHECKPOINTED))) {
-        config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
-                              event_select,
-                              unit_mask);
+        config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
         if (config != PERF_COUNT_HW_MAX)
             type = PERF_TYPE_HARDWARE;
     }

     if (type == PERF_TYPE_RAW)
-        config = eventsel & X86_RAW_EVENT_MASK;
+        config = eventsel & AMD64_RAW_EVENT_MASK;

     if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
         return;
@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
 };

 struct kvm_pmu_ops {
-    unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-                    u8 unit_mask);
+    unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
     unsigned (*find_fixed_event)(int idx);
     bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
     struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
@@ -344,8 +344,6 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
         break;
     }
     case AVIC_IPI_FAILURE_INVALID_TARGET:
-        WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
-              index, svm->vcpu.vcpu_id, icrh, icrl);
         break;
     case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
         WARN_ONCE(1, "Invalid backing page\n");
@@ -126,10 +126,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
     return &pmu->gp_counters[msr_to_index(msr)];
 }

-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
-                    u8 event_select,
-                    u8 unit_mask)
+static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+    u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+    u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
     int i;

     for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
@@ -312,7 +312,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 }

 struct kvm_pmu_ops amd_pmu_ops = {
-    .find_arch_event = amd_find_arch_event,
+    .pmc_perf_hw_id = amd_pmc_perf_hw_id,
     .find_fixed_event = amd_find_fixed_event,
     .pmc_is_enabled = amd_pmc_is_enabled,
     .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
@@ -4103,6 +4103,10 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
     bool smep, smap, is_user;
     unsigned long cr4;

+    /* Emulation is always possible when KVM has access to all guest state. */
+    if (!sev_guest(vcpu->kvm))
+        return true;
+
     /*
      * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
      *
@@ -4151,9 +4155,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
     smap = cr4 & X86_CR4_SMAP;
     is_user = svm_get_cpl(vcpu) == 3;
     if (smap && (!smep || is_user)) {
-        if (!sev_guest(vcpu->kvm))
-            return true;
-
         pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");

         /*
@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
         reprogram_counter(pmu, bit);
 }

-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
-                      u8 event_select,
-                      u8 unit_mask)
+static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+    struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+    u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+    u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
     int i;

     for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
@@ -432,7 +433,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 }

 struct kvm_pmu_ops intel_pmu_ops = {
-    .find_arch_event = intel_find_arch_event,
+    .pmc_perf_hw_id = intel_pmc_perf_hw_id,
     .find_fixed_event = intel_find_fixed_event,
     .pmc_is_enabled = intel_pmc_is_enabled,
     .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
@@ -1387,10 +1387,6 @@ asmlinkage __visible void __init xen_start_kernel(void)

     xen_acpi_sleep_register();

-    /* Avoid searching for BIOS MP tables */
-    x86_init.mpparse.find_smp_config = x86_init_noop;
-    x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
     xen_boot_params_init_edd();

 #ifdef CONFIG_ACPI
@@ -149,28 +149,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
     return rc;
 }

-static void __init xen_fill_possible_map(void)
-{
-    int i, rc;
-
-    if (xen_initial_domain())
-        return;
-
-    for (i = 0; i < nr_cpu_ids; i++) {
-        rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-        if (rc >= 0) {
-            num_processors++;
-            set_cpu_possible(i, true);
-        }
-    }
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
 {
     int i, rc;
     unsigned int subtract = 0;

-    if (!xen_initial_domain())
+    if (early)
         return;

     num_processors = 0;
@@ -211,7 +195,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
      * sure the old memory can be recycled. */
     make_lowmem_page_readwrite(xen_initial_gdt);

-    xen_filter_cpu_maps();
     xen_setup_vcpu_info_placement();

     /*
@@ -491,5 +474,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
 void __init xen_smp_init(void)
 {
     smp_ops = xen_smp_ops;
-    xen_fill_possible_map();
+
+    /* Avoid searching for BIOS MP tables */
+    x86_init.mpparse.find_smp_config = x86_init_noop;
+    x86_init.mpparse.get_smp_config = _get_smp_config;
 }
@@ -6399,6 +6399,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
     spin_unlock_irq(&bfqd->lock);
 #endif

+    wbt_enable_default(bfqd->queue);
+
     kfree(bfqd);
 }
@@ -518,8 +518,6 @@ void elv_unregister_queue(struct request_queue *q)
         kobject_del(&e->kobj);

         e->registered = 0;
-        /* Re-enable throttling in case elevator disabled it */
-        wbt_enable_default(q);
     }
 }
@@ -3989,6 +3989,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {

     /* devices that don't properly handle TRIM commands */
     { "SuperSSpeed S238*",      NULL,   ATA_HORKAGE_NOTRIM, },
+    { "M88V29*",                NULL,   ATA_HORKAGE_NOTRIM, },

     /*
      * As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -1987,7 +1987,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
          */
         if (!capable(CAP_SYS_ADMIN))
             return -EPERM;
-        input_pool.entropy_count = 0;
+        if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+            wake_up_interruptible(&random_write_wait);
+            kill_fasync(&fasync, SIGIO, POLL_OUT);
+        }
         return 0;
     case RNDRESEEDCRNG:
         if (!capable(CAP_SYS_ADMIN))
@@ -1844,8 +1844,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)

     dmac->dev = &pdev->dev;
     platform_set_drvdata(pdev, dmac);
-    dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-    dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+    ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+    if (ret)
+        return ret;
+
+    ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+    if (ret)
+        return ret;

     ret = rcar_dmac_parse_of(&pdev->dev, dmac);
     if (ret < 0)
@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
     ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
                      &stm32_dmamux->dmarouter);
     if (ret)
-        goto err_clk;
+        goto pm_disable;

     return 0;

+pm_disable:
+    pm_runtime_disable(&pdev->dev);
 err_clk:
     clk_disable_unprepare(stm32_dmamux->clk);
@@ -210,7 +210,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
     else
         return (char *)ptr;

-    r = (unsigned long)p % align;
+    r = (unsigned long)ptr % align;

     if (r == 0)
         return (char *)ptr;
@@ -2120,7 +2120,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
     unsigned i;
     int r;

-    if (direct_submit && !ring->sched.ready) {
+    if (!direct_submit && !ring->sched.ready) {
         DRM_ERROR("Trying to move memory with ring turned off.\n");
         return -EINVAL;
     }
@@ -100,6 +100,7 @@ config DRM_I915_USERPTR
 config DRM_I915_GVT
     bool "Enable Intel GVT-g graphics virtualization host support"
     depends on DRM_I915
+    depends on X86
     depends on 64BIT
     default n
     help
@@ -361,6 +361,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
         port++;
     }

+    /*
+     * The port numbering and mapping here is bizarre. The now-obsolete
+     * swsci spec supports ports numbered [0..4]. Port E is handled as a
+     * special case, but port F and beyond are not. The functionality is
+     * supposed to be obsolete for new platforms. Just bail out if the port
+     * number is out of bounds after mapping.
+     */
+    if (port > 4) {
+        drm_dbg_kms(&dev_priv->drm,
+                "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+                intel_encoder->base.base.id, intel_encoder->base.name,
+                port_name(intel_encoder->port), port);
+        return -EINVAL;
+    }
+
     if (!enable)
         parm |= 4 << 8;
@@ -119,8 +119,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon)
 int
 nvkm_falcon_reset(struct nvkm_falcon *falcon)
 {
-    nvkm_falcon_disable(falcon);
-    return nvkm_falcon_enable(falcon);
+    if (!falcon->func->reset) {
+        nvkm_falcon_disable(falcon);
+        return nvkm_falcon_enable(falcon);
+    }
+
+    return falcon->func->reset(falcon);
 }

 int
@@ -23,9 +23,38 @@
  */
 #include "priv.h"

+static int
+gm200_pmu_flcn_reset(struct nvkm_falcon *falcon)
+{
+    struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+
+    nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff);
+    pmu->func->reset(pmu);
+    return nvkm_falcon_enable(falcon);
+}
+
+const struct nvkm_falcon_func
+gm200_pmu_flcn = {
+    .debug = 0xc08,
+    .fbif = 0xe00,
+    .load_imem = nvkm_falcon_v1_load_imem,
+    .load_dmem = nvkm_falcon_v1_load_dmem,
+    .read_dmem = nvkm_falcon_v1_read_dmem,
+    .bind_context = nvkm_falcon_v1_bind_context,
+    .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+    .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+    .set_start_addr = nvkm_falcon_v1_set_start_addr,
+    .start = nvkm_falcon_v1_start,
+    .enable = nvkm_falcon_v1_enable,
+    .disable = nvkm_falcon_v1_disable,
+    .reset = gm200_pmu_flcn_reset,
+    .cmdq = { 0x4a0, 0x4b0, 4 },
+    .msgq = { 0x4c8, 0x4cc, 0 },
+};
+
 static const struct nvkm_pmu_func
 gm200_pmu = {
-    .flcn = &gt215_pmu_flcn,
+    .flcn = &gm200_pmu_flcn,
     .enabled = gf100_pmu_enabled,
     .reset = gf100_pmu_reset,
 };
@@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)

 static const struct nvkm_pmu_func
 gm20b_pmu = {
-    .flcn = &gt215_pmu_flcn,
+    .flcn = &gm200_pmu_flcn,
     .enabled = gf100_pmu_enabled,
     .intr = gt215_pmu_intr,
     .recv = gm20b_pmu_recv,
@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)

 static const struct nvkm_pmu_func
 gp102_pmu = {
-    .flcn = &gt215_pmu_flcn,
+    .flcn = &gm200_pmu_flcn,
     .enabled = gp102_pmu_enabled,
     .reset = gp102_pmu_reset,
 };
@@ -78,7 +78,7 @@ gp10b_pmu_acr = {

 static const struct nvkm_pmu_func
 gp10b_pmu = {
-    .flcn = &gt215_pmu_flcn,
+    .flcn = &gm200_pmu_flcn,
     .enabled = gf100_pmu_enabled,
     .intr = gt215_pmu_intr,
     .recv = gm20b_pmu_recv,
@@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *);

 void gk110_pmu_pgob(struct nvkm_pmu *, bool);

+extern const struct nvkm_falcon_func gm200_pmu_flcn;
+
 void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64);
 void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *);
 int gm20b_pmu_acr_boot(struct nvkm_falcon *);
@@ -197,7 +197,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
      * so don't register a backlight device
      */
     if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
-        (rdev->pdev->device == 0x6741))
+        (rdev->pdev->device == 0x6741) &&
+        !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
         return;

     if (!radeon_encoder->enc_priv)
@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
         return ret;
     }

-    ret = clk_prepare_enable(hdmi->vpll_clk);
-    if (ret) {
-        DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
-                  ret);
-        return ret;
-    }
-
     hdmi->phy = devm_phy_optional_get(dev, "hdmi");
     if (IS_ERR(hdmi->phy)) {
         ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
         return ret;
     }

+    ret = clk_prepare_enable(hdmi->vpll_clk);
+    if (ret) {
+        DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+                  ret);
+        return ret;
+    }
+
     drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
     drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1322,6 +1322,7 @@
 #define USB_VENDOR_ID_UGTIZER              0x2179
 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610    0x0053
 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040    0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540    0x0004

 #define USB_VENDOR_ID_VIEWSONIC            0x0543
 #define USB_DEVICE_ID_VIEWSONIC_PD1011     0xe621
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
     { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
     { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
     { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+    { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
     { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
     { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
     { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
@@ -1944,8 +1944,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
     kobj->kset = dev->channels_kset;
     ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
                    "%u", relid);
-    if (ret)
+    if (ret) {
+        kobject_put(kobj);
         return ret;
+    }

     ret = sysfs_create_group(kobj, &vmbus_chan_group);

@@ -1954,6 +1956,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
          * The calling functions' error handling paths will cleanup the
          * empty channel directory.
          */
+        kobject_put(kobj);
         dev_err(device, "Unable to set up channel sysfs files\n");
         return ret;
     }
@@ -674,7 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)

     /* set the data in/out register size for compatible SoCs */
     if (of_device_is_compatible(dev->device->of_node,
-                    "brcmstb,brcmper-i2c"))
+                    "brcm,brcmper-i2c"))
         dev->data_regsz = sizeof(u8);
     else
         dev->data_regsz = sizeof(u32);
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
         cci->master[idx].adap.quirks = &cci->data->quirks;
         cci->master[idx].adap.algo = &cci_algo;
         cci->master[idx].adap.dev.parent = dev;
-        cci->master[idx].adap.dev.of_node = child;
+        cci->master[idx].adap.dev.of_node = of_node_get(child);
         cci->master[idx].master = idx;
         cci->master[idx].cci = cci;

@@ -643,9 +643,11 @@ static int cci_probe(struct platform_device *pdev)
             continue;

         ret = i2c_add_adapter(&cci->master[i].adap);
-        if (ret < 0)
+        if (ret < 0) {
+            of_node_put(cci->master[i].adap.dev.of_node);
             goto error_i2c;
+        }
     }

     pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
     pm_runtime_use_autosuspend(dev);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
     return 0;

 error_i2c:
-    for (; i >= 0; i--) {
-        if (cci->master[i].cci)
+    for (--i ; i >= 0; i--) {
+        if (cci->master[i].cci) {
             i2c_del_adapter(&cci->master[i].adap);
+            of_node_put(cci->master[i].adap.dev.of_node);
+        }
     }
 error:
     disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
     int i;

     for (i = 0; i < cci->data->num_masters; i++) {
-        if (cci->master[i].cci)
+        if (cci->master[i].cci) {
             i2c_del_adapter(&cci->master[i].adap);
+            of_node_put(cci->master[i].adap.dev.of_node);
+        }
         cci_halt(cci, i);
     }
@@ -400,3 +400,4 @@ static int __init plic_init(struct device_node *node,

 IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
 IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
@@ -1652,12 +1652,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
     struct mmc_card *card = mq->card;
     struct mmc_host *host = card->host;
     blk_status_t error = BLK_STS_OK;
-    int retries = 0;

     do {
         u32 status;
         int err;
+        int retries = 0;

+        while (retries++ <= MMC_READ_SINGLE_RETRIES) {
         mmc_blk_rw_rq_prep(mqrq, card, 1, mq);

         mmc_wait_for_req(host, mrq);
@@ -1673,10 +1674,9 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
             goto error_exit;
         }

-        if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
-            continue;
-
-        retries = 0;
+        if (!mrq->cmd->error)
+            break;
+        }

         if (mrq->cmd->error ||
             mrq->data->error ||
@@ -2062,7 +2062,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
                     mtd->oobsize / trans,
                     host->hwcfg.sector_size_1k);

-        if (!ret) {
+        if (ret != -EBADMSG) {
             *err_addr = brcmnand_get_uncorrecc_addr(ctrl);

             if (*err_addr)
@@ -2291,7 +2291,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
         this->hw.must_apply_timings = false;
         ret = gpmi_nfc_apply_timings(this);
         if (ret)
-            return ret;
+            goto out_pm;
     }

     dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2420,6 +2420,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,

     this->bch = false;

+out_pm:
     pm_runtime_mark_last_busy(this->dev);
     pm_runtime_put_autosuspend(this->dev);
@@ -2,7 +2,6 @@
 /*
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
-
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -2968,10 +2967,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
     if (!nandc->base_dma)
         return -ENXIO;

-    ret = qcom_nandc_alloc(nandc);
-    if (ret)
-        goto err_nandc_alloc;
-
     ret = clk_prepare_enable(nandc->core_clk);
     if (ret)
         goto err_core_clk;
@@ -2980,6 +2975,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
     if (ret)
         goto err_aon_clk;

+    ret = qcom_nandc_alloc(nandc);
+    if (ret)
+        goto err_nandc_alloc;
+
     ret = qcom_nandc_setup(nandc);
     if (ret)
         goto err_setup;
@@ -2991,15 +2990,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
     return 0;

 err_setup:
+    qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
     clk_disable_unprepare(nandc->aon_clk);
 err_aon_clk:
     clk_disable_unprepare(nandc->core_clk);
 err_core_clk:
-    qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
     dma_unmap_resource(dev, res->start, resource_size(res),
                DMA_BIDIRECTIONAL, 0);

     return ret;
 }
@@ -223,7 +223,7 @@ static inline int __check_agg_selection_timer(struct port *port)
     if (bond == NULL)
         return 0;

-    return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+    return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
 }

 /**
@@ -1976,7 +1976,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
  */
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
 {
-    BOND_AD_INFO(bond).agg_select_timer = timeout;
+    atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
 }

 /**
@@ -2259,6 +2259,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
     spin_unlock_bh(&bond->mode_lock);
 }

+/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond:  bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+    int val, nval;
+
+    while (1) {
+        val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+        if (!val)
+            return false;
+        nval = val - 1;
+        if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+                   val, nval) == val)
+            break;
+    }
+    return nval == 0;
+}
+
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
  * @work: work context to fetch bonding struct to work on from
@@ -2294,9 +2316,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
     if (!bond_has_slaves(bond))
         goto re_arm;

-    /* check if agg_select_timer timer after initialize is timed out */
-    if (BOND_AD_INFO(bond).agg_select_timer &&
-        !(--BOND_AD_INFO(bond).agg_select_timer)) {
+    if (bond_agg_timer_advance(bond)) {
         slave = bond_first_slave_rcu(bond);
         port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
@@ -2272,10 +2272,9 @@ static int __bond_release_one(struct net_device *bond_dev,
         bond_select_active_slave(bond);
     }

-    if (!bond_has_slaves(bond)) {
-        bond_set_carrier(bond);
+    bond_set_carrier(bond);
+    if (!bond_has_slaves(bond))
         eth_hw_addr_random(bond_dev);
-    }

     unblock_netpoll_tx();
     synchronize_rcu();
@@ -1305,7 +1305,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
                     struct device_node *np)
 {
     chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
-                           GPIOD_OUT_LOW);
+                           GPIOD_OUT_HIGH);
     if (IS_ERR(chip->reset_gpio))
         return PTR_ERR(chip->reset_gpio);
@@ -2114,8 +2114,8 @@ static int gswip_remove(struct platform_device *pdev)

     if (priv->ds->slave_mii_bus) {
         mdiobus_unregister(priv->ds->slave_mii_bus);
-        mdiobus_free(priv->ds->slave_mii_bus);
         of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+        mdiobus_free(priv->ds->slave_mii_bus);
     }

     for (i = 0; i < priv->num_gphy_fw; i++)
@@ -4534,7 +4534,7 @@ static int macb_probe(struct platform_device *pdev)

 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
     if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
-        dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
         bp->hw_dma_cap |= HW_DMA_CAP_64B;
     }
 #endif
@@ -4225,7 +4225,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
     }

     INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+    mutex_init(&priv->onestep_tstamp_lock);
     skb_queue_head_init(&priv->tx_skbs);

     /* Obtain a MC portal */
@@ -100,6 +100,7 @@ struct at86rf230_local {
     unsigned long cal_timeout;
     bool is_tx;
     bool is_tx_from_off;
+    bool was_tx;
     u8 tx_retry;
     struct sk_buff *tx_skb;
     struct at86rf230_state_change tx;
@@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context)
     if (ctx->free)
         kfree(ctx);

-    ieee802154_wake_queue(lp->hw);
+    if (lp->was_tx) {
+        lp->was_tx = 0;
+        dev_kfree_skb_any(lp->tx_skb);
+        ieee802154_wake_queue(lp->hw);
+    }
 }

 static void
@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
     struct at86rf230_state_change *ctx = context;
     struct at86rf230_local *lp = ctx->lp;

-    lp->is_tx = 0;
+    if (lp->is_tx) {
+        lp->was_tx = 1;
+        lp->is_tx = 0;
+    }
+
     at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
                      at86rf230_async_error_recover_complete);
 }
@@ -2977,8 +2977,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
     ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
     ca8210_hw->phy->cca_ed_level = -9800;
     ca8210_hw->phy->symbol_duration = 16;
-    ca8210_hw->phy->lifs_period = 40;
-    ca8210_hw->phy->sifs_period = 12;
+    ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+    ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
     ca8210_hw->flags =
         IEEE802154_HW_AFILT |
         IEEE802154_HW_OMIT_CKSUM |
@@ -1333,6 +1333,8 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
     {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e preproduction config */
     {QMI_FIXED_INTF(0x413c, 0x81e0, 0)},    /* Dell Wireless 5821e with eSIM support*/
+    {QMI_FIXED_INTF(0x413c, 0x81e4, 0)},    /* Dell Wireless 5829e with eSIM support*/
+    {QMI_FIXED_INTF(0x413c, 0x81e6, 0)},    /* Dell Wireless 5829e */
     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
     {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
     {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
@@ -1646,6 +1646,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
  out_unbind:
     complete(&drv->request_firmware_complete);
     device_release_driver(drv->trans->dev);
+    /* drv has just been freed by the release */
+    failure = false;
  free:
     if (failure)
         iwl_dealloc_ucode(drv);
@@ -320,8 +320,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
     /* This may fail if AMT took ownership of the device */
     if (iwl_pcie_prepare_card_hw(trans)) {
         IWL_WARN(trans, "Exit HW not ready\n");
-        ret = -EIO;
-        goto out;
+        return -EIO;
     }

     iwl_enable_rfkill_int(trans);
@@ -1313,8 +1313,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
     /* This may fail if AMT took ownership of the device */
     if (iwl_pcie_prepare_card_hw(trans)) {
         IWL_WARN(trans, "Exit HW not ready\n");
-        ret = -EIO;
-        goto out;
+        return -EIO;
     }

     iwl_enable_rfkill_int(trans);
@@ -4259,6 +4259,13 @@ static void nvme_async_event_work(struct work_struct *work)
         container_of(work, struct nvme_ctrl, async_event_work);

     nvme_aen_uevent(ctrl);
-    ctrl->ops->submit_async_event(ctrl);
+
+    /*
+     * The transport drivers must guarantee AER submission here is safe by
+     * flushing ctrl async_event_work after changing the controller state
+     * from LIVE and before freeing the admin queue.
+     */
+    if (ctrl->state == NVME_CTRL_LIVE)
+        ctrl->ops->submit_async_event(ctrl);
 }
@@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
             struct nvme_rdma_ctrl, err_work);

     nvme_stop_keep_alive(&ctrl->ctrl);
+    flush_work(&ctrl->ctrl.async_event_work);
     nvme_rdma_teardown_io_queues(ctrl, false);
     nvme_start_queues(&ctrl->ctrl);
     nvme_rdma_teardown_admin_queue(ctrl, false);
@@ -2077,6 +2077,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
     struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

     nvme_stop_keep_alive(ctrl);
+    flush_work(&ctrl->async_event_work);
     nvme_tcp_teardown_io_queues(ctrl, false);
     /* unquiesce to fail fast pending requests */
     nvme_start_queues(ctrl);
@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
     ioc->usg_calls++;
 #endif

-    while(sg_dma_len(sglist) && nents--) {
+    while (nents && sg_dma_len(sglist)) {

 #ifdef CCIO_COLLECT_STATS
         ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
         ccio_unmap_page(dev, sg_dma_address(sglist),
                 sg_dma_len(sglist), direction, 0);
         ++sglist;
+        nents--;
     }

     DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
     spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif

-    while (sg_dma_len(sglist) && nents--) {
+    while (nents && sg_dma_len(sglist)) {

         sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
                 direction, 0);
@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
         ioc->usingle_calls--;   /* kluge since call is unmap_sg() */
 #endif
         ++sglist;
+        nents--;
     }

     DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
@@ -1841,8 +1841,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
         if (!hv_dev)
             continue;

-        if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
-            set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+        if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+            hv_dev->desc.virtual_numa_node < num_possible_nodes())
+            /*
+             * The kernel may boot with some NUMA nodes offline
+             * (e.g. in a KDUMP kernel) or with NUMA disabled via
+             * "numa=off". In those cases, adjust the host provided
+             * NUMA node to a valid NUMA node used by the kernel.
+             */
+            set_dev_node(&dev->dev,
+                     numa_map_to_online_node(
+                         hv_dev->desc.virtual_numa_node));

         put_pcichild(hv_dev);
     }
@@ -17,6 +17,7 @@
 #include <linux/soc/brcmstb/brcmstb.h>
 #include <dt-bindings/phy/phy.h>
 #include <linux/mfd/syscon.h>
+#include <linux/suspend.h>

 #include "phy-brcm-usb-init.h"

@@ -69,12 +70,35 @@ struct brcm_usb_phy_data {
     int         init_count;
     int         wake_irq;
     struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+    struct notifier_block   pm_notifier;
+    bool        pm_active;
 };

 static s8 *node_reg_names[BRCM_REGS_MAX] = {
     "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
 };

+static int brcm_pm_notifier(struct notifier_block *notifier,
+                unsigned long pm_event,
+                void *unused)
+{
+    struct brcm_usb_phy_data *priv =
+        container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+    switch (pm_event) {
+    case PM_HIBERNATION_PREPARE:
+    case PM_SUSPEND_PREPARE:
+        priv->pm_active = true;
+        break;
+    case PM_POST_RESTORE:
+    case PM_POST_HIBERNATION:
+    case PM_POST_SUSPEND:
+        priv->pm_active = false;
+        break;
+    }
+    return NOTIFY_DONE;
+}
+
 static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
 {
     struct phy *gphy = dev_id;
@@ -90,6 +114,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
     struct brcm_usb_phy_data *priv =
         container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);

+    if (priv->pm_active)
+        return 0;
+
     /*
      * Use a lock to make sure a second caller waits until
      * the base phy is inited before using it.
@@ -119,6 +146,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
     struct brcm_usb_phy_data *priv =
         container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);

+    if (priv->pm_active)
+        return 0;
+
     dev_dbg(&gphy->dev, "EXIT\n");
     if (phy->id == BRCM_USB_PHY_2_0)
         brcm_usb_uninit_eohci(&priv->ini);
@@ -484,6 +514,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
     if (err)
         return err;

+    priv->pm_notifier.notifier_call = brcm_pm_notifier;
+    register_pm_notifier(&priv->pm_notifier);
+
     mutex_init(&priv->mutex);

     /* make sure invert settings are correct */
@@ -524,7 +557,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)

 static int brcm_usb_phy_remove(struct platform_device *pdev)
 {
+    struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
+
     sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
+    unregister_pm_notifier(&priv->pm_notifier);

     return 0;
 }
@@ -535,6 +571,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
     struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);

     if (priv->init_count) {
+        dev_dbg(dev, "SUSPEND\n");
         priv->ini.wake_enabled = device_may_wakeup(dev);
         if (priv->phys[BRCM_USB_PHY_3_0].inited)
             brcm_usb_uninit_xhci(&priv->ini);
@@ -574,6 +611,7 @@ static int brcm_usb_phy_resume(struct device *dev)
      * Uninitialize anything that wasn't previously initialized.
      */
     if (priv->init_count) {
+        dev_dbg(dev, "RESUME\n");
         if (priv->wake_irq >= 0)
             disable_irq_wake(priv->wake_irq);
         brcm_usb_init_common(&priv->ini);
@@ -532,7 +532,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
     return ret;
 }

-static DEFINE_MUTEX(punit_misc_dev_lock);
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one share misc device for all ISST interace */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
 static int misc_usage_count;
 static int misc_device_ret;
 static int misc_device_open;
@@ -542,7 +545,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
     int i, ret = 0;

     /* Fail open, if a module is going away */
-    mutex_lock(&punit_misc_dev_lock);
+    mutex_lock(&punit_misc_dev_open_lock);
     for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
         struct isst_if_cmd_cb *cb = &punit_callbacks[i];

@@ -564,7 +567,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
     } else {
         misc_device_open++;
     }
-    mutex_unlock(&punit_misc_dev_lock);
+    mutex_unlock(&punit_misc_dev_open_lock);

     return ret;
 }
@@ -573,7 +576,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
 {
     int i;

-    mutex_lock(&punit_misc_dev_lock);
+    mutex_lock(&punit_misc_dev_open_lock);
     misc_device_open--;
     for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
         struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -581,7 +584,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
         if (cb->registered)
             module_put(cb->owner);
     }
-    mutex_unlock(&punit_misc_dev_lock);
+    mutex_unlock(&punit_misc_dev_open_lock);

     return 0;
 }
@@ -598,6 +601,43 @@ static struct miscdevice isst_if_char_driver = {
     .fops       = &isst_if_char_driver_ops,
 };

+static int isst_misc_reg(void)
+{
+    mutex_lock(&punit_misc_dev_reg_lock);
+    if (misc_device_ret)
+        goto unlock_exit;
+
+    if (!misc_usage_count) {
+        misc_device_ret = isst_if_cpu_info_init();
+        if (misc_device_ret)
+            goto unlock_exit;
+
+        misc_device_ret = misc_register(&isst_if_char_driver);
+        if (misc_device_ret) {
+            isst_if_cpu_info_exit();
+            goto unlock_exit;
+        }
+    }
+    misc_usage_count++;
+
+unlock_exit:
+    mutex_unlock(&punit_misc_dev_reg_lock);
+
+    return misc_device_ret;
+}
+
+static void isst_misc_unreg(void)
+{
+    mutex_lock(&punit_misc_dev_reg_lock);
+    if (misc_usage_count)
+        misc_usage_count--;
+    if (!misc_usage_count && !misc_device_ret) {
+        misc_deregister(&isst_if_char_driver);
+        isst_if_cpu_info_exit();
+    }
+    mutex_unlock(&punit_misc_dev_reg_lock);
+}
+
 /**
  * isst_if_cdev_register() - Register callback for IOCTL
  * @device_type: The device type this callback handling.
@@ -615,38 +655,31 @@ static struct miscdevice isst_if_char_driver = {
  */
 int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
 {
-    if (misc_device_ret)
-        return misc_device_ret;
+    int ret;

     if (device_type >= ISST_IF_DEV_MAX)
         return -EINVAL;

-    mutex_lock(&punit_misc_dev_lock);
+    mutex_lock(&punit_misc_dev_open_lock);
     /* Device is already open, we don't want to add new callbacks */
     if (misc_device_open) {
-        mutex_unlock(&punit_misc_dev_lock);
+        mutex_unlock(&punit_misc_dev_open_lock);
         return -EAGAIN;
     }
-    if (!misc_usage_count) {
-        int ret;
-
-        misc_device_ret = misc_register(&isst_if_char_driver);
-        if (misc_device_ret)
-            goto unlock_exit;
-
-        ret = isst_if_cpu_info_init();
-        if (ret) {
-            misc_deregister(&isst_if_char_driver);
-            misc_device_ret = ret;
-            goto unlock_exit;
-        }
-    }
     memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
     punit_callbacks[device_type].registered = 1;
-    misc_usage_count++;
-unlock_exit:
-    mutex_unlock(&punit_misc_dev_lock);
+    mutex_unlock(&punit_misc_dev_open_lock);

-    return misc_device_ret;
+    ret = isst_misc_reg();
+    if (ret) {
+        /*
+         * No need of mutex as the misc device register failed
+         * as no one can open device yet. Hence no contention.
+         */
+        punit_callbacks[device_type].registered = 0;
+        return ret;
+    }
+
+    return 0;
 }
 EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -661,16 +694,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
  */
 void isst_if_cdev_unregister(int device_type)
 {
-    mutex_lock(&punit_misc_dev_lock);
-    misc_usage_count--;
+    isst_misc_unreg();
+    mutex_lock(&punit_misc_dev_open_lock);
     punit_callbacks[device_type].registered = 0;
     if (device_type == ISST_IF_DEV_MBOX)
         isst_delete_hash();
-    if (!misc_usage_count && !misc_device_ret) {
-        misc_deregister(&isst_if_char_driver);
-        isst_if_cpu_info_exit();
-    }
-    mutex_unlock(&punit_misc_dev_lock);
+    mutex_unlock(&punit_misc_dev_open_lock);
 }
 EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
@@ -756,6 +756,21 @@ static const struct ts_dmi_data predia_basic_data = {
     .properties = predia_basic_props,
 };

+static const struct property_entry rwc_nanote_p8_props[] = {
+    PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
+    PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+    PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+    PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+    PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
+    PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+    { }
+};
+
+static const struct ts_dmi_data rwc_nanote_p8_data = {
+    .acpi_name = "MSSL1680:00",
+    .properties = rwc_nanote_p8_props,
+};
+
 static const struct property_entry schneider_sct101ctm_props[] = {
     PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
     PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1326,6 +1341,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
             DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"),
         },
     },
+    {
+        /* RWC NANOTE P8 */
+        .driver_data = (void *)&rwc_nanote_p8_data,
+        .matches = {
+            DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
+            DMI_MATCH(DMI_PRODUCT_SKU, "0001")
+        },
+    },
     {
         /* Schneider SCT101CTM */
         .driver_data = (void *)&schneider_sct101ctm_data,
@@ -374,6 +374,7 @@ struct lpfc_vport {
 #define FC_VPORT_LOGO_RCVD      0x200    /* LOGO received on vport */
 #define FC_RSCN_DISCOVERY       0x400    /* Auth all devices after RSCN */
 #define FC_LOGO_RCVD_DID_CHNG   0x800    /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME        0x1000   /* Don't send NVME PRLI */
 #define FC_SCSI_SCAN_TMO        0x4000   /* scsi scan timer running */
 #define FC_ABORT_DISCOVERY      0x8000   /* we want to abort discovery */
 #define FC_NDISC_ACTIVE         0x10000  /* NPort discovery active */
@ -1142,6 +1142,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
|
||||
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
|
||||
pmboxq->u.mb.mbxOwner = OWN_HOST;
|
||||
|
||||
if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
|
||||
vport->fc_flag &= ~FC_PT2PT_NO_NVME;
|
||||
|
||||
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
|
||||
|
||||
if ((mbxstatus == MBX_SUCCESS) &&
|
||||
|
@ -1067,7 +1067,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
|
||||
/* FLOGI failed, so there is no fabric */
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
|
||||
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
|
||||
FC_PT2PT_NO_NVME);
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
/* If private loop, then allow max outstanding els to be
|
||||
@ -3945,6 +3946,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
/* Added for Vendor specifc support
|
||||
* Just keep retrying for these Rsn / Exp codes
|
||||
*/
|
||||
if ((vport->fc_flag & FC_PT2PT) &&
|
||||
cmd == ELS_CMD_NVMEPRLI) {
|
||||
switch (stat.un.b.lsRjtRsnCode) {
|
||||
case LSRJT_UNABLE_TPC:
|
||||
case LSRJT_INVALID_CMD:
|
||||
case LSRJT_LOGICAL_ERR:
|
||||
case LSRJT_CMD_UNSUPPORTED:
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
|
||||
"0168 NVME PRLI LS_RJT "
|
||||
"reason %x port doesn't "
|
||||
"support NVME, disabling NVME\n",
|
||||
stat.un.b.lsRjtRsnCode);
|
||||
retry = 0;
|
||||
vport->fc_flag |= FC_PT2PT_NO_NVME;
|
||||
goto out_retry;
|
||||
}
|
||||
}
|
||||
switch (stat.un.b.lsRjtRsnCode) {
|
||||
case LSRJT_UNABLE_TPC:
|
||||
/* The driver has a VALID PLOGI but the rport has
|
||||
|
@ -2010,8 +2010,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
|
||||
* is configured try it.
|
||||
*/
|
||||
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
|
||||
if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
|
||||
(vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
|
||||
if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
|
||||
(vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
|
||||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
|
||||
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
|
||||
/* We need to update the localport also */
|
||||
lpfc_nvme_update_localport(vport);
|
||||
|
@ -7372,6 +7372,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
||||
struct lpfc_vport *vport = phba->pport;
|
||||
struct lpfc_dmabuf *mp;
|
||||
struct lpfc_rqb *rqbp;
|
||||
u32 flg;
|
||||
|
||||
/* Perform a PCI function reset to start from clean */
|
||||
rc = lpfc_pci_function_reset(phba);
|
||||
@ -7385,7 +7386,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
||||
else {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
|
||||
flg = phba->sli.sli_flag;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
/* Allow a little time after setting SLI_ACTIVE for any polled
|
||||
* MBX commands to complete via BSG.
|
||||
*/
|
||||
for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
|
||||
msleep(20);
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
flg = phba->sli.sli_flag;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
}
|
||||
}
|
||||
|
||||
lpfc_sli4_dip(phba);
|
||||
@ -8922,7 +8933,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
|
||||
"(%d):2541 Mailbox command x%x "
|
||||
"(x%x/x%x) failure: "
|
||||
"mqe_sta: x%x mcqe_sta: x%x/x%x "
|
||||
"Data: x%x x%x\n,",
|
||||
"Data: x%x x%x\n",
|
||||
mboxq->vport ? mboxq->vport->vpi : 0,
|
||||
mboxq->u.mb.mbxCommand,
|
||||
lpfc_sli_config_mbox_subsys_get(phba,
|
||||
@ -8956,7 +8967,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
|
||||
"(%d):2597 Sync Mailbox command "
|
||||
"x%x (x%x/x%x) failure: "
|
||||
"mqe_sta: x%x mcqe_sta: x%x/x%x "
|
||||
"Data: x%x x%x\n,",
|
||||
"Data: x%x x%x\n",
|
||||
mboxq->vport ? mboxq->vport->vpi : 0,
|
||||
mboxq->u.mb.mbxCommand,
|
||||
lpfc_sli_config_mbox_subsys_get(phba,
|
||||
|
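The lpfc_sli4_hba_setup() hunk above waits out any BSG-issued polled mailbox command (up to 50 polls, 20 ms apart) after setting LPFC_SLI_ACTIVE instead of letting initialization race with it. A compact userspace model of that bounded-poll loop; the lock-protected flag word and the 50 * 20 ms budget mirror the hunk, everything else is illustrative:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int sli_flag = 0x1;      /* bit 0: a polled mailbox is active */

static unsigned int read_flags(void)
{
	pthread_mutex_lock(&hbalock);
	unsigned int flg = sli_flag;
	if (flg)
		sli_flag = 0;            /* pretend the polled command completed */
	pthread_mutex_unlock(&hbalock);
	return flg;
}

int main(void)
{
	unsigned int flg = read_flags();

	/* Up to 50 polls, 20 ms apart (~1 s), before giving up and moving on. */
	for (int i = 0; i < 50 && (flg & 0x1); i++) {
		usleep(20 * 1000);
		flg = read_flags();
	}
	printf("mailbox %s\n", (flg & 0x1) ? "still active (budget spent)" : "idle");
	return 0;
}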
@@ -753,8 +753,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
 			res = -TMF_RESP_FUNC_FAILED;
 			/* Even TMF timed out, return direct. */
 			if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+				struct pm8001_ccb_info *ccb = task->lldd_task;
+
 				pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
 					   tmf->tmf);
+
+				if (ccb)
+					ccb->task = NULL;
 				goto ex_err;
 			}

@@ -2133,9 +2133,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			pm8001_dbg(pm8001_ha, FAIL,
 				   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
 				   t, status, ts->resp, ts->stat);
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			if (t->slow_task)
 				complete(&t->slow_task->completion);
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		} else {
 			spin_unlock_irqrestore(&t->task_state_lock, flags);
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);

@@ -2726,9 +2726,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_dbg(pm8001_ha, FAIL,
 				   "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
 				   t, status, ts->resp, ts->stat);
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			if (t->slow_task)
 				complete(&t->slow_task->completion);
-			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		} else {
 			spin_unlock_irqrestore(&t->task_state_lock, flags);
 			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
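Both pm8001 completion hunks move pm8001_ccb_task_free() in front of complete(): once the completion fires, the waiting side may free the sas_task, so the completing side must finish touching it first. A userspace sketch of that ordering rule, assuming pthreads; the struct and function names only loosely mirror the driver, and the bug would be any access to the task after the signal:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sas_task {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int             completed;
};

static void complete_task(struct sas_task *t)
{
	/* 1. Finish all bookkeeping that touches 't' (the ccb_task_free step). */
	/* 2. Only then signal; after this, 't' may be freed at any moment.     */
	pthread_mutex_lock(&t->lock);
	t->completed = 1;
	pthread_cond_signal(&t->done);
	pthread_mutex_unlock(&t->lock);
	/* touching 't' here would race with the waiter's free() below */
}

static void *waiter(void *arg)
{
	struct sas_task *t = arg;

	pthread_mutex_lock(&t->lock);
	while (!t->completed)
		pthread_cond_wait(&t->done, &t->lock);
	pthread_mutex_unlock(&t->lock);
	pthread_mutex_destroy(&t->lock); /* owner reclaims the task right away */
	pthread_cond_destroy(&t->done);
	free(t);
	return NULL;
}

int main(void)
{
	struct sas_task *t = calloc(1, sizeof(*t));
	pthread_t th;

	pthread_mutex_init(&t->lock, NULL);
	pthread_cond_init(&t->done, NULL);
	pthread_create(&th, NULL, waiter, t);
	complete_task(t);
	pthread_join(th, NULL);
	puts("done");
	return 0;
}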
@@ -251,10 +251,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
 	}
 
 	lpc_ctrl->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(lpc_ctrl->clk)) {
-		dev_err(dev, "couldn't get clock\n");
-		return PTR_ERR(lpc_ctrl->clk);
-	}
+	if (IS_ERR(lpc_ctrl->clk))
+		return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+				     "couldn't get clock\n");
 	rc = clk_prepare_enable(lpc_ctrl->clk);
 	if (rc) {
 		dev_err(dev, "couldn't enable clock\n");
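dev_err_probe() folds the error print and the return into one statement and, importantly, stays quiet for -EPROBE_DEFER, so deferred probes do not spam the log. A simplified userspace model of its documented behaviour (the real helper also records the defer reason for devres debugging, which is omitted here):

#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel's value, repeated here only for the demo */

/* Loud for real errors, quiet for probe deferral, and the code is handed
 * back so probe() can 'return err_probe(...)' in a single statement. */
static int err_probe(int err, const char *msg)
{
	if (err != -EPROBE_DEFER)
		fprintf(stderr, "error %d: %s\n", err, msg);
	return err;
}

int main(void)
{
	int rc = err_probe(-5, "couldn't get clock"); /* prints and propagates */

	return rc ? 1 : 0;
}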
@@ -2024,7 +2024,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
 		return false;
 
 	canon_head = smp_load_acquire(&ldata->canon_head);
-	n = min(*nr + 1, canon_head - ldata->read_tail);
+	n = min(*nr, canon_head - ldata->read_tail);
 
 	tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
 	size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);

@@ -2046,10 +2046,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
 		n += N_TTY_BUF_SIZE;
 	c = n + found;
 
-	if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
-		c = min(*nr, c);
+	if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
 		n = c;
-	}
 
 	n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
 		    __func__, eol, found, n, c, tail, more);
@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
 	unsigned long address;
 	int err;
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
 	if (!dev->irq && (dev->id.sversion == 0xad))
 		dev->irq = iosapic_serial_irq(dev);
 #endif
@@ -5006,6 +5006,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
 		lock_page(page);
 		if (!PageUptodate(page)) {
 			unlock_page(page);
+			btrfs_err(fs_info,
+			"send: IO error at offset %llu for inode %llu root %llu",
+				page_offset(page), sctx->cur_ino,
+				sctx->send_root->root_key.objectid);
 			put_page(page);
 			ret = -EIO;
 			break;
fs/file.c:

@@ -817,28 +817,68 @@ void do_close_on_exec(struct files_struct *files)
 	spin_unlock(&files->file_lock);
 }
 
+static inline struct file *__fget_files_rcu(struct files_struct *files,
+	unsigned int fd, fmode_t mask, unsigned int refs)
+{
+	for (;;) {
+		struct file *file;
+		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+		struct file __rcu **fdentry;
+
+		if (unlikely(fd >= fdt->max_fds))
+			return NULL;
+
+		fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
+		file = rcu_dereference_raw(*fdentry);
+		if (unlikely(!file))
+			return NULL;
+
+		if (unlikely(file->f_mode & mask))
+			return NULL;
+
+		/*
+		 * Ok, we have a file pointer. However, because we do
+		 * this all locklessly under RCU, we may be racing with
+		 * that file being closed.
+		 *
+		 * Such a race can take two forms:
+		 *
+		 *  (a) the file ref already went down to zero,
+		 *      and get_file_rcu_many() fails. Just try
+		 *      again:
+		 */
+		if (unlikely(!get_file_rcu_many(file, refs)))
+			continue;
+
+		/*
+		 *  (b) the file table entry has changed under us.
+		 *       Note that we don't need to re-check the 'fdt->fd'
+		 *       pointer having changed, because it always goes
+		 *       hand-in-hand with 'fdt'.
+		 *
+		 * If so, we need to put our refs and try again.
+		 */
+		if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
+		    unlikely(rcu_dereference_raw(*fdentry) != file)) {
+			fput_many(file, refs);
+			continue;
+		}
+
+		/*
+		 * Ok, we have a ref to the file, and checked that it
+		 * still exists.
+		 */
+		return file;
+	}
+}
+
 static struct file *__fget_files(struct files_struct *files, unsigned int fd,
 				 fmode_t mask, unsigned int refs)
 {
 	struct file *file;
 
 	rcu_read_lock();
-loop:
-	file = fcheck_files(files, fd);
-	if (file) {
-		/* File object ref couldn't be taken.
-		 * dup2() atomicity guarantee is the reason
-		 * we loop to catch the new file (or NULL pointer)
-		 */
-		if (file->f_mode & mask)
-			file = NULL;
-		else if (!get_file_rcu_many(file, refs))
-			goto loop;
-		else if (__fcheck_files(files, fd) != file) {
-			fput_many(file, refs);
-			goto loop;
-		}
-	}
+	file = __fget_files_rcu(files, fd, mask, refs);
 	rcu_read_unlock();
 
 	return file;
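The new __fget_files_rcu() is the classic lockless-lookup shape: load the slot, try to take a reference, then re-check the slot, retrying on either failure. A compact userspace model using C11 atomics in place of RCU and get_file_rcu_many(); all names here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct file_obj {
	atomic_int refcount; /* 0 means "being torn down": refs may not be taken */
};

static _Atomic(struct file_obj *) fd_slot; /* one-entry stand-in for the fd table */

static int ref_get(struct file_obj *f)
{
	int c = atomic_load(&f->refcount);

	while (c > 0)
		if (atomic_compare_exchange_weak(&f->refcount, &c, c + 1))
			return 1;
	return 0; /* raced with the final put: case (a) in the comment above */
}

struct file_obj *slot_get(void)
{
	for (;;) {
		struct file_obj *f = atomic_load(&fd_slot);

		if (!f)
			return NULL;
		if (!ref_get(f))
			continue;                 /* (a) ref already gone: retry */
		if (atomic_load(&fd_slot) != f) {
			atomic_fetch_sub(&f->refcount, 1);
			continue;                 /* (b) slot changed under us: retry */
		}
		return f;                         /* ref taken and slot re-validated */
	}
}

int main(void)
{
	static struct file_obj file = { .refcount = 1 };

	atomic_store(&fd_slot, &file);
	printf("got %p\n", (void *)slot_get());
	return 0;
}

The re-check after taking the reference is what preserves the dup2() atomicity guarantee the old goto-loop comment described: a caller never returns a file that was already replaced in the table.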
@@ -1780,14 +1780,14 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 	if (!res) {
 		inode = d_inode(dentry);
 		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-		    !S_ISDIR(inode->i_mode))
+		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
 			res = ERR_PTR(-ENOTDIR);
 		else if (inode && S_ISREG(inode->i_mode))
 			res = ERR_PTR(-EOPENSTALE);
 	} else if (!IS_ERR(res)) {
 		inode = d_inode(res);
 		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-		    !S_ISDIR(inode->i_mode)) {
+		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
 			dput(res);
 			res = ERR_PTR(-ENOTDIR);
 		} else if (inode && S_ISREG(inode->i_mode)) {
@@ -195,6 +195,18 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
 }
 EXPORT_SYMBOL_GPL(nfs_check_cache_invalid);
 
+#ifdef CONFIG_NFS_V4_2
+static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
+{
+	return nfsi->xattr_cache != NULL;
+}
+#else
+static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
+{
+	return false;
+}
+#endif
+
 static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);

@@ -210,6 +222,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 	} else if (flags & NFS_INO_REVAL_PAGECACHE)
 		flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
 
+	if (!nfs_has_xattr_cache(nfsi))
+		flags &= ~NFS_INO_INVALID_XATTR;
 	if (inode->i_mapping->nrpages == 0)
 		flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
 	nfsi->cache_validity |= flags;

@@ -807,12 +821,9 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
 	}
 
 	/* Flush out writes to the server in order to update c/mtime. */
-	if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
-			S_ISREG(inode->i_mode)) {
-		err = filemap_write_and_wait(inode->i_mapping);
-		if (err)
-			goto out;
-	}
+	if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
+	    S_ISREG(inode->i_mode))
+		filemap_write_and_wait(inode->i_mapping);
 
 	/*
 	 * We may force a getattr if the user cares about atime.
@@ -487,7 +487,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 }
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty, bool locked)
+		bool compound, bool young, bool dirty, bool locked,
+		bool migration)
 {
 	int i, nr = compound ? compound_nr(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;

@@ -514,8 +515,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 	 * If any subpage of the compound page mapped with PTE it would elevate
 	 * page_count().
+	 *
+	 * The page_mapcount() is called to get a snapshot of the mapcount.
+	 * Without holding the page lock this snapshot can be slightly wrong as
+	 * we cannot always read the mapcount atomically. It is not safe to
+	 * call page_mapcount() even with PTL held if the page is not mapped,
+	 * especially for migration entries. Treat regular migration entries
+	 * as mapcount == 1.
 	 */
-	if (page_count(page) == 1) {
+	if ((page_count(page) == 1) || migration) {
 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 			locked, true);
 		return;

@@ -552,6 +560,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
+	bool migration = false;
 
 	if (pte_present(*pte)) {
 		page = vm_normal_page(vma, addr, *pte);

@@ -571,9 +580,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 		} else {
 			mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 		}
-	} else if (is_migration_entry(swpent))
+	} else if (is_migration_entry(swpent)) {
+		migration = true;
 		page = migration_entry_to_page(swpent);
-	else if (is_device_private_entry(swpent))
+	} else if (is_device_private_entry(swpent))
 		page = device_private_entry_to_page(swpent);
 	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 							&& pte_none(*pte))) {

@@ -587,7 +597,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
+		      locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE

@@ -598,6 +609,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
+	bool migration = false;
 
 	if (pmd_present(*pmd)) {
 		/* FOLL_DUMP will return -EFAULT on huge zero page */

@@ -605,9 +617,11 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
-		if (is_migration_entry(entry))
+		if (is_migration_entry(entry)) {
+			migration = true;
 			page = migration_entry_to_page(entry);
+		}
 	}
 	if (IS_ERR_OR_NULL(page))
 		return;
 	if (PageAnon(page))

@@ -618,7 +632,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		/* pass */;
 	else
 		mss->file_thp += HPAGE_PMD_SIZE;
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
+		      locked, migration);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,

@@ -1434,6 +1450,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 {
 	u64 frame = 0, flags = 0;
 	struct page *page = NULL;
+	bool migration = false;
 
 	if (pte_present(pte)) {
 		if (pm->show_pfn)

@@ -1451,8 +1468,10 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 		frame = swp_type(entry) |
 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
 		flags |= PM_SWAP;
-		if (is_migration_entry(entry))
+		if (is_migration_entry(entry)) {
+			migration = true;
 			page = migration_entry_to_page(entry);
+		}
 
 		if (is_device_private_entry(entry))
 			page = device_private_entry_to_page(entry);

@@ -1460,7 +1479,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 
 	if (page && !PageAnon(page))
 		flags |= PM_FILE;
-	if (page && page_mapcount(page) == 1)
+	if (page && !migration && page_mapcount(page) == 1)
 		flags |= PM_MMAP_EXCLUSIVE;
 	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags |= PM_SOFT_DIRTY;

@@ -1476,8 +1495,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	spinlock_t *ptl;
 	pte_t *pte, *orig_pte;
 	int err = 0;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	bool migration = false;
+
 	ptl = pmd_trans_huge_lock(pmdp, vma);
 	if (ptl) {
 		u64 flags = 0, frame = 0;

@@ -1512,11 +1532,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			if (pmd_swp_soft_dirty(pmd))
 				flags |= PM_SOFT_DIRTY;
 			VM_BUG_ON(!is_pmd_migration_entry(pmd));
+			migration = is_migration_entry(entry);
 			page = migration_entry_to_page(entry);
 		}
 #endif
 
-		if (page && page_mapcount(page) == 1)
+		if (page && !migration && page_mapcount(page) == 1)
 			flags |= PM_MMAP_EXCLUSIVE;
 
 		for (; addr != end; addr += PAGE_SIZE) {
@@ -692,9 +692,14 @@ int dquot_quota_sync(struct super_block *sb, int type)
 	/* This is not very clever (and fast) but currently I don't know about
 	 * any other simple way of getting quota data to disk and we must get
 	 * them there for userspace to be visible... */
-	if (sb->s_op->sync_fs)
-		sb->s_op->sync_fs(sb, 1);
-	sync_blockdev(sb->s_bdev);
+	if (sb->s_op->sync_fs) {
+		ret = sb->s_op->sync_fs(sb, 1);
+		if (ret)
+			return ret;
+	}
+	ret = sync_blockdev(sb->s_bdev);
+	if (ret)
+		return ret;
 
 	/*
 	 * Now when everything is written we can discard the pagecache so
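The dquot_quota_sync() change is a straight error-propagation fix: the results of ->sync_fs() and sync_blockdev() are now checked and returned instead of being discarded. The same control flow, condensed into a userspace illustration with stand-in functions:

#include <errno.h>
#include <stdio.h>

static int sync_fs(void)   { return -EIO; } /* stand-in for ->sync_fs(sb, 1) */
static int sync_bdev(void) { return 0; }    /* stand-in for sync_blockdev()  */

/* The first failing step short-circuits, so userspace finally sees the
 * I/O error instead of a successful-looking quotasync. */
static int quota_sync(void)
{
	int ret = sync_fs();

	if (ret)
		return ret;
	return sync_bdev();
}

int main(void)
{
	printf("quota_sync() = %d\n", quota_sync());
	return 0;
}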
fs/super.c:

@@ -1667,11 +1667,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb)
 		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
 }
 
-static void sb_freeze_unlock(struct super_block *sb)
+static void sb_freeze_unlock(struct super_block *sb, int level)
 {
-	int level;
-
-	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+	for (level--; level >= 0; level--)
 		percpu_up_write(sb->s_writers.rw_sem + level);
 }
 
@@ -1742,7 +1740,14 @@ int freeze_super(struct super_block *sb)
 	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
 
 	/* All writers are done so after syncing there won't be dirty data */
-	sync_filesystem(sb);
+	ret = sync_filesystem(sb);
+	if (ret) {
+		sb->s_writers.frozen = SB_UNFROZEN;
+		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
+		wake_up(&sb->s_writers.wait_unfrozen);
+		deactivate_locked_super(sb);
+		return ret;
+	}
 
 	/* Now wait for internal filesystem counter */
 	sb->s_writers.frozen = SB_FREEZE_FS;

@@ -1754,7 +1759,7 @@ int freeze_super(struct super_block *sb)
 		printk(KERN_ERR
 			"VFS:Filesystem freeze failed\n");
 		sb->s_writers.frozen = SB_UNFROZEN;
-		sb_freeze_unlock(sb);
+		sb_freeze_unlock(sb, SB_FREEZE_FS);
 		wake_up(&sb->s_writers.wait_unfrozen);
 		deactivate_locked_super(sb);
 		return ret;

@@ -1805,7 +1810,7 @@ static int thaw_super_locked(struct super_block *sb)
 	}
 
 	sb->s_writers.frozen = SB_UNFROZEN;
-	sb_freeze_unlock(sb);
+	sb_freeze_unlock(sb, SB_FREEZE_FS);
 out:
 	wake_up(&sb->s_writers.wait_unfrozen);
 	deactivate_locked_super(sb);
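sb_freeze_unlock() now takes the highest level already acquired and unwinds only below it, which lets the new sync_filesystem() failure path (holding up to SB_FREEZE_PAGEFAULT) and the existing SB_FREEZE_FS paths share one helper. A userspace sketch of that partial-unwind idea, with a mutex array standing in for the per-level percpu rwsems; names and levels are illustrative:

#include <pthread.h>

enum { FREEZE_WRITE, FREEZE_PAGEFAULT, FREEZE_FS, FREEZE_LEVELS };

static pthread_mutex_t rw_sem[FREEZE_LEVELS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};

/* Unlock every level strictly below 'level', highest first, mirroring
 * the reworked sb_freeze_unlock(). */
static void freeze_unlock(int level)
{
	for (level--; level >= 0; level--)
		pthread_mutex_unlock(&rw_sem[level]);
}

int main(void)
{
	/* freeze took WRITE and PAGEFAULT, then the sync step failed: */
	pthread_mutex_lock(&rw_sem[FREEZE_WRITE]);
	pthread_mutex_lock(&rw_sem[FREEZE_PAGEFAULT]);
	freeze_unlock(FREEZE_PAGEFAULT + 1); /* undo exactly those two levels */
	return 0;
}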
@@ -197,7 +197,7 @@ struct obj_cgroup {
 	struct mem_cgroup *memcg;
 	atomic_t nr_charged_bytes;
 	union {
-		struct list_head list;
+		struct list_head list; /* protected by objcg_lock */
 		struct rcu_head rcu;
 	};
 };

@@ -300,7 +300,8 @@ struct mem_cgroup {
 	int kmemcg_id;
 	enum memcg_kmem_state kmem_state;
 	struct obj_cgroup __rcu *objcg;
-	struct list_head objcg_list; /* list of inherited objcgs */
+	/* list of inherited objcgs, protected by objcg_lock */
+	struct list_head objcg_list;
 #endif
 
 	MEMCG_PADDING(_pad2_);
@@ -2094,7 +2094,7 @@ struct net_device {
 	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
 	unsigned int		num_tx_queues;
 	unsigned int		real_num_tx_queues;
-	struct Qdisc		*qdisc;
+	struct Qdisc __rcu	*qdisc;
 	unsigned int		tx_queue_len;
 	spinlock_t		tx_global_lock;
@@ -1576,7 +1576,6 @@ extern struct pid *cad_pid;
 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
 #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
 #define PF_KSWAPD		0x00020000	/* I am kswapd */
@@ -262,7 +262,7 @@ struct ad_system {
 struct ad_bond_info {
 	struct ad_system system;	/* 802.3ad system structure */
 	struct bond_3ad_stats stats;
-	u32 agg_select_timer;		/* Timer to select aggregator after all adapter's hand shakes */
+	atomic_t agg_select_timer;	/* Timer to select aggregator after all adapter's hand shakes */
 	u16 aggregator_identifier;
 };
@@ -390,17 +390,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
 		kfree_rcu(opt, rcu);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
 
 extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
 static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
 						    __be32 label)
 {
-	if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+	if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+	    READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
 		return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
 
 	return NULL;
 }
+#endif
 
 struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
 					 struct ip6_flowlabel *fl,
@@ -81,9 +81,10 @@ struct netns_ipv6 {
 	spinlock_t		fib6_gc_lock;
 	unsigned int		 ip6_rt_gc_expire;
 	unsigned long		 ip6_rt_last_gc;
+	unsigned char		flowlabel_has_excl;
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-	unsigned int		fib6_rules_require_fldissect;
 	bool			fib6_has_custom_rules;
+	unsigned int		fib6_rules_require_fldissect;
 #ifdef CONFIG_IPV6_SUBTREES
 	unsigned int		fib6_routes_require_src;
 #endif
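Besides adding flowlabel_has_excl, this hunk appears to reorder fib6_rules_require_fldissect past the bool so the small members pack together instead of leaving padding holes. A quick userspace demonstration of the effect with stand-in types (exact sizes are ABI-dependent; typically 12 vs. 8 bytes on common 32-bit-int targets):

#include <stdbool.h>
#include <stdio.h>

struct before { char a; unsigned int b; bool c; }; /* holes after 'a' and 'c' */
struct after  { char a; bool c; unsigned int b; }; /* char and bool share a word */

int main(void)
{
	printf("before=%zu after=%zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}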
@@ -135,7 +135,7 @@ struct can_isotp_ll_options {
 #define CAN_ISOTP_FORCE_RXSTMIN	0x100	/* ignore CFs depending on rx stmin */
 #define CAN_ISOTP_RX_EXT_ADDR	0x200	/* different rx extended addressing */
 #define CAN_ISOTP_WAIT_TX_DONE	0x400	/* wait for tx completion */
-
+#define CAN_ISOTP_SF_BROADCAST	0x800	/* 1-to-N functional addressing */
 
 /* default values */
@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
-	/* mark that this task has queued an async job, used by module init */
-	current->flags |= PF_USED_ASYNC;
-
 	/* schedule for execution */
 	queue_work_node(node, system_unbound_wq, &entry->work);
@@ -2304,10 +2304,6 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cancel_cgroup;
 	}
 
-	/* past the last point of failure */
-	if (pidfile)
-		fd_install(pidfd, pidfile);
-
 	init_task_pid_links(p);
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

@@ -2356,6 +2352,9 @@ static __latent_entropy struct task_struct *copy_process(
 	syscall_tracepoint_update(p);
 	write_unlock_irq(&tasklist_lock);
 
+	if (pidfile)
+		fd_install(pidfd, pidfile);
+
 	proc_fork_connector(p);
 	sched_post_fork(p, args);
 	cgroup_post_fork(p, args);
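The copy_process() hunk moves fd_install() from the "past the last point of failure" block to after the tasklist_lock section: installing the pidfd publishes the file to userspace, so it should happen only once the new task is fully set up. The publish-last rule in miniature, using C11 atomics; all names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct task { int pid; };

static _Atomic(struct task *) fd_table[1]; /* stands in for the fd table slot */

int main(void)
{
	static struct task child;

	child.pid = 42;                              /* finish every bit of setup first */
	atomic_store_explicit(&fd_table[0], &child,  /* only then publish: no window    */
			      memory_order_release); /* exposes a half-constructed task */
	printf("pid via table: %d\n", atomic_load(&fd_table[0])->pid);
	return 0;
}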
@@ -3387,7 +3387,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
 	u16 chain_hlock = chain_hlocks[chain->base + i];
 	unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
 
-	return lock_classes + class_idx - 1;
+	return lock_classes + class_idx;
 }
 
 /*

@@ -3455,7 +3455,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
 		hlock_id = chain_hlocks[chain->base + i];
 		chain_key = print_chain_key_iteration(hlock_id, chain_key);
 
-		print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
+		print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
 		printk("\n");
 	}
 }
@@ -3739,12 +3739,6 @@ static noinline int do_init_module(struct module *mod)
 	}
 	freeinit->module_init = mod->init_layout.base;
 
-	/*
-	 * We want to find out whether @mod uses async during init.  Clear
-	 * PF_USED_ASYNC.  async_schedule*() will set it.
-	 */
-	current->flags &= ~PF_USED_ASYNC;
-
 	do_mod_ctors(mod);
 	/* Start the module */
 	if (mod->init != NULL)

@@ -3770,22 +3764,13 @@ static noinline int do_init_module(struct module *mod)
 
 	/*
 	 * We need to finish all async code before the module init sequence
-	 * is done.  This has potential to deadlock.  For example, a newly
-	 * detected block device can trigger request_module() of the
-	 * default iosched from async probing task.  Once userland helper
-	 * reaches here, async_synchronize_full() will wait on the async
-	 * task waiting on request_module() and deadlock.
-	 *
-	 * This deadlock is avoided by perfomring async_synchronize_full()
-	 * iff module init queued any async jobs.  This isn't a full
-	 * solution as it will deadlock the same if module loading from
-	 * async jobs nests more than once; however, due to the various
-	 * constraints, this hack seems to be the best option for now.
-	 * Please refer to the following thread for details.
+	 * is done. This has potential to deadlock if synchronous module
+	 * loading is requested from async (which is not allowed!).
 	 *
-	 * http://thread.gmane.org/gmane.linux.kernel/1420814
+	 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
+	 * request_module() from async workers") for more details.
 	 */
-	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
+	if (!mod->async_probe_requested)
 		async_synchronize_full();
 
 	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
@@ -628,7 +628,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			set_tsk_need_resched(current);
 			set_preempt_need_resched();
 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
-			    !rdp->defer_qs_iw_pending && exp) {
+			    !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) {
 				// Get scheduler to re-evaluate and call hooks.
 				// If !IRQ_WORK, FQS scan will eventually IPI.
 				init_irq_work(&rdp->defer_qs_iw,
@@ -48,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
 #define skip_erasing()	false
 #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
 
-asmlinkage void notrace stackleak_erase(void)
+asmlinkage void noinstr stackleak_erase(void)
 {
 	/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
 	unsigned long kstack_ptr = current->lowest_stack;

@@ -102,9 +102,8 @@ asmlinkage void notrace stackleak_erase(void)
 	/* Reset the 'lowest_stack' value for the next syscall */
 	current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
 }
-NOKPROBE_SYMBOL(stackleak_erase);
 
-void __used __no_caller_saved_registers notrace stackleak_track_stack(void)
+void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
 {
 	unsigned long sp = current_stack_pointer;
@@ -251,6 +251,10 @@ __setup("trace_clock=", set_trace_boot_clock);
 
 static int __init set_tracepoint_printk(char *str)
 {
+	/* Ignore the "tp_printk_stop_on_boot" param */
+	if (*str == '_')
+		return 0;
+
 	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 		tracepoint_printk = 1;
 	return 1;
@@ -407,6 +407,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
 		return 0;
 
 	buf->ops = &page_cache_pipe_buf_ops;
+	buf->flags = 0;
 	get_page(page);
 	buf->page = page;
 	buf->offset = offset;

@@ -543,6 +544,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
 			break;
 
 		buf->ops = &default_pipe_buf_ops;
+		buf->flags = 0;
 		buf->page = page;
 		buf->offset = 0;
 		buf->len = min_t(ssize_t, left, PAGE_SIZE);
@@ -251,7 +251,7 @@ struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-extern spinlock_t css_set_lock;
+static DEFINE_SPINLOCK(objcg_lock);
 
 static void obj_cgroup_release(struct percpu_ref *ref)
 {

@@ -285,13 +285,13 @@ static void obj_cgroup_release(struct percpu_ref *ref)
 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
 	nr_pages = nr_bytes >> PAGE_SHIFT;
 
-	spin_lock_irqsave(&css_set_lock, flags);
+	spin_lock_irqsave(&objcg_lock, flags);
 	memcg = obj_cgroup_memcg(objcg);
 	if (nr_pages)
 		__memcg_kmem_uncharge(memcg, nr_pages);
 	list_del(&objcg->list);
 	mem_cgroup_put(memcg);
-	spin_unlock_irqrestore(&css_set_lock, flags);
+	spin_unlock_irqrestore(&objcg_lock, flags);
 
 	percpu_ref_exit(ref);
 	kfree_rcu(objcg, rcu);

@@ -323,7 +323,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 
 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
 
-	spin_lock_irq(&css_set_lock);
+	spin_lock_irq(&objcg_lock);
 
 	/* Move active objcg to the parent's list */
 	xchg(&objcg->memcg, parent);

@@ -338,7 +338,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
 	}
 	list_splice(&memcg->objcg_list, &parent->objcg_list);
 
-	spin_unlock_irq(&css_set_lock);
+	spin_unlock_irq(&objcg_lock);
 
 	percpu_ref_kill(&objcg->refcnt);
 }
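The memcontrol hunks retire the borrowed global css_set_lock in favour of a dedicated objcg_lock that guards only the objcg lists, removing a cross-subsystem lock dependency. The lock-scoping idea in a small userspace sketch; the names mirror the hunk, but the code is purely illustrative:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t objcg_lock = PTHREAD_MUTEX_INITIALIZER; /* guards only objcg_list */
static struct node *objcg_list;

static void objcg_add(struct node *n)
{
	pthread_mutex_lock(&objcg_lock);  /* contends only with other list users,  */
	n->next = objcg_list;             /* not with everything that shared the   */
	objcg_list = n;                   /* old borrowed global lock              */
	pthread_mutex_unlock(&objcg_lock);
}

int main(void)
{
	static struct node n;

	objcg_add(&n);
	printf("list head: %p\n", (void *)objcg_list);
	return 0;
}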