commit 469d2d8f59
Merge 7d78b7ebdf ("Merge tag 'mips-fixes_5.19_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux") into android-mainline

Steps on the way to 5.19-rc2

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iaeb2c2f1c0ca0b5aa07b2d9d6f5e51ddf3ce9c6c
@@ -75,7 +75,6 @@ examples:
sd-uhs-sdr104;
sdhci,auto-cmd12;
interrupts = <0x0 0x26 0x4>;
interrupt-names = "sdio0_0";
clocks = <&scmi_clk 245>;
clock-names = "sw_sdio";
};

@@ -94,7 +93,6 @@ examples:
non-removable;
bus-width = <0x8>;
interrupts = <0x0 0x27 0x4>;
interrupt-names = "sdio1_0";
clocks = <&scmi_clk 245>;
clock-names = "sw_sdio";
};

@@ -56,6 +56,9 @@ properties:
- const: core
- const: axi

interrupts:
maxItems: 1

marvell,xenon-sdhc-id:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0

@@ -37,30 +37,31 @@ The network filesystem helper library needs a place to store a bit of state for
its use on each netfs inode it is helping to manage. To this end, a context
structure is defined::

struct netfs_i_context {
struct netfs_inode {
struct inode inode;
const struct netfs_request_ops *ops;
struct fscache_cookie *cache;
struct fscache_cookie *cache;
};

A network filesystem that wants to use netfs lib must place one of these
directly after the VFS ``struct inode`` it allocates, usually as part of its
own struct. This can be done in a way similar to the following::
A network filesystem that wants to use netfs lib must place one of these in its
inode wrapper struct instead of the VFS ``struct inode``. This can be done in
a way similar to the following::

struct my_inode {
struct {
/* These must be contiguous */
struct inode vfs_inode;
struct netfs_i_context netfs_ctx;
};
struct netfs_inode netfs; /* Netfslib context and vfs inode */
...
};

This allows netfslib to find its state by simple offset from the inode pointer,
thereby allowing the netfslib helper functions to be pointed to directly by the
VFS/VM operation tables.
This allows netfslib to find its state by using ``container_of()`` from the
inode pointer, thereby allowing the netfslib helper functions to be pointed to
directly by the VFS/VM operation tables.

The structure contains the following fields:

* ``inode``

The VFS inode structure.

* ``ops``

The set of operations provided by the network filesystem to netfslib.

@@ -78,14 +79,12 @@ To help deal with the per-inode context, a number helper functions are
provided. Firstly, a function to perform basic initialisation on a context and
set the operations table pointer::

void netfs_i_context_init(struct inode *inode,
const struct netfs_request_ops *ops);
void netfs_inode_init(struct inode *inode,
const struct netfs_request_ops *ops);

then two functions to cast between the VFS inode structure and the netfs
context::
then a function to cast from the VFS inode structure to the netfs context::

struct netfs_i_context *netfs_i_context(struct inode *inode);
struct inode *netfs_inode(struct netfs_i_context *ctx);
struct netfs_inode *netfs_node(struct inode *inode);

and finally, a function to get the cache cookie pointer from the context
attached to an inode (or NULL if fscache is disabled)::
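[Editorial note: for illustration only, a minimal sketch of the embedding pattern the updated documentation above describes; the names my_fs_inode and MY_FS_I are hypothetical and not part of this change. The fs/9p hunks further down do exactly this with struct v9fs_inode and V9FS_I().]

    /* Sketch: embed struct netfs_inode (which itself contains the VFS
     * struct inode) as the first member of the filesystem's own inode. */
    struct my_fs_inode {
            struct netfs_inode netfs;  /* netfs context + VFS inode */
            /* filesystem-private fields follow */
    };

    /* Recover the wrapper from a VFS inode pointer via container_of(). */
    static inline struct my_fs_inode *MY_FS_I(struct inode *inode)
    {
            return container_of(inode, struct my_fs_inode, netfs.inode);
    }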
@@ -3757,6 +3757,13 @@ F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
F: security/bpf/

BPFTOOL
M: Quentin Monnet <quentin@isovalent.com>
L: bpf@vger.kernel.org
S: Maintained
F: kernel/bpf/disasm.*
F: tools/bpf/bpftool/

BROADCOM B44 10/100 ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org

Makefile

@@ -808,6 +808,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)

KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)

ifdef CONFIG_CC_IS_CLANG

@@ -825,6 +826,9 @@ endif
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)

# These result in bogus false positives
KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)

ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
@@ -1478,6 +1478,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_off;
}
bpf_jit_binary_lock_ro(header);

@@ -223,7 +223,6 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE

@@ -786,7 +785,6 @@ config THREAD_SHIFT
range 13 15
default "15" if PPC_256K_PAGES
default "14" if PPC64
default "14" if KASAN
default "13"
help
Used to define the stack size. The default is almost always what you

@@ -14,10 +14,16 @@

#ifdef __KERNEL__

#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
#ifdef CONFIG_KASAN
#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
#else
#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
#endif

#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
#define THREAD_SHIFT PAGE_SHIFT
#else
#define THREAD_SHIFT CONFIG_THREAD_SHIFT
#define THREAD_SHIFT MIN_THREAD_SHIFT
#endif

#define THREAD_SIZE (1 << THREAD_SHIFT)

@@ -37,6 +37,8 @@ KASAN_SANITIZE_paca.o := n
KASAN_SANITIZE_setup_64.o := n
KASAN_SANITIZE_mce.o := n
KASAN_SANITIZE_mce_power.o := n
KASAN_SANITIZE_udbg.o := n
KASAN_SANITIZE_udbg_16550.o := n

# we have to be particularly careful in ppc64 to exclude code that
# runs with translations off, as we cannot access the shadow with
@@ -2158,12 +2158,12 @@ static unsigned long ___get_wchan(struct task_struct *p)
return 0;

do {
sp = *(unsigned long *)sp;
sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
task_is_running(p))
return 0;
if (count > 0) {
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}

@@ -17,9 +17,13 @@ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)

#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
else
if (fpidx < (PT_FPSCR - PT_FPR0)) {
if (IS_ENABLED(CONFIG_PPC32))
// On 32-bit the index we are passed refers to 32-bit words
*data = ((u32 *)child->thread.fp_state.fpr)[fpidx];
else
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
} else
*data = child->thread.fp_state.fpscr;
#else
*data = 0;

@@ -39,9 +43,13 @@ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)

#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
else
if (fpidx < (PT_FPSCR - PT_FPR0)) {
if (IS_ENABLED(CONFIG_PPC32))
// On 32-bit the index we are passed refers to 32-bit words
((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
else
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
} else
child->thread.fp_state.fpscr = data;
#endif

@@ -444,4 +444,7 @@ void __init pt_regs_check(void)
* real registers.
*/
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));

// ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
}
@@ -993,8 +993,8 @@ int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...)
*
* Return: A pointer to the specified errorlog or NULL if not found.
*/
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id)
noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id)
{
struct rtas_ext_event_log_v6 *ext_log =
(struct rtas_ext_event_log_v6 *)log->buffer;

@@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs)

/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
int i;

@@ -19,7 +19,6 @@
#include <asm/cacheflush.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
#include <generated/compile.h>
#include <generated/utsrelease.h>

struct regions {

@@ -37,10 +36,6 @@ struct regions {
int reserved_mem_size_cells;
};

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

struct regions __initdata regions;

static __init void kaslr_get_cmdline(void *fdt)

@@ -71,7 +66,8 @@ static unsigned long __init get_boot_seed(void *fdt)
{
unsigned long hash = 0;

hash = rotate_xor(hash, build_str, sizeof(build_str));
/* build-specific string for starting entropy. */
hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));

return hash;

@@ -4,6 +4,7 @@
# in particular, idle code runs a bunch of things in real mode
KASAN_SANITIZE_idle.o := n
KASAN_SANITIZE_pci-ioda.o := n
KASAN_SANITIZE_pci-ioda-tce.o := n
# pnv_machine_check_early
KASAN_SANITIZE_setup.o := n

@@ -465,6 +465,9 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
u32 available_events;
int index, rc = 0;

if (!p->stat_buffer_len)
return -ENOENT;

available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
/ sizeof(struct papr_scm_perf_stat);
if (available_events == 0)
@@ -125,6 +125,7 @@ config S390
select CLONE_BACKWARDS2
select DMA_OPS if PCI
select DYNAMIC_FTRACE if FUNCTION_TRACER
select GCC12_NO_ARRAY_BOUNDS
select GENERIC_ALLOCATOR
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES

@@ -32,15 +32,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))

ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -ge, 1200, y), y)
ifeq ($(call cc-ifversion, -lt, 1300, y), y)
KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
endif
endif
endif
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_NO_ARRAY_BOUNDS),-Wno-array-bounds)

UTS_MACHINE := s390x
STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)

@@ -653,6 +653,7 @@ struct kvm_vcpu_arch {
u64 ia32_misc_enable_msr;
u64 smbase;
u64 smi_count;
bool at_instruction_boundary;
bool tpr_access_reporting;
bool xsaves_enabled;
bool xfd_no_write_intercept;

@@ -1300,6 +1301,8 @@ struct kvm_vcpu_stat {
u64 nested_run;
u64 directed_yield_attempted;
u64 directed_yield_successful;
u64 preemption_reported;
u64 preemption_other;
u64 guest_mode;
};

@@ -439,7 +439,7 @@ do { \
[ptr] "+m" (*_ptr), \
[old] "+a" (__old) \
: [new] ltype (__new) \
: "memory", "cc"); \
: "memory"); \
if (unlikely(__err)) \
goto label; \
if (unlikely(!success)) \
@@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
roots_to_free |= KVM_MMU_ROOT_CURRENT;

for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
if (is_obsolete_root(kvm, mmu->root.hpa))
if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
}

@@ -145,6 +145,15 @@ static bool try_step_up(struct tdp_iter *iter)
return true;
}

/*
* Step the iterator back up a level in the paging structure. Should only be
* used when the iterator is below the root level.
*/
void tdp_iter_step_up(struct tdp_iter *iter)
{
WARN_ON(!try_step_up(iter));
}

/*
* Step to the next SPTE in a pre-order traversal of the paging structure.
* To get to the next SPTE, the iterator either steps down towards the goal

@@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);
void tdp_iter_step_up(struct tdp_iter *iter);

#endif /* __KVM_X86_MMU_TDP_ITER_H */

@@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
gfn_t start = slot->base_gfn;
gfn_t end = start + slot->npages;
struct tdp_iter iter;
int max_mapping_level;
kvm_pfn_t pfn;

rcu_read_lock();

tdp_root_for_each_pte(iter, root, start, end) {
retry:
if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;

@@ -1755,15 +1755,41 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
!is_last_spte(iter.old_spte, iter.level))
continue;

/*
* This is a leaf SPTE. Check if the PFN it maps can
* be mapped at a higher level.
*/
pfn = spte_to_pfn(iter.old_spte);
if (kvm_is_reserved_pfn(pfn) ||
iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
pfn, PG_LEVEL_NUM))

if (kvm_is_reserved_pfn(pfn))
continue;

max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
iter.gfn, pfn, PG_LEVEL_NUM);

WARN_ON(max_mapping_level < iter.level);

/*
* If this page is already mapped at the highest
* viable level, there's nothing more to do.
*/
if (max_mapping_level == iter.level)
continue;

/*
* The page can be remapped at a higher level, so step
* up to zap the parent SPTE.
*/
while (max_mapping_level > iter.level)
tdp_iter_step_up(&iter);

/* Note, a successful atomic zap also does a remote TLB flush. */
if (tdp_mmu_zap_spte_atomic(kvm, &iter))
goto retry;
tdp_mmu_zap_spte_atomic(kvm, &iter);

/*
* If the atomic zap fails, the iter will recurse back into
* the same subtree to retry.
*/
}

rcu_read_unlock();
@@ -982,7 +982,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
WARN_ON(!svm->tsc_scaling_enabled);
vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
}

svm->nested.ctl.nested_cr3 = 0;

@@ -1387,7 +1387,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
vcpu->arch.tsc_scaling_ratio =
kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
svm->tsc_ratio_msr);
svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
}

/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */

@@ -465,11 +465,24 @@ static int has_svm(void)
return 1;
}

void __svm_write_tsc_multiplier(u64 multiplier)
{
preempt_disable();

if (multiplier == __this_cpu_read(current_tsc_ratio))
goto out;

wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
__this_cpu_write(current_tsc_ratio, multiplier);
out:
preempt_enable();
}

static void svm_hardware_disable(void)
{
/* Make sure we clean up behind us */
if (tsc_scaling)
wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

cpu_svm_disable();

@@ -515,8 +528,7 @@ static int svm_hardware_enable(void)
* Set the default value, even if we don't use TSC scaling
* to avoid having stale value in the msr
*/
wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT);
__this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT);
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
}

@@ -999,11 +1011,12 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
__svm_write_tsc_multiplier(multiplier);
}

/* Evaluate instruction intercepts that depend on guest CPUID features. */
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
struct vcpu_svm *svm)

@@ -1363,13 +1376,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
sev_es_prepare_switch_to_guest(hostsa);
}

if (tsc_scaling) {
u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
__this_cpu_write(current_tsc_ratio, tsc_ratio);
wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
}
}
if (tsc_scaling)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);

if (likely(tsc_aux_uret_slot >= 0))
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);

@@ -4255,6 +4263,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
vcpu->arch.at_instruction_boundary = true;
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)

@@ -590,7 +590,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
@@ -6547,6 +6547,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
return;

handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
vcpu->arch.at_instruction_boundary = true;
}

static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)

@@ -296,6 +296,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, nested_run),
STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
STATS_DESC_COUNTER(VCPU, directed_yield_successful),
STATS_DESC_COUNTER(VCPU, preemption_reported),
STATS_DESC_COUNTER(VCPU, preemption_other),
STATS_DESC_ICOUNTER(VCPU, guest_mode)
};

@@ -4625,6 +4627,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
struct kvm_memslots *slots;
static const u8 preempted = KVM_VCPU_PREEMPTED;

/*
* The vCPU can be marked preempted if and only if the VM-Exit was on
* an instruction boundary and will not trigger guest emulation of any
* kind (see vcpu_run). Vendor specific code controls (conservatively)
* when this is true, for example allowing the vCPU to be marked
* preempted if and only if the VM-Exit was due to a host interrupt.
*/
if (!vcpu->arch.at_instruction_boundary) {
vcpu->stat.preemption_other++;
return;
}

vcpu->stat.preemption_reported++;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;

@@ -4654,19 +4669,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
int idx;

if (vcpu->preempted && !vcpu->arch.guest_state_protected)
vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
if (vcpu->preempted) {
if (!vcpu->arch.guest_state_protected)
vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);

/*
* Take the srcu lock as memslots will be accessed to check the gfn
* cache generation against the memslots generation.
*/
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (kvm_xen_msr_enabled(vcpu->kvm))
kvm_xen_runstate_set_preempted(vcpu);
else
kvm_steal_time_set_preempted(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
/*
* Take the srcu lock as memslots will be accessed to check the gfn
* cache generation against the memslots generation.
*/
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (kvm_xen_msr_enabled(vcpu->kvm))
kvm_xen_runstate_set_preempted(vcpu);
else
kvm_steal_time_set_preempted(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

static_call(kvm_x86_vcpu_put)(vcpu);
vcpu->arch.last_host_tsc = rdtsc();

@@ -10422,6 +10439,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.l1tf_flush_l1d = true;

for (;;) {
/*
* If another guest vCPU requests a PV TLB flush in the middle
* of instruction emulation, the rest of the emulation could
* use a stale page translation. Assume that any code after
* this point can start executing an instruction.
*/
vcpu->arch.at_instruction_boundary = false;
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {

@@ -159,8 +159,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu)
* behalf of the vCPU. Only if the VMM does actually block
* does it need to enter RUNSTATE_blocked.
*/
if (vcpu->preempted)
kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
if (WARN_ON_ONCE(!vcpu->preempted))
return;

kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable);
}

/* 32-bit compatibility definitions, also used natively in 32-bit build */
@@ -23,6 +23,13 @@
#include <openssl/err.h>
#include <openssl/engine.h>

/*
* OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
*
* Remove this if/when that API is no longer used
*/
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

#define PKEY_ID_PKCS7 2

static __attribute__((noreturn))

@@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
if (plane == &ipu_crtc->plane[0]->base)
disable_full = true;
if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
disable_partial = true;
}

@@ -402,6 +402,7 @@ config JOYSTICK_N64
config JOYSTICK_SENSEHAT
tristate "Raspberry Pi Sense HAT joystick"
depends on INPUT && I2C
depends on HAS_IOMEM
select MFD_SIMPLE_MFD_I2C
help
Say Y here if you want to enable the driver for the

@@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
},
{
/*
* Lenovo Yoga Tab2 1051L, something messes with the home-button
* Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button
* IRQ settings, leading to a non working home-button.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "60073"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1051"),
},
},
{} /* Terminating entry */

@@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
if (!dev->tp_data)
goto err_free_bt_buffer;

if (dev->bt_urb)
if (dev->bt_urb) {
usb_fill_int_urb(dev->bt_urb, udev,
usb_rcvintpipe(udev, cfg->bt_ep),
dev->bt_data, dev->cfg.bt_datalen,
bcm5974_irq_button, dev, 1);

dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
}

usb_fill_int_urb(dev->tp_urb, udev,
usb_rcvintpipe(udev, cfg->tp_ep),
dev->tp_data, dev->cfg.tp_datalen,
bcm5974_irq_trackpad, dev, 1);

dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

/* create bcm5974 device */
usb_make_path(udev, dev->phys, sizeof(dev->phys));
strlcat(dev->phys, "/input0", sizeof(dev->phys));

@@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
else
mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
@@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
struct sdhci_host *host = slot->host;
u16 clock;

if (host->mmc->ios.power_mode != MMC_POWER_ON)
return 0;

clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL);

clock |= SDHCI_CLOCK_PLL_EN;

@@ -51,6 +51,7 @@ static char *status_str[] = {
};

static char *type_str[] = {
"", /* Type 0 is not defined */
"AMT_MSG_DISCOVERY",
"AMT_MSG_ADVERTISEMENT",
"AMT_MSG_REQUEST",

@@ -2220,8 +2221,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
struct amt_header_advertisement *amta;
int hdr_size;

hdr_size = sizeof(*amta) - sizeof(struct amt_header);

hdr_size = sizeof(*amta) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;

@@ -2251,19 +2251,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
struct ethhdr *eth;
struct iphdr *iph;

hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;

amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
if (amtmd->reserved || amtmd->version)
return true;

hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
return true;

skb_reset_network_header(skb);
skb_push(skb, sizeof(*eth));
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = eth_hdr(skb);

if (!pskb_may_pull(skb, sizeof(*iph)))
return true;
iph = ip_hdr(skb);

if (iph->version == 4) {
if (!ipv4_is_multicast(iph->daddr))
return true;

@@ -2274,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
} else if (iph->version == 6) {
struct ipv6hdr *ip6h;

if (!pskb_may_pull(skb, sizeof(*ip6h)))
return true;

ip6h = ipv6_hdr(skb);
if (!ipv6_addr_is_multicast(&ip6h->daddr))
return true;

@@ -2306,8 +2317,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
struct iphdr *iph;
int hdr_size, len;

hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);

hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;

@@ -2315,22 +2325,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
if (amtmq->reserved || amtmq->version)
return true;

hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
hdr_size -= sizeof(*eth);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
return true;

oeth = eth_hdr(skb);
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
skb_reset_network_header(skb);
eth = eth_hdr(skb);
if (!pskb_may_pull(skb, sizeof(*iph)))
return true;

iph = ip_hdr(skb);
if (iph->version == 4) {
if (!ipv4_is_multicast(iph->daddr))
return true;
if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
sizeof(*ihv3)))
return true;

if (!ipv4_is_multicast(iph->daddr))
return true;

ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);

@@ -2345,15 +2360,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld2_query *mld2q;
struct ipv6hdr *ip6h;

if (!ipv6_addr_is_multicast(&ip6h->daddr))
return true;
if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
sizeof(*mld2q)))
return true;

ip6h = ipv6_hdr(skb);
if (!ipv6_addr_is_multicast(&ip6h->daddr))
return true;

mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);

@@ -2389,23 +2406,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
{
struct amt_header_membership_update *amtmu;
struct amt_tunnel_list *tunnel;
struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
int len;
int len, hdr_size;

iph = ip_hdr(skb);
udph = udp_hdr(skb);

if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
false, false))
hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;

amtmu = (struct amt_header_membership_update *)skb->data;
amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
if (amtmu->reserved || amtmu->version)
return true;

skb_pull(skb, sizeof(*amtmu));
if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
return true;

skb_reset_network_header(skb);

list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {

@@ -2426,6 +2443,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
return true;

report:
if (!pskb_may_pull(skb, sizeof(*iph)))
return true;

iph = ip_hdr(skb);
if (iph->version == 4) {
if (ip_mc_check_igmp(skb)) {

@@ -2679,7 +2699,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
amt = rcu_dereference_sk_user_data(sk);
if (!amt) {
err = true;
goto drop;
kfree_skb(skb);
goto out;
}

skb->dev = amt->dev;
@@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
if (err)
if (err) {
of_node_put(gphy_fw_np);
goto remove_gphy;
}
i++;
}

@@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}

static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
u16 ctrl, u16 status, u16 lpa,
u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
state->link = false;

/* If the BMSR reports that the link had failed, report this to
* phylink.
*/
if (!(bmsr & BMSR_LSTATUS))
return 0;

state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);

if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
/* The Spped and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
* still get here on link up. But we want to set an_complete
* only if AN was enabled, thus we look at BMCR_ANENABLE.
* (According to 802.3-2008 section 22.2.4.2.10, we should be
* able to get this same value from BMSR_ANEGCAPABLE, but tests
* show that these Marvell PHYs don't conform to this part of
* the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
* still get here on link up.
*/
state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;

@@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
u16 lpa, status, ctrl;
u16 bmsr, lpa, status;
int err;

err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}

@@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}

return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}

int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,

@@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
u16 lpa, status, ctrl;
u16 bmsr, lpa, status;
int err;

err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_BMCR, &ctrl);
MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}

@@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}

return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}

static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,

@@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
return 0;
}

static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
int ext_int;

ext_int = rtl8365mb_extint_port_map[port];

if (ext_int < 0 &&
(interface == PHY_INTERFACE_MODE_NA ||
interface == PHY_INTERFACE_MODE_INTERNAL ||
interface == PHY_INTERFACE_MODE_GMII))
/* Internal PHY */
return true;
else if ((ext_int >= 1) &&
phy_interface_mode_is_rgmii(interface))
/* Extension MAC */
return true;

return false;
}

static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
if (dsa_is_user_port(ds, port))
if (dsa_is_user_port(ds, port)) {
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
else if (dsa_is_cpu_port(ds, port))

/* GMII is the default interface mode for phylib, so
* we have to support it for ports with integrated PHY.
*/
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
} else if (dsa_is_cpu_port(ds, port)) {
phy_interface_set_rgmii(config->supported_interfaces);
}

config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;

@@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
struct realtek_priv *priv = ds->priv;
int ret;

if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
dev_err(priv->dev, "phy mode %s is unsupported on port %d\n",
phy_modes(state->interface), port);
return;
}

if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
return -ENOMEM;
ret = -ENOMEM;
goto put_node;
}

mdio->name = ALTERA_TSE_RESOURCE_NAME;

@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
of_node_put(mdio_node);

if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
put_node:
of_node_put(mdio_node);
return ret;
}

@@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev)
pr_cont("\n");
}
}
prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE;
aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
wmb(); /* drain writebuffer */

@@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
ps->tx_packets++;
ps->tx_bytes += ptxd->len;

ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE;
wmb(); /* drain writebuffer */
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);

@@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;

@@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev)
for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i;
pDB++;
}
aup->pDBfree = pDBfree;

@@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;

aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->rx_db_inuse[i] = pDB;
}

@@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;

aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
}

@@ -1310,7 +1310,7 @@ static int au1000_probe(struct platform_device *pdev)
iounmap(aup->mac);
err_remap1:
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr);
aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:

@@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev)
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);

dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr);
aup->vaddr, aup->dma_addr);

iounmap(aup->macdma);
iounmap(aup->mac);

@@ -106,8 +106,8 @@ struct au1000_private {
struct mac_reg *mac; /* mac registers */
u32 *enable; /* address of MAC Enable Register */
void __iomem *macdma; /* base of MAC DMA port */
u32 vaddr; /* virtual address of rx/tx buffers */
dma_addr_t dma_addr; /* dma address of rx/tx buffers */
void *vaddr; /* virtual address of rx/tx buffers */
dma_addr_t dma_addr; /* dma address of rx/tx buffers */

spinlock_t lock; /* Serialise access to device */
@@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)

netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));

for (i = 0; i < skb->len; i += 32) {
unsigned int len = min(skb->len - i, 32U);

@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
np = of_get_child_by_name(core->dev.of_node, "mdio");

err = of_mdiobus_register(mii_bus, np);
of_node_put(np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;

@@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,

switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = 0;
enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;

@@ -1208,9 +1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}

disable = 0;
disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;

@@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
return true;
}

static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
unsigned long data;

data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
get_order(size));

return (void *)data;
}

/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{

@@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;

/* alloc new buffer */
new_data = napi_alloc_frag(ring->frag_size);
if (ring->frag_size <= PAGE_SIZE)
new_data = napi_alloc_frag(ring->frag_size);
else
new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;

@@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
return -ENOMEM;

for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (ring->frag_size <= PAGE_SIZE)
ring->data[i] = netdev_alloc_frag(ring->frag_size);
else
ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!ring->data[i])
return -ENOMEM;
}
@@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
return 0;
return ret;
}

i += ret;

@@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker;
struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
int i;

@@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,

ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (ret) {
goto err_dvr_probe;
goto err_alloc_irq;
}

return 0;

err_dvr_probe:
pci_free_irq_vectors(pdev);
err_alloc_irq:
clk_disable_unprepare(plat->stmmac_clk);
clk_unregister_fixed_rate(plat->stmmac_clk);

@@ -137,6 +137,7 @@
#define DP83867_DOWNSHIFT_2_COUNT 2
#define DP83867_DOWNSHIFT_4_COUNT 4
#define DP83867_DOWNSHIFT_8_COUNT 8
#define DP83867_SGMII_AUTONEG_EN BIT(7)

/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)

@@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}

static void dp83867_link_change_notify(struct phy_device *phydev)
{
/* There is a limitation in DP83867 PHY device where SGMII AN is
* only triggered once after the device is booted up. Even after the
* PHY TPI is down and up again, SGMII AN is not triggered and
* hence no new in-band message from PHY to MAC side SGMII.
* This could cause an issue during power up, when PHY is up prior
* to MAC. At this condition, once MAC side SGMII is up, MAC side
* SGMII wouldn`t receive new in-band message from TI PHY with
* correct link status, speed and duplex info.
* Thus, implemented a SW solution here to retrigger SGMII Auto-Neg
* whenever there is a link change.
*/
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
int val = 0;

val = phy_clear_bits(phydev, DP83867_CFG2,
DP83867_SGMII_AUTONEG_EN);
if (val < 0)
return;

phy_set_bits(phydev, DP83867_CFG2,
DP83867_SGMII_AUTONEG_EN);
}
}

static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,

@@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = {

.suspend = genphy_suspend,
.resume = genphy_resume,

.link_change_notify = dp83867_link_change_notify,
},
};
module_phy_driver(dp83867_driver);

@@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void)

return ret;
}
EXPORT_SYMBOL_GPL(mdio_bus_init);

#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)

@@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data)
int err;

while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
usb_anchor_urb(urb, &drv_data->tx_anchor);

err = usb_submit_urb(urb, GFP_ATOMIC);
if (err)
if (err) {
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
usb_free_urb(urb);
break;
}

drv_data->tx_in_flight++;
usb_free_urb(urb);
}

/* Cleanup the rest deferred urbs. */
while ((urb = usb_get_from_anchor(&drv_data->deferred))) {
kfree(urb->setup_packet);
usb_free_urb(urb);
}
usb_scuttle_anchored_urbs(&drv_data->deferred);
}

static int nfcmrvl_resume(struct usb_interface *intf)
@@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
int r = 0;
struct device *dev = &hdev->ndev->dev;
struct nfc_evt_transaction *transaction;
u32 aid_len;
u8 params_len;

pr_debug("connectivity gate event: %x\n", event);

@@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
r = nfc_se_connectivity(hdev->ndev, host);
break;
case ST21NFCA_EVT_TRANSACTION:
/*
* According to specification etsi 102 622
/* According to specification etsi 102 622
* 11.2.2.4 EVT_TRANSACTION Table 52
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
*
* The key differences are aid storage length is variably sized
* in the packet, but fixed in nfc_evt_transaction, and that the aid_len
* is u8 in the packet, but u32 in the structure, and the tags in
* the packet are not included in nfc_evt_transaction.
*
* size in bytes: 1 1 5-16 1 1 0-255
* offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
* member name: aid_tag(M) aid_len aid params_tag(M) params_len params
* example: 0x81 5-16 X 0x82 0-255 X
*/
if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;

transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
aid_len = skb->data[1];

if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
return -EPROTO;

params_len = skb->data[aid_len + 3];

/* Verify PARAMETERS tag is (82), and final check that there is enough
* space in the packet to read everything.
*/
if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
(skb->len < aid_len + 4 + params_len))
return -EPROTO;

transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
if (!transaction)
return -ENOMEM;

transaction->aid_len = skb->data[1];
transaction->aid_len = aid_len;
transaction->params_len = params_len;

/* Checking if the length of the AID is valid */
if (transaction->aid_len > sizeof(transaction->aid))
return -EINVAL;

memcpy(transaction->aid, &skb->data[2],
transaction->aid_len);

/* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG)
return -EPROTO;

transaction->params_len = skb->data[transaction->aid_len + 3];

/* Total size is allocated (skb->len - 2) minus fixed array members */
if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
return -EINVAL;

memcpy(transaction->params, skb->data +
transaction->aid_len + 4, transaction->params_len);
memcpy(transaction->aid, &skb->data[2], aid_len);
memcpy(transaction->params, &skb->data[aid_len + 4], params_len);

r = nfc_se_transaction(hdev->ndev, host, transaction);
break;

@@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES

config CPU_HWMON
tristate "Loongson-3 CPU HWMon Driver"
bool "Loongson-3 CPU HWMon Driver"
depends on MACH_LOONGSON64
select HWMON
default y
@ -62,12 +62,12 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
|
||||
version = cpu_to_le32(v9inode->qid.version);
|
||||
path = cpu_to_le64(v9inode->qid.path);
|
||||
v9ses = v9fs_inode2v9ses(inode);
|
||||
v9inode->netfs_ctx.cache =
|
||||
v9inode->netfs.cache =
|
||||
fscache_acquire_cookie(v9fs_session_cache(v9ses),
|
||||
0,
|
||||
&path, sizeof(path),
|
||||
&version, sizeof(version),
|
||||
i_size_read(&v9inode->vfs_inode));
|
||||
i_size_read(&v9inode->netfs.inode));
|
||||
|
||||
p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
|
||||
inode, v9fs_inode_cookie(v9inode));
|
||||
|
@ -625,7 +625,7 @@ static void v9fs_inode_init_once(void *foo)
|
||||
struct v9fs_inode *v9inode = (struct v9fs_inode *)foo;
|
||||
|
||||
memset(&v9inode->qid, 0, sizeof(v9inode->qid));
|
||||
inode_init_once(&v9inode->vfs_inode);
|
||||
inode_init_once(&v9inode->netfs.inode);
|
||||
}
|
||||
|
||||
/**
|
||||
|
10
fs/9p/v9fs.h
10
fs/9p/v9fs.h
@@ -109,11 +109,7 @@ struct v9fs_session_info {
#define V9FS_INO_INVALID_ATTR 0x01

struct v9fs_inode {
struct {
/* These must be contiguous */
struct inode vfs_inode; /* the VFS's inode record */
struct netfs_i_context netfs_ctx; /* Netfslib context */
};
struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct p9_qid qid;
unsigned int cache_validity;
struct p9_fid *writeback_fid;
@@ -122,13 +118,13 @@ struct v9fs_inode {

static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
{
return container_of(inode, struct v9fs_inode, vfs_inode);
return container_of(inode, struct v9fs_inode, netfs.inode);
}

static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode)
{
#ifdef CONFIG_9P_FSCACHE
return netfs_i_cookie(&v9inode->vfs_inode);
return netfs_i_cookie(&v9inode->netfs.inode);
#else
return NULL;
#endif
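Every filesystem converted below repeats the mechanical change shown here for 9p: the anonymous vfs_inode/netfs_ctx pair becomes one embedded struct netfs_inode placed first in the wrapper, and the inode-to-wrapper cast goes through the netfs.inode member. A freestanding sketch of the pattern (the stand-in types and the example_inode/EXAMPLE_I names are illustrative; only the netfs.inode shape follows the kernel)::

    #include <stddef.h>

    /* Minimal stand-ins for the kernel structures. */
    struct inode { unsigned long i_ino; };
    struct netfs_inode {
        struct inode inode;     /* the VFS inode, embedded first */
        void *cache;            /* fscache cookie slot */
    };

    /* Per-filesystem wrapper, as in v9fs_inode, afs_vnode, ... */
    struct example_inode {
        struct netfs_inode netfs;   /* netfs context and vfs inode */
        int fs_private;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* V9FS_I(), AFS_FS_I(), ceph_inode() and CIFS_I() all become: */
    static struct example_inode *EXAMPLE_I(struct inode *inode)
    {
        return container_of(inode, struct example_inode, netfs.inode);
    }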
@@ -140,7 +140,7 @@ static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
transferred_or_error != -ENOBUFS) {
version = cpu_to_le32(v9inode->qid.version);
fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
i_size_read(&v9inode->vfs_inode), 0);
i_size_read(&v9inode->netfs.inode), 0);
}
}
@@ -234,7 +234,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
mutex_init(&v9inode->v_mutex);
return &v9inode->vfs_inode;
return &v9inode->netfs.inode;
}

/**
@@ -252,7 +252,7 @@ void v9fs_free_inode(struct inode *inode)
*/
static void v9fs_set_netfs_context(struct inode *inode)
{
netfs_i_context_init(inode, &v9fs_req_ops);
netfs_inode_init(inode, &v9fs_req_ops);
}

int v9fs_init_inode(struct v9fs_session_info *v9ses,
@@ -30,7 +30,7 @@ void afs_invalidate_mmap_work(struct work_struct *work)
{
struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);

unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false);
}

void afs_server_init_callback_work(struct work_struct *work)
fs/afs/dir.c
@@ -109,7 +109,7 @@ struct afs_lookup_cookie {
*/
static void afs_dir_read_cleanup(struct afs_read *req)
{
struct address_space *mapping = req->vnode->vfs_inode.i_mapping;
struct address_space *mapping = req->vnode->netfs.inode.i_mapping;
struct folio *folio;
pgoff_t last = req->nr_pages - 1;

@@ -153,7 +153,7 @@ static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
block = kmap_local_folio(folio, offset);
if (block->hdr.magic != AFS_DIR_MAGIC) {
printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
__func__, dvnode->vfs_inode.i_ino,
__func__, dvnode->netfs.inode.i_ino,
pos, offset, size, ntohs(block->hdr.magic));
trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
kunmap_local(block);
@@ -183,7 +183,7 @@ static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
{
union afs_xdr_dir_block *block;
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
struct address_space *mapping = dvnode->netfs.inode.i_mapping;
struct folio *folio;
pgoff_t last = req->nr_pages - 1;
size_t offset, size;
@@ -217,7 +217,7 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
*/
static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
{
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
struct address_space *mapping = dvnode->netfs.inode.i_mapping;
struct folio *folio;
pgoff_t last = req->nr_pages - 1;
int ret = 0;
@@ -269,7 +269,7 @@ static int afs_dir_open(struct inode *inode, struct file *file)
static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
__acquires(&dvnode->validate_lock)
{
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
struct address_space *mapping = dvnode->netfs.inode.i_mapping;
struct afs_read *req;
loff_t i_size;
int nr_pages, i;
@@ -287,7 +287,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
req->cleanup = afs_dir_read_cleanup;

expand:
i_size = i_size_read(&dvnode->vfs_inode);
i_size = i_size_read(&dvnode->netfs.inode);
if (i_size < 2048) {
ret = afs_bad(dvnode, afs_file_error_dir_small);
goto error;
@@ -305,7 +305,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
req->actual_len = i_size; /* May change */
req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
req->data_version = dvnode->status.data_version; /* May change */
iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages,
iov_iter_xarray(&req->def_iter, READ, &dvnode->netfs.inode.i_mapping->i_pages,
0, i_size);
req->iter = &req->def_iter;

@@ -897,7 +897,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,

out_op:
if (op->error == 0) {
inode = &op->file[1].vnode->vfs_inode;
inode = &op->file[1].vnode->netfs.inode;
op->file[1].vnode = NULL;
}

@@ -1139,7 +1139,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
afs_stat_v(dir, n_reval);

/* search the directory for this vnode */
ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1170,7 +1170,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
_debug("%pd: file deleted (uq %u -> %u I:%u)",
dentry, fid.unique,
vnode->fid.unique,
vnode->vfs_inode.i_generation);
vnode->netfs.inode.i_generation);
goto not_found;
}
goto out_valid;
@@ -1368,7 +1368,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
if (d_really_is_positive(dentry)) {
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));

clear_nlink(&vnode->vfs_inode);
clear_nlink(&vnode->netfs.inode);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
@@ -1487,8 +1487,8 @@ static void afs_dir_remove_link(struct afs_operation *op)
/* Already done */
} else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
write_seqlock(&vnode->cb_lock);
drop_nlink(&vnode->vfs_inode);
if (vnode->vfs_inode.i_nlink == 0) {
drop_nlink(&vnode->netfs.inode);
if (vnode->netfs.inode.i_nlink == 0) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
__afs_break_callback(vnode, afs_cb_break_for_unlink);
}
@@ -1504,7 +1504,7 @@ static void afs_dir_remove_link(struct afs_operation *op)
op->error = ret;
}

_debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error);
_debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, op->error);
}

static void afs_unlink_success(struct afs_operation *op)
@@ -1680,8 +1680,8 @@ static void afs_link_success(struct afs_operation *op)
afs_update_dentry_version(op, dvp, op->dentry);
if (op->dentry_2->d_parent == op->dentry->d_parent)
afs_update_dentry_version(op, dvp, op->dentry_2);
ihold(&vp->vnode->vfs_inode);
d_instantiate(op->dentry, &vp->vnode->vfs_inode);
ihold(&vp->vnode->netfs.inode);
d_instantiate(op->dentry, &vp->vnode->netfs.inode);
}

static void afs_link_put(struct afs_operation *op)
@@ -109,7 +109,7 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
*/
static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
{
struct address_space *mapping = vnode->vfs_inode.i_mapping;
struct address_space *mapping = vnode->netfs.inode.i_mapping;
struct folio *folio;

folio = __filemap_get_folio(mapping, index,
@@ -216,7 +216,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,

_enter(",,{%d,%s},", name->len, name->name);

i_size = i_size_read(&vnode->vfs_inode);
i_size = i_size_read(&vnode->netfs.inode);
if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
@@ -336,7 +336,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
if (b < AFS_DIR_BLOCKS_WITH_CTR)
meta->meta.alloc_ctrs[b] -= need_slots;

inode_inc_iversion_raw(&vnode->vfs_inode);
inode_inc_iversion_raw(&vnode->netfs.inode);
afs_stat_v(vnode, n_dir_cr);
_debug("Insert %s in %u[%u]", name->name, b, slot);

@@ -383,7 +383,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,

_enter(",,{%d,%s},", name->len, name->name);

i_size = i_size_read(&vnode->vfs_inode);
i_size = i_size_read(&vnode->netfs.inode);
if (i_size < AFS_DIR_BLOCK_SIZE ||
i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
@@ -463,7 +463,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
if (b < AFS_DIR_BLOCKS_WITH_CTR)
meta->meta.alloc_ctrs[b] += need_slots;

inode_set_iversion_raw(&vnode->vfs_inode, vnode->status.data_version);
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
afs_stat_v(vnode, n_dir_rm);
_debug("Remove %s from %u[%u]", name->name, b, slot);
@@ -131,7 +131,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
goto out;
} while (!d_is_negative(sdentry));

ihold(&vnode->vfs_inode);
ihold(&vnode->netfs.inode);

ret = afs_do_silly_rename(dvnode, vnode, dentry, sdentry, key);
switch (ret) {
@@ -148,7 +148,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
d_drop(sdentry);
}

iput(&vnode->vfs_inode);
iput(&vnode->netfs.inode);
dput(sdentry);
out:
_leave(" = %d", ret);
@@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
/* there shouldn't be an existing inode */
BUG_ON(!(inode->i_state & I_NEW));

netfs_i_context_init(inode, NULL);
netfs_inode_init(inode, NULL);
inode->i_size = 0;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
if (root) {
@@ -194,7 +194,7 @@ int afs_release(struct inode *inode, struct file *file)
afs_put_wb_key(af->wb);

if ((file->f_mode & FMODE_WRITE)) {
i_size = i_size_read(&vnode->vfs_inode);
i_size = i_size_read(&vnode->netfs.inode);
afs_set_cache_aux(vnode, &aux);
fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size);
} else {
@@ -325,7 +325,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq)
fsreq->iter = &fsreq->def_iter;

iov_iter_xarray(&fsreq->def_iter, READ,
&fsreq->vnode->vfs_inode.i_mapping->i_pages,
&fsreq->vnode->netfs.inode.i_mapping->i_pages,
fsreq->pos, fsreq->len);

afs_fetch_data(fsreq->vnode, fsreq);
@@ -232,14 +232,14 @@ int afs_put_operation(struct afs_operation *op)
if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
if (op->file[0].put_vnode)
iput(&op->file[0].vnode->vfs_inode);
iput(&op->file[0].vnode->netfs.inode);
if (op->file[1].put_vnode)
iput(&op->file[1].vnode->vfs_inode);
iput(&op->file[1].vnode->netfs.inode);

if (op->more_files) {
for (i = 0; i < op->nr_files - 2; i++)
if (op->more_files[i].put_vnode)
iput(&op->more_files[i].vnode->vfs_inode);
iput(&op->more_files[i].vnode->netfs.inode);
kfree(op->more_files);
}
@@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
*/
static void afs_set_netfs_context(struct afs_vnode *vnode)
{
netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops);
netfs_inode_init(&vnode->netfs.inode, &afs_req_ops);
}

/*
@@ -96,7 +96,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_flags |= S_NOATIME;
inode->i_uid = make_kuid(&init_user_ns, status->owner);
inode->i_gid = make_kgid(&init_user_ns, status->group);
set_nlink(&vnode->vfs_inode, status->nlink);
set_nlink(&vnode->netfs.inode, status->nlink);

switch (status->type) {
case AFS_FTYPE_FILE:
@@ -139,7 +139,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
afs_set_netfs_context(vnode);

vnode->invalid_before = status->data_version;
inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
inode_set_iversion_raw(&vnode->netfs.inode, status->data_version);

if (!vp->scb.have_cb) {
/* it's a symlink we just created (the fileserver
@@ -163,7 +163,7 @@ static void afs_apply_status(struct afs_operation *op,
{
struct afs_file_status *status = &vp->scb.status;
struct afs_vnode *vnode = vp->vnode;
struct inode *inode = &vnode->vfs_inode;
struct inode *inode = &vnode->netfs.inode;
struct timespec64 t;
umode_t mode;
bool data_changed = false;
@@ -246,7 +246,7 @@ static void afs_apply_status(struct afs_operation *op,
* idea of what the size should be that's not the same as
* what's on the server.
*/
vnode->netfs_ctx.remote_i_size = status->size;
vnode->netfs.remote_i_size = status->size;
if (change_size) {
afs_set_i_size(vnode, status->size);
inode->i_ctime = t;
@@ -289,7 +289,7 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
*/
if (vp->scb.status.abort_code == VNOVNODE) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
clear_nlink(&vnode->vfs_inode);
clear_nlink(&vnode->netfs.inode);
__afs_break_callback(vnode, afs_cb_break_for_deleted);
op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
}
@@ -306,8 +306,8 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
if (vp->scb.have_cb)
afs_apply_callback(op, vp);
} else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) {
drop_nlink(&vnode->vfs_inode);
if (vnode->vfs_inode.i_nlink == 0) {
drop_nlink(&vnode->netfs.inode);
if (vnode->netfs.inode.i_nlink == 0) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
__afs_break_callback(vnode, afs_cb_break_for_deleted);
}
@@ -326,7 +326,7 @@ static void afs_fetch_status_success(struct afs_operation *op)
struct afs_vnode *vnode = vp->vnode;
int ret;

if (vnode->vfs_inode.i_state & I_NEW) {
if (vnode->netfs.inode.i_state & I_NEW) {
ret = afs_inode_init_from_status(op, vp, vnode);
op->error = ret;
if (ret == 0)
@@ -430,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
struct afs_vnode_cache_aux aux;

if (vnode->status.type != AFS_FTYPE_FILE) {
vnode->netfs_ctx.cache = NULL;
vnode->netfs.cache = NULL;
return;
}

@@ -457,7 +457,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
{
struct afs_vnode_param *dvp = &op->file[0];
struct super_block *sb = dvp->vnode->vfs_inode.i_sb;
struct super_block *sb = dvp->vnode->netfs.inode.i_sb;
struct afs_vnode *vnode;
struct inode *inode;
int ret;
@@ -582,10 +582,10 @@ static void afs_zap_data(struct afs_vnode *vnode)
/* nuke all the non-dirty pages that aren't locked, mapped or being
* written back in a regular file and completely discard the pages in a
* directory or symlink */
if (S_ISREG(vnode->vfs_inode.i_mode))
invalidate_remote_inode(&vnode->vfs_inode);
if (S_ISREG(vnode->netfs.inode.i_mode))
invalidate_remote_inode(&vnode->netfs.inode);
else
invalidate_inode_pages2(vnode->vfs_inode.i_mapping);
invalidate_inode_pages2(vnode->netfs.inode.i_mapping);
}

/*
@@ -683,8 +683,8 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
key_serial(key));

if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
if (vnode->vfs_inode.i_nlink)
clear_nlink(&vnode->vfs_inode);
if (vnode->netfs.inode.i_nlink)
clear_nlink(&vnode->netfs.inode);
goto valid;
}

@@ -826,7 +826,7 @@ void afs_evict_inode(struct inode *inode)
static void afs_setattr_success(struct afs_operation *op)
{
struct afs_vnode_param *vp = &op->file[0];
struct inode *inode = &vp->vnode->vfs_inode;
struct inode *inode = &vp->vnode->netfs.inode;
loff_t old_i_size = i_size_read(inode);

op->setattr.old_i_size = old_i_size;
@@ -843,7 +843,7 @@ static void afs_setattr_success(struct afs_operation *op)
static void afs_setattr_edit_file(struct afs_operation *op)
{
struct afs_vnode_param *vp = &op->file[0];
struct inode *inode = &vp->vnode->vfs_inode;
struct inode *inode = &vp->vnode->netfs.inode;

if (op->setattr.attr->ia_valid & ATTR_SIZE) {
loff_t size = op->setattr.attr->ia_size;
@@ -875,7 +875,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH;
struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
struct inode *inode = &vnode->vfs_inode;
struct inode *inode = &vnode->netfs.inode;
loff_t i_size;
int ret;
@@ -619,12 +619,7 @@ enum afs_lock_state {
* leak from one inode to another.
*/
struct afs_vnode {
struct {
/* These must be contiguous */
struct inode vfs_inode; /* the VFS's inode record */
struct netfs_i_context netfs_ctx; /* Netfslib context */
};

struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct afs_volume *volume; /* volume on which vnode resides */
struct afs_fid fid; /* the file identifier for this inode */
struct afs_file_status status; /* AFS status info for this file */
@@ -675,7 +670,7 @@ struct afs_vnode {
static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
{
#ifdef CONFIG_AFS_FSCACHE
return netfs_i_cookie(&vnode->vfs_inode);
return netfs_i_cookie(&vnode->netfs.inode);
#else
return NULL;
#endif
@@ -685,7 +680,7 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
struct fscache_cookie *cookie)
{
#ifdef CONFIG_AFS_FSCACHE
vnode->netfs_ctx.cache = cookie;
vnode->netfs.cache = cookie;
#endif
}

@@ -892,7 +887,7 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl

afs_set_cache_aux(vnode, &aux);
fscache_invalidate(afs_vnode_cache(vnode), &aux,
i_size_read(&vnode->vfs_inode), flags);
i_size_read(&vnode->netfs.inode), flags);
}

/*
@@ -1217,7 +1212,7 @@ static inline struct afs_net *afs_i2net(struct inode *inode)

static inline struct afs_net *afs_v2net(struct afs_vnode *vnode)
{
return afs_i2net(&vnode->vfs_inode);
return afs_i2net(&vnode->netfs.inode);
}

static inline struct afs_net *afs_sock2net(struct sock *sk)
@@ -1593,12 +1588,12 @@ extern void yfs_fs_store_opaque_acl2(struct afs_operation *);
*/
static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
{
return container_of(inode, struct afs_vnode, vfs_inode);
return container_of(inode, struct afs_vnode, netfs.inode);
}

static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
{
return &vnode->vfs_inode;
return &vnode->netfs.inode;
}

/*
@@ -1621,8 +1616,8 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
*/
static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
{
i_size_write(&vnode->vfs_inode, size);
vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
i_size_write(&vnode->netfs.inode, size);
vnode->netfs.inode.i_blocks = ((size + 1023) >> 10) << 1;
}

/*
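A small arithmetic note on the afs_set_i_size() hunk just above: i_blocks is counted in 512-byte sectors, and ((size + 1023) >> 10) << 1 rounds the byte size up to whole kibibytes before converting kibibytes to sectors:

    /* Worked examples of the i_blocks rounding:
     *   size = 1    -> ((1 + 1023) >> 10) << 1 = 1 << 1 = 2 sectors
     *   size = 4096 -> ((4096 + 1023) >> 10) << 1 = 4 << 1 = 8 sectors
     */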
@@ -659,7 +659,7 @@ static void afs_i_init_once(void *_vnode)
struct afs_vnode *vnode = _vnode;

memset(vnode, 0, sizeof(*vnode));
inode_init_once(&vnode->vfs_inode);
inode_init_once(&vnode->netfs.inode);
mutex_init(&vnode->io_lock);
init_rwsem(&vnode->validate_lock);
spin_lock_init(&vnode->wb_lock);
@@ -700,8 +700,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
init_rwsem(&vnode->rmdir_lock);
INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);

_leave(" = %p", &vnode->vfs_inode);
return &vnode->vfs_inode;
_leave(" = %p", &vnode->netfs.inode);
return &vnode->netfs.inode;
}

static void afs_free_inode(struct inode *inode)
@@ -146,10 +146,10 @@ int afs_write_end(struct file *file, struct address_space *mapping,

write_end_pos = pos + copied;

i_size = i_size_read(&vnode->vfs_inode);
i_size = i_size_read(&vnode->netfs.inode);
if (write_end_pos > i_size) {
write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
i_size = i_size_read(&vnode->netfs.inode);
if (write_end_pos > i_size)
afs_set_i_size(vnode, write_end_pos);
write_sequnlock(&vnode->cb_lock);
@@ -257,7 +257,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
*/
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
struct address_space *mapping = vnode->vfs_inode.i_mapping;
struct address_space *mapping = vnode->netfs.inode.i_mapping;
struct folio *folio;
pgoff_t end;

@@ -354,7 +354,6 @@ static const struct afs_operation_ops afs_store_data_operation = {
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
bool laundering)
{
struct netfs_i_context *ictx = &vnode->netfs_ctx;
struct afs_operation *op;
struct afs_wb_key *wbk = NULL;
loff_t size = iov_iter_count(iter);
@@ -385,9 +384,9 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
op->store.write_iter = iter;
op->store.pos = pos;
op->store.size = size;
op->store.i_size = max(pos + size, ictx->remote_i_size);
op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
op->store.laundering = laundering;
op->mtime = vnode->vfs_inode.i_mtime;
op->mtime = vnode->netfs.inode.i_mtime;
op->flags |= AFS_OPERATION_UNINTR;
op->ops = &afs_store_data_operation;

@@ -554,7 +553,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
struct iov_iter iter;
unsigned long priv;
unsigned int offset, to, len, max_len;
loff_t i_size = i_size_read(&vnode->vfs_inode);
loff_t i_size = i_size_read(&vnode->netfs.inode);
bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
long count = wbc->nr_to_write;
@@ -845,7 +844,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
_enter("{%llx:%llu},{%zu},",
vnode->fid.vid, vnode->fid.vnode, count);

if (IS_SWAPFILE(&vnode->vfs_inode)) {
if (IS_SWAPFILE(&vnode->netfs.inode)) {
printk(KERN_INFO
"AFS: Attempt to write to active swap file!\n");
return -EBUSY;
@@ -958,8 +957,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
/* Discard unused keys */
spin_lock(&vnode->wb_lock);

if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
if (refcount_read(&wbk->usage) == 1)
list_move(&wbk->vnode_link, &graveyard);
@@ -1034,6 +1033,6 @@ static void afs_write_to_cache(struct afs_vnode *vnode,
bool caching)
{
fscache_write_to_cache(afs_vnode_cache(vnode),
vnode->vfs_inode.i_mapping, start, len, i_size,
vnode->netfs.inode.i_mapping, start, len, i_size,
afs_write_to_cache_done, vnode, caching);
}
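The afs_write_end() hunk at the top of this file's changes keeps the usual optimistic-then-locked size update: i_size is sampled without the lock, and only if the write extends the file is cb_lock taken and the size re-read before afs_set_i_size(). A condensed userspace sketch of the same idiom, with a pthread mutex standing in for the kernel's cb_lock seqlock (in the kernel, i_size_read() makes the unlocked sample safe; here it is only illustrative)::

    #include <pthread.h>

    static long long file_size;
    static pthread_mutex_t size_lock = PTHREAD_MUTEX_INITIALIZER;

    static void maybe_extend(long long write_end_pos)
    {
        if (write_end_pos <= file_size)     /* unlocked fast path */
            return;
        pthread_mutex_lock(&size_lock);
        if (write_end_pos > file_size)      /* re-check under the lock */
            file_size = write_end_pos;
        pthread_mutex_unlock(&size_lock);
    }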
@@ -1798,7 +1798,7 @@ enum {
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
s64 pool, struct ceph_string *pool_ns)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
struct rb_node **p, *parent;
@@ -1913,7 +1913,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
0, false, true);
err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

wr_req->r_mtime = ci->vfs_inode.i_mtime;
wr_req->r_mtime = ci->netfs.inode.i_mtime;
err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

if (!err)
@@ -29,9 +29,9 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
if (!(inode->i_state & I_NEW))
return;

WARN_ON_ONCE(ci->netfs_ctx.cache);
WARN_ON_ONCE(ci->netfs.cache);

ci->netfs_ctx.cache =
ci->netfs.cache =
fscache_acquire_cookie(fsc->fscache, 0,
&ci->i_vino, sizeof(ci->i_vino),
&ci->i_version, sizeof(ci->i_version),
@@ -28,7 +28,7 @@ void ceph_fscache_invalidate(struct inode *inode, bool dio_write);

static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
{
return netfs_i_cookie(&ci->vfs_inode);
return netfs_i_cookie(&ci->netfs.inode);
}

static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
fs/ceph/caps.c
@@ -492,7 +492,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
struct ceph_mount_options *opt = mdsc->fsc->mount_options;
ci->i_hold_caps_max = round_jiffies(jiffies +
opt->caps_wanted_delay_max * HZ);
dout("__cap_set_timeouts %p %lu\n", &ci->vfs_inode,
dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode,
ci->i_hold_caps_max - jiffies);
}

@@ -507,7 +507,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->vfs_inode,
dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode,
ci->i_ceph_flags, ci->i_hold_caps_max);
if (!mdsc->stopping) {
spin_lock(&mdsc->cap_delay_lock);
@@ -531,7 +531,7 @@ static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode);
spin_lock(&mdsc->cap_delay_lock);
ci->i_ceph_flags |= CEPH_I_FLUSH;
if (!list_empty(&ci->i_cap_delay_list))
@@ -548,7 +548,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
{
dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
dout("__cap_delay_cancel %p\n", &ci->netfs.inode);
if (list_empty(&ci->i_cap_delay_list))
return;
spin_lock(&mdsc->cap_delay_lock);
@@ -568,7 +568,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
* Each time we receive FILE_CACHE anew, we increment
* i_rdcache_gen.
*/
if (S_ISREG(ci->vfs_inode.i_mode) &&
if (S_ISREG(ci->netfs.inode.i_mode) &&
(issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
(had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
ci->i_rdcache_gen++;
@@ -583,14 +583,14 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
if (issued & CEPH_CAP_FILE_SHARED)
atomic_inc(&ci->i_shared_gen);
if (S_ISDIR(ci->vfs_inode.i_mode)) {
dout(" marking %p NOT complete\n", &ci->vfs_inode);
if (S_ISDIR(ci->netfs.inode.i_mode)) {
dout(" marking %p NOT complete\n", &ci->netfs.inode);
__ceph_dir_clear_complete(ci);
}
}

/* Wipe saved layout if we're losing DIR_CREATE caps */
if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
!(issued & CEPH_CAP_DIR_CREATE)) {
ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
@@ -771,7 +771,7 @@ static int __cap_is_valid(struct ceph_cap *cap)

if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
dout("__cap_is_valid %p cap %p issued %s "
"but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
"but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode,
cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
return 0;
}
@@ -797,7 +797,7 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
if (!__cap_is_valid(cap))
continue;
dout("__ceph_caps_issued %p cap %p issued %s\n",
&ci->vfs_inode, cap, ceph_cap_string(cap->issued));
&ci->netfs.inode, cap, ceph_cap_string(cap->issued));
have |= cap->issued;
if (implemented)
*implemented |= cap->implemented;
@@ -844,12 +844,12 @@ static void __touch_cap(struct ceph_cap *cap)

spin_lock(&s->s_cap_lock);
if (!s->s_cap_iterator) {
dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap,
s->s_mds);
list_move_tail(&cap->session_caps, &s->s_caps);
} else {
dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
&cap->ci->vfs_inode, cap, s->s_mds);
&cap->ci->netfs.inode, cap, s->s_mds);
}
spin_unlock(&s->s_cap_lock);
}
@@ -867,7 +867,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)

if ((have & mask) == mask) {
dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
" (mask %s)\n", ceph_ino(&ci->vfs_inode),
" (mask %s)\n", ceph_ino(&ci->netfs.inode),
ceph_cap_string(have),
ceph_cap_string(mask));
return 1;
@@ -879,7 +879,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
continue;
if ((cap->issued & mask) == mask) {
dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
" (mask %s)\n", ceph_ino(&ci->vfs_inode), cap,
" (mask %s)\n", ceph_ino(&ci->netfs.inode), cap,
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
if (touch)
@@ -891,7 +891,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
have |= cap->issued;
if ((have & mask) == mask) {
dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
" (mask %s)\n", ceph_ino(&ci->vfs_inode),
" (mask %s)\n", ceph_ino(&ci->netfs.inode),
ceph_cap_string(cap->issued),
ceph_cap_string(mask));
if (touch) {
@@ -919,7 +919,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
int touch)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
int r;

r = __ceph_caps_issued_mask(ci, mask, touch);
@@ -950,7 +950,7 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
int ret;

spin_lock(&ci->i_ceph_lock);
@@ -969,8 +969,8 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
if (ci->i_rd_ref)
used |= CEPH_CAP_FILE_RD;
if (ci->i_rdcache_ref ||
(S_ISREG(ci->vfs_inode.i_mode) &&
ci->vfs_inode.i_data.nrpages))
(S_ISREG(ci->netfs.inode.i_mode) &&
ci->netfs.inode.i_data.nrpages))
used |= CEPH_CAP_FILE_CACHE;
if (ci->i_wr_ref)
used |= CEPH_CAP_FILE_WR;
@@ -993,11 +993,11 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
struct ceph_mount_options *opt =
ceph_inode_to_client(&ci->vfs_inode)->mount_options;
ceph_inode_to_client(&ci->netfs.inode)->mount_options;
unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;

if (S_ISDIR(ci->vfs_inode.i_mode)) {
if (S_ISDIR(ci->netfs.inode.i_mode)) {
int want = 0;

/* use used_cutoff here, to keep dir's wanted caps longer */
@@ -1050,7 +1050,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
int __ceph_caps_wanted(struct ceph_inode_info *ci)
{
int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
if (S_ISDIR(ci->vfs_inode.i_mode)) {
if (S_ISDIR(ci->netfs.inode.i_mode)) {
/* we want EXCL if holding caps of dir ops */
if (w & CEPH_CAP_ANY_DIR_OPS)
w |= CEPH_CAP_FILE_EXCL;
@@ -1116,9 +1116,9 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)

lockdep_assert_held(&ci->i_ceph_lock);

dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);

mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;

/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
@@ -1169,7 +1169,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
* keep i_snap_realm.
*/
if (ci->i_wr_ref == 0 && ci->i_snap_realm)
ceph_change_snap_realm(&ci->vfs_inode, NULL);
ceph_change_snap_realm(&ci->netfs.inode, NULL);

__cap_delay_cancel(mdsc, ci);
}
@@ -1188,11 +1188,11 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)

lockdep_assert_held(&ci->i_ceph_lock);

fsc = ceph_inode_to_client(&ci->vfs_inode);
fsc = ceph_inode_to_client(&ci->netfs.inode);
WARN_ON_ONCE(ci->i_auth_cap == cap &&
!list_empty(&ci->i_dirty_item) &&
!fsc->blocklisted &&
!ceph_inode_is_shutdown(&ci->vfs_inode));
!ceph_inode_is_shutdown(&ci->netfs.inode));

__ceph_remove_cap(cap, queue_release);
}
@@ -1343,7 +1343,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
int flushing, u64 flush_tid, u64 oldest_flush_tid)
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
int held, revoking;

lockdep_assert_held(&ci->i_ceph_lock);
@@ -1440,7 +1440,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
{
struct ceph_msg *msg;
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;

msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
if (!msg) {
@@ -1528,7 +1528,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_cap_snap *capsnap;
u64 oldest_flush_tid = 0;
@@ -1622,7 +1622,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
void ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_session *session = NULL;
int mds;
@@ -1682,8 +1682,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
struct ceph_cap_flush **pcf)
{
struct ceph_mds_client *mdsc =
ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
struct inode *inode = &ci->vfs_inode;
ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
struct inode *inode = &ci->netfs.inode;
int was = ci->i_dirty_caps;
int dirty = 0;

@@ -1696,7 +1696,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
return 0;
}

dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode,
ceph_cap_string(mask), ceph_cap_string(was),
ceph_cap_string(was | mask));
ci->i_dirty_caps |= mask;
@@ -1712,7 +1712,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
ci->i_snap_realm->cached_context);
}
dout(" inode %p now dirty snapc %p auth cap %p\n",
&ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
&ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
list_add(&ci->i_dirty_item, &session->s_cap_dirty);
@@ -1875,7 +1875,7 @@ static int try_nonblocking_invalidate(struct inode *inode)

bool __ceph_should_report_size(struct ceph_inode_info *ci)
{
loff_t size = i_size_read(&ci->vfs_inode);
loff_t size = i_size_read(&ci->netfs.inode);
/* mds will adjust max size according to the reported size */
if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
return false;
@@ -1900,7 +1900,7 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci)
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_cap *cap;
u64 flush_tid, oldest_flush_tid;
@@ -2467,7 +2467,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_cap *cap;
struct ceph_cap_flush *cf;
int ret;
@@ -2560,7 +2560,7 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
pr_err("%p auth cap %p not mds%d ???\n",
&ci->vfs_inode, cap, session->s_mds);
&ci->netfs.inode, cap, session->s_mds);
spin_unlock(&ci->i_ceph_lock);
continue;
}
@@ -2610,7 +2610,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
pr_err("%p auth cap %p not mds%d ???\n",
&ci->vfs_inode, cap, session->s_mds);
&ci->netfs.inode, cap, session->s_mds);
spin_unlock(&ci->i_ceph_lock);
continue;
}
@@ -2630,7 +2630,7 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,

lockdep_assert_held(&ci->i_ceph_lock);

dout("%s %p flushing %s\n", __func__, &ci->vfs_inode,
dout("%s %p flushing %s\n", __func__, &ci->netfs.inode,
ceph_cap_string(ci->i_flushing_caps));

if (!list_empty(&ci->i_cap_flush_list)) {
@@ -2673,10 +2673,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
}
if (got & CEPH_CAP_FILE_BUFFER) {
if (ci->i_wb_ref == 0)
ihold(&ci->vfs_inode);
ihold(&ci->netfs.inode);
ci->i_wb_ref++;
dout("%s %p wb %d -> %d (?)\n", __func__,
&ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
&ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref);
}
}

@@ -3004,7 +3004,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
return ret;
}

if (S_ISREG(ci->vfs_inode.i_mode) &&
if (S_ISREG(ci->netfs.inode.i_mode) &&
ci->i_inline_version != CEPH_INLINE_NONE &&
(_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
i_size_read(inode) > 0) {
@@ -3094,7 +3094,7 @@ enum put_cap_refs_mode {
static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
enum put_cap_refs_mode mode)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
int last = 0, put = 0, flushsnaps = 0, wake = 0;
bool check_flushsnaps = false;

@@ -3202,7 +3202,7 @@ void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_cap_snap *capsnap = NULL, *iter;
int put = 0;
bool last = false;
@@ -3698,7 +3698,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
session->s_mds,
&list_first_entry(&session->s_cap_flushing,
struct ceph_inode_info,
i_flushing_item)->vfs_inode);
i_flushing_item)->netfs.inode);
}
}
mdsc->num_cap_flushing--;
@@ -4345,7 +4345,7 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
break;
list_del_init(&ci->i_cap_delay_list);

inode = igrab(&ci->vfs_inode);
inode = igrab(&ci->netfs.inode);
if (inode) {
spin_unlock(&mdsc->cap_delay_lock);
dout("check_delayed_caps on %p\n", inode);
@@ -4373,7 +4373,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
while (!list_empty(&s->s_cap_dirty)) {
ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
i_dirty_item);
inode = &ci->vfs_inode;
inode = &ci->netfs.inode;
ihold(inode);
dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
spin_unlock(&mdsc->cap_dirty_lock);
@@ -4407,7 +4407,7 @@ void __ceph_touch_fmode(struct ceph_inode_info *ci,

void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
int bits = (fmode << 1) | 1;
bool already_opened = false;
int i;
@@ -4441,7 +4441,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
*/
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb);
int bits = (fmode << 1) | 1;
bool is_closed = true;
int i;
@@ -4656,7 +4656,7 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
lockdep_assert_held(&ci->i_ceph_lock);

dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
cap, ci, &ci->netfs.inode);

is_auth = (cap == ci->i_auth_cap);
__ceph_remove_cap(cap, false);
@@ -205,7 +205,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mount_options *opt =
ceph_inode_to_client(&ci->vfs_inode)->mount_options;
ceph_inode_to_client(&ci->netfs.inode)->mount_options;
struct ceph_file_info *fi;
int ret;
@@ -176,7 +176,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
rb_insert_color(&frag->node, &ci->i_fragtree);

dout("get_or_create_frag added %llx.%llx frag %x\n",
ceph_vinop(&ci->vfs_inode), f);
ceph_vinop(&ci->netfs.inode), f);
return frag;
}

@@ -457,10 +457,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
if (!ci)
return NULL;

dout("alloc_inode %p\n", &ci->vfs_inode);
dout("alloc_inode %p\n", &ci->netfs.inode);

/* Set parameters for the netfs library */
netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
netfs_inode_init(&ci->netfs.inode, &ceph_netfs_ops);

spin_lock_init(&ci->i_ceph_lock);

@@ -547,7 +547,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
INIT_WORK(&ci->i_work, ceph_inode_work);
ci->i_work_mask = 0;
memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
return &ci->vfs_inode;
return &ci->netfs.inode;
}

void ceph_free_inode(struct inode *inode)
@@ -1978,7 +1978,7 @@ static void ceph_inode_work(struct work_struct *work)
{
struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
i_work);
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;

if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
dout("writeback %p\n", inode);
@@ -1564,7 +1564,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
p = session->s_caps.next;
while (p != &session->s_caps) {
cap = list_entry(p, struct ceph_cap, session_caps);
inode = igrab(&cap->ci->vfs_inode);
inode = igrab(&cap->ci->netfs.inode);
if (!inode) {
p = p->next;
continue;
@@ -1622,7 +1622,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
int iputs;

dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
cap, ci, &ci->netfs.inode);
spin_lock(&ci->i_ceph_lock);
iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
spin_unlock(&ci->i_ceph_lock);
@@ -521,7 +521,7 @@ static bool has_new_snaps(struct ceph_snap_context *o,
static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap **pcapsnap)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_snap_context *old_snapc, *new_snapc;
struct ceph_cap_snap *capsnap = *pcapsnap;
struct ceph_buffer *old_blob = NULL;
@@ -652,7 +652,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
{
struct inode *inode = &ci->vfs_inode;
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);

BUG_ON(capsnap->writing);
@@ -712,7 +712,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)

spin_lock(&realm->inodes_with_caps_lock);
list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
struct inode *inode = igrab(&ci->vfs_inode);
struct inode *inode = igrab(&ci->netfs.inode);
if (!inode)
continue;
spin_unlock(&realm->inodes_with_caps_lock);
@@ -904,7 +904,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
while (!list_empty(&mdsc->snap_flush_list)) {
ci = list_first_entry(&mdsc->snap_flush_list,
struct ceph_inode_info, i_snap_flush_item);
inode = &ci->vfs_inode;
inode = &ci->netfs.inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
ceph_flush_snaps(ci, &session);
@@ -876,7 +876,7 @@ mempool_t *ceph_wb_pagevec_pool;
static void ceph_inode_init_once(void *foo)
{
struct ceph_inode_info *ci = foo;
inode_init_once(&ci->vfs_inode);
inode_init_once(&ci->netfs.inode);
}

static int __init init_caches(void)
@@ -316,11 +316,7 @@ struct ceph_inode_xattrs_info {
* Ceph inode.
*/
struct ceph_inode_info {
struct {
/* These must be contiguous */
struct inode vfs_inode;
struct netfs_i_context netfs_ctx; /* Netfslib context */
};
struct netfs_inode netfs; /* Netfslib context and vfs inode */
struct ceph_vino i_vino; /* ceph ino + snap */

spinlock_t i_ceph_lock;
@@ -436,7 +432,7 @@ struct ceph_inode_info {
static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
return container_of(inode, struct ceph_inode_info, vfs_inode);
return container_of(inode, struct ceph_inode_info, netfs.inode);
}

static inline struct ceph_fs_client *
@@ -1316,7 +1312,7 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci,
has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY);

if (had_quota != has_quota)
ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota);
}

extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
@@ -57,7 +57,7 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_string *pool_ns;
s64 pool = ci->i_layout.pool_id;
@@ -69,7 +69,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,

pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);

dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode);
down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
@@ -161,7 +161,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
char *val, size_t size)
{
ssize_t ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
s64 pool = ci->i_layout.pool_id;
const char *pool_name;
@@ -313,7 +313,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
char *val, size_t size)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);

return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
}
@@ -321,7 +321,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
char *val, size_t size)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);

return ceph_fmt_xattr(val, size, "client%lld",
ceph_client_gid(fsc->client));
@@ -629,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
}

dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n",
ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val);
ceph_vinop(&ci->netfs.inode), xattr, name_len, name, val_len, val);

return 0;
}
@@ -871,7 +871,7 @@ struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
struct ceph_buffer *old_blob = NULL;
void *dest;

dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
if (ci->i_xattrs.dirty) {
int need = __get_required_blob_size(ci, 0, 0);
@@ -377,7 +377,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode->flags = 0;
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
cifs_inode->server_eof = 0;
cifs_inode->uniqueid = 0;
cifs_inode->createtime = 0;
@@ -389,12 +389,12 @@ cifs_alloc_inode(struct super_block *sb)
* Can not set i_flags here - they get immediately overwritten to zero
* by the VFS.
*/
/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
INIT_LIST_HEAD(&cifs_inode->openFileList);
INIT_LIST_HEAD(&cifs_inode->llist);
INIT_LIST_HEAD(&cifs_inode->deferred_closes);
spin_lock_init(&cifs_inode->deferred_lock);
return &cifs_inode->vfs_inode;
return &cifs_inode->netfs.inode;
}

static void
@@ -1418,7 +1418,7 @@ cifs_init_once(void *inode)
{
struct cifsInodeInfo *cifsi = inode;

inode_init_once(&cifsi->vfs_inode);
inode_init_once(&cifsi->netfs.inode);
init_rwsem(&cifsi->lock_sem);
}
@@ -1479,20 +1479,16 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 #define CIFS_CACHE_RW_FLG        (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG)
 #define CIFS_CACHE_RHW_FLG       (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG)
 
-#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
+#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE))
 #define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG)
-#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
+#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE))
 
 /*
  * One of these for each file inode
  */
 
 struct cifsInodeInfo {
-        struct {
-                /* These must be contiguous */
-                struct inode vfs_inode;           /* the VFS's inode record */
-                struct netfs_i_context netfs_ctx; /* Netfslib context */
-        };
+        struct netfs_inode netfs; /* Netfslib context and vfs inode */
         bool can_cache_brlcks;
         struct list_head llist; /* locks held by this inode */
         /*
@@ -1531,7 +1527,7 @@ struct cifsInodeInfo {
 static inline struct cifsInodeInfo *
 CIFS_I(struct inode *inode)
 {
-        return container_of(inode, struct cifsInodeInfo, vfs_inode);
+        return container_of(inode, struct cifsInodeInfo, netfs.inode);
 }
 
 static inline struct cifs_sb_info *
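The cifsglob.h hunk above is the heart of the conversion: the anonymous struct pair, which only worked because netfslib required its context to sit at a fixed offset directly behind the VFS inode, becomes a single named member of type struct netfs_inode, and CIFS_I() switches to container_of() on that member. A minimal, stand-alone sketch of the embedding pattern, using toy stand-ins for the kernel types (my_inode_info and MY_I() are illustrative names, not CIFS code):

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-ins for the kernel structures. */
    struct inode { long i_ino; };
    struct netfs_inode { struct inode inode; void *cache; };

    /* Filesystem-private inode: the netfs context, and with it the
     * VFS inode, is embedded as a named first member. */
    struct my_inode_info {
        struct netfs_inode netfs;
        int private_state;
    };

    /* Recover the wrapper from a pointer to the embedded VFS inode,
     * the way the new CIFS_I() does. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct my_inode_info *MY_I(struct inode *inode)
    {
        return container_of(inode, struct my_inode_info, netfs.inode);
    }

    int main(void)
    {
        struct my_inode_info info = {
            .netfs.inode.i_ino = 42,
            .private_state = 7,
        };
        struct inode *inode = &info.netfs.inode;  /* what the VFS hands out */

        printf("ino=%ld private=%d\n", inode->i_ino, MY_I(inode)->private_state);
        return 0;
    }

Because the inode is a true member of the wrapper rather than a neighbour that merely happens to be adjacent, container_of() is well-defined C and the compiler sees one complete object instead of two that must never be separated.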
@@ -2004,7 +2004,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                         bool fsuid_only)
 {
         struct cifsFileInfo *open_file = NULL;
-        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
 
         /* only filter by fsuid on multiuser mounts */
         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
@@ -2060,7 +2060,7 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
                 return rc;
         }
 
-        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+        cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
 
         /* only filter by fsuid on multiuser mounts */
         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
@@ -4669,14 +4669,14 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
                 /* This inode is open for write at least once */
                 struct cifs_sb_info *cifs_sb;
 
-                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
+                cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                         /* since no page cache to corrupt on directio
                            we can change size safely */
                         return true;
                 }
 
-                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
+                if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
                         return true;
 
                 return false;
@@ -101,13 +101,13 @@ void cifs_fscache_get_inode_cookie(struct inode *inode)
         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
         struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
-        cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
+        cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
 
-        cifsi->netfs_ctx.cache =
+        cifsi->netfs.cache =
                 fscache_acquire_cookie(tcon->fscache, 0,
                                        &cifsi->uniqueid, sizeof(cifsi->uniqueid),
                                        &cd, sizeof(cd),
-                                       i_size_read(&cifsi->vfs_inode));
+                                       i_size_read(&cifsi->netfs.inode));
 }
 
 void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
@@ -131,7 +131,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
         if (cookie) {
                 cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie);
                 fscache_relinquish_cookie(cookie, false);
-                cifsi->netfs_ctx.cache = NULL;
+                cifsi->netfs.cache = NULL;
         }
 }
 
@@ -52,10 +52,10 @@ void cifs_fscache_fill_coherency(struct inode *inode,
         struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
         memset(cd, 0, sizeof(*cd));
-        cd->last_write_time_sec   = cpu_to_le64(cifsi->vfs_inode.i_mtime.tv_sec);
-        cd->last_write_time_nsec  = cpu_to_le32(cifsi->vfs_inode.i_mtime.tv_nsec);
-        cd->last_change_time_sec  = cpu_to_le64(cifsi->vfs_inode.i_ctime.tv_sec);
-        cd->last_change_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_ctime.tv_nsec);
+        cd->last_write_time_sec   = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
+        cd->last_write_time_nsec  = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
+        cd->last_change_time_sec  = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec);
+        cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec);
 }
 
@@ -115,7 +115,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
                  __func__, cifs_i->uniqueid);
         set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
         /* Invalidate fscache cookie */
-        cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
+        cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd);
         fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
 }
 
@@ -2499,7 +2499,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
                 u64 len)
 {
         struct cifsInodeInfo *cifs_i = CIFS_I(inode);
-        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->vfs_inode.i_sb);
+        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->netfs.inode.i_sb);
         struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
         struct TCP_Server_Info *server = tcon->ses->server;
         struct cifsFileInfo *cfile;
 
@@ -537,11 +537,11 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
         if (oplock == OPLOCK_EXCLUSIVE) {
                 cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
                 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
-                         &cinode->vfs_inode);
+                         &cinode->netfs.inode);
         } else if (oplock == OPLOCK_READ) {
                 cinode->oplock = CIFS_CACHE_READ_FLG;
                 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
-                         &cinode->vfs_inode);
+                         &cinode->netfs.inode);
         } else
                 cinode->oplock = 0;
 }
 
@@ -4260,15 +4260,15 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
         if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
                 cinode->oplock = CIFS_CACHE_RHW_FLG;
                 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
-                         &cinode->vfs_inode);
+                         &cinode->netfs.inode);
         } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
                 cinode->oplock = CIFS_CACHE_RW_FLG;
                 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
-                         &cinode->vfs_inode);
+                         &cinode->netfs.inode);
         } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
                 cinode->oplock = CIFS_CACHE_READ_FLG;
                 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
-                         &cinode->vfs_inode);
+                         &cinode->netfs.inode);
         } else
                 cinode->oplock = 0;
 }
@@ -4307,7 +4307,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
 
         cinode->oplock = new_oplock;
         cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
-                 &cinode->vfs_inode);
+                 &cinode->netfs.inode);
 }
 
 static void
 
@@ -1549,7 +1549,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
         if (IS_ERR(raw_inode))
                 return -EIO;
 
-        /* For fields not not tracking in the in-memory inode,
+        /* For fields not tracking in the in-memory inode,
          * initialise them to zero for new inodes. */
         if (ei->i_state & EXT2_STATE_NEW)
                 memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
 
@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
                                       struct list_head *head)
 {
         assert_spin_locked(&wb->list_lock);
+        assert_spin_locked(&inode->i_lock);
 
         list_move(&inode->i_io_list, head);
 
@@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct list_head *delaying_queue,
                 inode = wb_inode(delaying_queue->prev);
                 if (inode_dirtied_after(inode, dirtied_before))
                         break;
+                spin_lock(&inode->i_lock);
                 list_move(&inode->i_io_list, &tmp);
                 moved++;
-                spin_lock(&inode->i_lock);
                 inode->i_state |= I_SYNC_QUEUED;
                 spin_unlock(&inode->i_lock);
                 if (sb_is_blkdev_sb(inode->i_sb))
@@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct list_head *delaying_queue,
                 goto out;
         }
 
-        /* Move inodes from one superblock together */
+        /*
+         * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
+         * we don't take inode->i_lock here because it is just a pointless overhead.
+         * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
+         * fully under our control.
+         */
         while (!list_empty(&tmp)) {
                 sb = wb_inode(tmp.prev)->i_sb;
                 list_for_each_prev_safe(pos, node, &tmp) {
@@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct super_block *sb,
                          * We'll have another go at writing back this inode
                          * when we completed a full scan of b_io.
                          */
-                        spin_unlock(&inode->i_lock);
                         requeue_io(inode, wb);
+                        spin_unlock(&inode->i_lock);
                         trace_writeback_sb_inodes_requeue(inode);
                         continue;
                 }
@@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 {
         struct super_block *sb = inode->i_sb;
         int dirtytime = 0;
+        struct bdi_writeback *wb = NULL;
 
         trace_writeback_mark_inode_dirty(inode, flags);
 
@@ -2409,6 +2416,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                 inode->i_state &= ~I_DIRTY_TIME;
                 inode->i_state |= flags;
 
+                /*
+                 * Grab inode's wb early because it requires dropping i_lock and we
+                 * need to make sure following checks happen atomically with dirty
+                 * list handling so that we don't move inodes under flush worker's
+                 * hands.
+                 */
+                if (!was_dirty) {
+                        wb = locked_inode_to_wb_and_lock_list(inode);
+                        spin_lock(&inode->i_lock);
+                }
+
                 /*
                  * If the inode is queued for writeback by flush worker, just
                  * update its dirty state. Once the flush worker is done with
@@ -2416,7 +2434,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                  * list, based upon its state.
                  */
                 if (inode->i_state & I_SYNC_QUEUED)
-                        goto out_unlock_inode;
+                        goto out_unlock;
 
                 /*
                  * Only add valid (hashed) inodes to the superblock's
@@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                  */
                 if (!S_ISBLK(inode->i_mode)) {
                         if (inode_unhashed(inode))
-                                goto out_unlock_inode;
+                                goto out_unlock;
                 }
                 if (inode->i_state & I_FREEING)
-                        goto out_unlock_inode;
+                        goto out_unlock;
 
                 /*
                  * If the inode was already on b_dirty/b_io/b_more_io, don't
                  * reposition it (that would break b_dirty time-ordering).
                  */
                 if (!was_dirty) {
-                        struct bdi_writeback *wb;
                         struct list_head *dirty_list;
                         bool wakeup_bdi = false;
 
-                        wb = locked_inode_to_wb_and_lock_list(inode);
-
                         inode->dirtied_when = jiffies;
                         if (dirtytime)
                                 inode->dirtied_time_when = jiffies;
@@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                                                dirty_list);
 
                         spin_unlock(&wb->list_lock);
+                        spin_unlock(&inode->i_lock);
                         trace_writeback_dirty_inode_enqueue(inode);
 
                         /*
@@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *inode, int flags)
                         return;
                 }
         }
+out_unlock:
+        if (wb)
+                spin_unlock(&wb->list_lock);
 out_unlock_inode:
         spin_unlock(&inode->i_lock);
 }
 
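All of the writeback hunks above enforce one rule, which the next hunk writes into the locking documentation: inode->i_io_list may only be touched while inode->i_lock is held, and when wb->list_lock is needed as well it is taken first, so locked_inode_to_wb_and_lock_list() drops and retakes i_lock around acquiring it. A toy user-space model of that ordering, with pthread mutexes standing in for the spinlocks (all names here are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-ins for struct bdi_writeback and struct inode. */
    struct wb  { pthread_mutex_t list_lock; int b_dirty_len; };
    struct ino { pthread_mutex_t i_lock; int on_list; };

    /* Models locked_inode_to_wb_and_lock_list(): called with i_lock
     * held; the order is wb->list_lock before inode->i_lock, so i_lock
     * is dropped and retaken around taking the list lock. */
    static void lock_wb_then_inode(struct ino *inode, struct wb *wb)
    {
        pthread_mutex_unlock(&inode->i_lock);
        pthread_mutex_lock(&wb->list_lock);
        pthread_mutex_lock(&inode->i_lock);
    }

    static void mark_dirty(struct ino *inode, struct wb *wb)
    {
        pthread_mutex_lock(&inode->i_lock);    /* guards on_list */
        lock_wb_then_inode(inode, wb);
        if (!inode->on_list) {                 /* list move under both locks */
            inode->on_list = 1;
            wb->b_dirty_len++;
        }
        pthread_mutex_unlock(&wb->list_lock);
        pthread_mutex_unlock(&inode->i_lock);
    }

    int main(void)
    {
        struct wb  wb    = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct ino inode = { PTHREAD_MUTEX_INITIALIZER, 0 };

        mark_dirty(&inode, &wb);
        printf("on_list=%d b_dirty_len=%d\n", inode.on_list, wb.b_dirty_len);
        return 0;
    }

The drop-and-retake window is also why __mark_inode_dirty() now grabs the wb first and only then performs its I_SYNC_QUEUED, hashed and I_FREEING checks: they must run after i_lock has been reacquired, so the flush worker cannot move the inode in between.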
@@ -27,7 +27,7 @@
  * Inode locking rules:
  *
  * inode->i_lock protects:
- *   inode->i_state, inode->i_hash, __iget()
+ *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
  * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode->i_sb->s_inode_list_lock protects:
 
@@ -155,7 +155,7 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
 void netfs_readahead(struct readahead_control *ractl)
 {
         struct netfs_io_request *rreq;
-        struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
+        struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
         int ret;
 
         _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -215,7 +215,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 {
         struct address_space *mapping = folio_file_mapping(folio);
         struct netfs_io_request *rreq;
-        struct netfs_i_context *ctx = netfs_i_context(mapping->host);
+        struct netfs_inode *ctx = netfs_inode(mapping->host);
         int ret;
 
         _enter("%lx", folio_index(folio));
@@ -331,7 +331,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
                       void **_fsdata)
 {
         struct netfs_io_request *rreq;
-        struct netfs_i_context *ctx = netfs_i_context(file_inode(file ));
+        struct netfs_inode *ctx = netfs_inode(file_inode(file ));
         struct folio *folio;
         unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
         pgoff_t index = pos >> PAGE_SHIFT;
 
@@ -91,7 +91,7 @@ static inline void netfs_stat_d(atomic_t *stat)
 /*
  * Miscellaneous functions.
  */
-static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
 {
 #if IS_ENABLED(CONFIG_FSCACHE)
         struct fscache_cookie *cookie = ctx->cache;
 
@@ -18,7 +18,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 {
         static atomic_t debug_ids;
         struct inode *inode = file ? file_inode(file) : mapping->host;
-        struct netfs_i_context *ctx = netfs_i_context(inode);
+        struct netfs_inode *ctx = netfs_inode(inode);
         struct netfs_io_request *rreq;
         int ret;
 
@@ -79,6 +79,7 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "../internal.h" /* ugh */
 
 #include <linux/uaccess.h>
@@ -425,9 +426,11 @@ EXPORT_SYMBOL(mark_info_dirty);
 int dquot_acquire(struct dquot *dquot)
 {
         int ret = 0, ret2 = 0;
+        unsigned int memalloc;
         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
         mutex_lock(&dquot->dq_lock);
+        memalloc = memalloc_nofs_save();
         if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
                 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
                 if (ret < 0)
@@ -458,6 +461,7 @@ int dquot_acquire(struct dquot *dquot)
         smp_mb__before_atomic();
         set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
+        memalloc_nofs_restore(memalloc);
         mutex_unlock(&dquot->dq_lock);
         return ret;
 }
@@ -469,9 +473,11 @@ EXPORT_SYMBOL(dquot_acquire);
 int dquot_commit(struct dquot *dquot)
 {
         int ret = 0;
+        unsigned int memalloc;
         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
         mutex_lock(&dquot->dq_lock);
+        memalloc = memalloc_nofs_save();
         if (!clear_dquot_dirty(dquot))
                 goto out_lock;
         /* Inactive dquot can be only if there was error during read/init
@@ -481,6 +487,7 @@ int dquot_commit(struct dquot *dquot)
         else
                 ret = -EIO;
 out_lock:
+        memalloc_nofs_restore(memalloc);
         mutex_unlock(&dquot->dq_lock);
         return ret;
 }
@@ -492,9 +499,11 @@ EXPORT_SYMBOL(dquot_commit);
 int dquot_release(struct dquot *dquot)
 {
         int ret = 0, ret2 = 0;
+        unsigned int memalloc;
         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
         mutex_lock(&dquot->dq_lock);
+        memalloc = memalloc_nofs_save();
         /* Check whether we are not racing with some other dqget() */
         if (dquot_is_busy(dquot))
                 goto out_dqlock;
@@ -510,6 +519,7 @@ int dquot_release(struct dquot *dquot)
         }
         clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_dqlock:
+        memalloc_nofs_restore(memalloc);
         mutex_unlock(&dquot->dq_lock);
         return ret;
 }
 
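The dquot hunks above bracket each quota I/O section in memalloc_nofs_save()/memalloc_nofs_restore(): inside the saved scope every allocation implicitly behaves as GFP_NOFS, so direct reclaim cannot re-enter the filesystem while dq_lock is held. A kernel-side sketch of the pattern (do_quota_io() is a hypothetical helper and this compiles only in kernel context):

    #include <linux/quota.h>
    #include <linux/sched/mm.h>

    static int do_quota_io(struct dquot *dquot);  /* hypothetical I/O helper */

    static int quota_io_under_nofs(struct dquot *dquot)
    {
        unsigned int memalloc;
        int ret;

        mutex_lock(&dquot->dq_lock);
        /* Allocations in this scope implicitly lose __GFP_FS. */
        memalloc = memalloc_nofs_save();

        ret = do_quota_io(dquot);

        memalloc_nofs_restore(memalloc);
        mutex_unlock(&dquot->dq_lock);
        return ret;
    }

Saving the previous flags into a local and restoring them, rather than clearing unconditionally, lets these sections nest inside an outer NOFS or NOIO scope without destroying it.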
@@ -119,9 +119,10 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
                               bool was_async);
 
 /*
- * Per-inode description.  This must be directly after the inode struct.
+ * Per-inode context.  This wraps the VFS inode.
  */
-struct netfs_i_context {
+struct netfs_inode {
+        struct inode inode;              /* The VFS inode */
         const struct netfs_request_ops *ops;
 #if IS_ENABLED(CONFIG_FSCACHE)
         struct fscache_cookie *cache;
@@ -256,7 +257,7 @@ struct netfs_cache_ops {
          * boundary as appropriate.
          */
         enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
-                                             loff_t i_size);
+                                                 loff_t i_size);
 
         /* Prepare a write operation, working out what part of the write we can
          * actually do.
@@ -288,45 +289,35 @@ extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 extern void netfs_stats_show(struct seq_file *);
 
 /**
- * netfs_i_context - Get the netfs inode context from the inode
+ * netfs_inode - Get the netfs inode context from the inode
  * @inode: The inode to query
  *
  * Get the netfs lib inode context from the network filesystem's inode.  The
  * context struct is expected to directly follow on from the VFS inode struct.
  */
-static inline struct netfs_i_context *netfs_i_context(struct inode *inode)
+static inline struct netfs_inode *netfs_inode(struct inode *inode)
 {
-        return (void *)inode + sizeof(*inode);
+        return container_of(inode, struct netfs_inode, inode);
 }
 
 /**
- * netfs_inode - Get the netfs inode from the inode context
- * @ctx: The context to query
- *
- * Get the netfs inode from the netfs library's inode context.  The VFS inode
- * is expected to directly precede the context struct.
- */
-static inline struct inode *netfs_inode(struct netfs_i_context *ctx)
-{
-        return (void *)ctx - sizeof(struct inode);
-}
-
-/**
- * netfs_i_context_init - Initialise a netfs lib context
+ * netfs_inode_init - Initialise a netfslib inode context
  * @inode: The inode with which the context is associated
  * @ops: The netfs's operations list
  *
  * Initialise the netfs library context struct.  This is expected to follow on
  * directly from the VFS inode struct.
  */
-static inline void netfs_i_context_init(struct inode *inode,
-                                        const struct netfs_request_ops *ops)
+static inline void netfs_inode_init(struct inode *inode,
+                                    const struct netfs_request_ops *ops)
 {
-        struct netfs_i_context *ctx = netfs_i_context(inode);
+        struct netfs_inode *ctx = netfs_inode(inode);
 
-        memset(ctx, 0, sizeof(*ctx));
         ctx->ops = ops;
         ctx->remote_i_size = i_size_read(inode);
+#if IS_ENABLED(CONFIG_FSCACHE)
+        ctx->cache = NULL;
+#endif
 }
 
 /**
@@ -338,7 +329,7 @@ static inline void netfs_i_context_init(struct inode *inode,
  */
 static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
 {
-        struct netfs_i_context *ctx = netfs_i_context(inode);
+        struct netfs_inode *ctx = netfs_inode(inode);
 
         ctx->remote_i_size = new_i_size;
 }
@@ -352,7 +343,7 @@ static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
 static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
 {
 #if IS_ENABLED(CONFIG_FSCACHE)
-        struct netfs_i_context *ctx = netfs_i_context(inode);
+        struct netfs_inode *ctx = netfs_inode(inode);
         return ctx->cache;
 #else
         return NULL;
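Taken together, the netfs.h hunks replace a fragile layout contract (the context had to be placed directly after the inode in memory) with an explicit embedding: struct netfs_inode now contains the struct inode, netfs_inode() becomes a container_of() wrapper, and the old reverse helper disappears because the VFS inode is reachable as a plain member. The blanket memset() in the init helper goes away for the same reason; it would now wipe the embedded VFS inode, so only ctx->cache is cleared explicitly. A sketch of how a filesystem sits on top of the API as it stands at this commit (the myfs names and the empty ops table are hypothetical):

    #include <linux/fs.h>
    #include <linux/netfs.h>

    /* Hypothetical filesystem inode wrapper: the netfs context, which
     * itself embeds the VFS inode, comes first. */
    struct myfs_inode {
        struct netfs_inode netfs;
        unsigned long myfs_flags;
    };

    static struct myfs_inode *MYFS_I(struct inode *inode)
    {
        return container_of(inode, struct myfs_inode, netfs.inode);
    }

    static const struct netfs_request_ops myfs_req_ops = {
        /* .init_request, .issue_read, ... elided */
    };

    /* Run once a new inode has been set up, e.g. after iget5_locked().
     * Note that at this point in the tree netfs_inode_init() still
     * takes the VFS inode pointer, not the netfs_inode wrapper. */
    static void myfs_set_up_netfs(struct inode *inode)
    {
        netfs_inode_init(inode, &myfs_req_ops);
    }

Callers elsewhere in the tree (the ceph and cifs hunks above) then spell inode accesses as wrapper->netfs.inode, which accounts for the mechanical bulk of this diff.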
Some files were not shown because too many files have changed in this diff.