Merge 6.1.50 into android14-6.1-lts

Changes in 6.1.50
	NFSv4.2: fix error handling in nfs42_proc_getxattr
	NFSv4: fix out path in __nfs4_get_acl_uncached
	xprtrdma: Remap Receive buffers after a reconnect
	drm/ast: Use drm_aperture_remove_conflicting_pci_framebuffers
	fbdev/radeon: use pci aperture helpers
	drm/gma500: Use drm_aperture_remove_conflicting_pci_framebuffers
	drm/aperture: Remove primary argument
	video/aperture: Only kick vgacon when the pdev is decoding vga
	video/aperture: Move vga handling to pci function
	PCI: acpiphp: Reassign resources on bridge if necessary
	MIPS: cpu-features: Enable octeon_cache by cpu_type
	MIPS: cpu-features: Use boot_cpu_type for CPU type based features
	jbd2: remove t_checkpoint_io_list
	jbd2: remove journal_clean_one_cp_list()
	jbd2: fix a race when checking checkpoint buffer busy
	can: raw: fix receiver memory leak
	can: raw: fix lockdep issue in raw_release()
	s390/zcrypt: remove unnecessary (void *) conversions
	s390/zcrypt: fix reply buffer calculations for CCA replies
	drm/i915: Add the gen12_needs_ccs_aux_inv helper
	drm/i915/gt: Ensure memory quiesced before invalidation
	drm/i915/gt: Poll aux invalidation register bit on invalidation
	drm/i915/gt: Support aux invalidation on all engines
	tracing: Fix cpu buffers unavailable due to 'record_disabled' missed
	tracing: Fix memleak due to race between current_tracer and trace
	octeontx2-af: SDP: fix receive link config
	devlink: move code to a dedicated directory
	devlink: add missing unregister linecard notification
	net: dsa: felix: fix oversize frame dropping for always closed tc-taprio gates
	sock: annotate data-races around prot->memory_pressure
	dccp: annotate data-races in dccp_poll()
	ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
	mlxsw: pci: Set time stamp fields also when its type is MIRROR_UTC
	mlxsw: reg: Fix SSPR register layout
	mlxsw: Fix the size of 'VIRT_ROUTER_MSB'
	selftests: mlxsw: Fix test failure on Spectrum-4
	net: dsa: mt7530: fix handling of 802.1X PAE frames
	net: bgmac: Fix return value check for fixed_phy_register()
	net: bcmgenet: Fix return value check for fixed_phy_register()
	net: validate veth and vxcan peer ifindexes
	ipv4: fix data-races around inet->inet_id
	ice: fix receive buffer size miscalculation
	Revert "ice: Fix ice VF reset during iavf initialization"
	ice: Fix NULL pointer deref during VF reset
	selftests: bonding: do not set port down before adding to bond
	can: isotp: fix support for transmission of SF without flow control
	igb: Avoid starting unnecessary workqueues
	igc: Fix the typo in the PTM Control macro
	net/sched: fix a qdisc modification with ambiguous command request
	i40e: fix potential NULL pointer dereferencing of pf->vf i40e_sync_vsi_filters()
	netfilter: nf_tables: flush pending destroy work before netlink notifier
	netfilter: nf_tables: fix out of memory error handling
	rtnetlink: Reject negative ifindexes in RTM_NEWLINK
	bonding: fix macvlan over alb bond support
	KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated
	KVM: x86/mmu: Fix an sign-extension bug with mmu_seq that hangs vCPUs
	io_uring: get rid of double locking
	io_uring: extract a io_msg_install_complete helper
	io_uring/msg_ring: move double lock/unlock helpers higher up
	io_uring/msg_ring: fix missing lock on overflow for IOPOLL
	ASoC: amd: yc: Add VivoBook Pro 15 to quirks list for acp6x
	ASoC: cs35l41: Correct amp_gain_tlv values
	ibmveth: Use dcbf rather than dcbfl
	wifi: mac80211: limit reorder_buf_filtered to avoid UBSAN warning
	platform/x86: ideapad-laptop: Add support for new hotkeys found on ThinkBook 14s Yoga ITL
	NFSv4: Fix dropped lock for racing OPEN and delegation return
	clk: Fix slab-out-of-bounds error in devm_clk_release()
	mm,ima,kexec,of: use memblock_free_late from ima_free_kexec_buffer
	shmem: fix smaps BUG sleeping while atomic
	ALSA: ymfpci: Fix the missing snd_card_free() call at probe error
	mm/gup: handle cont-PTE hugetlb pages correctly in gup_must_unshare() via GUP-fast
	mm: add a call to flush_cache_vmap() in vmap_pfn()
	mm: memory-failure: fix unexpected return value in soft_offline_page()
	NFS: Fix a use after free in nfs_direct_join_group()
	nfsd: Fix race to FREE_STATEID and cl_revoked
	selinux: set next pointer before attaching to list
	batman-adv: Trigger events for auto adjusted MTU
	batman-adv: Don't increase MTU when set by user
	batman-adv: Do not get eth header before batadv_check_management_packet
	batman-adv: Fix TT global entry leak when client roamed back
	batman-adv: Fix batadv_v_ogm_aggr_send memory leak
	batman-adv: Hold rtnl lock during MTU update via netlink
	lib/clz_ctz.c: Fix __clzdi2() and __ctzdi2() for 32-bit kernels
	riscv: Handle zicsr/zifencei issue between gcc and binutils
	riscv: Fix build errors using binutils2.37 toolchains
	radix tree: remove unused variable
	of: unittest: Fix EXPECT for parse_phandle_with_args_map() test
	of: dynamic: Refactor action prints to not use "%pOF" inside devtree_lock
	pinctrl: amd: Mask wake bits on probe again
	media: vcodec: Fix potential array out-of-bounds in encoder queue_setup
	PCI: acpiphp: Use pci_assign_unassigned_bridge_resources() only for non-root bus
	drm/vmwgfx: Fix shader stage validation
	drm/i915/dgfx: Enable d3cold at s2idle
	drm/display/dp: Fix the DP DSC Receiver cap size
	x86/fpu: Invalidate FPU state correctly on exec()
	x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4
	hwmon: (aquacomputer_d5next) Add selective 200ms delay after sending ctrl report
	selftests/net: mv bpf/nat6to4.c to net folder
	nfs: use vfs setgid helper
	nfsd: use vfs setgid helper
	cgroup/cpuset: Rename functions dealing with DEADLINE accounting
	sched/cpuset: Bring back cpuset_mutex
	sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets
	cgroup/cpuset: Iterate only if DEADLINE tasks are present
	sched/deadline: Create DL BW alloc, free & check overflow interface
	cgroup/cpuset: Free DL BW in case can_attach() fails
	thunderbolt: Fix Thunderbolt 3 display flickering issue on 2nd hot plug onwards
	ublk: remove check IO_URING_F_SQE128 in ublk_ch_uring_cmd
	can: raw: add missing refcount for memory leak fix
	madvise:madvise_free_pte_range(): don't use mapcount() against large folio for sharing check
	scsi: snic: Fix double free in snic_tgt_create()
	scsi: core: raid_class: Remove raid_component_add()
	clk: Fix undefined reference to `clk_rate_exclusive_{get,put}'
	pinctrl: renesas: rzg2l: Fix NULL pointer dereference in rzg2l_dt_subnode_to_map()
	pinctrl: renesas: rzv2m: Fix NULL pointer dereference in rzv2m_dt_subnode_to_map()
	pinctrl: renesas: rza2: Add lock around pinctrl_generic{{add,remove}_group,{add,remove}_function}
	dma-buf/sw_sync: Avoid recursive lock during fence signal
	gpio: sim: dispose of irq mappings before destroying the irq_sim domain
	gpio: sim: pass the GPIO device's software node to irq domain
	ASoC: amd: yc: Fix a non-functional mic on Lenovo 82SJ
	maple_tree: disable mas_wr_append() when other readers are possible
	ASoC: amd: vangogh: select CONFIG_SND_AMD_ACP_CONFIG
	Linux 6.1.50

Change-Id: I9b8e3da5baa106b08b2b90974c19128141817580
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2023-09-02 20:40:21 +00:00
commit 0910193fd6
157 changed files with 1374 additions and 1002 deletions


@ -6034,7 +6034,7 @@ S: Supported
F: Documentation/networking/devlink
F: include/net/devlink.h
F: include/uapi/linux/devlink.h
F: net/core/devlink.c
F: net/devlink/
DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 49
SUBLEVEL = 50
EXTRAVERSION =
NAME = Curry Ramen


@ -121,7 +121,24 @@
#define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache 0
#define cpu_has_octeon_cache \
({ \
int __res; \
\
switch (boot_cpu_type()) { \
case CPU_CAVIUM_OCTEON: \
case CPU_CAVIUM_OCTEON_PLUS: \
case CPU_CAVIUM_OCTEON2: \
case CPU_CAVIUM_OCTEON3: \
__res = 1; \
break; \
\
default: \
__res = 0; \
} \
\
__res; \
})
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
@ -351,7 +368,7 @@
({ \
int __res; \
\
switch (current_cpu_type()) { \
switch (boot_cpu_type()) { \
case CPU_M14KC: \
case CPU_74K: \
case CPU_1074K: \


@ -448,24 +448,30 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
def_bool y
# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
depends on AS_IS_GNU && AS_VERSION >= 23800
# https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd
depends on AS_IS_GNU && AS_VERSION >= 23600
help
Newer binutils versions default to ISA spec version 20191213 which
moves some instructions from the I extension to the Zicsr and Zifencei
extensions.
Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer
20191213 version, which moves some instructions from the I extension to
the Zicsr and Zifencei extensions. This requires explicitly specifying
Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr
and Zifencei are supported in binutils from version 2.36 onwards.
To make life easier, and avoid forcing toolchains that default to a
newer ISA spec to version 2.2, relax the check to binutils >= 2.36.
For clang < 17 or GCC < 11.3.0, for which this is not possible or need
special treatment, this is dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC.
config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
def_bool y
depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
depends on CC_IS_CLANG && CLANG_VERSION < 170000
# https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671
depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300)
help
Certain versions of clang do not support zicsr and zifencei via -march
but newer versions of binutils require it for the reasons noted in the
help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
option causes an older ISA spec compatible with these older versions
of clang to be passed to GAS, which has the same result as passing zicsr
and zifencei to -march.
Certain versions of clang and GCC do not support zicsr and zifencei via
-march. This option causes an older ISA spec compatible with these older
versions of clang and GCC to be passed to GAS, which has the same result
as passing zicsr and zifencei to -march.
config FPU
bool "FPU support"


@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache
COMPAT_CC := $(CC)
COMPAT_LD := $(LD)
COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
# binutils 2.35 does not support the zifencei extension, but in the ISA
# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI.
ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
else
COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32
endif
COMPAT_LD_FLAGS := -melf32lriscv
# Disable attributes, as they're useless and break the build.


@ -19,8 +19,7 @@
* FPU state for a task MUST let the rest of the kernel know that the
* FPU registers are no longer valid for this task.
*
* Either one of these invalidation functions is enough. Invalidate
* a resource you control: CPU if using the CPU for something else
* Invalidate a resource you control: CPU if using the CPU for something else
* (with preemption disabled), FPU for the current task, or a task that
* is prevented from running by the current task.
*/


@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void)
struct fpu *fpu = &current->thread.fpu;
fpregs_lock();
fpu__drop(fpu);
__fpu_invalidate_fpregs_state(fpu);
/*
* This does not change the actual hardware registers. It just
* resets the memory image and sets TIF_NEED_FPU_LOAD so a


@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
goto out_disable;
}
/*
* CPU capabilities initialization runs before FPU init. So
* X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
* functional, set the feature bit so depending code works.
*/
setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
fpu_kernel_cfg.max_features,


@ -4212,7 +4212,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* root was invalidated by a memslot update or a relevant mmu_notifier fired.
*/
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault, int mmu_seq)
struct kvm_page_fault *fault,
unsigned long mmu_seq)
{
struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);


@ -51,7 +51,17 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
if (!kvm->arch.tdp_mmu_enabled)
return;
/* Also waits for any queued work items. */
/*
* Invalidate all roots, which besides the obvious, schedules all roots
* for zapping and thus puts the TDP MMU's reference to each root, i.e.
* ultimately frees all roots.
*/
kvm_tdp_mmu_invalidate_all_roots(kvm);
/*
* Destroying a workqueue also first flushes the workqueue, i.e. no
* need to invoke kvm_tdp_mmu_zap_invalidated_roots().
*/
destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
@ -127,16 +137,6 @@ static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root
queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
}
static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
{
union kvm_mmu_page_role role = page->role;
role.invalid = true;
/* No need to use cmpxchg, only the invalid bit can change. */
role.word = xchg(&page->role.word, role.word);
return role.invalid;
}
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared)
{
@ -145,45 +145,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
return;
WARN_ON(!root->tdp_mmu_page);
/*
* The root now has refcount=0. It is valid, but readers already
* cannot acquire a reference to it because kvm_tdp_mmu_get_root()
* rejects it. This remains true for the rest of the execution
* of this function, because readers visit valid roots only
* (except for tdp_mmu_zap_root_work(), which however
* does not acquire any reference itself).
*
* Even though there are flows that need to visit all roots for
* correctness, they all take mmu_lock for write, so they cannot yet
* run concurrently. The same is true after kvm_tdp_root_mark_invalid,
* since the root still has refcount=0.
*
* However, tdp_mmu_zap_root can yield, and writers do not expect to
* see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
* So the root temporarily gets an extra reference, going to refcount=1
* while staying invalid. Readers still cannot acquire any reference;
* but writers are now allowed to run if tdp_mmu_zap_root yields and
* they might take an extra reference if they themselves yield.
* Therefore, when the reference is given back by the worker,
* there is no guarantee that the refcount is still 1. If not, whoever
* puts the last reference will free the page, but they will not have to
* zap the root because a root cannot go from invalid to valid.
* The TDP MMU itself holds a reference to each root until the root is
* explicitly invalidated, i.e. the final reference should be never be
* put for a valid root.
*/
if (!kvm_tdp_root_mark_invalid(root)) {
refcount_set(&root->tdp_mmu_root_count, 1);
/*
* Zapping the root in a worker is not just "nice to have";
* it is required because kvm_tdp_mmu_invalidate_all_roots()
* skips already-invalid roots. If kvm_tdp_mmu_put_root() did
* not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
* might return with some roots not zapped yet.
*/
tdp_mmu_schedule_zap_root(kvm, root);
return;
}
KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_del_rcu(&root->link);
@ -329,7 +296,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
root = tdp_mmu_alloc_sp(vcpu);
tdp_mmu_init_sp(root, NULL, 0, role);
refcount_set(&root->tdp_mmu_root_count, 1);
/*
* TDP MMU roots are kept until they are explicitly invalidated, either
* by a memslot update or by the destruction of the VM. Initialize the
* refcount to two; one reference for the vCPU, and one reference for
* the TDP MMU itself, which is held until the root is invalidated and
* is ultimately put by tdp_mmu_zap_root_work().
*/
refcount_set(&root->tdp_mmu_root_count, 2);
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
@ -1027,32 +1001,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
/*
* Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
* is about to be zapped, e.g. in response to a memslots update. The actual
* zapping is performed asynchronously, so a reference is taken on all roots.
* Using a separate workqueue makes it easy to ensure that the destruction is
* performed before the "fast zap" completes, without keeping a separate list
* of invalidated roots; the list is effectively the list of work items in
* the workqueue.
* zapping is performed asynchronously. Using a separate workqueue makes it
* easy to ensure that the destruction is performed before the "fast zap"
* completes, without keeping a separate list of invalidated roots; the list is
* effectively the list of work items in the workqueue.
*
* Get a reference even if the root is already invalid, the asynchronous worker
* assumes it was gifted a reference to the root it processes. Because mmu_lock
* is held for write, it should be impossible to observe a root with zero refcount,
* i.e. the list of roots cannot be stale.
*
* This has essentially the same effect for the TDP MMU
* as updating mmu_valid_gen does for the shadow MMU.
* Note, the asynchronous worker is gifted the TDP MMU's reference.
* See kvm_tdp_mmu_get_vcpu_root_hpa().
*/
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
struct kvm_mmu_page *root;
lockdep_assert_held_write(&kvm->mmu_lock);
list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
if (!root->role.invalid &&
!WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
/*
* mmu_lock must be held for write to ensure that a root doesn't become
* invalid while there are active readers (invalidating a root while
* there are active readers may or may not be problematic in practice,
* but it's uncharted territory and not supported).
*
* Waive the assertion if there are no users of @kvm, i.e. the VM is
* being destroyed after all references have been put, or if no vCPUs
* have been created (which means there are no roots), i.e. the VM is
* being destroyed in an error path of KVM_CREATE_VM.
*/
if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
refcount_read(&kvm->users_count) && kvm->created_vcpus)
lockdep_assert_held_write(&kvm->mmu_lock);
/*
* As above, mmu_lock isn't held when destroying the VM! There can't
* be other references to @kvm, i.e. nothing else can invalidate roots
* or be consuming roots, but walking the list of roots does need to be
* guarded against roots being deleted by the asynchronous zap worker.
*/
rcu_read_lock();
list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
if (!root->role.invalid) {
root->role.invalid = true;
tdp_mmu_schedule_zap_root(kvm, root);
}
}
rcu_read_unlock();
}
/*


@ -1223,9 +1223,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
if (!(issue_flags & IO_URING_F_SQE128))
goto out;
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;


@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
struct clk **ptr, *clk;
struct devm_clk_state *state;
struct clk *clk;
ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
clk = of_clk_get_by_name(np, con_id);
if (!IS_ERR(clk)) {
*ptr = clk;
devres_add(dev, ptr);
state->clk = clk;
devres_add(dev, state);
} else {
devres_free(ptr);
devres_free(state);
}
return clk;


@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
LIST_HEAD(signalled);
struct sync_pt *pt, *next;
trace_sync_timeline(obj);
@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;
list_del_init(&pt->link);
dma_fence_get(&pt->base);
list_move_tail(&pt->link, &signalled);
rb_erase(&pt->node, &obj->pt_tree);
/*
* A signal callback may release the last reference to this
* fence, causing it to be freed. That operation has to be
* last to avoid a use after free inside this loop, and must
* be after we remove the fence from the timeline in order to
* prevent deadlocking on timeline->lock inside
* timeline_fence_release().
*/
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
list_for_each_entry_safe(pt, next, &signalled, link) {
list_del_init(&pt->link);
dma_fence_put(&pt->base);
}
}
/**


@ -290,6 +290,15 @@ static void gpio_sim_mutex_destroy(void *data)
mutex_destroy(lock);
}
static void gpio_sim_dispose_mappings(void *data)
{
struct gpio_sim_chip *chip = data;
unsigned int i;
for (i = 0; i < chip->gc.ngpio; i++)
irq_dispose_mapping(irq_find_mapping(chip->irq_sim, i));
}
static void gpio_sim_sysfs_remove(void *data)
{
struct gpio_sim_chip *chip = data;
@ -398,10 +407,14 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (!chip->pull_map)
return -ENOMEM;
chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines);
chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines);
if (IS_ERR(chip->irq_sim))
return PTR_ERR(chip->irq_sim);
ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip);
if (ret)
return ret;
mutex_init(&chip->lock);
ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy,
&chip->lock);


@ -290,7 +290,7 @@ static int hdlcd_drm_bind(struct device *dev)
*/
if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
drm_aperture_remove_framebuffers(false, &hdlcd_driver);
drm_aperture_remove_framebuffers(&hdlcd_driver);
}
drm_mode_config_reset(drm);


@ -95,7 +95,7 @@ static int armada_drm_bind(struct device *dev)
}
/* Remove early framebuffers */
ret = drm_aperture_remove_framebuffers(false, &armada_drm_driver);
ret = drm_aperture_remove_framebuffers(&armada_drm_driver);
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);


@ -89,27 +89,13 @@ static const struct pci_device_id ast_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
{
bool primary = false;
resource_size_t base, size;
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
return drm_aperture_remove_conflicting_framebuffers(base, size, primary, &ast_driver);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct ast_private *ast;
struct drm_device *dev;
int ret;
ret = ast_remove_conflicting_framebuffers(pdev);
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver);
if (ret)
return ret;


@ -32,17 +32,13 @@
*
* static int remove_conflicting_framebuffers(struct pci_dev *pdev)
* {
* bool primary = false;
* resource_size_t base, size;
* int ret;
*
* base = pci_resource_start(pdev, 0);
* size = pci_resource_len(pdev, 0);
* #ifdef CONFIG_X86
* primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
* #endif
*
* return drm_aperture_remove_conflicting_framebuffers(base, size, primary,
* return drm_aperture_remove_conflicting_framebuffers(base, size,
* &example_driver);
* }
*
@ -161,7 +157,6 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
* drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
* @base: the aperture's base address in physical memory
* @size: aperture size in bytes
* @primary: also kick vga16fb if present
* @req_driver: requesting DRM driver
*
* This function removes graphics device drivers which use the memory range described by
@ -171,9 +166,9 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
* 0 on success, or a negative errno code otherwise
*/
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
bool primary, const struct drm_driver *req_driver)
const struct drm_driver *req_driver)
{
return aperture_remove_conflicting_devices(base, size, primary, req_driver->name);
return aperture_remove_conflicting_devices(base, size, false, req_driver->name);
}
EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);


@ -424,12 +424,17 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*
* We cannot yet easily find the framebuffer's location in memory. So
* remove all framebuffers here.
* remove all framebuffers here. Note that we still want the pci special
* handling to kick out vgacon.
*
* TODO: Refactor psb_driver_load() to map vdc_reg earlier. Then we
* might be able to read the framebuffer range from the device.
*/
ret = drm_aperture_remove_framebuffers(true, &driver);
ret = drm_aperture_remove_framebuffers(&driver);
if (ret)
return ret;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
if (ret)
return ret;


@ -74,7 +74,6 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv,
drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base,
screen_info.lfb_size,
false,
&hyperv_driver);
hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024;


@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
u32 gsi_offset = gt->uncore->gsi_offset;
switch (engine->id) {
case RCS0:
return GEN12_CCS_AUX_INV;
case BCS0:
return GEN12_BCS0_AUX_INV;
case VCS0:
return GEN12_VD0_AUX_INV;
case VCS2:
return GEN12_VD2_AUX_INV;
case VECS0:
return GEN12_VE0_AUX_INV;
case CCS0:
return GEN12_CCS0_AUX_INV;
default:
return INVALID_MMIO_REG;
}
}
static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
if (IS_PONTEVECCHIO(engine->i915))
return false;
/*
* So far platforms supported by i915 having flat ccs do not require
* AUX invalidation. Check also whether the engine requires it.
*/
return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
}
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
u32 gsi_offset = engine->gt->uncore->gsi_offset;
if (!gen12_needs_ccs_aux_inv(engine))
return cs;
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
MI_SEMAPHORE_REGISTER_POLL |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = 0;
*cs++ = 0;
return cs;
}
@ -181,7 +227,11 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
struct intel_engine_cs *engine = rq->engine;
if (mode & EMIT_FLUSH) {
/*
* On Aux CCS platforms the invalidation of the Aux
* table requires quiescing memory traffic beforehand
*/
if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
u32 flags = 0;
u32 *cs;
@ -236,10 +286,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
else if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
if (!HAS_FLAT_CCS(rq->engine->i915))
count = 8 + 4;
else
count = 8;
count = 8;
if (gen12_needs_ccs_aux_inv(rq->engine))
count += 8;
cs = intel_ring_begin(rq, count);
if (IS_ERR(cs))
@ -254,11 +303,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
cs = gen12_emit_aux_table_inv(rq->engine->gt, cs,
GEN12_CCS_AUX_INV);
}
cs = gen12_emit_aux_table_inv(engine, cs);
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
@ -269,21 +314,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
intel_engine_mask_t aux_inv = 0;
u32 cmd, *cs;
u32 cmd = 4;
u32 *cs;
cmd = 4;
if (mode & EMIT_INVALIDATE) {
cmd += 2;
if (!HAS_FLAT_CCS(rq->engine->i915) &&
(rq->engine->class == VIDEO_DECODE_CLASS ||
rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
aux_inv = rq->engine->mask &
~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
if (aux_inv)
cmd += 4;
}
if (gen12_needs_ccs_aux_inv(rq->engine))
cmd += 8;
}
cs = intel_ring_begin(rq, cmd);
@ -314,14 +352,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_VD0_AUX_INV);
else
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_VE0_AUX_INV);
}
cs = gen12_emit_aux_table_inv(rq->engine, cs);
if (mode & EMIT_INVALIDATE)
*cs++ = preparser_disable(false);


@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
struct intel_engine_cs;
struct intel_gt;
struct i915_request;
@ -46,7 +47,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)


@ -120,6 +120,7 @@
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_REGISTER_POLL (1 << 16)
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)


@ -1296,10 +1296,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
IS_DG2_G11(ce->engine->i915))
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915))
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_CCS_AUX_INV);
cs = gen12_emit_aux_table_inv(ce->engine, cs);
/* Wa_16014892111 */
if (IS_DG2(ce->engine->i915))
@ -1322,17 +1319,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
0);
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915)) {
if (ce->engine->class == VIDEO_DECODE_CLASS)
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_VD0_AUX_INV);
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_VE0_AUX_INV);
}
return cs;
return gen12_emit_aux_table_inv(ce->engine, cs);
}
static void


@ -574,7 +574,6 @@ static int i915_pcode_init(struct drm_i915_private *i915)
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
int ret;
if (i915_inject_probe_failure(dev_priv))
@ -686,15 +685,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_bw_init_hw(dev_priv);
/*
* FIXME: Temporary hammer to avoid freezing the machine on our DGFX
* This should be totally removed when we handle the pci states properly
* on runtime PM and on s2idle cases.
*/
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_disable(root_pdev);
return 0;
err_msi:
@ -718,16 +708,11 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
i915_perf_fini(dev_priv);
if (pdev->msi_enabled)
pci_disable_msi(pdev);
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_enable(root_pdev);
}
/**
@ -1625,6 +1610,8 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
struct intel_gt *gt;
int ret, i;
@ -1674,6 +1661,15 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
/*
* FIXME: Temporary hammer to avoid freezing the machine on our DGFX
* This should be totally removed when we handle the pci states properly
* on runtime PM.
*/
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_disable(root_pdev);
rpm->suspended = true;
/*
@ -1712,6 +1708,8 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct pci_dev *root_pdev;
struct intel_gt *gt;
int ret, i;
@ -1725,6 +1723,11 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
root_pdev = pcie_find_root_port(pdev);
if (root_pdev)
pci_d3cold_enable(root_pdev);
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");


@ -285,7 +285,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
* Remove early framebuffers (ie. simplefb). The framebuffer can be
* located anywhere in RAM
*/
ret = drm_aperture_remove_framebuffers(false, &meson_driver);
ret = drm_aperture_remove_framebuffers(&meson_driver);
if (ret)
goto free_drm;


@ -157,7 +157,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
}
/* the fw fb could be anywhere in memory */
ret = drm_aperture_remove_framebuffers(false, dev->driver);
ret = drm_aperture_remove_framebuffers(dev->driver);
if (ret)
goto fini;


@ -140,7 +140,7 @@ static int rockchip_drm_bind(struct device *dev)
int ret;
/* Remove existing drivers that may own the framebuffer memory. */
ret = drm_aperture_remove_framebuffers(false, &rockchip_drm_driver);
ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver);
if (ret) {
DRM_DEV_ERROR(dev,
"Failed to remove existing framebuffers - %d.\n",


@ -185,7 +185,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
DRM_DEBUG("%s\n", __func__);
ret = drm_aperture_remove_framebuffers(false, &drv_driver);
ret = drm_aperture_remove_framebuffers(&drv_driver);
if (ret)
return ret;


@ -98,7 +98,7 @@ static int sun4i_drv_bind(struct device *dev)
goto unbind_all;
/* Remove early framebuffers (ie. simplefb) */
ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver);
if (ret)
goto unbind_all;


@ -1252,7 +1252,7 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver);
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;


@ -351,7 +351,7 @@ static int vc4_drm_bind(struct device *dev)
return -EPROBE_DEFER;
}
ret = drm_aperture_remove_framebuffers(false, driver);
ret = drm_aperture_remove_framebuffers(driver);
if (ret)
return ret;


@ -1683,4 +1683,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}
static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
u32 shader_type)
{
SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
if (shader_model >= VMW_SM_5)
max_allowed = SVGA3D_SHADERTYPE_MAX;
else if (shader_model >= VMW_SM_4)
max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
}
#endif


@ -1985,7 +1985,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
@ -2108,8 +2108,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
@ -2126,6 +2124,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
(unsigned int) cmd->body.slot);
return -EINVAL;
}
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
@ -2134,14 +2140,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
if (binding.shader_slot >= max_shader_num ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
(unsigned int) cmd->body.type,
(unsigned int) binding.slot);
return -EINVAL;
}
vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
binding.slot);
@ -2200,15 +2198,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
container_of(header, typeof(*cmd), header);
SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= max_allowed) {
!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@ -2232,8 +2228,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
@ -2244,8 +2238,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= max_allowed ||
cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;


@ -12,9 +12,11 @@
#include <linux/crc16.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/hid.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
@ -49,6 +51,8 @@ static const char *const aqc_device_names[] = {
#define CTRL_REPORT_ID 0x03
#define CTRL_REPORT_DELAY 200 /* ms */
/* The HID report that the official software always sends
* after writing values, currently same for all devices
*/
@ -269,6 +273,9 @@ struct aqc_data {
enum kinds kind;
const char *name;
ktime_t last_ctrl_report_op;
int ctrl_report_delay; /* Delay between two ctrl report operations, in ms */
int buffer_size;
u8 *buffer;
int checksum_start;
@ -325,17 +332,35 @@ static int aqc_pwm_to_percent(long val)
return DIV_ROUND_CLOSEST(val * 100 * 100, 255);
}
static void aqc_delay_ctrl_report(struct aqc_data *priv)
{
/*
* If previous read or write is too close to this one, delay the current operation
* to give the device enough time to process the previous one.
*/
if (priv->ctrl_report_delay) {
s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op);
if (delta < priv->ctrl_report_delay)
msleep(priv->ctrl_report_delay - delta);
}
}
/* Expects the mutex to be locked */
static int aqc_get_ctrl_data(struct aqc_data *priv)
{
int ret;
aqc_delay_ctrl_report(priv);
memset(priv->buffer, 0x00, priv->buffer_size);
ret = hid_hw_raw_request(priv->hdev, CTRL_REPORT_ID, priv->buffer, priv->buffer_size,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
if (ret < 0)
ret = -ENODATA;
priv->last_ctrl_report_op = ktime_get();
return ret;
}
@ -345,6 +370,8 @@ static int aqc_send_ctrl_data(struct aqc_data *priv)
int ret;
u16 checksum;
aqc_delay_ctrl_report(priv);
/* Init and xorout value for CRC-16/USB is 0xffff */
checksum = crc16(0xffff, priv->buffer + priv->checksum_start, priv->checksum_length);
checksum ^= 0xffff;
@ -356,12 +383,16 @@ static int aqc_send_ctrl_data(struct aqc_data *priv)
ret = hid_hw_raw_request(priv->hdev, CTRL_REPORT_ID, priv->buffer, priv->buffer_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret < 0)
return ret;
goto record_access_and_ret;
/* The official software sends this report after every change, so do it here as well */
ret = hid_hw_raw_request(priv->hdev, SECONDARY_CTRL_REPORT_ID, secondary_ctrl_report,
SECONDARY_CTRL_REPORT_SIZE, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
record_access_and_ret:
priv->last_ctrl_report_op = ktime_get();
return ret;
}
@ -853,6 +884,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES;
priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE;
priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->temp_label = label_d5next_temp;
priv->virtual_temp_label = label_virtual_temp_sensors;
@ -893,6 +925,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->temp_label = label_temp_sensors;
priv->virtual_temp_label = label_virtual_temp_sensors;
@ -913,6 +946,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
priv->ctrl_report_delay = CTRL_REPORT_DELAY;
priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET;
priv->temp_label = label_temp_sensors;


@ -821,6 +821,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
return -EINVAL;
if (*nplanes) {
if (*nplanes != q_data->fmt->num_planes)
return -EINVAL;
for (i = 0; i < *nplanes; i++)
if (sizes[i] < q_data->sizeimage[i])
return -EINVAL;


@ -660,10 +660,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
return NULL;
arp = (struct arp_pkt *)skb_network_header(skb);
/* Don't modify or load balance ARPs that do not originate locally
* (e.g.,arrive via a bridge).
/* Don't modify or load balance ARPs that do not originate
* from the bond itself or a VLAN directly above the bond.
*/
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
return NULL;
dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);


@ -192,12 +192,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
nla_peer = data[VXCAN_INFO_PEER];
ifmp = nla_data(nla_peer);
err = rtnl_nla_parse_ifla(peer_tb,
nla_data(nla_peer) +
sizeof(struct ifinfomsg),
nla_len(nla_peer) -
sizeof(struct ifinfomsg),
NULL);
err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
if (err < 0)
return err;


@ -1005,6 +1005,10 @@ mt753x_trap_frames(struct mt7530_priv *priv)
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
MT753X_BPDU_CPU_ONLY);
/* Trap 802.1X PAE frames to the CPU port(s) */
mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
/* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));


@ -64,6 +64,8 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c


@ -1071,6 +1071,9 @@ static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
if (gate_len_ns == U64_MAX)
return U64_MAX;
if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS)
return 0;
return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
}


@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
int err;
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
if (IS_ERR(phy_dev)) {
dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
}


@ -608,7 +608,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
};
phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
if (!phydev || IS_ERR(phydev)) {
if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register fixed PHY device\n");
return -ENODEV;
}


@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
inet_sk(sk)->inet_id = get_random_u16();
atomic_set(&inet_sk(sk)->inet_id, get_random_u16());
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))


@ -203,7 +203,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
unsigned long offset;
for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
}
/* replenish the buffers for a pool. note that we don't need to


@ -2615,7 +2615,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_correct_mac_vlan_filters
(vsi, &tmp_add_list, &tmp_del_list,
vlan_filters);
else
else if (pf->vf)
retval = i40e_correct_vf_mac_vlan_filters
(vsi, &tmp_add_list, &tmp_del_list,
vlan_filters, pf->vf[vsi->vf_id].trusted);
@ -2788,7 +2788,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
/* if the VF is not trusted do not do promisc */
if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
!pf->vf[vsi->vf_id].trusted) {
clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
goto out;
}


@ -396,7 +396,8 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Receive Packet Data Buffer Size.
* The Packet Data Buffer Size is defined in 128 byte units.
*/
rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
BIT_ULL(ICE_RLAN_CTX_DBUF_S));
/* use 32 byte descriptors */
rlan_ctx.dsize = 1;


@ -1240,7 +1240,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
if (!vf)
return -EINVAL;
ret = ice_check_vf_ready_for_reset(vf);
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
@ -1355,7 +1355,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto out_put_vf;
}
ret = ice_check_vf_ready_for_reset(vf);
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
@ -1409,7 +1409,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
return -EOPNOTSUPP;
}
ret = ice_check_vf_ready_for_reset(vf);
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;
@ -1722,7 +1722,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (!vf)
return -EINVAL;
ret = ice_check_vf_ready_for_reset(vf);
ret = ice_check_vf_ready_for_cfg(vf);
if (ret)
goto out_put_vf;


@ -185,25 +185,6 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
return 0;
}
/**
* ice_check_vf_ready_for_reset - check if VF is ready to be reset
* @vf: VF to check if it's ready to be reset
*
* The purpose of this function is to ensure that the VF is not in reset,
* disabled, and is both initialized and active, thus enabling us to safely
* initialize another reset.
*/
int ice_check_vf_ready_for_reset(struct ice_vf *vf)
{
int ret;
ret = ice_check_vf_ready_for_cfg(vf);
if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
ret = -EAGAIN;
return ret;
}
/**
* ice_trigger_vf_reset - Reset a VF on HW
* @vf: pointer to the VF structure
@ -588,11 +569,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
lockdep_assert_held(&vf->cfg_lock);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
if (!vsi) {
dev_dbg(dev, "VF is already removed\n");
return -EINVAL;
err = -EINVAL;
goto out_unlock;
}
ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
@ -601,14 +588,9 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
return 0;
goto out_unlock;
}
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
lockdep_assert_held(&vf->cfg_lock);
/* Set VF disable bit state here, before triggering reset */
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);


@ -214,7 +214,6 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
int ice_check_vf_ready_for_reset(struct ice_vf *vf);
void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void


@ -3722,7 +3722,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:


@ -1404,18 +1404,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
return;
}
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
igb_ptp_overflow_check);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
igb_ptp_reset(adapter);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@ -1425,6 +1413,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
igb_ptp_overflow_check);
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
igb_ptp_reset(adapter);
}
}


@ -536,7 +536,7 @@
#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */
#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */
#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2)
#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2)
#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8)
#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */


@ -4005,9 +4005,10 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (link < 0)
return NIX_AF_ERR_RX_LINK_INVALID;
nix_find_link_frs(rvu, req, pcifunc);
linkcfg:
nix_find_link_frs(rvu, req, pcifunc);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
if (req->update_minlen)


@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3),
MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8),
MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4),
MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),


@ -517,11 +517,15 @@ static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
struct sk_buff *skb,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
u8 ts_type;
if (cqe_v != MLXSW_PCI_CQE_V2)
return;
if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) !=
MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC)
ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);
if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
return;
mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);


@ -97,14 +97,6 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
*/
MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12);
/* reg_sspr_sub_port
* Virtual port within the physical port.
* Should be set to 0 when virtual ports are not enabled on the port.
*
* Access: RW
*/
MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
/* reg_sspr_system_port
* Unique identifier within the stacking domain that represents all the ports
* that are available in the system (external ports).
@ -120,7 +112,6 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port)
MLXSW_REG_ZERO(sspr, payload);
mlxsw_reg_sspr_m_set(payload, 1);
mlxsw_reg_sspr_local_port_set(payload, local_port);
mlxsw_reg_sspr_sub_port_set(payload, 0);
mlxsw_reg_sspr_system_port_set(payload, local_port);
}


@ -193,7 +193,7 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
key->vrid, GENMASK(7, 0));
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
key->vrid >> 8, GENMASK(2, 0));
key->vrid >> 8, GENMASK(3, 0));
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);


@ -169,7 +169,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3),
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
@ -319,7 +319,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true),
MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {


@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
write_pnet(&port->pnet, newnet);
ipvlan_migrate_l3s_hook(oldnet, newnet);
if (port->mode == IPVLAN_MODE_L3S)
ipvlan_migrate_l3s_hook(oldnet, newnet);
break;
}
case NETDEV_UNREGISTER:


@ -1716,10 +1716,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
err = rtnl_nla_parse_ifla(peer_tb,
nla_data(nla_peer) + sizeof(struct ifinfomsg),
nla_len(nla_peer) - sizeof(struct ifinfomsg),
NULL);
err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
if (err < 0)
return err;


@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
#ifdef DEBUG
const char *action_names[] = {
static const char *action_names[] = {
[0] = "INVALID",
[OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE",
[OF_RECONFIG_DETACH_NODE] = "DETACH_NODE",
[OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY",
[OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY",
[OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY",
};
#endif
int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
{
@ -594,21 +593,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
}
ret = __of_add_property(ce->np, ce->prop);
if (ret) {
pr_err("changeset: add_property failed @%pOF/%s\n",
ce->np,
ce->prop->name);
break;
}
break;
case OF_RECONFIG_REMOVE_PROPERTY:
ret = __of_remove_property(ce->np, ce->prop);
if (ret) {
pr_err("changeset: remove_property failed @%pOF/%s\n",
ce->np,
ce->prop->name);
break;
}
break;
case OF_RECONFIG_UPDATE_PROPERTY:
@ -622,20 +609,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
}
ret = __of_update_property(ce->np, ce->prop, &old_prop);
if (ret) {
pr_err("changeset: update_property failed @%pOF/%s\n",
ce->np,
ce->prop->name);
break;
}
break;
default:
ret = -EINVAL;
}
raw_spin_unlock_irqrestore(&devtree_lock, flags);
if (ret)
if (ret) {
pr_err("changeset: apply failed: %-15s %pOF:%s\n",
action_names[ce->action], ce->np, ce->prop->name);
return ret;
}
switch (ce->action) {
case OF_RECONFIG_ATTACH_NODE:
@ -921,6 +905,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
if (!ce)
return -ENOMEM;
if (WARN_ON(action >= ARRAY_SIZE(action_names)))
return -EINVAL;
/* get a reference to the node */
ce->action = action;
ce->np = of_node_get(np);


@ -184,7 +184,8 @@ int __init ima_free_kexec_buffer(void)
if (ret)
return ret;
return memblock_phys_free(addr, size);
memblock_free_late(addr, size);
return 0;
}
#endif


@ -657,12 +657,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
"OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
"phandle", 0, &args);
EXPECT_END(KERN_INFO,
"OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
"OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);


@ -504,12 +504,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
pcibios_resource_survey_bus(dev->subordinate);
__pci_bus_size_bridges(dev->subordinate,
&add_list);
if (pci_is_root_bus(bus))
__pci_bus_size_bridges(dev->subordinate, &add_list);
}
}
}
__pci_bus_assign_resources(bus, &add_list, NULL);
if (pci_is_root_bus(bus))
__pci_bus_assign_resources(bus, &add_list, NULL);
else
pci_assign_unassigned_bridge_resources(bus->self);
}
acpiphp_sanitize_bus(bus);


@ -862,6 +862,33 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
u32 pin_reg, mask;
int i;
mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
BIT(WAKE_CNTRL_OFF_S4);
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
if (!pd)
continue;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + pin * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + pin * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
}
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@ -1099,6 +1126,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
/* Disable and mask interrupts */
amd_gpio_irq_init(gpio_dev);
girq = &gpio_dev->gc.irq;
gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip);
/* This will let us handle the parent IRQ in the driver */


@ -14,6 +14,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinmux.h>
@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
struct pinctrl_dev *pctl;
struct pinctrl_gpio_range gpio_range;
int npins;
struct mutex mutex; /* serialize adding groups and functions */
};
#define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */
@ -358,10 +360,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
psel_val[i] = MUX_FUNC(value);
}
mutex_lock(&priv->mutex);
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
if (gsel < 0)
return gsel;
if (gsel < 0) {
ret = gsel;
goto unlock;
}
/*
* Register a single group function where the 'data' is an array PSEL
@ -390,6 +396,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.function = np->name;
*num_maps = 1;
mutex_unlock(&priv->mutex);
return 0;
remove_function:
@ -398,6 +406,9 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
unlock:
mutex_unlock(&priv->mutex);
dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
return ret;
@ -473,6 +484,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
mutex_init(&priv->mutex);
platform_set_drvdata(pdev, priv);
priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *


@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
@ -146,10 +147,11 @@ struct rzg2l_pinctrl {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT);
spinlock_t bitmap_lock;
spinlock_t bitmap_lock; /* protect tint_slot bitmap */
unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT];
spinlock_t lock;
spinlock_t lock; /* lock read/write registers */
struct mutex mutex; /* serialize adding groups and functions */
};
static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 };
@ -359,11 +361,13 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
mutex_lock(&pctrl->mutex);
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
if (gsel < 0) {
ret = gsel;
goto done;
goto unlock;
}
/*
@ -377,6 +381,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto remove_group;
}
mutex_unlock(&pctrl->mutex);
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
maps[idx].data.mux.group = name;
maps[idx].data.mux.function = name;
@ -388,6 +394,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
unlock:
mutex_unlock(&pctrl->mutex);
done:
*index = idx;
kfree(configs);
@ -1501,6 +1509,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
spin_lock_init(&pctrl->lock);
spin_lock_init(&pctrl->bitmap_lock);
mutex_init(&pctrl->mutex);
platform_set_drvdata(pdev, pctrl);


@ -14,6 +14,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
@ -121,7 +122,8 @@ struct rzv2m_pinctrl {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
spinlock_t lock;
spinlock_t lock; /* lock read/write registers */
struct mutex mutex; /* serialize adding groups and functions */
};
static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
@ -320,11 +322,13 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
mutex_lock(&pctrl->mutex);
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
if (gsel < 0) {
ret = gsel;
goto done;
goto unlock;
}
/*
@ -338,6 +342,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto remove_group;
}
mutex_unlock(&pctrl->mutex);
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
maps[idx].data.mux.group = name;
maps[idx].data.mux.function = name;
@ -349,6 +355,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
unlock:
mutex_unlock(&pctrl->mutex);
done:
*index = idx;
kfree(configs);
@ -1070,6 +1078,7 @@ static int rzv2m_pinctrl_probe(struct platform_device *pdev)
}
spin_lock_init(&pctrl->lock);
mutex_init(&pctrl->mutex);
platform_set_drvdata(pdev, pctrl);


@ -1176,6 +1176,11 @@ static const struct key_entry ideapad_keymap[] = {
{ KE_IGNORE, 0x03 | IDEAPAD_WMI_KEY },
/* Customizable Lenovo Hotkey ("star" with 'S' inside) */
{ KE_KEY, 0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } },
{ KE_KEY, 0x04 | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } },
/* Lenovo Support */
{ KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } },
{ KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } },
{ KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } },
/* Dark mode toggle */
{ KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } },
/* Sound profile switch */


@ -926,8 +926,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct response_type *resp_type =
(struct response_type *)msg->private;
struct response_type *resp_type = msg->private;
struct type86x_reply *t86r;
int len;
@ -982,8 +981,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct response_type *resp_type =
(struct response_type *)msg->private;
struct response_type *resp_type = msg->private;
struct type86_ep11_reply *t86r;
int len;
@ -1156,23 +1154,36 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
struct ica_xcRB *xcrb,
struct ap_message *ap_msg)
{
int rc;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
struct response_type *rtype = ap_msg->private;
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
/* ... more data blocks ... */
} __packed * msg = ap_msg->msg;
unsigned int max_payload_size;
int rc, delta;
/*
* Set the queue's reply buffer length minus 128 byte padding
* as reply limit for the card firmware.
*/
msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1,
zq->reply.bufsize - 128);
if (msg->hdr.fromcardlen2)
msg->hdr.fromcardlen2 =
zq->reply.bufsize - msg->hdr.fromcardlen1 - 128;
/* calculate maximum payload for this card and msg type */
max_payload_size = zq->reply.bufsize - sizeof(struct type86_fmt2_msg);
/* limit each of the two from fields to the maximum payload size */
msg->hdr.fromcardlen1 = min(msg->hdr.fromcardlen1, max_payload_size);
msg->hdr.fromcardlen2 = min(msg->hdr.fromcardlen2, max_payload_size);
/* calculate delta if the sum of both exceeds max payload size */
delta = msg->hdr.fromcardlen1 + msg->hdr.fromcardlen2
- max_payload_size;
if (delta > 0) {
/*
* Sum exceeds maximum payload size, prune fromcardlen1
* (always trust fromcardlen2)
*/
if (delta > msg->hdr.fromcardlen1) {
rc = -EINVAL;
goto out;
}
msg->hdr.fromcardlen1 -= delta;
}
init_completion(&rtype->work);
rc = ap_queue_message(zq->queue, ap_msg);
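A rough worked example of the reply-length clamping introduced above, using made-up sizes rather than values from the driver:

/* Hedged sketch: clamp two requested reply lengths to a shared maximum payload. */
#include <stdio.h>

int main(void)
{
	unsigned int bufsize = 4096, hdrlen = 32;     /* hypothetical reply buffer and header sizes */
	unsigned int max_payload = bufsize - hdrlen;  /* 4064 */
	unsigned int len1 = 4000, len2 = 512;         /* caller-requested fromcardlen1/fromcardlen2 */
	int delta;

	if (len1 > max_payload)
		len1 = max_payload;
	if (len2 > max_payload)
		len2 = max_payload;

	delta = (int)(len1 + len2) - (int)max_payload;
	if (delta > 0)
		len1 -= delta;  /* prune len1 only; the real code returns -EINVAL if delta > len1 */

	printf("len1=%u len2=%u\n", len1, len2);  /* 3552 and 512, together exactly 4064 */
	return 0;
}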
@ -1243,7 +1254,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
{
int rc;
unsigned int lfmt;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
struct response_type *rtype = ap_msg->private;
struct {
struct type6_hdr hdr;
struct ep11_cprb cprbx;
@ -1365,7 +1376,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
short int verb_length;
short int key_length;
} __packed * msg = ap_msg->msg;
struct response_type *rtype = (struct response_type *)(ap_msg->private);
struct response_type *rtype = ap_msg->private;
int rc;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);


@ -209,54 +209,6 @@ raid_attr_ro_state(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state_fn(state);
static void raid_component_release(struct device *dev)
{
struct raid_component *rc =
container_of(dev, struct raid_component, dev);
dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
put_device(rc->dev.parent);
kfree(rc);
}
int raid_component_add(struct raid_template *r,struct device *raid_dev,
struct device *component_dev)
{
struct device *cdev =
attribute_container_find_class_device(&r->raid_attrs.ac,
raid_dev);
struct raid_component *rc;
struct raid_data *rd = dev_get_drvdata(cdev);
int err;
rc = kzalloc(sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
INIT_LIST_HEAD(&rc->node);
device_initialize(&rc->dev);
rc->dev.release = raid_component_release;
rc->dev.parent = get_device(component_dev);
rc->num = rd->component_count++;
dev_set_name(&rc->dev, "component-%d", rc->num);
list_add_tail(&rc->node, &rd->component_list);
rc->dev.class = &raid_class.class;
err = device_add(&rc->dev);
if (err)
goto err_out;
return 0;
err_out:
put_device(&rc->dev);
list_del(&rc->node);
rd->component_count--;
put_device(component_dev);
kfree(rc);
return err;
}
EXPORT_SYMBOL(raid_component_add);
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{


@ -303,12 +303,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
"Snic Tgt: device_add, with err = %d\n",
ret);
put_device(&tgt->dev);
put_device(&snic->shost->shost_gendev);
spin_lock_irqsave(snic->shost->host_lock, flags);
list_del(&tgt->list);
spin_unlock_irqrestore(snic->shost->host_lock, flags);
kfree(tgt);
put_device(&tgt->dev);
tgt = NULL;
return tgt;


@ -415,7 +415,8 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
* uni-directional mode and we don't want to change it's TMU
* mode.
*/
tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
return ret;
tb_port_tmu_time_sync_disable(up);
ret = tb_port_tmu_time_sync_disable(down);


@ -298,14 +298,6 @@ int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t si
aperture_detach_devices(base, size);
/*
* If this is the primary adapter, there could be a VGA device
* that consumes the VGA framebuffer I/O range. Remove this device
* as well.
*/
if (primary)
aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
return 0;
}
EXPORT_SYMBOL(aperture_remove_conflicting_devices);
@ -344,13 +336,22 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na
aperture_detach_devices(base, size);
}
/*
* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over.
*/
ret = vga_remove_vgacon(pdev);
if (ret)
return ret;
if (primary) {
/*
* If this is the primary adapter, there could be a VGA device
* that consumes the VGA framebuffer I/O range. Remove this
* device as well.
*/
aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
/*
* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over.
*/
ret = vga_remove_vgacon(pdev);
if (ret)
return ret;
}
return 0;


@ -2238,14 +2238,6 @@ static const struct bin_attribute edid2_attr = {
.read = radeon_show_edid2,
};
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
resource_size_t base = pci_resource_start(pdev, 0);
resource_size_t size = pci_resource_len(pdev, 0);
return aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
}
static int radeonfb_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@ -2296,7 +2288,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
rinfo->fb_base_phys = pci_resource_start (pdev, 0);
rinfo->mmio_base_phys = pci_resource_start (pdev, 2);
ret = radeon_kick_out_firmware_fb(pdev);
ret = aperture_remove_conflicting_pci_devices(pdev, KBUILD_MODNAME);
if (ret)
goto err_release_fb;


@ -47,6 +47,7 @@ int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
return ATTR_KILL_SGID;
return 0;
}
EXPORT_SYMBOL(setattr_should_drop_sgid);
/**
* setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to


@ -242,5 +242,3 @@ ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *po
/*
* fs/attr.c
*/
int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
const struct inode *inode);


@ -27,7 +27,7 @@
*
* Called with j_list_lock held.
*/
static inline void __buffer_unlink_first(struct journal_head *jh)
static inline void __buffer_unlink(struct journal_head *jh)
{
transaction_t *transaction = jh->b_cp_transaction;
@ -40,23 +40,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh)
}
}
/*
* Unlink a buffer from a transaction checkpoint(io) list.
*
* Called with j_list_lock held.
*/
static inline void __buffer_unlink(struct journal_head *jh)
{
transaction_t *transaction = jh->b_cp_transaction;
__buffer_unlink_first(jh);
if (transaction->t_checkpoint_io_list == jh) {
transaction->t_checkpoint_io_list = jh->b_cpnext;
if (transaction->t_checkpoint_io_list == jh)
transaction->t_checkpoint_io_list = NULL;
}
}
/*
* Check a checkpoint buffer could be release or not.
*
@ -367,19 +350,24 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
/* Checkpoint list management */
/*
* journal_clean_one_cp_list
* journal_shrink_one_cp_list
*
* Find all the written-back checkpoint buffers in the given list and
* release them. If 'destroy' is set, clean all buffers unconditionally.
* Find all the written-back checkpoint buffers in the given list
* and try to release them. If the whole transaction is released, set
* the 'released' parameter. Return the number of released checkpointed
* buffers.
*
* Called with j_list_lock held.
* Returns 1 if we freed the transaction, 0 otherwise.
*/
static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
bool destroy, bool *released)
{
struct journal_head *last_jh;
struct journal_head *next_jh = jh;
unsigned long nr_freed = 0;
int ret;
*released = false;
if (!jh)
return 0;
@ -388,57 +376,15 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
jh = next_jh;
next_jh = jh->b_cpnext;
if (!destroy && __cp_buffer_busy(jh))
return 0;
if (__jbd2_journal_remove_checkpoint(jh))
return 1;
/*
* This function only frees up some memory
* if possible so we dont have an obligation
* to finish processing. Bail out if preemption
* requested:
*/
if (need_resched())
return 0;
} while (jh != last_jh);
return 0;
}
/*
* journal_shrink_one_cp_list
*
* Find 'nr_to_scan' written-back checkpoint buffers in the given list
* and try to release them. If the whole transaction is released, set
* the 'released' parameter. Return the number of released checkpointed
* buffers.
*
* Called with j_list_lock held.
*/
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
unsigned long *nr_to_scan,
bool *released)
{
struct journal_head *last_jh;
struct journal_head *next_jh = jh;
unsigned long nr_freed = 0;
int ret;
if (!jh || *nr_to_scan == 0)
return 0;
last_jh = jh->b_cpprev;
do {
jh = next_jh;
next_jh = jh->b_cpnext;
(*nr_to_scan)--;
if (__cp_buffer_busy(jh))
continue;
if (destroy) {
ret = __jbd2_journal_remove_checkpoint(jh);
} else {
ret = jbd2_journal_try_remove_checkpoint(jh);
if (ret < 0)
continue;
}
nr_freed++;
ret = __jbd2_journal_remove_checkpoint(jh);
if (ret) {
*released = true;
break;
@ -446,7 +392,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
if (need_resched())
break;
} while (jh != last_jh && *nr_to_scan);
} while (jh != last_jh);
return nr_freed;
}
@ -464,11 +410,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
unsigned long *nr_to_scan)
{
transaction_t *transaction, *last_transaction, *next_transaction;
bool released;
bool __maybe_unused released;
tid_t first_tid = 0, last_tid = 0, next_tid = 0;
tid_t tid = 0;
unsigned long nr_freed = 0;
unsigned long nr_scanned = *nr_to_scan;
unsigned long freed;
again:
spin_lock(&journal->j_list_lock);
@ -497,19 +443,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
tid = transaction->t_tid;
released = false;
nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list,
nr_to_scan, &released);
if (*nr_to_scan == 0)
break;
if (need_resched() || spin_needbreak(&journal->j_list_lock))
break;
if (released)
continue;
nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list,
nr_to_scan, &released);
freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
false, &released);
nr_freed += freed;
(*nr_to_scan) -= min(*nr_to_scan, freed);
if (*nr_to_scan == 0)
break;
if (need_resched() || spin_needbreak(&journal->j_list_lock))
@ -530,9 +468,8 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
if (*nr_to_scan && next_tid)
goto again;
out:
nr_scanned -= *nr_to_scan;
trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
nr_freed, nr_scanned, next_tid);
nr_freed, next_tid);
return nr_freed;
}
@ -548,7 +485,7 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
{
transaction_t *transaction, *last_transaction, *next_transaction;
int ret;
bool released;
transaction = journal->j_checkpoint_transactions;
if (!transaction)
@ -559,8 +496,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
do {
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
destroy);
journal_shrink_one_cp_list(transaction->t_checkpoint_list,
destroy, &released);
/*
* This function only frees up some memory if possible so we
* dont have an obligation to finish processing. Bail out if
@ -568,23 +505,12 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
*/
if (need_resched())
return;
if (ret)
continue;
/*
* It is essential that we are as careful as in the case of
* t_checkpoint_list with removing the buffer from the list as
* we can possibly see not yet submitted buffers on io_list
*/
ret = journal_clean_one_cp_list(transaction->
t_checkpoint_io_list, destroy);
if (need_resched())
return;
/*
* Stop scanning if we couldn't free the transaction. This
* avoids pointless scanning of transactions which still
* weren't checkpointed.
*/
if (!ret)
if (!released)
return;
} while (transaction != last_transaction);
}
@ -663,7 +589,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
jbd2_journal_put_journal_head(jh);
/* Is this transaction empty? */
if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list)
if (transaction->t_checkpoint_list)
return 0;
/*
@ -694,6 +620,34 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
return 1;
}
/*
* Check the checkpoint buffer and try to remove it from the checkpoint
* list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
* it frees the transaction, 0 otherwise.
*
* This function is called with j_list_lock held.
*/
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
struct buffer_head *bh = jh2bh(jh);
if (!trylock_buffer(bh))
return -EBUSY;
if (buffer_dirty(bh)) {
unlock_buffer(bh);
return -EBUSY;
}
unlock_buffer(bh);
/*
* Buffer is clean and the IO has finished (we held the buffer
* lock) so the checkpoint is done. We can safely remove the
* buffer from this transaction.
*/
JBUFFER_TRACE(jh, "remove from checkpoint list");
return __jbd2_journal_remove_checkpoint(jh);
}
/*
* journal_insert_checkpoint: put a committed buffer onto a checkpoint
* list so that we know when it is safe to clean the transaction out of
@ -755,7 +709,6 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
J_ASSERT(transaction->t_forget == NULL);
J_ASSERT(transaction->t_shadow_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
J_ASSERT(transaction->t_checkpoint_io_list == NULL);
J_ASSERT(atomic_read(&transaction->t_updates) == 0);
J_ASSERT(journal->j_committing_transaction != transaction);
J_ASSERT(journal->j_running_transaction != transaction);


@ -1171,8 +1171,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
spin_lock(&journal->j_list_lock);
commit_transaction->t_state = T_FINISHED;
/* Check if the transaction can be dropped now that we are finished */
if (commit_transaction->t_checkpoint_list == NULL &&
commit_transaction->t_checkpoint_io_list == NULL) {
if (commit_transaction->t_checkpoint_list == NULL) {
__jbd2_journal_drop_transaction(journal, commit_transaction);
jbd2_journal_free_transaction(commit_transaction);
}


@ -1784,8 +1784,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
* Otherwise, if the buffer has been written to disk,
* it is safe to remove the checkpoint and drop it.
*/
if (!buffer_dirty(bh)) {
__jbd2_journal_remove_checkpoint(jh);
if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
spin_unlock(&journal->j_list_lock);
goto drop;
}
@ -2112,20 +2111,14 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
jh = bh2jh(bh);
if (buffer_locked(bh) || buffer_dirty(bh))
goto out;
if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
goto out;
return;
spin_lock(&journal->j_list_lock);
if (jh->b_cp_transaction != NULL) {
/* written-back checkpointed metadata buffer */
JBUFFER_TRACE(jh, "remove from checkpoint list");
__jbd2_journal_remove_checkpoint(jh);
}
/* Remove written-back checkpointed metadata buffer */
if (jh->b_cp_transaction != NULL)
jbd2_journal_try_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
out:
return;
}


@ -474,20 +474,26 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
return result;
}
static void
nfs_direct_join_group(struct list_head *list, struct inode *inode)
static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
struct nfs_page *req, *next;
struct nfs_page *req, *subreq;
list_for_each_entry(req, list, wb_list) {
if (req->wb_head != req || req->wb_this_page == req)
if (req->wb_head != req)
continue;
for (next = req->wb_this_page;
next != req->wb_head;
next = next->wb_this_page) {
nfs_list_remove_request(next);
nfs_release_request(next);
}
subreq = req->wb_this_page;
if (subreq == req)
continue;
do {
/*
* Remove subrequests from this list before freeing
* them in the call to nfs_join_page_group().
*/
if (!list_empty(&subreq->wb_list)) {
nfs_list_remove_request(subreq);
nfs_release_request(subreq);
}
} while ((subreq = subreq->wb_this_page) != req);
nfs_join_page_group(req, inode);
}
}


@ -717,9 +717,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
inode->i_mode & S_ISUID)
inode->i_mode &= ~S_ISUID;
if ((attr->ia_valid & ATTR_KILL_SGID) != 0 &&
(inode->i_mode & (S_ISGID | S_IXGRP)) ==
(S_ISGID | S_IXGRP))
if (setattr_should_drop_sgid(&init_user_ns, inode))
inode->i_mode &= ~S_ISGID;
if ((attr->ia_valid & ATTR_MODE) != 0) {
int mode = attr->ia_mode & S_IALLUGO;


@ -1359,7 +1359,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
for (i = 0; i < np; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i]) {
np = i + 1;
err = -ENOMEM;
goto out;
}
@ -1383,8 +1382,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
} while (exception.retry);
out:
while (--np >= 0)
__free_page(pages[np]);
while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
return err;


@ -5995,9 +5995,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
out_ok:
ret = res.acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
while (--i >= 0)
__free_page(pages[i]);
if (res.acl_scratch)
__free_page(res.acl_scratch);
kfree(pages);
@ -7171,8 +7170,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
goto out_restart;
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
if (data->arg.new_lock_owner != 0 &&
nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
lsp->ls_state))
goto out_restart;
if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
goto out_restart;
fallthrough;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
if (data->arg.new_lock_owner != 0) {


@ -1368,9 +1368,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
WARN_ON(!list_empty(&dp->dl_recall_lru));
if (clp->cl_minorversion) {
spin_lock(&clp->cl_lock);
dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
refcount_inc(&dp->dl_stid.sc_count);
spin_lock(&clp->cl_lock);
list_add(&dp->dl_recall_lru, &clp->cl_revoked);
spin_unlock(&clp->cl_lock);
}


@ -321,7 +321,9 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
iap->ia_mode &= ~S_ISGID;
} else {
/* set ATTR_KILL_* bits and let VFS handle it */
iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
iap->ia_valid |= ATTR_KILL_SUID;
iap->ia_valid |=
setattr_should_drop_sgid(&init_user_ns, inode);
}
}
}


@ -1525,7 +1525,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
#define DP_LTTPR_COMMON_CAP_SIZE 8
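A quick arithmetic check of the corrected define above (a standalone sketch, not from the patch): registers 0x60 through 0x6F inclusive span 16 bytes, i.e. 0x10.

/* Hedged sketch: size of an inclusive register range. */
#include <stdio.h>

int main(void)
{
	unsigned int first = 0x60, last = 0x6f;

	printf("size = %#x\n", last - first + 1);  /* prints 0x10 (16 bytes) */
	return 0;
}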


@ -13,14 +13,13 @@ int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t
resource_size_t size);
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
bool primary, const struct drm_driver *req_driver);
const struct drm_driver *req_driver);
int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const struct drm_driver *req_driver);
/**
* drm_aperture_remove_framebuffers - remove all existing framebuffers
* @primary: also kick vga16fb if present
* @req_driver: requesting DRM driver
*
* This function removes all graphics device drivers. Use this function on systems
@ -30,9 +29,9 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
* 0 on success, or a negative errno code otherwise
*/
static inline int
drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver)
drm_aperture_remove_framebuffers(const struct drm_driver *req_driver)
{
return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary,
return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1,
req_driver);
}


@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
/**
* clk_rate_exclusive_get - get exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to get exclusive control over the rate of a
* provider. It prevents any other consumer to execute, even indirectly,
* opereation which could alter the rate of the provider or cause glitches
*
* If exlusivity is claimed more than once on clock, even by the same driver,
* the rate effectively gets locked as exclusivity can't be preempted.
*
* Must not be called from within atomic context.
*
* Returns success (0) or negative errno.
*/
int clk_rate_exclusive_get(struct clk *clk);
/**
* clk_rate_exclusive_put - release exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to release the exclusivity it previously got
* from clk_rate_exclusive_get()
*
* The caller must balance the number of clk_rate_exclusive_get() and
* clk_rate_exclusive_put() calls.
*
* Must not be called from within atomic context.
*/
void clk_rate_exclusive_put(struct clk *clk);
#else
static inline int clk_notifier_register(struct clk *clk,
@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
static inline int clk_rate_exclusive_get(struct clk *clk)
{
return 0;
}
static inline void clk_rate_exclusive_put(struct clk *clk) {}
#endif
#ifdef CONFIG_HAVE_CLK_PREPARE
@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
/**
* clk_rate_exclusive_get - get exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to get exclusive control over the rate of a
* provider. It prevents any other consumer to execute, even indirectly,
* opereation which could alter the rate of the provider or cause glitches
*
* If exlusivity is claimed more than once on clock, even by the same driver,
* the rate effectively gets locked as exclusivity can't be preempted.
*
* Must not be called from within atomic context.
*
* Returns success (0) or negative errno.
*/
int clk_rate_exclusive_get(struct clk *clk);
/**
* clk_rate_exclusive_put - release exclusivity over the rate control of a
* producer
* @clk: clock source
*
* This function allows drivers to release the exclusivity it previously got
* from clk_rate_exclusive_get()
*
* The caller must balance the number of clk_rate_exclusive_get() and
* clk_rate_exclusive_put() calls.
*
* Must not be called from within atomic context.
*/
void clk_rate_exclusive_put(struct clk *clk);
/**
* clk_enable - inform the system when the clock source should be running.
@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
static inline int clk_rate_exclusive_get(struct clk *clk)
{
return 0;
}
static inline void clk_rate_exclusive_put(struct clk *clk) {}
static inline int clk_enable(struct clk *clk)
{
return 0;
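The relocated kernel-doc above describes the rate-exclusivity API; a minimal usage sketch (hypothetical driver code, assuming a struct clk handle was already obtained elsewhere):

#include <linux/clk.h>

/* Hedged sketch: pin a clock's rate while work depends on it staying fixed. */
static int example_run_at_fixed_rate(struct clk *clk)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);   /* block other consumers from changing the rate */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 100000000);  /* hypothetical 100 MHz target */

	/* ... work that relies on the locked rate would go here ... */

	clk_rate_exclusive_put(clk);         /* must balance the _get() */
	return ret;
}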


@ -71,6 +71,10 @@ extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@ -194,6 +198,11 @@ static inline void cpuset_update_active_cpus(void)
static inline void cpuset_wait_for_hotplug(void) { }
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{


@ -3131,6 +3131,8 @@ extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *);
extern int file_remove_privs(struct file *);
int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
const struct inode *inode);
/*
* This must be used for allocating filesystems specific inodes to set


@ -622,12 +622,6 @@ struct transaction_s
*/
struct journal_head *t_checkpoint_list;
/*
* Doubly-linked circular list of all buffers submitted for IO while
* checkpointing. [j_list_lock]
*/
struct journal_head *t_checkpoint_io_list;
/*
* Doubly-linked circular list of metadata buffers being
* shadowed by log IO. The IO buffers on the iobuf list and
@ -1441,6 +1435,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);


@ -1977,6 +1977,25 @@ static inline size_t folio_size(struct folio *folio)
return PAGE_SIZE << folio_order(folio);
}
/**
* folio_estimated_sharers - Estimate the number of sharers of a folio.
* @folio: The folio.
*
* folio_estimated_sharers() aims to serve as a function to efficiently
* estimate the number of processes sharing a folio. This is done by
* looking at the precise mapcount of the first subpage in the folio, and
* assuming the other subpages are the same. This may not be true for large
* folios. If you want exact mapcounts for exact calculations, look at
* page_mapcount() or folio_total_mapcount().
*
* Return: The estimated number of processes sharing a folio.
*/
static inline int folio_estimated_sharers(struct folio *folio)
{
return page_mapcount(folio_page(folio, 0));
}
#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
static inline int arch_make_page_accessible(struct page *page)
{
@ -3342,6 +3361,16 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_rmb();
/*
* During GUP-fast we might not get called on the head page for a
* hugetlb page that is mapped using cont-PTE, because GUP-fast does
* not work with the abstracted hugetlb PTEs that always point at the
* head page. For hugetlb, PageAnonExclusive only applies on the head
* page (as it cannot be partially COW-shared), so lookup the head page.
*/
if (unlikely(!PageHead(page) && PageHuge(page)))
page = compound_head(page);
/*
* Note that PageKsm() pages cannot be exclusive, and consequently,
* cannot get pinned.
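The folio_estimated_sharers() helper added earlier in this file can be exercised by a trivial caller; a hedged sketch (hypothetical function, not part of the patch):

#include <linux/mm.h>

/* Hedged sketch: treat a folio as private when its first subpage maps exactly once. */
static bool example_folio_looks_private(struct folio *folio)
{
	/* Estimate only: based on the first subpage's mapcount, as documented above. */
	return folio_estimated_sharers(folio) == 1;
}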


@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
int __must_check raid_component_add(struct raid_template *, struct device *,
struct device *);


@ -1875,7 +1875,9 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);


@ -727,23 +727,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
}
/* Caller must hold rcu_read_lock() for read */
static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
struct slave *tmp;
struct netdev_hw_addr *ha;
bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return true;
if (netdev_uc_empty(bond->dev))
return false;
netdev_for_each_uc_addr(ha, bond->dev)
if (ether_addr_equal_64bits(mac, ha->addr))
return true;
return false;
}


@ -223,8 +223,8 @@ struct inet_sock {
__s16 uc_ttl;
__u16 cmsg_flags;
struct ip_options_rcu __rcu *inet_opt;
atomic_t inet_id;
__be16 inet_sport;
__u16 inet_id;
__u8 tos;
__u8 min_ttl;


@ -534,8 +534,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
* generator as much as we can.
*/
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = htons(inet_sk(sk)->inet_id);
inet_sk(sk)->inet_id += segs;
int val;
/* avoid atomic operations for TCP,
* as we hold socket lock at this point.
*/
if (sk_is_tcp(sk)) {
sock_owned_by_me(sk);
val = atomic_read(&inet_sk(sk)->inet_id);
atomic_set(&inet_sk(sk)->inet_id, val + segs);
} else {
val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
}
iph->id = htons(val);
return;
}
if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {


@ -6463,6 +6463,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
* marks frames marked in the bitmap as having been filtered. Afterwards, it
* checks if any frames in the window starting from @ssn can now be released
* (in case they were only waiting for frames that were filtered.)
* (Only work correctly if @max_rx_aggregation_subframes <= 64 frames)
*/
void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
u16 ssn, u64 filtered,

Some files were not shown because too many files have changed in this diff.