Merge 6.1.45 into android14-6.1-lts

Changes in 6.1.45
	io_uring: gate iowait schedule on having pending requests
	perf: Fix function pointer case
	net/mlx5: Free irqs only on shutdown callback
	net: ipa: only reset hashed tables when supported
	iommu/arm-smmu-v3: Work around MMU-600 erratum 1076982
	iommu/arm-smmu-v3: Document MMU-700 erratum 2812531
	iommu/arm-smmu-v3: Add explicit feature for nesting
	iommu/arm-smmu-v3: Document nesting-related errata
	arm64: dts: imx8mm-venice-gw7903: disable disp_blk_ctrl
	arm64: dts: imx8mm-venice-gw7904: disable disp_blk_ctrl
	arm64: dts: phycore-imx8mm: Label typo-fix of VPU
	arm64: dts: phycore-imx8mm: Correction in gpio-line-names
	arm64: dts: imx8mn-var-som: add missing pull-up for onboard PHY reset pinmux
	arm64: dts: freescale: Fix VPU G2 clock
	firmware: smccc: Fix use of uninitialised results structure
	lib/bitmap: workaround const_eval test build failure
	firmware: arm_scmi: Fix chan_free cleanup on SMC
	word-at-a-time: use the same return type for has_zero regardless of endianness
	KVM: s390: fix sthyi error handling
	erofs: fix wrong primary bvec selection on deduplicated extents
	wifi: cfg80211: Fix return value in scan logic
	net/mlx5e: fix double free in macsec_fs_tx_create_crypto_table_groups
	net/mlx5: DR, fix memory leak in mlx5dr_cmd_create_reformat_ctx
	net/mlx5: fix potential memory leak in mlx5e_init_rep_rx
	net/mlx5e: fix return value check in mlx5e_ipsec_remove_trailer()
	net/mlx5e: Fix crash moving to switchdev mode when ntuple offload is set
	net/mlx5e: Move representor neigh cleanup to profile cleanup_tx
	bpf: Add length check for SK_DIAG_BPF_STORAGE_REQ_MAP_FD parsing
	rtnetlink: let rtnl_bridge_setlink checks IFLA_BRIDGE_MODE length
	net: dsa: fix value check in bcm_sf2_sw_probe()
	perf test uprobe_from_different_cu: Skip if there is no gcc
	net: sched: cls_u32: Fix match key mis-addressing
	mISDN: hfcpci: Fix potential deadlock on &hc->lock
	qed: Fix scheduling in a tasklet while getting stats
	net: annotate data-races around sk->sk_reserved_mem
	net: annotate data-race around sk->sk_txrehash
	net: annotate data-races around sk->sk_max_pacing_rate
	net: add missing READ_ONCE(sk->sk_rcvlowat) annotation
	net: add missing READ_ONCE(sk->sk_sndbuf) annotation
	net: add missing READ_ONCE(sk->sk_rcvbuf) annotation
	net: annotate data-races around sk->sk_mark
	net: add missing data-race annotations around sk->sk_peek_off
	net: add missing data-race annotation for sk_ll_usec
	net: annotate data-races around sk->sk_priority
	net/sched: taprio: Limit TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME to INT_MAX.
	ice: Fix RDMA VSI removal during queue rebuild
	bpf, cpumap: Handle skb as well when clean up ptr_ring
	net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after-free
	net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after-free
	net/sched: cls_route: No longer copy tcf_result on update to avoid use-after-free
	bpf: sockmap: Remove preempt_disable in sock_map_sk_acquire
	net: ll_temac: fix error checking of irq_of_parse_and_map()
	net: korina: handle clk prepare error in korina_probe()
	net: netsec: Ignore 'phy-mode' on SynQuacer in DT mode
	bnxt_en: Fix page pool logic for page size >= 64K
	bnxt_en: Fix max_mtu setting for multi-buf XDP
	net: dcb: choose correct policy to parse DCB_ATTR_BCN
	s390/qeth: Don't call dev_close/dev_open (DOWN/UP)
	ip6mr: Fix skb_under_panic in ip6mr_cache_report()
	vxlan: Fix nexthop hash size
	net/mlx5: fs_core: Make find_closest_ft more generic
	net/mlx5: fs_core: Skip the FTs in the same FS_TYPE_PRIO_CHAINS fs_prio
	prestera: fix fallback to previous version on same major version
	tcp_metrics: fix addr_same() helper
	tcp_metrics: annotate data-races around tm->tcpm_stamp
	tcp_metrics: annotate data-races around tm->tcpm_lock
	tcp_metrics: annotate data-races around tm->tcpm_vals[]
	tcp_metrics: annotate data-races around tm->tcpm_net
	tcp_metrics: fix data-race in tcpm_suck_dst() vs fastopen
	rust: allocator: Prevent mis-aligned allocation
	scsi: zfcp: Defer fc_rport blocking until after ADISC response
	scsi: storvsc: Limit max_sectors for virtual Fibre Channel devices
	libceph: fix potential hang in ceph_osdc_notify()
	USB: zaurus: Add ID for A-300/B-500/C-700
	ceph: defer stopping mdsc delayed_work
	firmware: arm_scmi: Drop OF node reference in the transport channel setup
	exfat: use kvmalloc_array/kvfree instead of kmalloc_array/kfree
	exfat: release s_lock before calling dir_emit()
	mtd: spinand: toshiba: Fix ecc_get_status
	mtd: rawnand: meson: fix OOB available bytes for ECC
	bpf: Disable preemption in bpf_perf_event_output
	arm64: dts: stratix10: fix incorrect I2C property for SCL signal
	net: tun_chr_open(): set sk_uid from current_fsuid()
	net: tap_open(): set sk_uid from current_fsuid()
	wifi: mt76: mt7615: do not advertise 5 GHz on first phy of MT7615D (DBDC)
	x86/hyperv: Disable IBT when hypercall page lacks ENDBR instruction
	rbd: prevent busy loop when requesting exclusive lock
	bpf: Disable preemption in bpf_event_output
	powerpc/ftrace: Create a dummy stackframe to fix stack unwind
	arm64/fpsimd: Sync and zero pad FPSIMD state for streaming SVE
	arm64/fpsimd: Clear SME state in the target task when setting the VL
	arm64/fpsimd: Sync FPSIMD state with SVE for SME only systems
	open: make RESOLVE_CACHED correctly test for O_TMPFILE
	drm/ttm: check null pointer before accessing when swapping
	drm/i915: Fix premature release of request's reusable memory
	drm/i915/gt: Cleanup aux invalidation registers
	clk: imx93: Propagate correct error in imx93_clocks_probe()
	bpf, cpumap: Make sure kthread is running before map update returns
	file: reinstate f_pos locking optimization for regular files
	mm: kmem: fix a NULL pointer dereference in obj_stock_flush_required()
	fs/ntfs3: Use __GFP_NOWARN allocation at ntfs_load_attr_list()
	fs/sysv: Null check to prevent null-ptr-deref bug
	Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb
	debugobjects: Recheck debug_objects_enabled before reporting
	net: usbnet: Fix WARNING in usbnet_start_xmit/usb_submit_urb
	fs: Protect reconfiguration of sb read-write from racing writes
	ext2: Drop fragment support
	btrfs: remove BUG_ON()'s in add_new_free_space()
	f2fs: fix to do sanity check on direct node in truncate_dnode()
	io_uring: annotate offset timeout races
	mtd: rawnand: omap_elm: Fix incorrect type in assignment
	mtd: rawnand: rockchip: fix oobfree offset and description
	mtd: rawnand: rockchip: Align hwecc vs. raw page helper layouts
	mtd: rawnand: fsl_upm: Fix an off-by one test in fun_exec_op()
	powerpc/mm/altmap: Fix altmap boundary check
	drm/imx/ipuv3: Fix front porch adjustment upon hactive aligning
	drm/amd/display: Ensure that planes are in the same order
	drm/amd/display: skip CLEAR_PAYLOAD_ID_TABLE if device mst_en is 0
	selftests/rseq: Play nice with binaries statically linked against glibc 2.35+
	f2fs: fix to set flush_merge opt and show noflush_merge
	f2fs: don't reset unchangable mount option in f2fs_remount()
	exfat: check if filename entries exceeds max filename length
	arm64/ptrace: Don't enable SVE when setting streaming SVE
	drm/amdgpu: add vram reservation based on vram_usagebyfirmware_v2_2
	drm/amdgpu: Remove unnecessary domain argument
	drm/amdgpu: Use apt name for FW reserved region
	Revert "drm/i915: Disable DC states for all commits"
	x86/CPU/AMD: Do not leak quotient data after a division by 0
	Linux 6.1.45

Change-Id: Ic63af3f07f26c867c9fc361b2f7055dbc04143d2
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-09-02 20:07:57 +00:00
commit 706ba4ef8d
165 changed files with 1569 additions and 642 deletions


@ -138,6 +138,10 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | MMU-500         | #841119,826419  | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | MMU-600         | #1076982,1209401| N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | MMU-700         | #2268618,2812531| N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | GIC-700         | #2941627        | ARM64_ERRATUM_2941627       |
+----------------+-----------------+-----------------+-----------------------------+


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 44
SUBLEVEL = 45
EXTRAVERSION =
NAME = Curry Ramen


@ -128,7 +128,7 @@ &i2c1 {
status = "okay";
clock-frequency = <100000>;
i2c-sda-falling-time-ns = <890>; /* hcnt */
i2c-sdl-falling-time-ns = <890>; /* lcnt */
i2c-scl-falling-time-ns = <890>; /* lcnt */
adc@14 {
compatible = "lltc,ltc2497";


@ -141,7 +141,7 @@ &i2c2 {
status = "okay";
clock-frequency = <100000>;
i2c-sda-falling-time-ns = <890>; /* hcnt */
i2c-sdl-falling-time-ns = <890>; /* lcnt */
i2c-scl-falling-time-ns = <890>; /* lcnt */
adc@14 {
compatible = "lltc,ltc2497";


@ -141,7 +141,7 @@ can0: can@0 {
};
&gpio1 {
gpio-line-names = "nINT_ETHPHY", "LED_RED", "WDOG_INT", "X_RTC_INT",
gpio-line-names = "", "LED_RED", "WDOG_INT", "X_RTC_INT",
"", "", "", "RESET_ETHPHY",
"CAN_nINT", "CAN_EN", "nENABLE_FLATLINK", "",
"USB_OTG_VBUS_EN", "", "LED_GREEN", "LED_BLUE";


@ -111,7 +111,7 @@ som_flash: flash@0 {
};
&gpio1 {
gpio-line-names = "nINT_ETHPHY", "", "WDOG_INT", "X_RTC_INT",
gpio-line-names = "", "", "WDOG_INT", "X_RTC_INT",
"", "", "", "RESET_ETHPHY",
"", "", "nENABLE_FLATLINK";
};
@ -210,7 +210,7 @@ regulator-state-mem {
};
};
reg_vdd_gpu: buck3 {
reg_vdd_vpu: buck3 {
regulator-always-on;
regulator-boot-on;
regulator-max-microvolt = <1000000>;


@ -559,6 +559,10 @@ &pcie0 {
status = "okay";
};
&disp_blk_ctrl {
status = "disabled";
};
&pgc_mipi {
status = "disabled";
};


@ -617,6 +617,10 @@ &pcie0 {
status = "okay";
};
&disp_blk_ctrl {
status = "disabled";
};
&pgc_mipi {
status = "disabled";
};


@ -351,7 +351,7 @@ MX8MN_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19
MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159
>;
};


@ -756,7 +756,7 @@ pgc_vpu: power-domain@6 {
<&clk IMX8MQ_SYS1_PLL_800M>,
<&clk IMX8MQ_VPU_PLL>;
assigned-clock-rates = <600000000>,
<600000000>,
<300000000>,
<800000000>,
<0>;
};


@ -634,7 +634,7 @@ static void fpsimd_to_sve(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
if (!system_supports_sve())
if (!system_supports_sve() && !system_supports_sme())
return;
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@ -660,7 +660,7 @@ static void sve_to_fpsimd(struct task_struct *task)
unsigned int i;
__uint128_t const *p;
if (!system_supports_sve())
if (!system_supports_sve() && !system_supports_sme())
return;
vl = thread_get_cur_vl(&task->thread);
@ -791,7 +791,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
if (!test_tsk_thread_flag(task, TIF_SVE))
if (!test_tsk_thread_flag(task, TIF_SVE) &&
!thread_sm_enabled(&task->thread))
return;
vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
@ -863,7 +864,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
*/
task->thread.svcr &= ~(SVCR_SM_MASK |
SVCR_ZA_MASK);
clear_thread_flag(TIF_SME);
clear_tsk_thread_flag(task, TIF_SME);
free_sme = true;
}
}


@ -937,11 +937,13 @@ static int sve_set_common(struct task_struct *target,
/*
* Ensure target->thread.sve_state is up to date with target's
* FPSIMD regs, so that a short copyin leaves trailing
* registers unmodified. Always enable SVE even if going into
* streaming mode.
* registers unmodified. Only enable SVE if we are
* configuring normal SVE, a system with streaming SVE may not
* have normal SVE.
*/
fpsimd_sync_to_sve(target);
set_tsk_thread_flag(target, TIF_SVE);
if (type == ARM64_VEC_SVE)
set_tsk_thread_flag(target, TIF_SVE);
BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
start = SVE_PT_SVE_OFFSET;


@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask)
return leading_zero_bits >> 3;
}
static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;
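
For context on why the return type matters, a minimal sketch of the usual caller pattern (a hypothetical helper modeled on the kernel's strscpy()/hash_name() usage, not part of this patch; it assumes the buffer is padded to a whole number of words). has_zero()'s result is tested as a truth value here, but on little-endian it doubles as the byte mask fed to the later helpers, which is why both endiannesses should return unsigned long:

#include <linux/types.h>
#include <asm/word-at-a-time.h>

/* Hypothetical word-at-a-time strlen; assumes word-padded buffers. */
static size_t wordwise_strlen(const unsigned long *p)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	size_t len = 0;
	unsigned long val, data;

	for (;;) {
		val = *p++;
		if (has_zero(val, &data, &constants)) {
			data = prep_zero_mask(val, data, &constants);
			data = create_zero_mask(data);
			return len + find_zero(data);
		}
		len += sizeof(unsigned long);
	}
}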


@ -33,6 +33,9 @@
* and then arrange for the ftrace function to be called.
*/
.macro ftrace_regs_entry allregs
/* Create a minimal stack frame for representing B */
PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1)
/* Create our stack frame + pt_regs */
PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)
@ -42,7 +45,7 @@
#ifdef CONFIG_PPC64
/* Save the original return address in A's stack frame */
std r0, LRSAVE+SWITCH_FRAME_SIZE(r1)
std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
/* Ok to continue? */
lbz r3, PACA_FTRACE_ENABLED(r13)
cmpdi r3, 0
@ -77,6 +80,8 @@
mflr r7
/* Save it as pt_regs->nip */
PPC_STL r7, _NIP(r1)
/* Also save it in B's stackframe header for proper unwind */
PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
/* Save the read LR in pt_regs->link */
PPC_STL r0, _LINK(r1)
@ -142,7 +147,7 @@
#endif
/* Pop our stack frame */
addi r1, r1, SWITCH_FRAME_SIZE
addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
#ifdef CONFIG_LIVEPATCH_64
/* Based on the cmpd above, if the NIP was altered handle livepatch */
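
For orientation, a rough sketch of the stack after entry, derived from the hunks above (higher addresses first; A and B as named in the comments):

	original r1 -> A's frame; its LRSAVE slot receives the original LR
	               (std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1))
	r1 - STACK_FRAME_MIN_SIZE -> dummy frame B; its LRSAVE slot receives
	               the traced function's NIP so the unwinder can walk past it
	r1 - STACK_FRAME_MIN_SIZE - SWITCH_FRAME_SIZE -> pt_regs frame (current r1)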


@ -314,8 +314,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
start = ALIGN_DOWN(start, page_size);
if (altmap) {
alt_start = altmap->base_pfn;
alt_end = altmap->base_pfn + altmap->reserve +
altmap->free + altmap->alloc + altmap->align;
alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
}
pr_debug("vmemmap_free %lx...%lx\n", start, end);


@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc)
*
* Fills the destination with system information returned by the STHYI
* instruction. The data is generated by emulation or execution of STHYI,
* if available. The return value is the condition code that would be
* returned, the rc parameter is the return code which is passed in
* register R2 + 1.
* if available. The return value is either a negative error value or
* the condition code that would be returned, the rc parameter is the
* return code which is passed in register R2 + 1.
*/
int sthyi_fill(void *dst, u64 *rc)
{


@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
*/
int handle_sthyi(struct kvm_vcpu *vcpu)
{
int reg1, reg2, r = 0;
u64 code, addr, cc = 0, rc = 0;
int reg1, reg2, cc = 0, r = 0;
u64 code, addr, rc = 0;
struct sthyi_sctns *sctns = NULL;
if (!test_kvm_facility(vcpu->kvm, 74))
@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
return -ENOMEM;
cc = sthyi_fill(sctns, &rc);
if (cc < 0) {
free_page((unsigned long)sctns);
return cc;
}
out:
if (!cc) {
if (kvm_s390_pv_cpu_is_protected(vcpu)) {


@ -14,6 +14,7 @@
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/sev.h>
#include <asm/ibt.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@ -467,6 +468,26 @@ void __init hyperv_init(void)
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
}
/*
* Some versions of Hyper-V that provide IBT in guest VMs have a bug
* in that there's no ENDBR64 instruction at the entry to the
* hypercall page. Because hypercalls are invoked via an indirect call
* to the hypercall page, all hypercall attempts fail when IBT is
* enabled, and Linux panics. For such buggy versions, disable IBT.
*
* Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
* page, so if future Linux kernel versions enable IBT for 32-bit
* builds, additional hypercall page hackery will be required here
* to provide an ENDBR32.
*/
#ifdef CONFIG_X86_KERNEL_IBT
if (cpu_feature_enabled(X86_FEATURE_IBT) &&
*(u32 *)hv_hypercall_pg != gen_endbr()) {
setup_clear_cpu_cap(X86_FEATURE_IBT);
pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
}
#endif
/*
* hyperv_init() is called before LAPIC is initialized: see
* apic_intr_mode_init() -> x86_platform.apic_post_init() and
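
The comparison works because ENDBR64 is a fixed four-byte instruction: bytes F3 0F 1E FA, which read as a little-endian u32 is 0xFA1E0FF3, the value gen_endbr() produces. A standalone sketch of the same test (hypothetical helper, not from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* ENDBR64 encodes as F3 0F 1E FA; as a little-endian u32: 0xFA1E0FF3. */
#define ENDBR64_INSN 0xFA1E0FF3u

static bool page_starts_with_endbr64(const void *page)
{
	uint32_t insn;

	memcpy(&insn, page, sizeof(insn)); /* dodge alignment/aliasing pitfalls */
	return insn == ENDBR64_INSN;
}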


@ -476,4 +476,5 @@
/* BUG word 2 */
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#endif /* _ASM_X86_CPUFEATURES_H */


@ -801,10 +801,12 @@ extern u16 get_llc_id(unsigned int cpu);
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
static inline void amd_clear_divider(void) { }
#endif
#define for_each_possible_hypervisor_cpuid_base(function) \


@ -75,6 +75,10 @@ static const int amd_zenbleed[] =
AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
static const int amd_div0[] =
AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
int osvw_id = *erratum++;
@ -1115,6 +1119,11 @@ static void init_amd(struct cpuinfo_x86 *c)
check_null_seg_clears_base(c);
zenbleed_check(c);
if (cpu_has_amd_erratum(c, amd_div0)) {
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
}
}
#ifdef CONFIG_X86_32
@ -1275,3 +1284,13 @@ void amd_check_microcode(void)
{
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
/*
* Issue a DIV 0/1 insn to clear any division data from previous DIV
* operations.
*/
void noinstr amd_clear_divider(void)
{
asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
:: "a" (0), "d" (0), "r" (1));
}
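
For reference, the operands make this arithmetically a no-op: with EAX = 0 and EDX = 0, "div" by 1 computes EDX:EAX / 1 = 0 with remainder 0, so the instruction's only effect is to overwrite any stale quotient data that a previous, possibly speculative, division left in the divider.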


@ -206,6 +206,8 @@ DEFINE_IDTENTRY(exc_divide_error)
{
do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
FPE_INTDIV, error_get_trap_addr(regs));
amd_clear_divider();
}
DEFINE_IDTENTRY(exc_overflow)


@ -3676,7 +3676,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
RBD_LOCK_TAG, "", 0);
if (ret)
if (ret && ret != -EEXIST)
return ret;
__rbd_lock(rbd_dev, cookie);
@ -3879,7 +3879,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
&rbd_dev->header_oloc, RBD_LOCK_NAME,
&lock_type, &lock_tag, &lockers, &num_lockers);
if (ret) {
rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret);
rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
return ERR_PTR(ret);
}
@ -3941,8 +3941,10 @@ static int find_watcher(struct rbd_device *rbd_dev,
ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
&rbd_dev->header_oloc, &watchers,
&num_watchers);
if (ret)
if (ret) {
rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
return ret;
}
sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
for (i = 0; i < num_watchers; i++) {
@ -3986,8 +3988,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
locker = refreshed_locker = NULL;
ret = rbd_lock(rbd_dev);
if (ret != -EBUSY)
if (!ret)
goto out;
if (ret != -EBUSY) {
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
goto out;
}
/* determine if the current lock holder is still alive */
locker = get_lock_owner_info(rbd_dev);
@ -4090,11 +4096,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
ret = rbd_try_lock(rbd_dev);
if (ret < 0) {
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
if (ret == -EBLOCKLISTED)
goto out;
ret = 1; /* request lock anyway */
rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
goto out;
}
if (ret > 0) {
up_write(&rbd_dev->lock_rwsem);
@ -6628,12 +6631,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
cancel_delayed_work_sync(&rbd_dev->lock_dwork);
if (!ret)
ret = -ETIMEDOUT;
}
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
return ret;
rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
}
if (ret)
return ret;
/*
* The lock may have been released by now, unless automatic lock


@ -288,7 +288,7 @@ static int imx93_clocks_probe(struct platform_device *pdev)
anatop_base = devm_of_iomap(dev, np, 0, NULL);
of_node_put(np);
if (WARN_ON(IS_ERR(anatop_base))) {
ret = PTR_ERR(base);
ret = PTR_ERR(anatop_base);
goto unregister_hws;
}


@ -106,8 +106,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
return -ENOMEM;
shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) {
of_node_put(shmem);
return -ENXIO;
}
ret = of_address_to_resource(shmem, 0, &res);
of_node_put(shmem);


@ -23,6 +23,7 @@
/**
* struct scmi_smc - Structure representing a SCMI smc transport
*
* @irq: An optional IRQ for completion
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
@ -33,6 +34,7 @@
*/
struct scmi_smc {
int irq;
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
/* Protect access to shmem area */
@ -106,7 +108,7 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
struct resource res;
struct device_node *np;
u32 func_id;
int ret, irq;
int ret;
if (!tx)
return -ENODEV;
@ -116,8 +118,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
return -ENOMEM;
np = of_parse_phandle(cdev->of_node, "shmem", 0);
if (!of_device_is_compatible(np, "arm,scmi-shmem"))
if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
of_node_put(np);
return -ENXIO;
}
ret = of_address_to_resource(np, 0, &res);
of_node_put(np);
@ -142,11 +146,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
* completion of a message is signaled by an interrupt rather than by
* the return of the SMC call.
*/
irq = of_irq_get_byname(cdev->of_node, "a2p");
if (irq > 0) {
ret = devm_request_irq(dev, irq, smc_msg_done_isr,
IRQF_NO_SUSPEND,
dev_name(dev), scmi_info);
scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
if (scmi_info->irq > 0) {
ret = request_irq(scmi_info->irq, smc_msg_done_isr,
IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
if (ret) {
dev_err(dev, "failed to setup SCMI smc irq\n");
return ret;
@ -168,6 +171,10 @@ static int smc_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_smc *scmi_info = cinfo->transport_info;
/* Ignore any possible further reception on the IRQ path */
if (scmi_info->irq > 0)
free_irq(scmi_info->irq, scmi_info);
cinfo->transport_info = NULL;
scmi_info->cinfo = NULL;


@ -34,7 +34,6 @@ static struct soc_device_attribute *soc_dev_attr;
static int __init smccc_soc_init(void)
{
struct arm_smccc_res res;
int soc_id_rev, soc_id_version;
static char soc_id_str[20], soc_id_rev_str[12];
static char soc_id_jep106_id_str[12];
@ -55,23 +54,14 @@ static int __init smccc_soc_init(void)
return 0;
}
if ((int)res.a0 < 0) {
pr_info("ARCH_FEATURES(ARCH_SOC_ID) returned error: %lx\n",
res.a0);
if (soc_id_version < 0) {
pr_err("Invalid SoC Version: %x\n", soc_id_version);
return -EINVAL;
}
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 0, &res);
if ((int)res.a0 < 0) {
pr_err("ARCH_SOC_ID(0) returned error: %lx\n", res.a0);
return -EINVAL;
}
soc_id_version = res.a0;
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_SOC_ID, 1, &res);
if ((int)res.a0 < 0) {
pr_err("ARCH_SOC_ID(1) returned error: %lx\n", res.a0);
soc_id_rev = arm_smccc_get_soc_id_revision();
if (soc_id_rev < 0) {
pr_err("Invalid SoC Revision: %x\n", soc_id_rev);
return -EINVAL;
}


@ -101,39 +101,97 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
}
}
static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
uint32_t start_addr, fw_size, drv_size;
start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
start_addr,
fw_size,
drv_size);
if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = fw_size << 10;
/* Use the default scratch size */
*usage_bytes = 0;
} else {
*usage_bytes = drv_size << 10;
}
return 0;
}
static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size;
fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
fw_start_addr,
fw_size,
drv_start_addr,
drv_size);
if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = fw_size << 10;
}
if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) {
/* driver request VRAM reservation for SR-IOV */
adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.drv_vram_usage_size = drv_size << 10;
}
*usage_bytes = 0;
return 0;
}
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
struct vram_usagebyfirmware_v2_1 *firmware_usage;
uint32_t start_addr, size;
struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
uint16_t data_offset;
uint8_t frev, crev;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb));
start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware request VRAM reservation for SR-IOV */
adev->mman.fw_vram_usage_start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->mman.fw_vram_usage_size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
if (frev == 2 && crev == 1) {
fw_usage_v2_1 =
(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
amdgpu_atomfirmware_allocate_fb_v2_1(adev,
fw_usage_v2_1,
&usage_bytes);
} else if (frev >= 2 && crev >= 2) {
fw_usage_v2_2 =
(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
amdgpu_atomfirmware_allocate_fb_v2_2(adev,
fw_usage_v2_2,
&usage_bytes);
}
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;


@ -347,17 +347,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
* @adev: amdgpu device object
* @offset: offset of the BO
* @size: size of the BO
* @domain: where to place it
* @bo_ptr: used to initialize BOs in structures
* @cpu_addr: optional CPU address mapping
*
* Creates a kernel BO at a specific offset in the address space of the domain.
* Creates a kernel BO at a specific offset in VRAM.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size, uint32_t domain,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
struct ttm_operation_ctx ctx = { false, false };
@ -367,8 +366,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
offset &= PAGE_MASK;
size = ALIGN(size, PAGE_SIZE);
r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
NULL, cpu_addr);
r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
cpu_addr);
if (r)
return r;


@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size, uint32_t domain,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,


@ -1537,6 +1537,23 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
NULL, &adev->mman.fw_vram_usage_va);
}
/*
* Driver Reservation functions
*/
/**
* amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
*
* @adev: amdgpu_device pointer
*
* free drv reserved vram if it has been reserved.
*/
static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
NULL,
NULL);
}
/**
* amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
*
@ -1558,11 +1575,34 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
return amdgpu_bo_create_kernel_at(adev,
adev->mman.fw_vram_usage_start_offset,
adev->mman.fw_vram_usage_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.fw_vram_usage_reserved_bo,
&adev->mman.fw_vram_usage_va);
}
/**
* amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
*
* @adev: amdgpu_device pointer
*
* create bo vram reservation from drv.
*/
static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
{
uint64_t vram_size = adev->gmc.visible_vram_size;
adev->mman.drv_vram_usage_reserved_bo = NULL;
if (adev->mman.drv_vram_usage_size == 0 ||
adev->mman.drv_vram_usage_size > vram_size)
return 0;
return amdgpu_bo_create_kernel_at(adev,
adev->mman.drv_vram_usage_start_offset,
adev->mman.drv_vram_usage_size,
&adev->mman.drv_vram_usage_reserved_bo,
NULL);
}
/*
* Memory training reservation functions
*/
@ -1585,14 +1625,15 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
return 0;
}
static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
uint32_t reserve_size)
{
struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
memset(ctx, 0, sizeof(*ctx));
ctx->c2p_train_data_offset =
ALIGN((adev->gmc.mc_vram_size - adev->mman.discovery_tmr_size - SZ_1M), SZ_1M);
ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
ctx->p2c_train_data_offset =
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
@ -1610,9 +1651,10 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{
int ret;
struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
bool mem_train_support = false;
uint32_t reserve_size = 0;
int ret;
if (!amdgpu_sriov_vf(adev)) {
if (amdgpu_atomfirmware_mem_training_supported(adev))
@ -1628,18 +1670,18 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
* Otherwise, fallback to legacy approach to check and reserve tmr block for ip
* discovery data and G6 memory training data respectively
*/
adev->mman.discovery_tmr_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
if (!adev->mman.discovery_tmr_size)
adev->mman.discovery_tmr_size = DISCOVERY_TMR_OFFSET;
if (adev->bios)
reserve_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
if (!reserve_size)
reserve_size = DISCOVERY_TMR_OFFSET;
if (mem_train_support) {
/* reserve vram for mem train according to TMR location */
amdgpu_ttm_training_data_block_init(adev);
amdgpu_ttm_training_data_block_init(adev, reserve_size);
ret = amdgpu_bo_create_kernel_at(adev,
ctx->c2p_train_data_offset,
ctx->train_data_size,
AMDGPU_GEM_DOMAIN_VRAM,
&ctx->c2p_bo,
NULL);
if (ret) {
@ -1651,14 +1693,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create_kernel_at(adev,
adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
adev->mman.discovery_tmr_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.discovery_memory,
adev->gmc.real_vram_size - reserve_size,
reserve_size,
&adev->mman.fw_reserved_memory,
NULL);
if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
NULL, NULL);
return ret;
}
@ -1730,6 +1772,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
/*
* The reserved VRAM for the driver must be pinned to the specified
* place in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
* only NAVI10 and onwards ASIC support for IP discovery.
* If IP discovery enabled, a block of memory should be
@ -1746,21 +1796,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
* avoid display artifacts while transitioning between pre-OS
* and driver. */
r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_vga_memory,
NULL);
if (r)
return r;
r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
adev->mman.stolen_extended_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_extended_memory,
NULL);
if (r)
return r;
r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
adev->mman.stolen_reserved_size,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->mman.stolen_reserved_memory,
NULL);
if (r)
@ -1847,14 +1894,16 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
/* return the stolen vga memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
/* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {


@ -78,7 +78,8 @@ struct amdgpu_mman {
/* discovery */
uint8_t *discovery_bin;
uint32_t discovery_tmr_size;
struct amdgpu_bo *discovery_memory;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
/* firmware VRAM reservation */
u64 fw_vram_usage_start_offset;
@ -86,6 +87,11 @@ struct amdgpu_mman {
struct amdgpu_bo *fw_vram_usage_reserved_bo;
void *fw_vram_usage_va;
/* driver VRAM reservation */
u64 drv_vram_usage_start_offset;
u64 drv_vram_usage_size;
struct amdgpu_bo *drv_vram_usage_reserved_bo;
/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
struct amdgpu_bo *sdma_access_bo;
void *sdma_access_ptr;


@ -391,7 +391,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
*/
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL))
DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);


@ -351,6 +351,19 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
return false;
}
static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
int planes_count)
{
int i, j;
struct dc_surface_update surface_updates_temp;
for (i = 0, j = planes_count - 1; i < j; i++, j--) {
surface_updates_temp = array_of_surface_update[i];
array_of_surface_update[i] = array_of_surface_update[j];
array_of_surface_update[j] = surface_updates_temp;
}
}
/**
* update_planes_and_stream_adapter() - Send planes to be updated in DC
*
@ -367,6 +380,8 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
struct dc_stream_update *stream_update,
struct dc_surface_update *array_of_surface_update)
{
reverse_planes_order(array_of_surface_update, planes_count);
/*
* Previous frame finished and HW is ready for optimization.
*/


@ -2092,6 +2092,7 @@ static enum dc_status enable_link_dp_mst(
struct pipe_ctx *pipe_ctx)
{
struct dc_link *link = pipe_ctx->stream->link;
unsigned char mstm_cntl;
/* sink signal type after MST branch is MST. Multiple MST sinks
* share one link. Link DP PHY is enable or training only once.
@ -2100,7 +2101,9 @@ static enum dc_status enable_link_dp_mst(
return DC_OK;
/* clear payload table */
dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1);
if (mstm_cntl & DP_MST_EN)
dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
/* to make sure the pending down rep can be processed
* before enabling the link


@ -705,20 +705,65 @@ struct atom_gpio_pin_lut_v2_1
};
/*
***************************************************************************
Data Table vram_usagebyfirmware structure
***************************************************************************
*/
/*
* VBIOS/PRE-OS always reserves a FB region at the top of the frame buffer. The driver must not
* write to that region. The driver can allocate its own reservation region as long as it does
* not overlap the firmware's reservation region.
* if (pre-NV1X) atom data table firmwareInfoTable version < 3.3:
* in this case, atom data table vram_usagebyfirmwareTable version always <= 2.1
* if VBIOS/UEFI GOP is posted:
* VBIOS/UEFIGOP update used_by_firmware_in_kb = total reserved size by VBIOS
* update start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
* ( total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
* driver can allocate driver reservation region under firmware reservation,
* used_by_driver_in_kb = driver reservation size
* driver reservation start address = (start_address_in_kb - used_by_driver_in_kb)
* Comment1[hchan]: There is only one reservation at the beginning of the FB reserved by
* host driver. Host driver would overwrite the table with the following
* used_by_firmware_in_kb = total reserved size for pf-vf info exchange and
* set the SRIOV_MSG_SHARE_RESERVATION mask, start_address_in_kb = 0
* else there is no VBIOS reservation region:
* driver must allocate driver reservation region at top of FB.
* driver set used_by_driver_in_kb = driver reservation size
* driver reservation start address = (total_mem_size_in_kb - used_by_driver_in_kb)
* same as Comment1
* else (NV1X and after):
* if VBIOS/UEFI GOP is posted:
* VBIOS/UEFIGOP update:
* used_by_firmware_in_kb = atom_firmware_Info_v3_3.fw_reserved_size_in_kb;
* start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb;
* (total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10)
* if vram_usagebyfirmwareTable version <= 2.1:
* driver can allocate driver reservation region under firmware reservation,
* driver set used_by_driver_in_kb = driver reservation size
* driver reservation start address = start_address_in_kb - used_by_driver_in_kb
* same as Comment1
* else driver can:
* allocate its reservation any place as long as it does not overlap the pre-OS FW reservation area
* set used_by_driver_region0_in_kb = driver reservation size
* set driver_region0_start_address_in_kb = driver reservation region start address
* Comment2[hchan]: Host driver can set used_by_firmware_in_kb and start_address_in_kb to
* zero, as the reservation for VF doesn't exist. And the host driver should also
* update atom_firmware_Info table to remove the same VBIOS reservation as well.
*/
struct vram_usagebyfirmware_v2_1
{
struct atom_common_table_header table_header;
uint32_t start_address_in_kb;
uint16_t used_by_firmware_in_kb;
uint16_t used_by_driver_in_kb;
struct atom_common_table_header table_header;
uint32_t start_address_in_kb;
uint16_t used_by_firmware_in_kb;
uint16_t used_by_driver_in_kb;
};
struct vram_usagebyfirmware_v2_2 {
struct atom_common_table_header table_header;
uint32_t fw_region_start_address_in_kb;
uint16_t used_by_firmware_in_kb;
uint16_t reserved;
uint32_t driver_region0_start_address_in_kb;
uint32_t used_by_driver_region0_in_kb;
uint32_t reserved32[7];
};
/*
***************************************************************************
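
A worked example of the layout rules above, with illustrative numbers only: on a 16 GiB board, total_mem_size_in_kb = 16777216; if atom_firmware_Info_v3_3.fw_reserved_size_in_kb = 8192, a posted VBIOS reports used_by_firmware_in_kb = 8192 and start_address_in_kb = 16777216 - 8192 = 16769024, which the driver converts to a byte offset with << 10, i.e. 0x3FF800000.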


@ -7123,8 +7123,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_fbc_update(state, crtc);
drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
if (!modeset &&
(new_crtc_state->uapi.color_mgmt_changed ||
new_crtc_state->update_pipe))
@ -7501,28 +7499,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(&state->base);
drm_dp_mst_atomic_wait_for_dependencies(&state->base);
/*
* During full modesets we write a lot of registers, wait
* for PLLs, etc. Doing that while DC states are enabled
* is not a good idea.
*
* During fastsets and other updates we also need to
* disable DC states due to the following scenario:
* 1. DC5 exit and PSR exit happen
* 2. Some or all _noarm() registers are written
* 3. Due to some long delay PSR is re-entered
* 4. DC5 entry -> DMC saves the already written new
* _noarm() registers and the old not yet written
* _arm() registers
* 5. DC5 exit -> DMC restores a mixture of old and
* new register values and arms the update
* 6. PSR exit -> hardware latches a mixture of old and
* new register values -> corrupted frame, or worse
* 7. New _arm() registers are finally written
* 8. Hardware finally latches a complete set of new
* register values, and subsequent frames will be OK again
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
intel_atomic_prepare_plane_clear_colors(state);
@ -7661,8 +7639,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*


@ -256,8 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_GFX_CCS_AUX_NV);
cs = gen12_emit_aux_table_inv(rq->engine->gt, cs,
GEN12_CCS_AUX_INV);
}
*cs++ = preparser_disable(false);
@ -317,10 +317,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_VD0_AUX_NV);
cs, GEN12_VD0_AUX_INV);
else
cs = gen12_emit_aux_table_inv(rq->engine->gt,
cs, GEN12_VE0_AUX_NV);
cs, GEN12_VE0_AUX_INV);
}
if (mode & EMIT_INVALIDATE)


@ -301,9 +301,11 @@
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x4180)
#define GEN12_GFX_CCS_AUX_NV _MMIO(0x4208)
#define GEN12_VD0_AUX_NV _MMIO(0x4218)
#define GEN12_VD1_AUX_NV _MMIO(0x4228)
#define GEN12_CCS_AUX_INV _MMIO(0x4208)
#define GEN12_VD0_AUX_INV _MMIO(0x4218)
#define GEN12_VE0_AUX_INV _MMIO(0x4238)
#define GEN12_BCS0_AUX_INV _MMIO(0x4248)
#define GEN8_RTCR _MMIO(0x4260)
#define GEN8_M1TCR _MMIO(0x4264)
@ -311,14 +313,12 @@
#define GEN8_BTCR _MMIO(0x426c)
#define GEN8_VTCR _MMIO(0x4270)
#define GEN12_VD2_AUX_NV _MMIO(0x4298)
#define GEN12_VD3_AUX_NV _MMIO(0x42a8)
#define GEN12_VE0_AUX_NV _MMIO(0x4238)
#define BLT_HWS_PGA_GEN7 _MMIO(0x4280)
#define GEN12_VE1_AUX_NV _MMIO(0x42b8)
#define GEN12_VD2_AUX_INV _MMIO(0x4298)
#define GEN12_CCS0_AUX_INV _MMIO(0x42c8)
#define AUX_INV REG_BIT(0)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x4380)
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)


@ -1299,7 +1299,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915))
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_GFX_CCS_AUX_NV);
cs, GEN12_CCS_AUX_INV);
/* Wa_16014892111 */
if (IS_DG2(ce->engine->i915))
@ -1326,10 +1326,10 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
if (!HAS_FLAT_CCS(ce->engine->i915)) {
if (ce->engine->class == VIDEO_DECODE_CLASS)
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_VD0_AUX_NV);
cs, GEN12_VD0_AUX_INV);
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
cs = gen12_emit_aux_table_inv(ce->engine->gt,
cs, GEN12_VE0_AUX_NV);
cs, GEN12_VE0_AUX_INV);
}
return cs;


@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
}
} while (unlikely(is_barrier(active)));
if (!__i915_active_fence_set(active, fence))
fence = __i915_active_fence_set(active, fence);
if (!fence)
__i915_active_acquire(ref);
else
dma_fence_put(fence);
out:
i915_active_release(ref);
@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
return NULL;
}
rcu_read_lock();
prev = __i915_active_fence_set(active, fence);
if (prev)
prev = dma_fence_get_rcu(prev);
else
if (!prev)
__i915_active_acquire(ref);
rcu_read_unlock();
return prev;
}
@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
*
* Records the new @fence as the last active fence along its timeline in
* this active tracker, moving the tracking callbacks from the previous
* fence onto this one. Returns the previous fence (if not already completed),
* which the caller must ensure is executed before the new fence. To ensure
* that the order of fences within the timeline of the i915_active_fence is
* understood, it should be locked by the caller.
* fence onto this one. Gets and returns a reference to the previous fence
* (if not already completed), which the caller must put after making sure
* that it is executed before the new fence. To ensure that the order of
* fences within the timeline of the i915_active_fence is understood, it
* should be locked by the caller.
*/
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
struct dma_fence *prev;
unsigned long flags;
if (fence == rcu_access_pointer(active->fence))
/*
* In case of fences embedded in i915_requests, their memory is
* SLAB_TYPESAFE_BY_RCU, so it can be reused right after release
* by new requests. Then, there is a risk of passing back a pointer
* to a new, completely unrelated fence that reuses the same memory
* while tracked under a different active tracker. Combined with i915
* perf open/close operations that build await dependencies between
* engine kernel context requests and user requests from different
* timelines, this can lead to dependency loops and infinite waits.
*
* As a countermeasure, we try to get a reference to the active->fence
* first, so if we succeed and pass it back to our user then it is not
* released and potentially reused by an unrelated request before the
* user has a chance to set up an await dependency on it.
*/
prev = i915_active_fence_get(active);
if (fence == prev)
return fence;
GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
* Consider that we have two threads arriving (A and B), with
* C already resident as the active->fence.
*
* A does the xchg first, and so it sees C or NULL depending
* on the timing of the interrupt handler. If it is NULL, the
* previous fence must have been signaled and we know that
* we are first on the timeline. If it is still present,
* we acquire the lock on that fence and serialise with the interrupt
* handler, in the process removing it from any future interrupt
* callback. A will then wait on C before executing (if present).
*
* As B is second, it sees A as the previous fence and so waits for
* it to complete its transition and takes over the occupancy for
* itself -- remembering that it needs to wait on A before executing.
* Both A and B have got a reference to C or NULL, depending on the
* timing of the interrupt handler. Let's assume that if A has got C
* then it has locked C first (before B).
*
* Note the strong ordering of the timeline also provides consistent
* nesting rules for the fence->lock; the inner lock is always the
* older lock.
*/
spin_lock_irqsave(fence->lock, flags);
prev = xchg(__active_fence_slot(active), fence);
if (prev) {
GEM_BUG_ON(prev == fence);
if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
/*
* A does the cmpxchg first, and so it sees C or NULL, as before, or
* something else, depending on the timing of other threads and/or
* interrupt handler. If not the same as before then A unlocks C if
* applicable and retries, starting from an attempt to get a new
* active->fence. Meanwhile, B follows the same path as A.
* Once A succeeds with cmpxchg, B fails again, retries, gets A from
* active->fence, locks it as soon as A completes, and possibly
* succeeds with cmpxchg.
*/
while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
if (prev) {
spin_unlock(prev->lock);
dma_fence_put(prev);
}
spin_unlock_irqrestore(fence->lock, flags);
prev = i915_active_fence_get(active);
GEM_BUG_ON(prev == fence);
spin_lock_irqsave(fence->lock, flags);
if (prev)
spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
}
/*
* If prev is NULL then the previous fence must have been signaled
* and we know that we are first on the timeline. If it is still
* present then, having the lock on that fence already acquired, we
* serialise with the interrupt handler, in the process of removing it
* from any future interrupt callback. A will then wait on C before
* executing (if present).
*
* As B is second, it sees A as the previous fence and so waits for
* it to complete its transition and takes over the occupancy for
* itself -- remembering that it needs to wait on A before executing.
*/
if (prev) {
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
int err = 0;
/* Must maintain timeline ordering wrt previous active requests */
rcu_read_lock();
fence = __i915_active_fence_set(active, &rq->fence);
if (fence) /* but the previous fence may not belong to that timeline! */
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();
if (fence) {
err = i915_request_await_dma_fence(rq, fence);
dma_fence_put(fence);
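
Stripped of the i915 types, a small user-space sketch of the publish pattern the patch adopts (illustrative only: toy names, C11 atomics, and a bare refcount stand in for dma_fence_get_rcu() and the fence locks):

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	atomic_int refs;
};

static struct obj *obj_get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refs, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	if (o)
		atomic_fetch_sub(&o->refs, 1);
}

/*
 * Publish @newo in @slot and hand the caller a *referenced* previous
 * value. The reference is taken before the exchange, so the caller can
 * never be handed a pointer whose memory was already recycled -- the
 * race the patch closes. In the kernel the "get" is dma_fence_get_rcu()
 * under RCU, which is what makes the load-then-get step safe there.
 */
static struct obj *publish(struct obj *_Atomic *slot, struct obj *newo)
{
	for (;;) {
		struct obj *prev = obj_get(atomic_load(slot));
		struct obj *expected = prev;

		if (atomic_compare_exchange_strong(slot, &expected, newo))
			return prev;	/* caller must obj_put() when done */
		obj_put(prev);		/* lost a race; retry from scratch */
	}
}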


@ -1647,6 +1647,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
/*
* Users have to put a reference potentially got by
* __i915_active_fence_set() to the returned request
* when no longer needed
*/
return to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
}
@ -1693,6 +1698,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
0);
}
/*
* Users have to put the reference to prev potentially got
* by __i915_active_fence_set() when no longer needed
*/
return prev;
}
@ -1736,6 +1745,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = __i915_request_ensure_ordering(rq, timeline);
else
prev = __i915_request_ensure_parallel_ordering(rq, timeline);
if (prev)
i915_request_put(prev);
/*
* Make sure that no request gazumped us - if it was allocated after


@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
sig_cfg.mode.hactive, new_hactive);
sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
sig_cfg.mode.hactive = new_hactive;
}
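
Worked example: if hactive = 1366 is aligned up to 1368, new_hactive - hactive = 2, so a mode with hfront_porch = 70 should end up with 68 to keep htotal unchanged; the old assignment instead set the porch to 2 outright.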


@ -552,7 +552,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (bo->pin_count) {
*locked = false;
*busy = false;
if (busy)
*busy = false;
return false;
}


@ -882,6 +882,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
{
int index;
if (cmds->num == CMDQ_BATCH_ENTRIES - 1 &&
(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
cmds->num = 0;
}
if (cmds->num == CMDQ_BATCH_ENTRIES) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
cmds->num = 0;
@ -3422,6 +3428,44 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return 0;
}
#define IIDR_IMPLEMENTER_ARM 0x43b
#define IIDR_PRODUCTID_ARM_MMU_600 0x483
#define IIDR_PRODUCTID_ARM_MMU_700 0x487
static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
{
u32 reg;
unsigned int implementer, productid, variant, revision;
reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR);
implementer = FIELD_GET(IIDR_IMPLEMENTER, reg);
productid = FIELD_GET(IIDR_PRODUCTID, reg);
variant = FIELD_GET(IIDR_VARIANT, reg);
revision = FIELD_GET(IIDR_REVISION, reg);
switch (implementer) {
case IIDR_IMPLEMENTER_ARM:
switch (productid) {
case IIDR_PRODUCTID_ARM_MMU_600:
/* Arm erratum 1076982 */
if (variant == 0 && revision <= 2)
smmu->features &= ~ARM_SMMU_FEAT_SEV;
/* Arm erratum 1209401 */
if (variant < 2)
smmu->features &= ~ARM_SMMU_FEAT_NESTING;
break;
case IIDR_PRODUCTID_ARM_MMU_700:
/* Arm erratum 2812531 */
smmu->features &= ~ARM_SMMU_FEAT_BTM;
smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC;
/* Arm errata 2268618, 2812531 */
smmu->features &= ~ARM_SMMU_FEAT_NESTING;
break;
}
break;
}
}
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
u32 reg;
@ -3628,6 +3672,12 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ias = max(smmu->ias, smmu->oas);
if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) &&
(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu->features |= ARM_SMMU_FEAT_NESTING;
arm_smmu_device_iidr_probe(smmu);
if (arm_smmu_sva_supported(smmu))
smmu->features |= ARM_SMMU_FEAT_SVA;
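
As a worked decode of the IIDR fields above: a hypothetical MMU-600 r0p2 presents IIDR = 0x4830243B (PRODUCTID = 0x483, VARIANT = 0, REVISION = 2, IMPLEMENTER = 0x43B), so it loses both SEV (erratum 1076982: variant 0, revision <= 2) and the NESTING feature (erratum 1209401: variant < 2). On an affected MMU-700, ARM_SMMU_OPT_CMDQ_FORCE_SYNC makes arm_smmu_cmdq_batch_add() flush each nearly-full batch with a CMD_SYNC, per the hunk above.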


@ -69,6 +69,12 @@
#define IDR5_VAX GENMASK(11, 10)
#define IDR5_VAX_52_BIT 1
#define ARM_SMMU_IIDR 0x18
#define IIDR_PRODUCTID GENMASK(31, 20)
#define IIDR_VARIANT GENMASK(19, 16)
#define IIDR_REVISION GENMASK(15, 12)
#define IIDR_IMPLEMENTER GENMASK(11, 0)
#define ARM_SMMU_CR0 0x20
#define CR0_ATSCHK (1 << 4)
#define CR0_CMDQEN (1 << 3)
@ -639,11 +645,13 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_BTM (1 << 16)
#define ARM_SMMU_FEAT_SVA (1 << 17)
#define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1)
#define ARM_SMMU_OPT_MSIPOLL (1 << 2)
#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3)
u32 options;
struct arm_smmu_cmdq cmdq;


@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
*z1t = cpu_to_le16(new_z1); /* now send data */
if (bch->tx_idx < bch->tx_skb->len)
return;
dev_kfree_skb(bch->tx_skb);
dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
goto next_t_frame;
return;
@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
}
bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
bz->f1 = new_f1; /* next frame */
dev_kfree_skb(bch->tx_skb);
dev_kfree_skb_any(bch->tx_skb);
get_next_bframe(bch);
}
@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch)
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
hfcpci_fill_fifo(bch);
else {
dev_kfree_skb(bch->tx_skb);
dev_kfree_skb_any(bch->tx_skb);
if (get_next_bframe(bch))
hfcpci_fill_fifo(bch);
}
@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
return 0;
if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) {
spin_lock(&hc->lock);
spin_lock_irq(&hc->lock);
bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */
main_rec_hfcpci(bch);
@@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused)
main_rec_hfcpci(bch);
tx_birq(bch);
}
spin_unlock(&hc->lock);
spin_unlock_irq(&hc->lock);
}
return 0;
}

@@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op,
unsigned int i;
int ret;
if (op->cs > NAND_MAX_CHIPS)
if (op->cs >= NAND_MAX_CHIPS)
return -EINVAL;
if (check_only)

@@ -1184,7 +1184,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
struct meson_nfc *nfc = nand_get_controller_data(nand);
struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
struct mtd_info *mtd = nand_to_mtd(nand);
int nsectors = mtd->writesize / 1024;
int ret;
if (!mtd->name) {
@@ -1202,7 +1201,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand)
nand->options |= NAND_NO_SUBPAGE_WRITE;
ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps,
mtd->oobsize - 2 * nsectors);
mtd->oobsize - 2);
if (ret) {
dev_err(nfc->dev, "failed to ECC init\n");
return -EINVAL;

@@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info,
switch (info->bch_type) {
case BCH8_ECC:
/* syndrome fragment 0 = ecc[9-12B] */
val = cpu_to_be32(*(u32 *) &ecc[9]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[5-8B] */
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[5]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]);
elm_write_reg(info, offset, val);
/* syndrome fragment 2 = ecc[1-4B] */
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[1]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]);
elm_write_reg(info, offset, val);
/* syndrome fragment 3 = ecc[0B] */
@@ -197,35 +197,35 @@
break;
case BCH4_ECC:
/* syndrome fragment 0 = ecc[20-52b] bits */
val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) |
((ecc[2] & 0xf) << 28);
elm_write_reg(info, offset, val);
/* syndrome fragment 1 = ecc[0-20b] bits */
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12;
elm_write_reg(info, offset, val);
break;
case BCH16_ECC:
val = cpu_to_be32(*(u32 *) &ecc[22]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[18]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[14]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[10]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[6]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[2]);
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]);
elm_write_reg(info, offset, val);
offset += 4;
val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16;
elm_write_reg(info, offset, val);
break;
default:

@@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
* BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
*
* The rk_nfc_ooblayout_free() function already has reserved
* these 4 bytes with:
* these 4 bytes together with 2 bytes for BBM
* by reducing its length:
*
* oob_region->offset = NFC_SYS_DATA_SIZE + 2;
* oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
*/
if (!i)
memcpy(rk_nfc_oob_ptr(chip, i),
@@ -597,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
int pages_per_blk = mtd->erasesize / mtd->writesize;
int ret = 0, i, boot_rom_mode = 0;
dma_addr_t dma_data, dma_oob;
u32 reg;
u32 tmp;
u8 *oob;
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
@@ -624,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
*
* 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ...
*
* The code here just swaps the first 4 bytes with the last
* 4 bytes without losing any data.
*
* The chip->oob_poi data layout:
*
* BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3
*
* Configure the ECC algorithm supported by the boot ROM.
*/
if ((page < (pages_per_blk * rknand->boot_blks)) &&
@@ -634,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
}
for (i = 0; i < ecc->steps; i++) {
if (!i) {
reg = 0xFFFFFFFF;
} else {
if (!i)
oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
else
oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
reg = oob[0] | oob[1] << 8 | oob[2] << 16 |
oob[3] << 24;
}
if (!i && boot_rom_mode)
reg = (page & (pages_per_blk - 1)) * 4;
tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24;
if (nfc->cfg->type == NFC_V9)
nfc->oob_buf[i] = reg;
nfc->oob_buf[i] = tmp;
else
nfc->oob_buf[i * (oob_step / 4)] = reg;
nfc->oob_buf[i * (oob_step / 4)] = tmp;
}
dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf,
@@ -811,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on,
goto timeout_err;
}
for (i = 1; i < ecc->steps; i++) {
oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
for (i = 0; i < ecc->steps; i++) {
if (!i)
oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE;
else
oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE;
if (nfc->cfg->type == NFC_V9)
tmp = nfc->oob_buf[i];
else
tmp = nfc->oob_buf[i * (oob_step / 4)];
*oob++ = (u8)tmp;
*oob++ = (u8)(tmp >> 8);
*oob++ = (u8)(tmp >> 16);
@@ -933,12 +942,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
if (section)
return -ERANGE;
/*
* The beginning of the OOB area stores the reserved data for the NFC,
* the size of the reserved data is NFC_SYS_DATA_SIZE bytes.
*/
oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2;
oob_region->offset = NFC_SYS_DATA_SIZE + 2;
oob_region->offset = 2;
return 0;
}

@@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 mbf = 0;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf);
switch (status & STATUS_ECC_MASK) {
case STATUS_ECC_NO_BITFLIPS:
@@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
if (spi_mem_exec_op(spinand->spimem, &op))
return nanddev_get_ecc_conf(nand)->strength;
mbf >>= 4;
mbf = *(spinand->scratchbuf) >> 4;
if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf))
return nanddev_get_ecc_conf(nand)->strength;

@@ -1436,7 +1436,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
if (IS_ERR(priv->clk_mdiv)) {
@@ -1444,7 +1446,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
goto out_clk;
}
clk_prepare_enable(priv->clk_mdiv);
ret = clk_prepare_enable(priv->clk_mdiv);
if (ret)
goto out_clk;
ret = bcm_sf2_sw_rst(priv);
if (ret) {

@@ -721,17 +721,24 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
unsigned int *offset,
gfp_t gfp)
{
struct device *dev = &bp->pdev->dev;
struct page *page;
page = page_pool_dev_alloc_pages(rxr->page_pool);
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
BNXT_RX_PAGE_SIZE);
} else {
page = page_pool_dev_alloc_pages(rxr->page_pool);
*offset = 0;
}
if (!page)
return NULL;
*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(dev, *mapping)) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
@@ -771,15 +778,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_addr_t mapping;
if (BNXT_RX_PAGE_MODE(bp)) {
unsigned int offset;
struct page *page =
__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
if (!page)
return -ENOMEM;
mapping += bp->rx_dma_offset;
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
} else {
u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
@@ -839,7 +847,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
unsigned int offset = 0;
if (BNXT_RX_PAGE_MODE(bp)) {
page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
if (!page)
return -ENOMEM;
@@ -986,15 +994,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
skb = build_skb(page_address(page), PAGE_SIZE);
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
if (!skb) {
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_dma_offset);
skb_reserve(skb, bp->rx_offset);
__skb_put(skb, len);
return skb;
@@ -1020,8 +1028,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
return NULL;
}
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1034,7 +1042,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
payload + NET_IP_ALIGN);
@@ -1169,7 +1177,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
skb->data_len += total_frag_len;
skb->len += total_frag_len;
skb->truesize += PAGE_SIZE * agg_bufs;
skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
return skb;
}
@@ -2972,8 +2980,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
rx_buf->data = NULL;
if (BNXT_RX_PAGE_MODE(bp)) {
mapping -= bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
bp->rx_dir,
dma_unmap_page_attrs(&pdev->dev, mapping,
BNXT_RX_PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
page_pool_recycle_direct(rxr->page_pool, data);
} else {
@@ -3241,6 +3249,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.nid = dev_to_node(&bp->pdev->dev);
pp.dev = &bp->pdev->dev;
pp.dma_dir = DMA_BIDIRECTIONAL;
if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
pp.flags |= PP_FLAG_PAGE_FRAG;
rxr->page_pool = page_pool_create(&pp);
if (IS_ERR(rxr->page_pool)) {
@@ -4017,26 +4027,29 @@ void bnxt_set_ring_params(struct bnxt *bp)
*/
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
if (page_mode) {
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
if (bp->xdp_prog->aux->xdp_has_frags)
dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
else
dev->max_mtu =
min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
bp->rx_skb_func = bnxt_rx_multi_page_skb;
bp->dev->max_mtu =
min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
} else {
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
bp->rx_skb_func = bnxt_rx_page_skb;
bp->dev->max_mtu =
min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
}
bp->rx_dir = DMA_BIDIRECTIONAL;
/* Disable LRO or GRO_HW */
netdev_update_features(bp->dev);
netdev_update_features(dev);
} else {
bp->dev->max_mtu = bp->max_mtu;
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;

@@ -180,8 +180,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 cons, u8 *data_ptr, unsigned int len,
struct xdp_buff *xdp)
{
u32 buflen = BNXT_RX_PAGE_SIZE;
struct bnxt_sw_rx_bd *rx_buf;
u32 buflen = PAGE_SIZE;
struct pci_dev *pdev;
dma_addr_t mapping;
u32 offset;
@@ -297,7 +297,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
rx_buf = &rxr->rx_buf_ring[cons];
mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
BNXT_RX_PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
@@ -478,7 +478,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
}
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
PAGE_SIZE * sinfo->nr_frags,
BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}

@@ -8777,6 +8777,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
bool locked = false;
int err;
switch (type) {
@@ -8786,10 +8787,27 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
ice_setup_tc_block_cb,
np, np, true);
case TC_SETUP_QDISC_MQPRIO:
if (pf->adev) {
mutex_lock(&pf->adev_mutex);
device_lock(&pf->adev->dev);
locked = true;
if (pf->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
}
}
/* setup traffic classifier for receive side */
mutex_lock(&pf->tc_mutex);
err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
mutex_unlock(&pf->tc_mutex);
adev_unlock:
if (locked) {
device_unlock(&pf->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
default:
return -EOPNOTSUPP;

@@ -1302,11 +1302,10 @@ static int korina_probe(struct platform_device *pdev)
else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
eth_hw_addr_random(dev);
clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk");
if (IS_ERR(clk))
return PTR_ERR(clk);
if (clk) {
clk_prepare_enable(clk);
lp->mii_clock_freq = clk_get_rate(clk);
} else {
lp->mii_clock_freq = 200000000; /* max possible input clk */

@@ -702,7 +702,8 @@ static int prestera_fw_get(struct prestera_fw *fw)
err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
if (err) {
if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
if (ver_maj != PRESTERA_PREV_FW_MAJ_VER ||
ver_min != PRESTERA_PREV_FW_MIN_VER) {
ver_maj = PRESTERA_PREV_FW_MAJ_VER;
ver_min = PRESTERA_PREV_FW_MIN_VER;

@@ -58,7 +58,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
trailer_len = alen + plen + 2;
pskb_trim(skb, skb->len - trailer_len);
ret = pskb_trim(skb, skb->len - trailer_len);
if (unlikely(ret))
return ret;
if (skb->protocol == htons(ETH_P_IP)) {
ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
ip_send_check(ipv4hdr);

@@ -160,6 +160,7 @@ static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
if (!in) {
kfree(ft->g);
ft->g = NULL;
return -ENOMEM;
}

@@ -136,6 +136,16 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs);
int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
/* Moving to switchdev mode, fs->arfs is freed by mlx5e_nic_profile
* cleanup_rx callback and it is not recreated when
* mlx5e_uplink_rep_profile is loaded as mlx5e_create_flow_steering()
* is not called by the uplink_rep profile init_rx callback. Thus, if
* ntuple is set, moving to switchdev flow will enter this function
* with fs->arfs nullified.
*/
if (!mlx5e_fs_get_arfs(fs))
return 0;
arfs_del_rules(fs);
return arfs_disable(fs);

@@ -912,7 +912,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
return err;
goto err_rx_res_free;
}
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
@@ -946,6 +946,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_rx_res_free:
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
err_free_fs:
@@ -1039,6 +1040,10 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
err = mlx5e_rep_neigh_init(rpriv);
if (err)
goto err_neigh_init;
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
@@ -1055,6 +1060,8 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
err_init_tx:
mlx5e_rep_neigh_cleanup(rpriv);
err_neigh_init:
mlx5e_destroy_tises(priv);
return err;
}
@@ -1068,22 +1075,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_destroy_tises(priv);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_rep_neigh_init(rpriv);
}
static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5e_rep_neigh_cleanup(rpriv);
}
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
@@ -1118,7 +1120,6 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
@@ -1138,7 +1139,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5_notifier_register(mdev, &priv->events_nb);
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
mlx5e_rep_neigh_init(rpriv);
mlx5e_rep_bridge_init(priv);
netdev->wanted_features |= NETIF_F_HW_TC;
@@ -1153,7 +1153,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
rtnl_lock();
@@ -1163,7 +1162,6 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
rtnl_unlock();
mlx5e_rep_bridge_cleanup(priv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);

@@ -1061,7 +1061,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
if (!mlx5_core_is_sf(dev))
clear_rmap(dev);
mlx5_irq_table_destroy(dev);
mlx5_irq_table_free_irqs(dev);
mutex_unlock(&table->lock);
}

@@ -860,7 +860,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
struct fs_node *iter = list_entry(start, struct fs_node, list);
struct mlx5_flow_table *ft = NULL;
if (!root || root->type == FS_TYPE_PRIO_CHAINS)
if (!root)
return NULL;
list_for_each_advance_continue(iter, &root->children, reverse) {
@@ -876,20 +876,42 @@
return ft;
}
/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
static struct fs_node *find_prio_chains_parent(struct fs_node *parent,
struct fs_node **child)
{
struct fs_node *node = NULL;
while (parent && parent->type != FS_TYPE_PRIO_CHAINS) {
node = parent;
parent = parent->parent;
}
if (child)
*child = node;
return parent;
}
/* If reverse is false then return the first flow table next to the passed node
* in the tree, else return the last flow table before the node in the tree.
* If skip is true, skip the flow tables in the same prio_chains prio.
*/
static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse,
bool skip)
{
struct fs_node *prio_chains_parent = NULL;
struct mlx5_flow_table *ft = NULL;
struct fs_node *curr_node;
struct fs_node *parent;
parent = prio->node.parent;
curr_node = &prio->node;
if (skip)
prio_chains_parent = find_prio_chains_parent(node, NULL);
parent = node->parent;
curr_node = node;
while (!ft && parent) {
ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
if (parent != prio_chains_parent)
ft = find_closest_ft_recursive(parent, &curr_node->list,
reverse);
curr_node = parent;
parent = curr_node->parent;
}
@@ -897,15 +919,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers
}
/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node)
{
return find_closest_ft(prio, false);
return find_closest_ft(node, false, true);
}
/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node)
{
return find_closest_ft(prio, true);
return find_closest_ft(node, true, true);
}
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
@@ -917,7 +939,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
return find_next_chained_ft(prio);
return find_next_chained_ft(&prio->node);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
@@ -941,21 +963,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
return 0;
}
static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node,
struct fs_node *parent,
struct fs_node **child,
bool reverse)
{
struct mlx5_flow_table *ft;
ft = find_closest_ft(node, reverse, false);
if (ft && parent == find_prio_chains_parent(&ft->node, child))
return ft;
return NULL;
}
/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
struct fs_node *prio_parent, *parent = NULL, *child, *node;
struct mlx5_flow_table *prev_ft;
int err = 0;
prev_ft = find_prev_chained_ft(prio);
if (prev_ft) {
prio_parent = find_prio_chains_parent(&prio->node, &child);
/* return directly if not under the first sub ns of prio_chains prio */
if (prio_parent && !list_is_first(&child->list, &prio_parent->children))
return 0;
prev_ft = find_prev_chained_ft(&prio->node);
while (prev_ft) {
struct fs_prio *prev_prio;
fs_get_obj(prev_prio, prev_ft->node.parent);
return connect_fts_in_prio(dev, prev_prio, ft);
err = connect_fts_in_prio(dev, prev_prio, ft);
if (err)
break;
if (!parent) {
parent = find_prio_chains_parent(&prev_prio->node, &child);
if (!parent)
break;
}
node = child;
prev_ft = find_closet_ft_prio_chains(node, parent, &child, true);
}
return 0;
return err;
}
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
@@ -1094,7 +1150,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
if (err)
return err;
next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node);
err = connect_fwd_rules(dev, ft, next_ft);
if (err)
return err;
@@ -1169,7 +1225,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
find_next_chained_ft(&fs_prio->node);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
@@ -2157,13 +2213,20 @@ EXPORT_SYMBOL(mlx5_del_flow_rules);
/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
struct fs_node *prio_parent, *child;
struct fs_prio *prio;
fs_get_obj(prio, ft->node.parent);
if (!list_is_last(&ft->node.list, &prio->node.children))
return list_next_entry(ft, node.list);
return find_next_chained_ft(prio);
prio_parent = find_prio_chains_parent(&prio->node, &child);
if (prio_parent && list_is_first(&child->list, &prio_parent->children))
return find_closest_ft(&prio->node, false, false);
return find_next_chained_ft(&prio->node);
}
static int update_root_ft_destroy(struct mlx5_flow_table *ft)

@@ -14,6 +14,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);

@@ -591,6 +591,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
irq_pool_free(table->pf_pool);
}
static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
{
struct mlx5_irq *irq;
unsigned long index;
xa_for_each(&pool->irqs, index, irq)
free_irq(irq->irqn, &irq->nh);
}
static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
{
if (table->sf_ctrl_pool) {
mlx5_irq_pool_free_irqs(table->sf_comp_pool);
mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
}
mlx5_irq_pool_free_irqs(table->pf_pool);
}
/* irq_table API */
int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -670,6 +688,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
pci_free_irq_vectors(dev->pdev);
}
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
if (mlx5_core_is_sf(dev))
return;
mlx5_irq_pools_free_irqs(table);
pci_free_irq_vectors(dev->pdev);
}
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
if (table->sf_comp_pool)

@@ -538,11 +538,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
return err;
goto err_free_in;
*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
kvfree(in);
err_free_in:
kvfree(in);
return err;
}

@@ -193,6 +193,22 @@ void qed_hw_remove(struct qed_dev *cdev);
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
/**
* qed_ptt_acquire_context(): Allocate a PTT window honoring the context
* atomicity.
*
* @p_hwfn: HW device data.
* @is_atomic: Hint from the caller - if the func can sleep or not.
*
* Context: The function should not sleep in case is_atomic == true.
* Return: struct qed_ptt.
*
* Should be called at the entry point to the driver
* (at the beginning of an exported function).
*/
struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn,
bool is_atomic);
/**
* qed_ptt_release(): Release PTT Window.
*

@@ -693,13 +693,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
}
static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
struct qed_fcoe_stats *p_stats)
struct qed_fcoe_stats *p_stats,
bool is_atomic)
{
struct qed_ptt *p_ptt;
memset(p_stats, 0, sizeof(*p_stats));
p_ptt = qed_ptt_acquire(p_hwfn);
p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
@@ -973,19 +974,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_fcoe_stats_context(struct qed_dev *cdev,
struct qed_fcoe_stats *stats,
bool is_atomic)
{
return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
}
static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats);
return qed_fcoe_stats_context(cdev, stats, false);
}
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats)
struct qed_mcp_fcoe_stats *stats,
bool is_atomic)
{
struct qed_fcoe_stats proto_stats;
/* Retrieve FW statistics */
memset(&proto_stats, 0, sizeof(proto_stats));
if (qed_fcoe_stats(cdev, &proto_stats)) {
if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE,
"Failed to collect FCoE statistics\n");
return;

@@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
void qed_fcoe_free(struct qed_hwfn *p_hwfn);
/**
* qed_get_protocol_stats_fcoe(): Fills provided statistics
* struct with statistics.
*
* @cdev: Qed dev pointer.
* @stats: Points to struct that will be filled with statistics.
* @is_atomic: Hint from the caller - if the func can sleep or not.
*
* Context: The function should not sleep in case is_atomic == true.
* Return: Void.
*/
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats);
struct qed_mcp_fcoe_stats *stats,
bool is_atomic);
#else /* CONFIG_QED_FCOE */
static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
@@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
struct qed_mcp_fcoe_stats *stats)
struct qed_mcp_fcoe_stats *stats,
bool is_atomic)
{
}
#endif /* CONFIG_QED_FCOE */

@@ -23,7 +23,10 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000
#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000
#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000
#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10
/* Invalid values */
#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
@@ -84,12 +87,22 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
}
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
return qed_ptt_acquire_context(p_hwfn, false);
}
struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
{
struct qed_ptt *p_ptt;
unsigned int i;
unsigned int i, count;
if (is_atomic)
count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
else
count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
/* Take the free PTT from the list */
for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
for (i = 0; i < count; i++) {
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
@@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
}
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
usleep_range(1000, 2000);
if (is_atomic)
udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
else
usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
}
DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");

@@ -999,13 +999,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
}
static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
struct qed_iscsi_stats *stats)
struct qed_iscsi_stats *stats,
bool is_atomic)
{
struct qed_ptt *p_ptt;
memset(stats, 0, sizeof(*stats));
p_ptt = qed_ptt_acquire(p_hwfn);
p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
if (!p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
return -EAGAIN;
@@ -1336,9 +1337,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
QED_SPQ_MODE_EBLOCK, NULL);
}
static int qed_iscsi_stats_context(struct qed_dev *cdev,
struct qed_iscsi_stats *stats,
bool is_atomic)
{
return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
}
static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
{
return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats);
return qed_iscsi_stats_context(cdev, stats, false);
}
static int qed_iscsi_change_mac(struct qed_dev *cdev,
@@ -1358,13 +1366,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev,
}
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats)
struct qed_mcp_iscsi_stats *stats,
bool is_atomic)
{
struct qed_iscsi_stats proto_stats;
/* Retrieve FW statistics */
memset(&proto_stats, 0, sizeof(proto_stats));
if (qed_iscsi_stats(cdev, &proto_stats)) {
if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE,
"Failed to collect ISCSI statistics\n");
return;

@@ -39,11 +39,14 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn);
*
* @cdev: Qed dev pointer.
* @stats: Points to struct that will be filled with statistics.
* @is_atomic: Hint from the caller - if the func can sleep or not.
*
* Context: The function should not sleep in case is_atomic == true.
* Return: Void.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
struct qed_mcp_iscsi_stats *stats,
bool is_atomic);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
@@ -56,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
static inline void
qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats) {}
struct qed_mcp_iscsi_stats *stats,
bool is_atomic) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif

@@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
struct qed_eth_stats *stats,
bool is_atomic)
{
u8 fw_vport = 0;
int i;
@@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
: NULL;
struct qed_ptt *p_ptt;
bool b_get_port_stats;
p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
: NULL;
if (IS_PF(cdev)) {
/* The main vport index is relative first */
if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
@@ -1900,6 +1902,13 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
}
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
qed_get_vport_stats_context(cdev, stats, false);
}
void qed_get_vport_stats_context(struct qed_dev *cdev,
struct qed_eth_stats *stats,
bool is_atomic)
{
u32 i;
@@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
return;
}
_qed_get_vport_stats(cdev, stats);
_qed_get_vport_stats(cdev, stats, is_atomic);
if (!cdev->reset_stats)
return;
@@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
if (!cdev->reset_stats) {
DP_INFO(cdev, "Reset stats not allocated\n");
} else {
_qed_get_vport_stats(cdev, cdev->reset_stats);
_qed_get_vport_stats(cdev, cdev->reset_stats, false);
cdev->reset_stats->common.link_change_count = 0;
}
}

@@ -249,8 +249,32 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
* qed_get_vport_stats(): Fills provided statistics
* struct with statistics.
*
* @cdev: Qed dev pointer.
* @stats: Points to struct that will be filled with statistics.
*
* Return: Void.
*/
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
/**
* qed_get_vport_stats_context(): Fills provided statistics
* struct with statistics.
*
* @cdev: Qed dev pointer.
* @stats: Points to struct that will be filled with statistics.
* @is_atomic: Hint from the caller - if the func can sleep or not.
*
* Context: The function should not sleep in case is_atomic == true.
* Return: Void.
*/
void qed_get_vport_stats_context(struct qed_dev *cdev,
struct qed_eth_stats *stats,
bool is_atomic);
void qed_reset_vport_stats(struct qed_dev *cdev);
/**

@@ -3101,7 +3101,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
qed_get_vport_stats_context(cdev, &eth_stats, true);
stats->lan_stats.ucast_rx_pkts =
eth_stats.common.rx_ucast_pkts;
stats->lan_stats.ucast_tx_pkts =
@@ -3109,10 +3109,10 @@
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
break;
case QED_MCP_ISCSI_STATS:
qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
break;
default:
DP_VERBOSE(cdev, QED_MSG_SP,

@@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev,
return err;
}
/*
* SynQuacer is physically configured with TX and RX delays
* but the standard firmware claimed otherwise for a long
* time, ignore it.
*/
if (of_machine_is_compatible("socionext,developer-box") &&
priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
}
priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!priv->phy_np) {
dev_err(&pdev->dev, "missing required property 'phy-handle'\n");

@@ -1568,12 +1568,16 @@ static int temac_probe(struct platform_device *pdev)
}
/* Error handle returned DMA RX and TX interrupts */
if (lp->rx_irq < 0)
return dev_err_probe(&pdev->dev, lp->rx_irq,
if (lp->rx_irq <= 0) {
rc = lp->rx_irq ?: -EINVAL;
return dev_err_probe(&pdev->dev, rc,
"could not get DMA RX irq\n");
if (lp->tx_irq < 0)
return dev_err_probe(&pdev->dev, lp->tx_irq,
}
if (lp->tx_irq <= 0) {
rc = lp->tx_irq ?: -EINVAL;
return dev_err_probe(&pdev->dev, rc,
"could not get DMA TX irq\n");
}
if (temac_np) {
/* Retrieve the MAC address */

@@ -311,16 +311,15 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
if (ret || !ipa_table_hash_support(ipa))
return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
return ret;
return ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
}
/* The AP routes and modem routes are each contiguous within the
@@ -329,11 +328,12 @@
* */
static int ipa_route_reset(struct ipa *ipa, bool modem)
{
bool hash_support = ipa_table_hash_support(ipa);
struct gsi_trans *trans;
u16 first;
u16 count;
trans = ipa_cmd_trans_alloc(ipa, 4);
trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
dev_err(&ipa->pdev->dev,
"no transaction for %s route reset\n",
@@ -350,12 +350,14 @@
}
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
ipa_table_reset_add(trans, false, first, count,
IPA_MEM_V4_ROUTE_HASHED);
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
ipa_table_reset_add(trans, false, first, count,
IPA_MEM_V6_ROUTE_HASHED);
if (hash_support) {
ipa_table_reset_add(trans, false, first, count,
IPA_MEM_V4_ROUTE_HASHED);
ipa_table_reset_add(trans, false, first, count,
IPA_MEM_V6_ROUTE_HASHED);
}
gsi_trans_commit_wait(trans);

@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;

@@ -3457,7 +3457,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;

@@ -618,9 +618,23 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8005, /* A-300 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
@@ -628,6 +642,13 @@ static const struct usb_device_id products[] = {
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_FAKE_INTERFACE,
.driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,

@@ -1770,6 +1770,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
} else if (!info->in || !info->out)
status = usbnet_get_endpoints (dev, udev);
else {
u8 ep_addrs[3] = {
info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0
};
dev->in = usb_rcvbulkpipe (xdev, info->in);
dev->out = usb_sndbulkpipe (xdev, info->out);
if (!(info->flags & FLAG_NO_SETINT))
@@ -1779,6 +1783,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
else
status = 0;
if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs))
status = -EINVAL;
}
if (status >= 0 && dev->status)
status = init_status (dev, udev);

@@ -289,9 +289,23 @@ static const struct usb_device_id products [] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8005, /* A-300 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8006, /* B-500/SL-5600 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
@@ -299,6 +313,13 @@ static const struct usb_device_id products [] = {
.idProduct = 0x8007, /* C-700 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
.idProduct = 0x8007, /* C-700 */
ZAURUS_FAKE_INTERFACE,
.driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,

@@ -123,12 +123,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
case MT_EE_5GHZ:
dev->mphy.cap.has_5ghz = true;
break;
case MT_EE_2GHZ:
dev->mphy.cap.has_2ghz = true;
break;
case MT_EE_DBDC:
dev->dbdc_support = true;
fallthrough;
case MT_EE_2GHZ:
dev->mphy.cap.has_2ghz = true;
break;
default:
dev->mphy.cap.has_2ghz = true;
dev->mphy.cap.has_5ghz = true;

@@ -716,7 +716,6 @@ struct qeth_card_info {
u16 chid;
u8 ids_valid:1; /* cssid,iid,chid */
u8 dev_addr_is_registered:1;
u8 open_when_online:1;
u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;

@@ -5371,8 +5371,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
qeth_clear_ipacmd_list(card);
rtnl_lock();
card->info.open_when_online = card->dev->flags & IFF_UP;
dev_close(card->dev);
netif_device_detach(card->dev);
netif_carrier_off(card->dev);
rtnl_unlock();

@@ -2387,9 +2387,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
qeth_enable_hw_features(dev);
qeth_l2_enable_brport_features(card);
if (card->info.open_when_online) {
card->info.open_when_online = 0;
dev_open(dev, NULL);
if (netif_running(dev)) {
local_bh_disable();
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
qeth_l2_set_rx_mode(dev);
}
rtnl_unlock();
}

@@ -2017,9 +2017,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
netif_device_attach(dev);
qeth_enable_hw_features(dev);
if (card->info.open_when_online) {
card->info.open_when_online = 0;
dev_open(dev, NULL);
if (netif_running(dev)) {
local_bh_disable();
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
}
rtnl_unlock();
}

@@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data)
/* re-init to undo drop from zfcp_fc_adisc() */
port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
/* port is still good, nothing to do */
out:
atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
@@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work)
int retval;
set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */
get_device(&port->dev);
port->rport_task = RPORT_DEL;
zfcp_scsi_rport_work(&port->rport_work);
/* only issue one test command at one time per port */
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)

@@ -365,6 +365,7 @@ static void storvsc_on_channel_callback(void *context);
#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
#define STORVSC_FC_MAX_TARGETS 128
#define STORVSC_FC_MAX_CHANNELS 8
#define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024))
#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
#define STORVSC_IDE_MAX_TARGETS 1
@@ -2002,6 +2003,9 @@ static int storvsc_probe(struct hv_device *device,
* protecting it from any weird value.
*/
max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
if (is_fc)
max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE);
/* max_hw_sectors_kb */
host->max_sectors = max_xfer_bytes >> 9;
/*

@@ -494,12 +494,16 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
* used yet since their free space will be released as soon as the transaction
* commits.
*/
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
u64 *total_added_ret)
{
struct btrfs_fs_info *info = block_group->fs_info;
u64 extent_start, extent_end, size, total_added = 0;
u64 extent_start, extent_end, size;
int ret;
if (total_added_ret)
*total_added_ret = 0;
while (start < end) {
ret = find_first_extent_bit(&info->excluded_extents, start,
&extent_start, &extent_end,
@@ -512,10 +516,12 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
total_added += size;
ret = btrfs_add_free_space_async_trimmed(block_group,
start, size);
BUG_ON(ret); /* -ENOMEM or logic error */
if (ret)
return ret;
if (total_added_ret)
*total_added_ret += size;
start = extent_end + 1;
} else {
break;
@@ -524,13 +530,15 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
if (start < end) {
size = end - start;
total_added += size;
ret = btrfs_add_free_space_async_trimmed(block_group, start,
size);
BUG_ON(ret); /* -ENOMEM or logic error */
if (ret)
return ret;
if (total_added_ret)
*total_added_ret += size;
}
return total_added;
return 0;
}
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
@@ -637,8 +645,13 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
key.type == BTRFS_METADATA_ITEM_KEY) {
total_found += add_new_free_space(block_group, last,
key.objectid);
u64 space_added;
ret = add_new_free_space(block_group, last, key.objectid,
&space_added);
if (ret)
goto out;
total_found += space_added;
if (key.type == BTRFS_METADATA_ITEM_KEY)
last = key.objectid +
fs_info->nodesize;
@@ -653,11 +666,10 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
}
path->slots[0]++;
}
ret = 0;
total_found += add_new_free_space(block_group, last,
block_group->start + block_group->length);
ret = add_new_free_space(block_group, last,
block_group->start + block_group->length,
NULL);
out:
btrfs_free_path(path);
return ret;
@@ -2101,9 +2113,11 @@ static int read_one_block_group(struct btrfs_fs_info *info,
btrfs_free_excluded_extents(cache);
} else if (cache->used == 0) {
cache->cached = BTRFS_CACHE_FINISHED;
add_new_free_space(cache, cache->start,
cache->start + cache->length);
ret = add_new_free_space(cache, cache->start,
cache->start + cache->length, NULL);
btrfs_free_excluded_extents(cache);
if (ret)
goto error;
}
ret = btrfs_add_block_group_cache(info, cache);
@@ -2529,9 +2543,12 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
return ERR_PTR(ret);
}
add_new_free_space(cache, chunk_offset, chunk_offset + size);
ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
btrfs_free_excluded_extents(cache);
if (ret) {
btrfs_put_block_group(cache);
return ERR_PTR(ret);
}
/*
* Ensure the corresponding space_info object is created and

@@ -284,8 +284,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
u64 start, u64 end);
int add_new_free_space(struct btrfs_block_group *block_group,
u64 start, u64 end, u64 *total_added_ret);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);

@@ -1510,9 +1510,13 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
if (prev_bit == 0 && bit == 1) {
extent_start = offset;
} else if (prev_bit == 1 && bit == 0) {
total_found += add_new_free_space(block_group,
extent_start,
offset);
u64 space_added;
ret = add_new_free_space(block_group, extent_start,
offset, &space_added);
if (ret)
goto out;
total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);
@@ -1524,8 +1528,9 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
}
}
if (prev_bit == 1) {
total_found += add_new_free_space(block_group, extent_start,
end);
ret = add_new_free_space(block_group, extent_start, end, NULL);
if (ret)
goto out;
extent_count++;
}
@@ -1564,6 +1569,8 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
end = block_group->start + block_group->length;
while (1) {
u64 space_added;
ret = btrfs_next_item(root, path);
if (ret < 0)
goto out;
@@ -1578,8 +1585,11 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
ASSERT(key.objectid < end && key.objectid + key.offset <= end);
total_found += add_new_free_space(block_group, key.objectid,
key.objectid + key.offset);
ret = add_new_free_space(block_group, key.objectid,
key.objectid + key.offset, &space_added);
if (ret)
goto out;
total_found += space_added;
if (total_found > CACHING_CTL_WAKE_UP) {
total_found = 0;
wake_up(&caching_ctl->wait);

@@ -4758,7 +4758,7 @@ static void delayed_work(struct work_struct *work)
dout("mdsc delayed_work\n");
if (mdsc->stopping)
if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
return;
mutex_lock(&mdsc->mutex);
@@ -4937,7 +4937,7 @@ void send_flush_mdlog(struct ceph_mds_session *s)
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
dout("pre_umount\n");
mdsc->stopping = 1;
mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);

@@ -380,6 +380,11 @@ struct cap_wait {
int want;
};
enum {
CEPH_MDSC_STOPPING_BEGIN = 1,
CEPH_MDSC_STOPPING_FLUSHED = 2,
};
/*
* mds client state
*/

@@ -1374,6 +1374,16 @@ static void ceph_kill_sb(struct super_block *s)
ceph_mdsc_pre_umount(fsc->mdsc);
flush_fs_workqueues(fsc);
/*
* Though the kill_anon_super() will finally trigger the
* sync_filesystem() anyway, we still need to do it here
* and then bump the stage of shutdown to stop the work
* queue as early as possible.
*/
sync_filesystem(s);
fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
kill_anon_super(s);
fsc->client->extra_mon_dispatch = NULL;

@@ -1121,7 +1121,6 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
(bvec->end == PAGE_SIZE ||
bvec->offset + bvec->end == be->pcl->length)) {
pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
DBG_BUGON(pgnr >= be->nr_pages);
if (!be->decompressed_pages[pgnr]) {

@@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
}
sbi->map_sectors = ((need_map_size - 1) >>
(sb->s_blocksize_bits)) + 1;
sbi->vol_amap = kmalloc_array(sbi->map_sectors,
sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
sizeof(struct buffer_head *), GFP_KERNEL);
if (!sbi->vol_amap)
return -ENOMEM;
@@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
while (j < i)
brelse(sbi->vol_amap[j++]);
kfree(sbi->vol_amap);
kvfree(sbi->vol_amap);
sbi->vol_amap = NULL;
return -EIO;
}
@@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
for (i = 0; i < sbi->map_sectors; i++)
__brelse(sbi->vol_amap[i]);
kfree(sbi->vol_amap);
kvfree(sbi->vol_amap);
}
int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)

@@ -34,6 +34,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
{
int i;
struct exfat_entry_set_cache *es;
unsigned int uni_len = 0, len;
es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
if (!es)
@@ -52,7 +53,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
if (exfat_get_entry_type(ep) != TYPE_EXTEND)
break;
exfat_extract_uni_name(ep, uniname);
len = exfat_extract_uni_name(ep, uniname);
uni_len += len;
if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH)
break;
uniname += EXFAT_FILE_NAME_LEN;
}
@@ -210,7 +214,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb)
exfat_init_namebuf(nb);
}
/* skip iterating emit_dots when dir is empty */
/*
* Before calling dir_emit*(), sbi->s_lock should be released
* because page fault can occur in dir_emit*().
*/
#define ITER_POS_FILLED_DOTS (2)
static int exfat_iterate(struct file *file, struct dir_context *ctx)
{
@@ -225,11 +232,10 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
int err = 0, fake_offset = 0;
exfat_init_namebuf(nb);
mutex_lock(&EXFAT_SB(sb)->s_lock);
cpos = ctx->pos;
if (!dir_emit_dots(file, ctx))
goto unlock;
goto out;
if (ctx->pos == ITER_POS_FILLED_DOTS) {
cpos = 0;
@@ -241,16 +247,18 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
/* name buffer should be allocated before use */
err = exfat_alloc_namebuf(nb);
if (err)
goto unlock;
goto out;
get_new:
mutex_lock(&EXFAT_SB(sb)->s_lock);
if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode))
goto end_of_dir;
err = exfat_readdir(inode, &cpos, &de);
if (err) {
/*
* At least we tried to read a sector. Move cpos to next sector
* position (should be aligned).
* At least we tried to read a sector.
* Move cpos to next sector position (should be aligned).
*/
if (err == -EIO) {
cpos += 1 << (sb->s_blocksize_bits);
@@ -273,16 +281,10 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
inum = iunique(sb, EXFAT_ROOT_INO);
}
/*
* Before calling dir_emit(), sb_lock should be released.
* Because page fault can occur in dir_emit() when the size
* of buffer given from user is larger than one page size.
*/
mutex_unlock(&EXFAT_SB(sb)->s_lock);
if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum,
(de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG))
goto out_unlocked;
mutex_lock(&EXFAT_SB(sb)->s_lock);
goto out;
ctx->pos = cpos;
goto get_new;
@@ -290,9 +292,8 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
if (!cpos && fake_offset)
cpos = ITER_POS_FILLED_DOTS;
ctx->pos = cpos;
unlock:
mutex_unlock(&EXFAT_SB(sb)->s_lock);
out_unlocked:
out:
/*
* To improve performance, free namebuf after unlocking sb_lock.
* If namebuf is not allocated, this function does nothing
@@ -1027,7 +1028,8 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
if (entry_type == TYPE_EXTEND) {
unsigned short entry_uniname[16], unichar;
if (step != DIRENT_STEP_NAME) {
if (step != DIRENT_STEP_NAME ||
name_len >= MAX_NAME_LENGTH) {
step = DIRENT_STEP_FILE;
continue;
}

Some files were not shown because too many files have changed in this diff.