Merge 6.1.72 into android14-6.1-lts

Changes in 6.1.72
	keys, dns: Fix missing size check of V1 server-list header
	block: Don't invalidate pagecache for invalid falloc modes
	ALSA: hda/realtek: enable SND_PCI_QUIRK for hp pavilion 14-ec1xxx series
	ALSA: hda/realtek: fix mute/micmute LEDs for a HP ZBook
	ALSA: hda/realtek: Fix mute and mic-mute LEDs for HP ProBook 440 G6
	mptcp: prevent tcp diag from closing listener subflows
	Revert "PCI/ASPM: Remove pcie_aspm_pm_state_change()"
	drm/mgag200: Fix gamma lut not initialized for G200ER, G200EV, G200SE
	cifs: cifs_chan_is_iface_active should be called with chan_lock held
	cifs: do not depend on release_iface for maintaining iface_list
	KVM: x86/pmu: fix masking logic for MSR_CORE_PERF_GLOBAL_CTRL
	wifi: iwlwifi: pcie: don't synchronize IRQs from IRQ
	drm/bridge: ti-sn65dsi86: Never store more than msg->size bytes in AUX xfer
	netfilter: use skb_ip_totlen and iph_totlen
	netfilter: nf_tables: set transport offset from mac header for netdev/egress
	nfc: llcp_core: Hold a ref to llcp_local->dev when holding a ref to llcp_local
	octeontx2-af: Fix marking couple of structure as __packed
	drm/i915/dp: Fix passing the correct DPCD_REV for drm_dp_set_phy_test_pattern
	ice: Fix link_down_on_close message
	ice: Shut down VSI with "link-down-on-close" enabled
	i40e: Fix filter input checks to prevent config with invalid values
	igc: Report VLAN EtherType matching back to user
	igc: Check VLAN TCI mask
	igc: Check VLAN EtherType mask
	ASoC: fsl_rpmsg: Fix error handler with pm_runtime_enable
	ASoC: mediatek: mt8186: fix AUD_PAD_TOP register and offset
	mlxbf_gige: fix receive packet race condition
	net: sched: em_text: fix possible memory leak in em_text_destroy()
	r8169: Fix PCI error on system resume
	can: raw: add support for SO_MARK
	net-timestamp: extend SOF_TIMESTAMPING_OPT_ID to HW timestamps
	net: annotate data-races around sk->sk_tsflags
	net: annotate data-races around sk->sk_bind_phc
	net: Implement missing getsockopt(SO_TIMESTAMPING_NEW)
	selftests: bonding: do not set port down when adding to bond
	ARM: sun9i: smp: Fix array-index-out-of-bounds read in sunxi_mc_smp_init
	sfc: fix a double-free bug in efx_probe_filters
	net: bcmgenet: Fix FCS generation for fragmented skbuffs
	netfilter: nft_immediate: drop chain reference counter on error
	net: Save and restore msg_namelen in sock_sendmsg
	i40e: fix use-after-free in i40e_aqc_add_filters()
	ASoC: meson: g12a-toacodec: Validate written enum values
	ASoC: meson: g12a-tohdmitx: Validate written enum values
	ASoC: meson: g12a-toacodec: Fix event generation
	ASoC: meson: g12a-tohdmitx: Fix event generation for S/PDIF mux
	i40e: Restore VF MSI-X state during PCI reset
	igc: Fix hicredit calculation
	net/qla3xxx: fix potential memleak in ql_alloc_buffer_queues
	net/smc: fix invalid link access in dumping SMC-R connections
	octeontx2-af: Always configure NIX TX link credits based on max frame size
	octeontx2-af: Re-enable MAC TX in otx2_stop processing
	asix: Add check for usbnet_get_endpoints
	net: ravb: Wait for operating mode to be applied
	bnxt_en: Remove mis-applied code from bnxt_cfg_ntp_filters()
	net: Implement missing SO_TIMESTAMPING_NEW cmsg support
	selftests: secretmem: floor the memory size to the multiple of page_size
	cpu/SMT: Create topology_smt_thread_allowed()
	cpu/SMT: Make SMT control more robust against enumeration failures
	srcu: Fix callbacks acceleration mishandling
	bpf, x64: Fix tailcall infinite loop
	bpf, x86: Simplify the parsing logic of structure parameters
	bpf, x86: save/restore regs with BPF_DW size
	net: Declare MSG_SPLICE_PAGES internal sendmsg() flag
	udp: Convert udp_sendpage() to use MSG_SPLICE_PAGES
	splice, net: Add a splice_eof op to file-ops and socket-ops
	ipv4, ipv6: Use splice_eof() to flush
	udp: introduce udp->udp_flags
	udp: move udp->no_check6_tx to udp->udp_flags
	udp: move udp->no_check6_rx to udp->udp_flags
	udp: move udp->gro_enabled to udp->udp_flags
	udp: move udp->accept_udp_{l4|fraglist} to udp->udp_flags
	udp: lockless UDP_ENCAP_L2TPINUDP / UDP_GRO
	udp: annotate data-races around udp->encap_type
	wifi: iwlwifi: yoyo: swap cdb and jacket bits values
	arm64: dts: qcom: sdm845: align RPMh regulator nodes with bindings
	arm64: dts: qcom: sdm845: Fix PSCI power domain names
	fbdev: imsttfb: Release framebuffer and dealloc cmap on error path
	fbdev: imsttfb: fix double free in probe()
	bpf: decouple prune and jump points
	bpf: remove unnecessary prune and jump points
	bpf: Remove unused insn_cnt argument from visit_[func_call_]insn()
	bpf: clean up visit_insn()'s instruction processing
	bpf: Support new 32bit offset jmp instruction
	bpf: handle ldimm64 properly in check_cfg()
	bpf: fix precision backtracking instruction iteration
	blk-mq: make sure active queue usage is held for bio_integrity_prep()
	net/mlx5: Increase size of irq name buffer
	s390/mm: add missing arch_set_page_dat() call to vmem_crst_alloc()
	s390/cpumf: support user space events for counting
	f2fs: clean up i_compress_flag and i_compress_level usage
	f2fs: convert to use bitmap API
	f2fs: assign default compression level
	f2fs: set the default compress_level on ioctl
	selftests: mptcp: fix fastclose with csum failure
	selftests: mptcp: set FAILING_LINKS in run_tests
	media: camss: sm8250: Virtual channels for CSID
	media: qcom: camss: Fix set CSI2_RX_CFG1_VC_MODE when VC is greater than 3
	ext4: convert move_extent_per_page() to use folios
	khugepage: replace try_to_release_page() with filemap_release_folio()
	memory-failure: convert truncate_error_page() to use folio
	mm: merge folio_has_private()/filemap_release_folio() call pairs
	mm, netfs, fscache: stop read optimisation when folio removed from pagecache
	filemap: add a per-mapping stable writes flag
	block: update the stable_writes flag in bdev_add
	smb: client: fix missing mode bits for SMB symlinks
	net: dpaa2-eth: rearrange variable in dpaa2_eth_get_ethtool_stats
	dpaa2-eth: recycle the RX buffer only after all processing done
	ethtool: don't propagate EOPNOTSUPP from dumps
	bpf, sockmap: af_unix stream sockets need to hold ref for pair sock
	firmware: arm_scmi: Fix frequency truncation by promoting multiplier type
	ALSA: hda/realtek: Add quirk for Lenovo Yoga Pro 7
	genirq/affinity: Remove the 'firstvec' parameter from irq_build_affinity_masks
	genirq/affinity: Pass affinity managed mask array to irq_build_affinity_masks
	genirq/affinity: Don't pass irq_affinity_desc array to irq_build_affinity_masks
	genirq/affinity: Rename irq_build_affinity_masks as group_cpus_evenly
	genirq/affinity: Move group_cpus_evenly() into lib/
	lib/group_cpus.c: avoid acquiring cpu hotplug lock in group_cpus_evenly
	mm/memory_hotplug: add missing mem_hotplug_lock
	mm/memory_hotplug: fix error handling in add_memory_resource()
	net: sched: call tcf_ct_params_free to free params in tcf_ct_init
	netfilter: flowtable: allow unidirectional rules
	netfilter: flowtable: cache info of last offload
	net/sched: act_ct: offload UDP NEW connections
	net/sched: act_ct: Fix promotion of offloaded unreplied tuple
	netfilter: flowtable: GC pushes back packets to classic path
	net/sched: act_ct: Take per-cb reference to tcf_ct_flow_table
	octeontx2-af: Fix pause frame configuration
	octeontx2-af: Support variable number of lmacs
	btrfs: fix qgroup_free_reserved_data int overflow
	btrfs: mark the len field in struct btrfs_ordered_sum as unsigned
	ring-buffer: Fix 32-bit rb_time_read() race with rb_time_cmpxchg()
	firewire: ohci: suppress unexpected system reboot in AMD Ryzen machines and ASM108x/VT630x PCIe cards
	x86/kprobes: fix incorrect return address calculation in kprobe_emulate_call_indirect
	i2c: core: Fix atomic xfer check for non-preempt config
	mm: fix unmap_mapping_range high bits shift bug
	drm/amdgpu: skip gpu_info fw loading on navi12
	drm/amd/display: add nv12 bounding box
	mmc: meson-mx-sdhc: Fix initialization frozen issue
	mmc: rpmb: fixes pause retune on all RPMB partitions.
	mmc: core: Cancel delayed work before releasing host
	mmc: sdhci-sprd: Fix eMMC init failure after hw reset
	genirq/affinity: Only build SMP-only helper functions on SMP kernels
	f2fs: compress: fix to assign compress_level for lz4 correctly
	net/sched: act_ct: additional checks for outdated flows
	net/sched: act_ct: Always fill offloading tuple iifidx
	bpf: Fix a verifier bug due to incorrect branch offset comparison with cpu=v4
	bpf: syzkaller found null ptr deref in unix_bpf proto add
	media: qcom: camss: Comment CSID dt_id field
	smb3: Replace smb2pdu 1-element arrays with flex-arrays
	Revert "interconnect: qcom: sm8250: Enable sync_state"
	Linux 6.1.72

Change-Id: Id00eb2ae1159d4d5fa0ef914e672c5669cbf5b0a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2024-01-14 11:30:17 +00:00 (commit e1b12db2de).
207 files changed, 2339 insertions(+), 1394 deletions(-)


@ -10845,6 +10845,8 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
F: include/linux/group_cpus.h
F: lib/group_cpus.c
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 71
SUBLEVEL = 72
EXTRAVERSION =
NAME = Curry Ramen


@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS
config HOTPLUG_SMT
bool
config SMT_NUM_THREADS_DYNAMIC
bool
config GENERIC_ENTRY
bool


@ -808,12 +808,12 @@ static int __init sunxi_mc_smp_init(void)
break;
}
is_a83t = sunxi_mc_smp_data[i].is_a83t;
of_node_put(node);
if (ret)
return -ENODEV;
is_a83t = sunxi_mc_smp_data[i].is_a83t;
if (!sunxi_mc_smp_cpu_table_init())
return -EINVAL;


@ -150,15 +150,15 @@ &cpufreq_hw {
};
&psci {
/delete-node/ cpu0;
/delete-node/ cpu1;
/delete-node/ cpu2;
/delete-node/ cpu3;
/delete-node/ cpu4;
/delete-node/ cpu5;
/delete-node/ cpu6;
/delete-node/ cpu7;
/delete-node/ cpu-cluster0;
/delete-node/ power-domain-cpu0;
/delete-node/ power-domain-cpu1;
/delete-node/ power-domain-cpu2;
/delete-node/ power-domain-cpu3;
/delete-node/ power-domain-cpu4;
/delete-node/ power-domain-cpu5;
/delete-node/ power-domain-cpu6;
/delete-node/ power-domain-cpu7;
/delete-node/ power-domain-cluster;
};
&cpus {
@ -351,7 +351,9 @@ flash@0 {
&apps_rsc {
pm8998-rpmh-regulators {
/delete-property/ power-domains;
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -633,7 +635,7 @@ src_pp1800_lvs2: lvs2 {
};
};
pm8005-rpmh-regulators {
regulators-1 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -271,7 +271,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
vdd-s1-supply = <&vph_pwr>;
@ -396,7 +396,7 @@ vreg_lvs2a_1p8: lvs2 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";


@ -166,7 +166,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -419,7 +419,7 @@ vreg_lvs2a_1p8: lvs2 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";
@ -433,7 +433,7 @@ vreg_bob: bob {
};
};
pm8005-rpmh-regulators {
regulators-2 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -117,7 +117,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -382,7 +382,7 @@ vreg_lvs2a_1p8: lvs2 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";
@ -396,7 +396,7 @@ vreg_bob: bob {
};
};
pm8005-rpmh-regulators {
regulators-2 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -144,7 +144,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -280,7 +280,7 @@ vreg_l28a_3p0: ldo28 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";
@ -294,7 +294,7 @@ vreg_bob: bob {
};
};
pm8005-rpmh-regulators {
regulators-2 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -110,7 +110,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -375,7 +375,7 @@ vreg_lvs2a_1p8: lvs2 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";
@ -389,7 +389,7 @@ vreg_bob: bob {
};
};
pm8005-rpmh-regulators {
regulators-2 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -78,7 +78,7 @@ ramoops@ffc00000 {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -308,7 +308,7 @@ vreg_lvs2a_1p8: lvs2 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";
@ -319,7 +319,7 @@ src_vreg_bob: bob {
};
};
pm8005-rpmh-regulators {
regulators-2 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -125,7 +125,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";


@ -143,7 +143,7 @@ vreg_s4a_1p8: vreg-s4a-1p8 {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";
@ -343,7 +343,7 @@ vreg_lvs2a_1p8: lvs2 {
};
};
pmi8998-rpmh-regulators {
regulators-1 {
compatible = "qcom,pmi8998-rpmh-regulators";
qcom,pmic-id = "b";
@ -355,7 +355,7 @@ vreg_bob: bob {
};
};
pm8005-rpmh-regulators {
regulators-2 {
compatible = "qcom,pm8005-rpmh-regulators";
qcom,pmic-id = "c";


@ -99,7 +99,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";


@ -129,7 +129,7 @@ &adsp_pas {
};
&apps_rsc {
pm8998-rpmh-regulators {
regulators-0 {
compatible = "qcom,pm8998-rpmh-regulators";
qcom,pmic-id = "a";


@ -2,7 +2,7 @@
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
* Copyright IBM Corp. 2012, 2021
* Copyright IBM Corp. 2012, 2022
* Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
* Thomas Richter <tmricht@linux.ibm.com>
*/
@ -434,6 +434,12 @@ static void cpumf_hw_inuse(void)
mutex_unlock(&pmc_reserve_mutex);
}
static int is_userspace_event(u64 ev)
{
return cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev ||
cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev;
}
static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
struct perf_event_attr *attr = &event->attr;
@ -456,19 +462,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
if (is_sampling_event(event)) /* No sampling support */
return -ENOENT;
ev = attr->config;
/* Count user space (problem-state) only */
if (!attr->exclude_user && attr->exclude_kernel) {
if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
return -EOPNOTSUPP;
ev = cpumf_generic_events_user[ev];
/* No support for kernel space counters only */
/*
* Count user space (problem-state) only
* Handle events 32 and 33 as 0:u and 1:u
*/
if (!is_userspace_event(ev)) {
if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
return -EOPNOTSUPP;
ev = cpumf_generic_events_user[ev];
}
} else if (!attr->exclude_kernel && attr->exclude_user) {
/* No support for kernel space counters only */
return -EOPNOTSUPP;
} else { /* Count user and kernel space */
if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
return -EOPNOTSUPP;
ev = cpumf_generic_events_basic[ev];
} else {
/* Count user and kernel space, incl. events 32 + 33 */
if (!is_userspace_event(ev)) {
if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
return -EOPNOTSUPP;
ev = cpumf_generic_events_basic[ev];
}
}
break;


@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
@ -44,8 +45,11 @@ void *vmem_crst_alloc(unsigned long val)
unsigned long *table;
table = vmem_alloc_pages(CRST_ALLOC_ORDER);
if (table)
crst_table_init(table, val);
if (!table)
return NULL;
crst_table_init(table, val);
if (slab_is_available())
arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
return table;
}


@ -4033,12 +4033,17 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
int global_ctrl, pebs_enable;
/*
* In addition to obeying exclude_guest/exclude_host, remove bits being
* used for PEBS when running a guest, because PEBS writes to virtual
* addresses (not physical addresses).
*/
*nr = 0;
global_ctrl = (*nr)++;
arr[global_ctrl] = (struct perf_guest_switch_msr){
.msr = MSR_CORE_PERF_GLOBAL_CTRL,
.host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
.guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
.guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask,
};
if (!x86_pmu.pebs)


@ -549,7 +549,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
{
unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
int3_emulate_call(regs, regs_get_register(regs, offs));
int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size);
int3_emulate_jmp(regs, regs_get_register(regs, offs));
}
NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);


@ -893,6 +893,10 @@ static void emit_nops(u8 **pprog, int len)
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define RESTORE_TAIL_CALL_CNT(stack) \
EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
@ -1436,9 +1440,7 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_CALL:
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
-round_up(bpf_prog->aux->stack_depth, 8) - 8);
RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
return -EINVAL;
} else {
@ -1623,16 +1625,24 @@ st: if (is_imm8(insn->off))
break;
case BPF_JMP | BPF_JA:
if (insn->off == -1)
/* -1 jmp instructions will always jump
* backwards two bytes. Explicitly handling
* this case avoids wasting too many passes
* when there are long sequences of replaced
* dead code.
*/
jmp_offset = -2;
else
jmp_offset = addrs[i + insn->off] - addrs[i];
case BPF_JMP32 | BPF_JA:
if (BPF_CLASS(insn->code) == BPF_JMP) {
if (insn->off == -1)
/* -1 jmp instructions will always jump
* backwards two bytes. Explicitly handling
* this case avoids wasting too many passes
* when there are long sequences of replaced
* dead code.
*/
jmp_offset = -2;
else
jmp_offset = addrs[i + insn->off] - addrs[i];
} else {
if (insn->imm == -1)
jmp_offset = -2;
else
jmp_offset = addrs[i + insn->imm] - addrs[i];
}
if (!jmp_offset) {
/*
@ -1750,63 +1760,37 @@ st: if (is_imm8(insn->off))
return proglen;
}
static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
int stack_size)
{
int i, j, arg_size, nr_regs;
int i;
/* Store function arguments to stack.
* For a function that accepts two pointers the sequence will be:
* mov QWORD PTR [rbp-0x10],rdi
* mov QWORD PTR [rbp-0x8],rsi
*/
for (i = 0, j = 0; i < min(nr_args, 6); i++) {
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
nr_regs = (m->arg_size[i] + 7) / 8;
arg_size = 8;
} else {
nr_regs = 1;
arg_size = m->arg_size[i];
}
while (nr_regs) {
emit_stx(prog, bytes_to_bpf_size(arg_size),
BPF_REG_FP,
j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
-(stack_size - j * 8));
nr_regs--;
j++;
}
}
for (i = 0; i < min(nr_regs, 6); i++)
emit_stx(prog, BPF_DW, BPF_REG_FP,
i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
-(stack_size - i * 8));
}
static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
int stack_size)
{
int i, j, arg_size, nr_regs;
int i;
/* Restore function arguments from stack.
* For a function that accepts two pointers the sequence will be:
* EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
* EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
*/
for (i = 0, j = 0; i < min(nr_args, 6); i++) {
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
nr_regs = (m->arg_size[i] + 7) / 8;
arg_size = 8;
} else {
nr_regs = 1;
arg_size = m->arg_size[i];
}
while (nr_regs) {
emit_ldx(prog, bytes_to_bpf_size(arg_size),
j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
BPF_REG_FP,
-(stack_size - j * 8));
nr_regs--;
j++;
}
}
for (i = 0; i < min(nr_regs, 6); i++)
emit_ldx(prog, BPF_DW,
i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
BPF_REG_FP,
-(stack_size - i * 8));
}
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
@ -2031,8 +2015,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
struct bpf_tramp_links *tlinks,
void *func_addr)
{
int ret, i, nr_args = m->nr_args, extra_nregs = 0;
int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
int i, ret, nr_regs = m->nr_args, stack_size = 0;
int regs_off, nregs_off, ip_off, run_ctx_off;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@ -2041,17 +2025,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
u8 *prog;
bool save_ret;
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
if (nr_args > 6)
return -ENOTSUPP;
for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
/* extra registers for struct arguments */
for (i = 0; i < m->nr_args; i++)
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
}
if (nr_args + extra_nregs > 6)
nr_regs += (m->arg_size[i] + 7) / 8 - 1;
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
if (nr_regs > 6)
return -ENOTSUPP;
stack_size += extra_nregs * 8;
/* Generated trampoline stack layout:
*
@ -2065,11 +2046,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
* [ ... ]
* RBP - regs_off [ reg_arg1 ] program's ctx pointer
*
* RBP - args_off [ arg regs count ] always
* RBP - nregs_off [ regs count ] always
*
* RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
*
* RBP - run_ctx_off [ bpf_tramp_run_ctx ]
* RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
*/
/* room for return value of orig_call or fentry prog */
@ -2077,11 +2059,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (save_ret)
stack_size += 8;
stack_size += nr_regs * 8;
regs_off = stack_size;
/* args count */
/* regs count */
stack_size += 8;
args_off = stack_size;
nregs_off = stack_size;
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */
@ -2106,14 +2089,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
EMIT1(0x50); /* push rax */
EMIT1(0x53); /* push rbx */
/* Store number of argument registers of the traced function:
* mov rax, nr_args + extra_nregs
* mov QWORD PTR [rbp - args_off], rax
* mov rax, nr_regs
* mov QWORD PTR [rbp - nregs_off], rax
*/
emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);
if (flags & BPF_TRAMP_F_IP_ARG) {
/* Store IP address of the traced function:
@ -2124,7 +2109,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
}
save_regs(m, &prog, nr_args, regs_off);
save_regs(m, &prog, nr_regs, regs_off);
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */
@ -2154,11 +2139,17 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
restore_regs(m, &prog, nr_args, regs_off);
restore_regs(m, &prog, nr_regs, regs_off);
if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
/* Before calling the original function, restore the
* tail_call_cnt from stack to rax.
*/
RESTORE_TAIL_CALL_CNT(stack_size);
if (flags & BPF_TRAMP_F_ORIG_STACK) {
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
EMIT2(0xff, 0xd0); /* call *rax */
emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
EMIT2(0xff, 0xd3); /* call *rbx */
} else {
/* call original function */
if (emit_call(&prog, orig_call, prog)) {
@ -2195,7 +2186,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_regs(m, &prog, nr_args, regs_off);
restore_regs(m, &prog, nr_regs, regs_off);
/* This needs to be done regardless. If there were fmod_ret programs,
* the return value is only updated on the stack and still needs to be
@ -2209,7 +2200,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
ret = -EINVAL;
goto cleanup;
}
}
} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
/* Before running the original function, restore the
* tail_call_cnt from stack to rax.
*/
RESTORE_TAIL_CALL_CNT(stack_size);
/* restore return value of orig_call or fentry prog back into RAX */
if (save_ret)
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);


@ -512,6 +512,8 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
void bdev_add(struct block_device *bdev, dev_t dev)
{
if (bdev_stable_writes(bdev))
mapping_set_stable_writes(bdev->bd_inode->i_mapping);
bdev->bd_dev = dev;
bdev->bd_inode->i_rdev = dev;
bdev->bd_inode->i_ino = dev;


@ -2853,11 +2853,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
if (unlikely(bio_queue_enter(bio)))
return NULL;
if (blk_mq_attempt_bio_merge(q, bio, nsegs))
goto queue_exit;
return NULL;
rq_qos_throttle(q, bio);
@ -2873,35 +2870,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
queue_exit:
blk_queue_exit(q);
return NULL;
}
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
/* return true if this @rq can be used for @bio */
static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
struct bio *bio)
{
struct request *rq;
enum hctx_type type, hctx_type;
enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
enum hctx_type hctx_type = rq->mq_hctx->type;
if (!plug)
return NULL;
rq = rq_list_peek(&plug->cached_rq);
if (!rq || rq->q != q)
return NULL;
WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
*bio = NULL;
return NULL;
}
type = blk_mq_get_hctx_type((*bio)->bi_opf);
hctx_type = rq->mq_hctx->type;
if (type != hctx_type &&
!(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
return NULL;
if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
return NULL;
return false;
if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
return false;
/*
* If any qos ->throttle() end up blocking, we will have flushed the
@ -2909,11 +2894,11 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
* before we throttle.
*/
plug->cached_rq = rq_list_next(rq);
rq_qos_throttle(q, *bio);
rq_qos_throttle(rq->q, bio);
rq->cmd_flags = (*bio)->bi_opf;
rq->cmd_flags = bio->bi_opf;
INIT_LIST_HEAD(&rq->queuelist);
return rq;
return true;
}
static void bio_set_ioprio(struct bio *bio)
@ -2942,7 +2927,7 @@ void blk_mq_submit_bio(struct bio *bio)
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
struct blk_plug *plug = blk_mq_plug(bio);
const int is_sync = op_is_sync(bio->bi_opf);
struct request *rq;
struct request *rq = NULL;
unsigned int nr_segs = 1;
blk_status_t ret;
@ -2953,20 +2938,36 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
if (!bio_integrity_prep(bio))
return;
bio_set_ioprio(bio);
rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
if (!rq) {
if (!bio)
if (plug) {
rq = rq_list_peek(&plug->cached_rq);
if (rq && rq->q != q)
rq = NULL;
}
if (rq) {
if (!bio_integrity_prep(bio))
return;
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq))
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
return;
if (blk_mq_can_use_cached_rq(rq, plug, bio))
goto done;
percpu_ref_get(&q->q_usage_counter);
} else {
if (unlikely(bio_queue_enter(bio)))
return;
if (!bio_integrity_prep(bio))
goto fail;
}
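	/*
	 * Illustrative note (editor's addition, not part of the patch): in both
	 * branches above, bio_integrity_prep() now runs while a queue reference
	 * is held - either via the plugged cached request or via
	 * bio_queue_enter() - which is the point of "blk-mq: make sure active
	 * queue usage is held for bio_integrity_prep()" in this release.
	 */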
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq)) {
fail:
blk_queue_exit(q);
return;
}
done:
trace_block_getrq(bio);
rq_qos_track(q, rq, bio);


@ -655,24 +655,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
filemap_invalidate_lock(inode->i_mapping);
/* Invalidate the page cache, including dirty pages. */
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
goto fail;
/*
* Invalidate the page cache, including dirty pages, for valid
* de-allocate mode calls to fallocate().
*/
switch (mode) {
case FALLOC_FL_ZERO_RANGE:
case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
goto fail;
error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOUNMAP);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
goto fail;
error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL,
BLKDEV_ZERO_NOFALLBACK);
break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
goto fail;
error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
len >> SECTOR_SHIFT, GFP_KERNEL);
break;


@ -175,6 +175,9 @@ int memory_notify(unsigned long val, void *v)
return blocking_notifier_call_chain(&memory_chain, val, v);
}
/*
* Must acquire mem_hotplug_lock in write mode.
*/
static int memory_block_online(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@ -193,10 +196,11 @@ static int memory_block_online(struct memory_block *mem)
* stage helps to keep accounting easier to follow - e.g vmemmaps
* belong to the same zone as the memory they backed.
*/
mem_hotplug_begin();
if (nr_vmemmap_pages) {
ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
return ret;
goto out;
}
ret = online_pages(start_pfn + nr_vmemmap_pages,
@ -204,7 +208,7 @@ static int memory_block_online(struct memory_block *mem)
if (ret) {
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
return ret;
goto out;
}
/*
@ -216,9 +220,14 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
out:
mem_hotplug_done();
return ret;
}
/*
* Must acquire mem_hotplug_lock in write mode.
*/
static int memory_block_offline(struct memory_block *mem)
{
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
@ -233,6 +242,7 @@ static int memory_block_offline(struct memory_block *mem)
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
*/
mem_hotplug_begin();
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
-nr_vmemmap_pages);
@ -244,13 +254,15 @@ static int memory_block_offline(struct memory_block *mem)
if (nr_vmemmap_pages)
adjust_present_page_count(pfn_to_page(start_pfn),
mem->group, nr_vmemmap_pages);
return ret;
goto out;
}
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
out:
mem_hotplug_done();
return ret;
}


@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
// On PCI Express Root Complex in any type of AMD Ryzen machine, VIA VT6306/6307/6308 with Asmedia
// ASM1083/1085 brings an inconvenience that the read accesses to 'Isochronous Cycle Timer' register
// (at offset 0xf0 in PCI I/O space) often causes unexpected system reboot. The mechanism is not
// clear, since the read access to the other registers is enough safe; e.g. 'Node ID' register,
// while it is probable due to detection of any type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000
#if IS_ENABLED(CONFIG_X86)
static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}
#define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080
static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
const struct pci_dev *pcie_to_pci_bridge;
// Detect any type of AMD Ryzen machine.
if (!static_cpu_has(X86_FEATURE_ZEN))
return false;
// Detect VIA VT6306/6307/6308.
if (pdev->vendor != PCI_VENDOR_ID_VIA)
return false;
if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
return false;
// Detect Asmedia ASM1083/1085.
pcie_to_pci_bridge = pdev->bus->self;
if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
return false;
if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
return false;
return true;
}
#else
#define has_reboot_by_cycle_timer_read_quirk(ohci) false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
#endif
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, revision, flags;
@ -1713,6 +1758,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci)
s32 diff01, diff12;
int i;
if (has_reboot_by_cycle_timer_read_quirk(ohci))
return 0;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if (ohci->quirks & QUIRK_CYCLE_TIMER) {
@ -3615,6 +3663,9 @@ static int pci_probe(struct pci_dev *dev,
if (param_quirks)
ohci->quirks = param_quirks;
if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
/*
* Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/


@ -131,7 +131,7 @@ struct perf_dom_info {
u32 opp_count;
u32 sustained_freq_khz;
u32 sustained_perf_level;
u32 mult_factor;
unsigned long mult_factor;
char name[SCMI_MAX_STR_SIZE];
struct scmi_opp opp[MAX_OPPS];
struct scmi_fc_info *fc_info;
@ -223,8 +223,8 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->mult_factor = 1000;
else
dom_info->mult_factor =
(dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
(dom_info->sustained_freq_khz * 1000UL)
/ dom_info->sustained_perf_level;
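	/*
	 * Illustrative note (editor's addition, not part of the patch):
	 * sustained_freq_khz is a u32, so e.g. 5000000 kHz * 1000 = 5e9 would
	 * wrap a 32-bit product; multiplying by 1000UL performs the arithmetic
	 * in unsigned long, matching the widened mult_factor field above.
	 */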
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}


@ -1976,15 +1976,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
if (adev->mman.discovery_bin) {
/*
* FIXME: The bounding box is still needed by Navi12, so
* temporarily read it from gpu_info firmware. Should be dropped
* when DAL no longer needs it.
*/
if (adev->asic_type != CHIP_NAVI12)
return 0;
}
if (adev->mman.discovery_bin)
return 0;
switch (adev->asic_type) {
default:


@ -438,7 +438,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.use_urgent_burst_bw = 0
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
.clock_limits = {
{
.state = 0,
.dcfclk_mhz = 560.0,
.fabricclk_mhz = 560.0,
.dispclk_mhz = 513.0,
.dppclk_mhz = 513.0,
.phyclk_mhz = 540.0,
.socclk_mhz = 560.0,
.dscclk_mhz = 171.0,
.dram_speed_mts = 1069.0,
},
{
.state = 1,
.dcfclk_mhz = 694.0,
.fabricclk_mhz = 694.0,
.dispclk_mhz = 642.0,
.dppclk_mhz = 642.0,
.phyclk_mhz = 600.0,
.socclk_mhz = 694.0,
.dscclk_mhz = 214.0,
.dram_speed_mts = 1324.0,
},
{
.state = 2,
.dcfclk_mhz = 875.0,
.fabricclk_mhz = 875.0,
.dispclk_mhz = 734.0,
.dppclk_mhz = 734.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 875.0,
.dscclk_mhz = 245.0,
.dram_speed_mts = 1670.0,
},
{
.state = 3,
.dcfclk_mhz = 1000.0,
.fabricclk_mhz = 1000.0,
.dispclk_mhz = 1100.0,
.dppclk_mhz = 1100.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1000.0,
.dscclk_mhz = 367.0,
.dram_speed_mts = 2000.0,
},
{
.state = 4,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 2000.0,
},
{
.state = 5,
.dcfclk_mhz = 1200.0,
.fabricclk_mhz = 1200.0,
.dispclk_mhz = 1284.0,
.dppclk_mhz = 1284.0,
.phyclk_mhz = 810.0,
.socclk_mhz = 1200.0,
.dscclk_mhz = 428.0,
.dram_speed_mts = 2000.0,
},
},
.num_states = 5,
.sr_exit_time_us = 1.9,
.sr_enter_plus_exit_time_us = 4.4,
.urgent_latency_us = 3.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
.urgent_latency_vm_data_only_us = 4.0,
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
.max_avg_sdp_bw_use_normal_percent = 40.0,
.max_avg_dram_bw_use_normal_percent = 40.0,
.writeback_latency_us = 12.0,
.ideal_dram_bw_after_urgent_percent = 40.0,
.max_request_size_bytes = 256,
.dram_channel_width_bytes = 16,
.fabric_datapath_to_dcn_data_return_bytes = 64,
.dcn_downspread_percent = 0.5,
.downspread_percent = 0.5,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
.round_trip_ping_latency_dcfclk_cycles = 131,
.urgent_out_of_order_return_per_channel_bytes = 4096,
.channel_interleave_bytes = 256,
.num_banks = 8,
.num_chans = 16,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 45.0,
.writeback_dram_clock_change_latency_us = 23.0,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3850,
.xfc_bus_transport_time_us = 20,
.xfc_xbuf_latency_tolerance_us = 50,
.use_urgent_burst_bw = 0,
};
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,


@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
u32 request_val = AUX_CMD_REQ(msg->request);
u8 *buf = msg->buffer;
unsigned int len = msg->size;
unsigned int short_len;
unsigned int val;
int ret;
u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];
@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
}
if (val & AUX_IRQ_STATUS_AUX_SHORT) {
ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len);
len = min(len, short_len);
if (ret)
goto exit;
} else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {


@ -3707,7 +3707,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
intel_dp->train_set, crtc_state->lane_count);
drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
link_status[DP_DPCD_REV]);
intel_dp->dpcd[DP_DPCD_REV]);
}
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)


@ -390,6 +390,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
void mgag200_crtc_set_gamma(struct mga_device *mdev,
const struct drm_format_info *format,
struct drm_color_lut *lut);
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);


@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
if (crtc_state->gamma_lut)
mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
else
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)


@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
if (crtc_state->gamma_lut)
mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
else
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)


@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
if (crtc_state->gamma_lut)
mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
else
mgag200_crtc_set_gamma_linear(mdev, format);
mgag200_enable_display(mdev);
if (funcs->enable_vidrst)


@ -28,8 +28,8 @@
* This file contains setup code for the CRTC.
*/
static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
const struct drm_format_info *format)
void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
const struct drm_format_info *format)
{
int i;
@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
static void mgag200_crtc_set_gamma(struct mga_device *mdev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
void mgag200_crtc_set_gamma(struct mga_device *mdev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
int i;


@ -3,6 +3,7 @@
* i2c-core.h - interfaces internal to the I2C framework
*/
#include <linux/kconfig.h>
#include <linux/rwsem.h>
struct i2c_devinfo {
@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
*/
static inline bool i2c_in_atomic_xfer_mode(void)
{
return system_state > SYSTEM_RUNNING && !preemptible();
return system_state > SYSTEM_RUNNING &&
(IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled());
}
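/*
 * Illustrative note (editor's addition, not part of the patch): on kernels
 * built without CONFIG_PREEMPT_COUNT, preemptible() is hard-coded to 0, so
 * the old "!preemptible()" test was always true once system_state passed
 * SYSTEM_RUNNING; checking irqs_disabled() there instead keeps atomic
 * transfers limited to contexts that actually cannot sleep.
 */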
static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)


@ -551,7 +551,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);


@ -334,13 +334,14 @@ static const struct csid_format csid_formats[] = {
},
};
static void csid_configure_stream(struct csid_device *csid, u8 enable)
static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
{
struct csid_testgen_config *tg = &csid->testgen;
u32 val;
u32 phy_sel = 0;
u8 lane_cnt = csid->phy.lane_cnt;
struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_SRC];
/* Source pads matching RDI channels on hardware. Pad 1 -> RDI0, Pad 2 -> RDI1, etc. */
struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
const struct csid_format *format = csid_get_fmt_entry(csid->formats, csid->nformats,
input_format->code);
@ -351,8 +352,19 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
phy_sel = csid->phy.csiphy_id;
if (enable) {
u8 vc = 0; /* Virtual Channel 0 */
u8 dt_id = vc * 4;
/*
* DT_ID is a two bit bitfield that is concatenated with
* the four least significant bits of the five bit VC
* bitfield to generate an internal CID value.
*
* CSID_RDI_CFG0(vc)
* DT_ID : 28:27
* VC : 26:22
* DT : 21:16
*
* CID : VC 3:0 << 2 | DT_ID 1:0
*/
u8 dt_id = vc & 0x03;
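		/*
		 * Illustrative note (editor's addition, not part of the upstream
		 * patch): with the formula above, vc = 2 gives
		 * dt_id = 2 & 0x03 = 2 and an internal CID of
		 * ((2 & 0xf) << 2) | 2 = 0xa.
		 */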
if (tg->enabled) {
/* configure one DT, infinite frames */
@ -392,42 +404,42 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
val |= format->data_type << RDI_CFG0_DATA_TYPE;
val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
val |= dt_id << RDI_CFG0_DT_ID;
writel_relaxed(val, csid->base + CSID_RDI_CFG0(0));
writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
/* CSID_TIMESTAMP_STB_POST_IRQ */
val = 2 << RDI_CFG1_TIMESTAMP_STB_SEL;
writel_relaxed(val, csid->base + CSID_RDI_CFG1(0));
writel_relaxed(val, csid->base + CSID_RDI_CFG1(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(0));
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(0));
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(0));
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(0));
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(0));
writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(0));
writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(0));
writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(0));
writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_CTRL(0));
writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc));
val = readl_relaxed(csid->base + CSID_RDI_CFG0(0));
val = readl_relaxed(csid->base + CSID_RDI_CFG0(vc));
val |= 1 << RDI_CFG0_ENABLE;
writel_relaxed(val, csid->base + CSID_RDI_CFG0(0));
writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
}
if (tg->enabled) {
@ -446,6 +458,8 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
if (vc > 3)
val |= 1 << CSI2_RX_CFG1_VC_MODE;
val |= 1 << CSI2_RX_CFG1_MISR_EN;
writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
@ -453,7 +467,16 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable)
val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
else
val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
writel_relaxed(val, csid->base + CSID_RDI_CTRL(0));
writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc));
}
static void csid_configure_stream(struct csid_device *csid, u8 enable)
{
u8 i;
/* Loop through all enabled VCs and configure stream for each */
for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
if (csid->phy.en_vc & BIT(i))
__csid_configure_stream(csid, enable, i);
}
static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
@ -499,6 +522,7 @@ static irqreturn_t csid_isr(int irq, void *dev)
struct csid_device *csid = dev;
u32 val;
u8 reset_done;
int i;
val = readl_relaxed(csid->base + CSID_TOP_IRQ_STATUS);
writel_relaxed(val, csid->base + CSID_TOP_IRQ_CLEAR);
@ -507,8 +531,12 @@ static irqreturn_t csid_isr(int irq, void *dev)
val = readl_relaxed(csid->base + CSID_CSI2_RX_IRQ_STATUS);
writel_relaxed(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(0));
writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(0));
/* Read and clear IRQ status for each enabled RDI channel */
for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
if (csid->phy.en_vc & BIT(i)) {
val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
}
val = 1 << IRQ_CMD_CLEAR;
writel_relaxed(val, csid->base + CSID_IRQ_CMD);


@ -196,6 +196,8 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
return ret;
}
csid->phy.need_vc_update = true;
enable_irq(csid->irq);
ret = csid->ops->reset(csid);
@ -249,7 +251,10 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
return -ENOLINK;
}
csid->ops->configure_stream(csid, enable);
if (csid->phy.need_vc_update) {
csid->ops->configure_stream(csid, enable);
csid->phy.need_vc_update = false;
}
return 0;
}
@ -460,6 +465,7 @@ static int csid_set_format(struct v4l2_subdev *sd,
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
int i;
format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
@ -468,14 +474,14 @@ static int csid_set_format(struct v4l2_subdev *sd,
csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
/* Propagate the format from sink to source pads */
if (fmt->pad == MSM_CSID_PAD_SINK) {
format = __csid_get_format(csid, sd_state, MSM_CSID_PAD_SRC,
fmt->which);
for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) {
format = __csid_get_format(csid, sd_state, i, fmt->which);
*format = fmt->format;
csid_try_format(csid, sd_state, MSM_CSID_PAD_SRC, format,
fmt->which);
*format = fmt->format;
csid_try_format(csid, sd_state, i, format, fmt->which);
}
}
return 0;
@ -738,7 +744,6 @@ static int csid_link_setup(struct media_entity *entity,
struct csid_device *csid;
struct csiphy_device *csiphy;
struct csiphy_lanes_cfg *lane_cfg;
struct v4l2_subdev_format format = { 0 };
sd = media_entity_to_v4l2_subdev(entity);
csid = v4l2_get_subdevdata(sd);
@ -761,11 +766,22 @@ static int csid_link_setup(struct media_entity *entity,
lane_cfg = &csiphy->cfg.csi2->lane_cfg;
csid->phy.lane_cnt = lane_cfg->num_data;
csid->phy.lane_assign = csid_get_lane_assign(lane_cfg);
}
/* Decide which virtual channels to enable based on which source pads are enabled */
if (local->flags & MEDIA_PAD_FL_SOURCE) {
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct device *dev = csid->camss->dev;
/* Reset format on source pad to sink pad format */
format.pad = MSM_CSID_PAD_SRC;
format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
csid_set_format(&csid->subdev, NULL, &format);
if (flags & MEDIA_LNK_FL_ENABLED)
csid->phy.en_vc |= BIT(local->index - 1);
else
csid->phy.en_vc &= ~BIT(local->index - 1);
csid->phy.need_vc_update = true;
dev_dbg(dev, "%s: Enabled CSID virtual channels mask 0x%x\n",
__func__, csid->phy.en_vc);
}
return 0;
@ -816,6 +832,7 @@ int msm_csid_register_entity(struct csid_device *csid,
struct v4l2_subdev *sd = &csid->subdev;
struct media_pad *pads = csid->pads;
struct device *dev = csid->camss->dev;
int i;
int ret;
v4l2_subdev_init(sd, &csid_v4l2_ops);
@ -852,7 +869,8 @@ int msm_csid_register_entity(struct csid_device *csid,
}
pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i)
pads[i].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
sd->entity.ops = &csid_media_ops;


@ -19,8 +19,13 @@
#include <media/v4l2-subdev.h>
#define MSM_CSID_PAD_SINK 0
#define MSM_CSID_PAD_SRC 1
#define MSM_CSID_PADS_NUM 2
#define MSM_CSID_PAD_FIRST_SRC 1
#define MSM_CSID_PADS_NUM 5
#define MSM_CSID_PAD_SRC (MSM_CSID_PAD_FIRST_SRC)
/* CSID hardware can demultiplex up to 4 outputs */
#define MSM_CSID_MAX_SRC_STREAMS 4
#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
#define DATA_TYPE_YUV420_8BIT 0x18
@ -81,6 +86,8 @@ struct csid_phy_config {
u8 csiphy_id;
u8 lane_cnt;
u32 lane_assign;
u32 en_vc;
u8 need_vc_update;
};
struct csid_device;


@ -869,9 +869,10 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
if ((part_type & mask) == mask) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@ -886,9 +887,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
if ((part_type & mask) == mask) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@ -3183,4 +3185,3 @@ module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");


@ -671,6 +671,7 @@ EXPORT_SYMBOL(mmc_remove_host);
*/
void mmc_free_host(struct mmc_host *host)
{
cancel_delayed_work_sync(&host->detect);
mmc_pwrseq_free(host);
put_device(&host->class_dev);
}


@ -269,7 +269,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc)
static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
u32 rx_clk_phase;
u32 val, rx_clk_phase;
int ret;
meson_mx_sdhc_disable_clks(mmc);
@ -290,27 +290,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios)
mmc->actual_clock = clk_get_rate(host->sd_clk);
/*
* according to Amlogic the following latching points are
* selected with empirical values, there is no (known) formula
* to calculate these.
* Phase 90 should work in most cases. For data transmission,
* meson_mx_sdhc_execute_tuning() will find a accurate value
*/
if (mmc->actual_clock > 100000000) {
rx_clk_phase = 1;
} else if (mmc->actual_clock > 45000000) {
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
rx_clk_phase = 15;
else
rx_clk_phase = 11;
} else if (mmc->actual_clock >= 25000000) {
rx_clk_phase = 15;
} else if (mmc->actual_clock > 5000000) {
rx_clk_phase = 23;
} else if (mmc->actual_clock > 1000000) {
rx_clk_phase = 55;
} else {
rx_clk_phase = 1061;
}
regmap_read(host->regmap, MESON_SDHC_CLKC, &val);
rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4;
regmap_update_bits(host->regmap, MESON_SDHC_CLK2,
MESON_SDHC_CLK2_RX_CLK_PHASE,
FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE,


@ -228,15 +228,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
sdhci_enable_clk(host, div);
val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
/* Enable CLK_AUTO when the clock is greater than 400K. */
if (clk > 400000) {
val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN |
SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN;
if (mask != (val & mask)) {
val |= mask;
sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
}
} else {
if (val & mask) {
val &= ~mask;
sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI);
}
}
}


@ -12081,6 +12081,8 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_cfg_ntp_filters(bp);
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
netdev_info(bp->dev, "Receive PF driver unload event!\n");
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp, 0);
bnxt_hwrm_port_qstats_ext(bp, 0);
@ -13059,8 +13061,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else


@ -2131,8 +2131,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
/* Note: if we ever change from DMA_TX_APPEND_CRC below we
* will need to restore software padding of "runt" packets
*/
len_stat |= DMA_TX_APPEND_CRC;
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
len_stat |= DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
len_stat |= DMA_TX_DO_CSUM;
}


@ -509,8 +509,6 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return skb;
}
@ -528,6 +526,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_fas *fas;
bool recycle_rx_buf = false;
void *buf_data;
u32 status = 0;
u32 xdp_act;
@ -560,6 +559,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
} else {
recycle_rx_buf = true;
}
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@ -607,6 +608,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
list_add_tail(&skb->list, ch->rx_list);
if (recycle_rx_buf)
dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return;
err_build_skb:


@ -227,17 +227,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
int i = 0;
int j, k, err;
int num_cnt;
union dpni_statistics dpni_stats;
u32 fcnt, bcnt;
u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *extras;
struct dpaa2_eth_ch_stats *ch_stats;
union dpni_statistics dpni_stats;
int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
sizeof(dpni_stats.page_0),
sizeof(dpni_stats.page_1),
@ -247,6 +238,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
sizeof(dpni_stats.page_5),
sizeof(dpni_stats.page_6),
};
u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
struct dpaa2_eth_ch_stats *ch_stats;
struct dpaa2_eth_drv_stats *extras;
int j, k, err, num_cnt, i = 0;
u32 fcnt, bcnt;
u32 buf_cnt;
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));


@ -104,12 +104,18 @@ static struct workqueue_struct *i40e_wq;
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
struct net_device *netdev, int delta)
{
struct netdev_hw_addr_list *ha_list;
struct netdev_hw_addr *ha;
if (!f || !netdev)
return;
netdev_for_each_mc_addr(ha, netdev) {
if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
ha_list = &netdev->uc;
else
ha_list = &netdev->mc;
netdev_hw_addr_list_for_each(ha, ha_list) {
if (ether_addr_equal(ha->addr, f->macaddr)) {
ha->refcount += delta;
if (ha->refcount <= 0)
@ -16444,6 +16450,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
return;
i40e_reset_and_rebuild(pf, false, false);
#ifdef CONFIG_PCI_IOV
i40e_restore_all_vfs_msi_state(pdev);
#endif /* CONFIG_PCI_IOV */
}
/**


@ -152,6 +152,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
#ifdef CONFIG_PCI_IOV
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
u16 vf_id;
u16 pos;
/* Continue only if this is a PF */
if (!pdev->is_physfn)
return;
if (!pci_num_vf(pdev))
return;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos) {
struct pci_dev *vf_dev = NULL;
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
pci_restore_msi_state(vf_dev);
}
}
}
#endif /* CONFIG_PCI_IOV */
/**
* i40e_vc_notify_vf_reset
* @vf: pointer to the VF structure
@ -3451,16 +3477,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
bool found = false;
int bkt;
if (!tc_filter->action) {
if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
dev_info(&pf->pdev->dev,
"VF %d: Currently ADq doesn't support Drop Action\n",
vf->vf_id);
"VF %d: ADQ doesn't support this action (%d)\n",
vf->vf_id, tc_filter->action);
goto err;
}
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
tc_filter->action_meta > I40E_MAX_VF_VSI) {
tc_filter->action_meta > vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;


@ -135,6 +135,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
#ifdef CONFIG_PCI_IOV
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev);
#endif /* CONFIG_PCI_IOV */
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
struct ifla_vf_stats *vf_stats);


@ -2138,7 +2138,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EPERM;
return -ENOMEDIUM;
ice_print_topo_conflict(vsi);
@ -9065,8 +9065,14 @@ int ice_stop(struct net_device *netdev)
int link_err = ice_force_phys_link_state(vsi, false);
if (link_err) {
netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
if (link_err == -ENOMEDIUM)
netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
vsi->vsi_num);
else
netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
ice_vsi_close(vsi);
return -EIO;
}
}


@ -538,6 +538,7 @@ struct igc_nfc_filter {
u16 etype;
__be16 vlan_etype;
u16 vlan_tci;
u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
u8 dst_addr[ETH_ALEN];
u8 user_data[8];


@ -957,6 +957,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
#define VLAN_TCI_FULL_MASK ((__force __be16)~0)
static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
@ -979,10 +980,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
fsp->flow_type |= FLOW_EXT;
fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
fsp->flow_type |= FLOW_EXT;
fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci);
fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask);
}
if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
@ -1217,6 +1224,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci);
rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
}
@ -1254,11 +1262,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data));
}
/* When multiple filter options or user data or vlan etype is set, use a
* flex filter.
/* The i225/i226 has various different filters. Flex filters provide a
* way to match up to the first 128 bytes of a packet. Use them for:
* a) For specific user data
* b) For VLAN EtherType
* c) For full TCI match
* d) Or in case multiple filter criteria are set
*
* Otherwise, use the simple MAC, VLAN PRIO or EtherType filters.
*/
if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) ||
(rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) ||
((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) &&
rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) ||
(rule->filter.match_flags & (rule->filter.match_flags - 1)))
rule->flex = true;
else
@ -1328,6 +1344,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter,
return -EINVAL;
}
/* There are two ways to match the VLAN TCI:
* 1. Match on PCP field and use vlan prio filter for it
* 2. Match on complete TCI field and use flex filter for it
*/
if ((fsp->flow_type & FLOW_EXT) &&
fsp->m_ext.vlan_tci &&
fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) &&
fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) {
netdev_dbg(netdev, "VLAN mask not supported\n");
return -EOPNOTSUPP;
}
/* VLAN EtherType can only be matched by full mask. */
if ((fsp->flow_type & FLOW_EXT) &&
fsp->m_ext.vlan_etype &&
fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) {
netdev_dbg(netdev, "VLAN EtherType mask not supported\n");
return -EOPNOTSUPP;
}
if (fsp->location >= IGC_MAX_RXNFC_RULES) {
netdev_dbg(netdev, "Invalid location\n");
return -EINVAL;


@ -178,7 +178,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_TQAVCC(i), tqavcc);
wr32(IGC_TQAVHC(i),
0x80000000 + ring->hicredit * 0x7735);
0x80000000 + ring->hicredit * 0x7736);
} else {
/* Disable any CBS for the queue */
txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK);


@ -78,7 +78,7 @@ static bool is_dev_rpm(void *cgxd)
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
return false;
return test_bit(lmac_id, &cgx->lmac_bmap);
}
@ -90,7 +90,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
int tmp, id = 0;
for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
if (tmp == lmac_id)
break;
id++;
@ -121,7 +121,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
return NULL;
return cgx->lmac_idmap[lmac_id];
@ -1410,7 +1410,7 @@ int cgx_get_fwdata_base(u64 *base)
if (!cgx)
return -ENXIO;
first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
@ -1499,7 +1499,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
@ -1537,7 +1537,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
int i, err;
/* Do Link up for all the enabled lmacs */
for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@ -1557,14 +1557,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
int cnt, bool req_free)
{
@ -1619,17 +1611,14 @@ static int cgx_lmac_init(struct cgx *cgx)
u64 lmac_list;
int i, err;
cgx_lmac_get_fifolen(cgx);
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
*/
if (cgx->mac_ops->non_contiguous_serdes_lane)
lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
cgx->lmac_count = MAX_LMAC_PER_CGX;
if (cgx->lmac_count > cgx->max_lmac_per_mac)
cgx->lmac_count = cgx->max_lmac_per_mac;
for (i = 0; i < cgx->lmac_count; i++) {
lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
@ -1707,7 +1696,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
@ -1723,6 +1712,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
u64 cfg;
cfg = cgx_read(cgx, 0, CGX_CONST);
cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
if (is_dev_rpm(cgx))
cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);


@ -18,11 +18,8 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
#define CGX_ID_MASK 0xF
#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
@ -56,6 +53,7 @@
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
#define CGXX_SPUX_CONTROL1 0x10000
#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800


@ -128,7 +128,10 @@ struct cgx {
struct pci_dev *pdev;
u8 cgx_id;
u8 lmac_count;
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
/* number of LMACs per MAC could be 4 or 8 */
u8 max_lmac_per_mac;
#define MAX_LMAC_COUNT 8
struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;
struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;


@ -514,7 +514,7 @@ struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
u8 lid;
};
} __packed;
struct npc_lt_def_ipsec {
u8 ltype_mask;
@ -522,7 +522,7 @@ struct npc_lt_def_ipsec {
u8 lid;
u8 spi_offset;
u8 spi_nz;
};
} __packed;
struct npc_lt_def_apad {
u8 ltype_mask;


@ -283,6 +283,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
/* Disable forward pause to driver */
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
/* Enable channel mask for all LMACS */
rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
}
@ -451,12 +456,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (rx_pause) {
cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
} else {
cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE);
}
if (tx_pause) {


@ -480,7 +480,7 @@ struct rvu {
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
*/
unsigned long pf_notify_bmap; /* Flags for PF notification */
@ -850,6 +850,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable);
int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
u16 pfc_en);
int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);


@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
return (cgx_features_get(cgxd) & feature);
}
#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
if (!pfmap)
return -ENODEV;
else
return find_first_bit(&pfmap, 16);
return find_first_bit(&pfmap,
rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!cgx_cnt_max)
return 0;
if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
rvu->cgxlmac2pf_map =
devm_kzalloc(rvu->dev,
cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!rvu_cgx_pdata(cgx, rvu))
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
pfid = find_first_bit(&pfmap, 16);
pfid = find_first_bit(&pfmap,
rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@ -456,6 +460,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}
int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable)
{
int pf = rvu_get_pf(pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
mac_ops = get_mac_ops(cgxd);
return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
struct mac_ops *mac_ops;


@ -2618,7 +2618,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =


@ -3923,90 +3923,18 @@ static void nix_find_link_frs(struct rvu *rvu,
req->minlen = minlen;
}
static int
nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
u16 pcifunc, u64 tx_credits)
{
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
unsigned long poll_tmo;
bool restore_tx_en = 0;
struct nix_hw *nix_hw;
u64 cfg, sw_xoff = 0;
u32 schq = 0;
u32 credits;
int rc;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
if (tx_credits == nix_hw->tx_credits[link])
return 0;
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
mutex_lock(&rvu->rsrc_lock);
/* Disable new traffic to link */
if (hw->cap.nix_shaping) {
schq = nix_get_tx_link(rvu, pcifunc);
sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
rvu_write64(rvu, blkaddr,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
rc = NIX_AF_ERR_LINK_CREDITS;
poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
goto exit;
usleep_range(100, 200);
cfg = rvu_read64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link));
credits = (cfg >> 12) & 0xFFFFFULL;
} while (credits != nix_hw->tx_credits[link]);
cfg &= ~(0xFFFFFULL << 12);
cfg |= (tx_credits << 12);
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
rc = 0;
nix_hw->tx_credits[link] = tx_credits;
exit:
/* Enable traffic back */
if (hw->cap.nix_shaping && !sw_xoff)
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
/* Restore state of cgx tx */
if (restore_tx_en)
rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
}
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int pf = rvu_get_pf(pcifunc);
int blkaddr, schq, link = -1;
struct nix_txsch *txsch;
u64 cfg, lmac_fifo_len;
int blkaddr, link = -1;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
u8 cgx = 0, lmac = 0;
u16 max_mtu;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@ -4027,25 +3955,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
return NIX_AF_ERR_FRS_INVALID;
/* Check if requester wants to update SMQ's */
if (!req->update_smq)
goto rx_frscfg;
/* Update min/maxlen in each of the SMQ attached to this PF/VF */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
mutex_lock(&rvu->rsrc_lock);
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
if (req->update_minlen)
cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
}
mutex_unlock(&rvu->rsrc_lock);
rx_frscfg:
/* Check if config is for SDP link */
if (req->sdp_link) {
if (!hw->sdp_links)
@ -4068,7 +3977,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (link < 0)
return NIX_AF_ERR_RX_LINK_INVALID;
linkcfg:
nix_find_link_frs(rvu, req, pcifunc);
@ -4078,19 +3986,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
cfg = (cfg & ~0xFFFFULL) | req->minlen;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
if (req->sdp_link || pf == 0)
return 0;
/* Update transmit credits for CGX links */
lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
if (!lmac_fifo_len) {
dev_err(rvu->dev,
"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
__func__, cgx, lmac);
return 0;
}
return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
(lmac_fifo_len - req->maxlen) / 16);
return 0;
}
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
@ -4183,7 +4079,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Get LMAC id's from bitmap */
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
if (!lmac_fifo_len) {
dev_err(rvu->dev,
@ -4610,7 +4506,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (err)
return err;
rvu_cgx_tx_enable(rvu, pcifunc, true);
return 0;
}
#define RX_SA_BASE GENMASK_ULL(52, 7)

View File

@ -1999,7 +1999,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Install SDP drop rule */
drop_mcam_idx = &table->num_drop_rules;
max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
PF_CGXMAP_BASE;
for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
if (rvu->pf2cgxlmac_map[i] == 0xFF)
continue;


@ -25,7 +25,7 @@
struct mlx5_irq {
struct atomic_notifier_head nh;
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
char name[MLX5_MAX_IRQ_FORMATTED_NAME];
struct mlx5_irq_pool *pool;
int refcount;
u32 index;
@ -236,8 +236,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
snprintf(irq->name, MLX5_MAX_IRQ_NAME,
"%s@pci:%s", name, pci_name(dev->pdev));
snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
&irq->nh);
if (err) {


@ -7,6 +7,9 @@
#include <linux/mlx5/driver.h>
#define MLX5_MAX_IRQ_NAME (32)
#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
#define MLX5_MAX_IRQ_FORMATTED_NAME \
(MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
/* max irq_index is 2047, so four chars */
#define MLX5_MAX_IRQ_IDX_CHARS (4)
#define MLX5_EQ_REFS_PER_IRQ (2)


@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
priv->stats.rx_truncate_errors++;
}
/* Read receive consumer index before replenish so that this routine
* returns accurate return value even if packet is received into
* just-replenished buffer prior to exiting this routine.
*/
rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
rx_ci_rem = rx_ci % priv->rx_q_entries;
/* Let hardware know we've replenished one buffer */
rx_pi++;
@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
rx_pi_rem = rx_pi % priv->rx_q_entries;
if (rx_pi_rem == 0)
priv->valid_polarity ^= 1;
rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
rx_ci_rem = rx_ci % priv->rx_q_entries;
if (skb)
netif_receive_skb(skb);


@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
kfree(qdev->lrg_buf);
return -ENOMEM;
}
qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->lrg_buf_q_alloc_size,
qdev->lrg_buf_q_alloc_virt_addr,
qdev->lrg_buf_q_alloc_phy_addr);
kfree(qdev->lrg_buf);
return -ENOMEM;
}


@ -1145,7 +1145,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
}
static void rtl8168_driver_start(struct rtl8169_private *tp)


@ -68,16 +68,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
return -ETIMEDOUT;
}
static int ravb_config(struct net_device *ndev)
static int ravb_set_opmode(struct net_device *ndev, u32 opmode)
{
u32 csr_ops = 1U << (opmode & CCC_OPC);
u32 ccc_mask = CCC_OPC;
int error;
/* Set config mode */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Check if the operating mode is changed to the config mode */
error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
if (error)
netdev_err(ndev, "failed to switch device to config mode\n");
/* If gPTP active in config mode is supported it needs to be configured
* along with CSEL and operating mode in the same access. This is a
* hardware limitation.
*/
if (opmode & CCC_GAC)
ccc_mask |= CCC_GAC | CCC_CSEL;
/* Set operating mode */
ravb_modify(ndev, CCC, ccc_mask, opmode);
/* Check if the operating mode is changed to the requested one */
error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops);
if (error) {
netdev_err(ndev, "failed to switch device to requested mode (%u)\n",
opmode & CCC_OPC);
}
return error;
}
@ -675,7 +686,7 @@ static int ravb_dmac_init(struct net_device *ndev)
int error;
/* Set CONFIG mode */
error = ravb_config(ndev);
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
return error;
@ -684,9 +695,7 @@ static int ravb_dmac_init(struct net_device *ndev)
return error;
/* Setting the control will start the AVB-DMAC process. */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
return 0;
return ravb_set_opmode(ndev, CCC_OPC_OPERATION);
}
static void ravb_get_tx_tstamp(struct net_device *ndev)
@ -1048,7 +1057,7 @@ static int ravb_stop_dma(struct net_device *ndev)
return error;
/* Stop AVB-DMAC process */
return ravb_config(ndev);
return ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
/* E-MAC interrupt handler */
@ -2576,21 +2585,25 @@ static int ravb_set_gti(struct net_device *ndev)
return 0;
}
static void ravb_set_config_mode(struct net_device *ndev)
static int ravb_set_config_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
int error;
if (info->gptp) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
if (error)
return error;
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
} else if (info->ccc_gac) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
CCC_GAC | CCC_CSEL_HPB);
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
} else {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
}
return error;
}
/* Set tx and rx clock internal delay modes */
@ -2810,7 +2823,9 @@ static int ravb_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ravb_ethtool_ops;
/* Set AVB config mode */
ravb_set_config_mode(ndev);
error = ravb_set_config_mode(ndev);
if (error)
goto out_disable_gptp_clk;
if (info->gptp || info->ccc_gac) {
/* Set GTI value */
@ -2933,8 +2948,7 @@ static int ravb_remove(struct platform_device *pdev)
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
/* Set reset mode */
ravb_write(ndev, CCC_OPC_RESET, CCC);
ravb_set_opmode(ndev, CCC_OPC_RESET);
clk_disable_unprepare(priv->gptp_clk);
clk_disable_unprepare(priv->refclk);
@ -3018,8 +3032,11 @@ static int __maybe_unused ravb_resume(struct device *dev)
int ret = 0;
/* If WoL is enabled set reset mode to rearm the WoL logic */
if (priv->wol_enabled)
ravb_write(ndev, CCC_OPC_RESET, CCC);
if (priv->wol_enabled) {
ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
if (ret)
return ret;
}
/* All registers have been reset to default values.
* Restore all registers which where setup at probe time and
@ -3027,7 +3044,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
*/
/* Set AVB config mode */
ravb_set_config_mode(ndev);
ret = ravb_set_config_mode(ndev);
if (ret)
return ret;
if (info->gptp || info->ccc_gac) {
/* Set GTI value */


@ -820,8 +820,10 @@ int efx_probe_filters(struct efx_nic *efx)
}
if (!success) {
efx_for_each_channel(channel, efx)
efx_for_each_channel(channel, efx) {
kfree(channel->rps_flow_id);
channel->rps_flow_id = NULL;
}
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;


@ -629,7 +629,7 @@ static void __gtp_encap_destroy(struct sock *sk)
gtp->sk0 = NULL;
else
gtp->sk1u = NULL;
udp_sk(sk)->encap_type = 0;
WRITE_ONCE(udp_sk(sk)->encap_type, 0);
rcu_assign_sk_user_data(sk, NULL);
release_sock(sk);
sock_put(sk);
@ -681,7 +681,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
switch (udp_sk(sk)->encap_type) {
switch (READ_ONCE(udp_sk(sk)->encap_type)) {
case UDP_ENCAP_GTP0:
netdev_dbg(gtp->dev, "received GTP0 packet\n");
ret = gtp0_udp_encap_recv(gtp, skb);


@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN];
struct ax88172a_private *priv;
usbnet_get_endpoints(dev, intf);
ret = usbnet_get_endpoints(dev, intf);
if (ret)
return ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)


@ -348,8 +348,8 @@
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
#define WFPM_OTP_CFG1_ADDR 0x00a03098
#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5)
#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4)
#define WFPM_GP2 0xA030B4


@ -745,7 +745,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
@ -792,7 +792,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS


@ -1781,7 +1781,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
return inta;
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@ -1805,7 +1805,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
isr_stats->rfkill++;
if (prev != report)
iwl_trans_pcie_rf_kill(trans, report);
iwl_trans_pcie_rf_kill(trans, report, from_irq);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
@ -1945,7 +1945,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
iwl_pcie_handle_rfkill_irq(trans);
iwl_pcie_handle_rfkill_irq(trans, true);
handled |= CSR_INT_BIT_RF_KILL;
}
@ -2362,7 +2362,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
iwl_pcie_handle_rfkill_irq(trans);
iwl_pcie_handle_rfkill_irq(trans, true);
if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
IWL_ERR(trans,


@ -1080,7 +1080,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
if (prev != report)
iwl_trans_pcie_rf_kill(trans, report);
iwl_trans_pcie_rf_kill(trans, report, false);
return hw_rfkill;
}
@ -1234,7 +1234,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -1261,7 +1261,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
iwl_pcie_synchronize_irqs(trans);
if (!from_irq)
iwl_pcie_synchronize_irqs(trans);
iwl_pcie_rx_napi_sync(trans);
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@ -1451,7 +1452,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
}
if (hw_rfkill != was_in_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@ -1466,12 +1467,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
_iwl_trans_pcie_stop_device(trans);
_iwl_trans_pcie_stop_device(trans, false);
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@ -1484,7 +1485,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
if (trans->trans_cfg->gen2)
_iwl_trans_pcie_gen2_stop_device(trans);
else
_iwl_trans_pcie_stop_device(trans);
_iwl_trans_pcie_stop_device(trans, from_irq);
}
}
@ -2815,7 +2816,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
trans_pcie->debug_rfkill, new_value);
trans_pcie->debug_rfkill = new_value;
iwl_pcie_handle_rfkill_irq(trans);
iwl_pcie_handle_rfkill_irq(trans, false);
return count;
}


@ -1302,6 +1302,9 @@ static int pci_set_full_power_state(struct pci_dev *dev)
pci_restore_bars(dev);
}
if (dev->bus->self)
pcie_aspm_pm_state_change(dev->bus->self);
return 0;
}
@ -1396,6 +1399,9 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(dev->current_state),
pci_power_name(state));
if (dev->bus->self)
pcie_aspm_pm_state_change(dev->bus->self);
return 0;
}


@ -567,10 +567,12 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_pm_state_change(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif


@ -1055,6 +1055,25 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
/* @pdev: the root port or switch downstream port */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
if (aspm_disabled || !link)
return;
/*
* Devices changed PM state, we should recheck if latency
* meets all functions' requirement
*/
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
pcie_update_aspm_capable(link->root);
pcie_config_aspm_path(link);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;


@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info)
if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
framebuffer_release(info);
return -ENODEV;
}
@ -1452,10 +1451,11 @@ static int init_imstt(struct fb_info *info)
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_YPAN;
fb_alloc_cmap(&info->cmap, 0, 0);
if (fb_alloc_cmap(&info->cmap, 0, 0))
return -ENODEV;
if (register_framebuffer(info) < 0) {
framebuffer_release(info);
fb_dealloc_cmap(&info->cmap);
return -ENODEV;
}


@ -68,6 +68,8 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
&path, sizeof(path),
&version, sizeof(version),
i_size_read(&v9inode->netfs.inode));
if (v9inode->netfs.cache)
mapping_set_release_always(inode->i_mapping);
p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
inode, v9fs_inode_cookie(v9inode));


@ -682,6 +682,8 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode,
{
#ifdef CONFIG_AFS_FSCACHE
vnode->netfs.cache = cookie;
if (cookie)
mapping_set_release_always(vnode->netfs.inode.i_mapping);
#endif
}


@ -197,7 +197,7 @@ void btrfs_free_reserved_data_space(struct btrfs_inode *inode,
start = round_down(start, fs_info->sectorsize);
btrfs_free_reserved_data_space_noquota(fs_info, len);
btrfs_qgroup_free_data(inode, reserved, start, len);
btrfs_qgroup_free_data(inode, reserved, start, len, NULL);
}
/**


@ -602,7 +602,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
}
sums->bytenr = start;
sums->len = (int)size;
sums->len = size;
offset = (start - key.offset) >> fs_info->sectorsize_bits;
offset *= csum_size;


@ -3191,7 +3191,7 @@ static long btrfs_fallocate(struct file *file, int mode,
qgroup_reserved -= range->len;
} else if (qgroup_reserved > 0) {
btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
range->start, range->len);
range->start, range->len, NULL);
qgroup_reserved -= range->len;
}
list_del(&range->list);


@ -466,7 +466,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
* And at reserve time, it's always aligned to page size, so
* just free one page here.
*/
btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE);
btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
btrfs_free_path(path);
btrfs_end_transaction(trans);
return ret;
@ -5372,7 +5372,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
*/
if (state_flags & EXTENT_DELALLOC)
btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
end - start + 1);
end - start + 1, NULL);
clear_extent_bit(io_tree, start, end,
EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
@ -8440,7 +8440,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
* reserved data space.
* Since the IO will never happen for this page.
*/
btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur);
btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
if (!inode_evicting) {
clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_UPTODATE |
@ -9902,7 +9902,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
struct btrfs_path *path;
u64 start = ins->objectid;
u64 len = ins->offset;
int qgroup_released;
u64 qgroup_released = 0;
int ret;
memset(&stack_fi, 0, sizeof(stack_fi));
@ -9915,9 +9915,9 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
/* Encryption and other encoding is reserved and all 0 */
qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
if (qgroup_released < 0)
return ERR_PTR(qgroup_released);
ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
if (ret < 0)
return ERR_PTR(ret);
if (trans) {
ret = insert_reserved_file_extent(trans, inode,
@ -10903,7 +10903,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
if (ret < 0)
btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes);
btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
/*
* If btrfs_reserve_extent() succeeded, then we already decremented


@ -172,11 +172,12 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
struct rb_node *node;
struct btrfs_ordered_extent *entry;
int ret;
u64 qgroup_rsv = 0;
if (flags &
((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
/* For nocow write, we can release the qgroup rsv right now */
ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
if (ret < 0)
return ret;
ret = 0;
@ -185,7 +186,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
* The ordered extent has reserved qgroup space, release now
* and pass the reserved number for qgroup_record to free.
*/
ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes, &qgroup_rsv);
if (ret < 0)
return ret;
}
@ -203,7 +204,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
entry->inode = igrab(&inode->vfs_inode);
entry->compress_type = compress_type;
entry->truncated_len = (u64)-1;
entry->qgroup_rsv = ret;
entry->qgroup_rsv = qgroup_rsv;
entry->physical = (u64)-1;
ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);


@ -20,7 +20,7 @@ struct btrfs_ordered_sum {
/*
* this is the length in bytes covered by the sums array below.
*/
int len;
u32 len;
struct list_head list;
/* last field is a variable length array of csums */
u8 sums[];


@ -3833,13 +3833,14 @@ int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
/* Free ranges specified by @reserved, normally in error path */
static int qgroup_free_reserved_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len)
struct extent_changeset *reserved,
u64 start, u64 len, u64 *freed_ret)
{
struct btrfs_root *root = inode->root;
struct ulist_node *unode;
struct ulist_iterator uiter;
struct extent_changeset changeset;
int freed = 0;
u64 freed = 0;
int ret;
extent_changeset_init(&changeset);
@ -3880,7 +3881,9 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
}
btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
BTRFS_QGROUP_RSV_DATA);
ret = freed;
if (freed_ret)
*freed_ret = freed;
ret = 0;
out:
extent_changeset_release(&changeset);
return ret;
@ -3888,7 +3891,7 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len,
int free)
u64 *released, int free)
{
struct extent_changeset changeset;
int trace_op = QGROUP_RELEASE;
@ -3900,7 +3903,7 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
/* In release case, we shouldn't have @reserved */
WARN_ON(!free && reserved);
if (free && reserved)
return qgroup_free_reserved_data(inode, reserved, start, len);
return qgroup_free_reserved_data(inode, reserved, start, len, released);
extent_changeset_init(&changeset);
ret = clear_record_extent_bits(&inode->io_tree, start, start + len -1,
EXTENT_QGROUP_RESERVED, &changeset);
@ -3915,7 +3918,8 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
btrfs_qgroup_free_refroot(inode->root->fs_info,
inode->root->root_key.objectid,
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
ret = changeset.bytes_changed;
if (released)
*released = changeset.bytes_changed;
out:
extent_changeset_release(&changeset);
return ret;
@ -3934,9 +3938,10 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
* NOTE: This function may sleep for memory allocation.
*/
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start, u64 len)
struct extent_changeset *reserved,
u64 start, u64 len, u64 *freed)
{
return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
return __btrfs_qgroup_release_data(inode, reserved, start, len, freed, 1);
}
/*
@ -3954,9 +3959,9 @@ int btrfs_qgroup_free_data(struct btrfs_inode *inode,
*
* NOTE: This function may sleep for memory allocation.
*/
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released)
{
return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
return __btrfs_qgroup_release_data(inode, NULL, start, len, released, 0);
}
static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,


@ -360,10 +360,10 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
struct extent_changeset *reserved, u64 start,
u64 len);
u64 len, u64 *freed);
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,


@ -584,6 +584,8 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
if (ret < 0)
goto check_failed;
clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);
object->file = file;
/* Always update the atime on an object we've just looked up (this is


@ -36,6 +36,8 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
&ci->i_vino, sizeof(ci->i_vino),
&ci->i_version, sizeof(ci->i_version),
i_size_read(inode));
if (ci->netfs.cache)
mapping_set_release_always(inode->i_mapping);
}
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)


@ -253,6 +253,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
{
struct inode *orig_inode = file_inode(o_filp);
struct page *pagep[2] = {NULL, NULL};
struct folio *folio[2] = {NULL, NULL};
handle_t *handle;
ext4_lblk_t orig_blk_offset, donor_blk_offset;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
@ -313,6 +314,13 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
* hold page's lock, if it is still the case data copy is not
* necessary, just swap data blocks between orig and donor.
*/
folio[0] = page_folio(pagep[0]);
folio[1] = page_folio(pagep[1]);
VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
VM_BUG_ON_FOLIO(folio_nr_pages(folio[0]) != folio_nr_pages(folio[1]), folio[1]);
if (unwritten) {
ext4_double_down_write_data_sem(orig_inode, donor_inode);
/* If any of extents in range became initialized we have to
@ -331,10 +339,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
ext4_double_up_write_data_sem(orig_inode, donor_inode);
goto data_copy;
}
if ((page_has_private(pagep[0]) &&
!try_to_release_page(pagep[0], 0)) ||
(page_has_private(pagep[1]) &&
!try_to_release_page(pagep[1], 0))) {
if (!filemap_release_folio(folio[0], 0) ||
!filemap_release_folio(folio[1], 0)) {
*err = -EBUSY;
goto drop_data_sem;
}
@ -344,19 +350,19 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
block_len_in_page, 1, err);
drop_data_sem:
ext4_double_up_write_data_sem(orig_inode, donor_inode);
goto unlock_pages;
goto unlock_folios;
}
data_copy:
*err = mext_page_mkuptodate(pagep[0], from, from + replaced_size);
*err = mext_page_mkuptodate(&folio[0]->page, from, from + replaced_size);
if (*err)
goto unlock_pages;
goto unlock_folios;
/* At this point all buffers in range are uptodate, old mapping layout
* is no longer required, try to drop it now. */
if ((page_has_private(pagep[0]) && !try_to_release_page(pagep[0], 0)) ||
(page_has_private(pagep[1]) && !try_to_release_page(pagep[1], 0))) {
if (!filemap_release_folio(folio[0], 0) ||
!filemap_release_folio(folio[1], 0)) {
*err = -EBUSY;
goto unlock_pages;
goto unlock_folios;
}
ext4_double_down_write_data_sem(orig_inode, donor_inode);
replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
@ -369,13 +375,13 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
replaced_size =
block_len_in_page << orig_inode->i_blkbits;
} else
goto unlock_pages;
goto unlock_folios;
}
/* Perform all necessary steps similar write_begin()/write_end()
* but keeping in mind that i_size will not change */
if (!page_has_buffers(pagep[0]))
create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
bh = page_buffers(pagep[0]);
if (!folio_buffers(folio[0]))
create_empty_buffers(&folio[0]->page, 1 << orig_inode->i_blkbits, 0);
bh = folio_buffers(folio[0]);
for (i = 0; i < data_offset_in_page; i++)
bh = bh->b_this_page;
for (i = 0; i < block_len_in_page; i++) {
@ -385,7 +391,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
bh = bh->b_this_page;
}
if (!*err)
*err = block_commit_write(pagep[0], from, from + replaced_size);
*err = block_commit_write(&folio[0]->page, from, from + replaced_size);
if (unlikely(*err < 0))
goto repair_branches;
@ -395,11 +401,11 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
*err = ext4_jbd2_inode_add_write(handle, orig_inode,
(loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
unlock_pages:
unlock_page(pagep[0]);
put_page(pagep[0]);
unlock_page(pagep[1]);
put_page(pagep[1]);
unlock_folios:
folio_unlock(folio[0]);
folio_put(folio[0]);
folio_unlock(folio[1]);
folio_put(folio[1]);
stop_journal:
ext4_journal_stop(handle);
if (*err == -ENOSPC &&
@ -430,7 +436,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
*err = -EIO;
}
replaced_count = 0;
goto unlock_pages;
goto unlock_folios;
}
/**


@ -317,8 +317,6 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL 1
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
zstd_parameters params;
@ -327,6 +325,7 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
unsigned int workspace_size;
unsigned char level = F2FS_I(cc->inode)->i_compress_level;
/* Need to remain this for backward compatibility */
if (!level)
level = F2FS_ZSTD_DEFAULT_CLEVEL;


@ -1432,6 +1432,8 @@ struct compress_data {
#define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
#define F2FS_ZSTD_DEFAULT_CLEVEL 1
#define COMPRESS_LEVEL_OFFSET 8
/* compress context */


@ -3989,6 +3989,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
F2FS_I(inode)->i_compress_algorithm = option.algorithm;
F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
/* Set default level */
if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
else
F2FS_I(inode)->i_compress_level = 0;
/* Adjust mount option level */
if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
F2FS_OPTION(sbi).compress_level)
F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
f2fs_mark_inode_dirty_sync(inode, true);
if (!f2fs_is_compress_backend_ready(inode))


@ -611,14 +611,12 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
unsigned int level;
#endif
if (strlen(str) == 3) {
F2FS_OPTION(sbi).compress_level = 0;
return 0;
}
#ifdef CONFIG_F2FS_FS_LZ4HC
str += 3;
if (str[0] != ':') {
@ -636,6 +634,10 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
F2FS_OPTION(sbi).compress_level = level;
return 0;
#else
if (strlen(str) == 3) {
F2FS_OPTION(sbi).compress_level = 0;
return 0;
}
f2fs_info(sbi, "kernel doesn't support lz4hc compression");
return -EINVAL;
#endif
@ -649,7 +651,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
int len = 4;
if (strlen(str) == len) {
F2FS_OPTION(sbi).compress_level = 0;
F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
return 0;
}
@ -662,7 +664,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
if (!level || level > zstd_max_clevel()) {
if (level < zstd_min_clevel() || level > zstd_max_clevel()) {
f2fs_info(sbi, "invalid zstd compress level: %d", level);
return -EINVAL;
}


@ -215,6 +215,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
lockdep_set_class_and_name(&mapping->invalidate_lock,
&sb->s_type->invalidate_lock_key,
"mapping.invalidate_lock");
if (sb->s_iflags & SB_I_STABLE_WRITES)
mapping_set_stable_writes(mapping);
inode->i_private = NULL;
inode->i_mapping = mapping;
INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
