Merge 5.10.30 into android12-5.10

Changes in 5.10.30
	xfrm/compat: Cleanup WARN()s that can be user-triggered
	ALSA: aloop: Fix initialization of controls
	ALSA: hda/realtek: Fix speaker amp setup on Acer Aspire E1
	ALSA: hda/conexant: Apply quirk for another HP ZBook G5 model
	ASoC: intel: atom: Stop advertising non working S24LE support
	nfc: fix refcount leak in llcp_sock_bind()
	nfc: fix refcount leak in llcp_sock_connect()
	nfc: fix memory leak in llcp_sock_connect()
	nfc: Avoid endless loops caused by repeated llcp_sock_connect()
	selinux: make nslot handling in avtab more robust
	selinux: fix cond_list corruption when changing booleans
	selinux: fix race between old and new sidtab
	xen/evtchn: Change irq_info lock to raw_spinlock_t
	net: ipv6: check for validity before dereferencing cfg->fc_nlinfo.nlh
	net: dsa: lantiq_gswip: Let GSWIP automatically set the xMII clock
	net: dsa: lantiq_gswip: Don't use PHY auto polling
	net: dsa: lantiq_gswip: Configure all remaining GSWIP_MII_CFG bits
	drm/i915: Fix invalid access to ACPI _DSM objects
	ACPI: processor: Fix build when CONFIG_ACPI_PROCESSOR=m
	IB/hfi1: Fix probe time panic when AIP is enabled with a buggy BIOS
	LOOKUP_MOUNTPOINT: we are cleaning "jumped" flag too late
	gcov: re-fix clang-11+ support
	ia64: fix user_stack_pointer() for ptrace()
	nds32: flush_dcache_page: use page_mapping_file to avoid races with swapoff
	ocfs2: fix deadlock between setattr and dio_end_io_write
	fs: direct-io: fix missing sdio->boundary
	ethtool: fix incorrect datatype in set_eee ops
	of: property: fw_devlink: do not link ".*,nr-gpios"
	parisc: parisc-agp requires SBA IOMMU driver
	parisc: avoid a warning on u8 cast for cmpxchg on u8 pointers
	ARM: dts: turris-omnia: configure LED[2]/INTn pin as interrupt pin
	batman-adv: initialize "struct batadv_tvlv_tt_vlan_data"->reserved field
	ice: Continue probe on link/PHY errors
	ice: Increase control queue timeout
	ice: prevent ice_open and ice_stop during reset
	ice: fix memory allocation call
	ice: remove DCBNL_DEVRESET bit from PF state
	ice: Fix for dereference of NULL pointer
	ice: Use port number instead of PF ID for WoL
	ice: Cleanup fltr list in case of allocation issues
	iwlwifi: pcie: properly set LTR workarounds on 22000 devices
	ice: fix memory leak of aRFS after resuming from suspend
	net: hso: fix null-ptr-deref during tty device unregistration
	libbpf: Fix bail out from 'ringbuf_process_ring()' on error
	bpf: Enforce that struct_ops programs be GPL-only
	bpf: link: Refuse non-O_RDWR flags in BPF_OBJ_GET
	ethernet/netronome/nfp: Fix a use after free in nfp_bpf_ctrl_msg_rx
	libbpf: Ensure umem pointer is non-NULL before dereferencing
	libbpf: Restore umem state after socket create failure
	libbpf: Only create rx and tx XDP rings when necessary
	bpf: Refcount task stack in bpf_get_task_stack
	bpf, sockmap: Fix sk->prot unhash op reset
	bpf, sockmap: Fix incorrect fwd_alloc accounting
	net: ensure mac header is set in virtio_net_hdr_to_skb()
	i40e: Fix sparse warning: missing error code 'err'
	i40e: Fix sparse error: 'vsi->netdev' could be null
	i40e: Fix sparse error: uninitialized symbol 'ring'
	i40e: Fix sparse errors in i40e_txrx.c
	vdpa/mlx5: Fix suspend/resume index restoration
	net: sched: sch_teql: fix null-pointer dereference
	net: sched: fix action overwrite reference counting
	nl80211: fix beacon head validation
	nl80211: fix potential leak of ACL params
	cfg80211: check S1G beacon compat element length
	mac80211: fix time-is-after bug in mlme
	mac80211: fix TXQ AC confusion
	net: hsr: Reset MAC header for Tx path
	net-ipv6: bugfix - raw & sctp - switch to ipv6_can_nonlocal_bind()
	net: let skb_orphan_partial wake-up waiters.
	thunderbolt: Fix a leak in tb_retimer_add()
	thunderbolt: Fix off by one in tb_port_find_retimer()
	usbip: add sysfs_lock to synchronize sysfs code paths
	usbip: stub-dev synchronize sysfs code paths
	usbip: vudc synchronize sysfs code paths
	usbip: synchronize event handler with sysfs code paths
	driver core: Fix locking bug in deferred_probe_timeout_work_func()
	scsi: pm80xx: Fix chip initialization failure
	scsi: target: iscsi: Fix zero tag inside a trace event
	percpu: make pcpu_nr_empty_pop_pages per chunk type
	i2c: turn recovery error on init to debug
	KVM: x86/mmu: change TDP MMU yield function returns to match cond_resched
	KVM: x86/mmu: Merge flush and non-flush tdp_mmu_iter_cond_resched
	KVM: x86/mmu: Rename goal_gfn to next_last_level_gfn
	KVM: x86/mmu: Ensure forward progress when yielding in TDP MMU iter
	KVM: x86/mmu: Yield in TDU MMU iter even if no SPTES changed
	KVM: x86/mmu: Ensure TLBs are flushed when yielding during GFN range zap
	KVM: x86/mmu: Ensure TLBs are flushed for TDP MMU during NX zapping
	KVM: x86/mmu: Don't allow TDP MMU to yield when recovering NX pages
	KVM: x86/mmu: preserve pending TLB flush across calls to kvm_tdp_mmu_zap_sp
	net: sched: fix err handler in tcf_action_init()
	ice: Refactor DCB related variables out of the ice_port_info struct
	ice: Recognize 860 as iSCSI port in CEE mode
	xfrm: interface: fix ipv4 pmtu check to honor ip header df
	xfrm: Use actual socket sk instead of skb socket for xfrm_output_resume
	remoteproc: qcom: pil_info: avoid 64-bit division
	regulator: bd9571mwv: Fix AVS and DVFS voltage range
	ARM: OMAP4: Fix PMIC voltage domains for bionic
	ARM: OMAP4: PM: update ROM return address for OSWR and OFF
	net: xfrm: Localize sequence counter per network namespace
	esp: delete NETIF_F_SCTP_CRC bit from features for esp offload
	ASoC: SOF: Intel: HDA: fix core status verification
	ASoC: wm8960: Fix wrong bclk and lrclk with pll enabled for some chips
	xfrm: Fix NULL pointer dereference on policy lookup
	virtchnl: Fix layout of RSS structures
	i40e: Added Asym_Pause to supported link modes
	i40e: Fix kernel oops when i40e driver removes VF's
	hostfs: fix memory handling in follow_link()
	amd-xgbe: Update DMA coherency values
	vxlan: do not modify the shared tunnel info when PMTU triggers an ICMP reply
	geneve: do not modify the shared tunnel info when PMTU triggers an ICMP reply
	sch_red: fix off-by-one checks in red_check_params()
	drivers/net/wan/hdlc_fr: Fix a double free in pvc_xmit
	arm64: dts: imx8mm/q: Fix pad control of SD1_DATA0
	xfrm: Provide private skb extensions for segmented and hw offloaded ESP packets
	can: bcm/raw: fix msg_namelen values depending on CAN_REQUIRED_SIZE
	can: isotp: fix msg_namelen values depending on CAN_REQUIRED_SIZE
	mlxsw: spectrum: Fix ECN marking in tunnel decapsulation
	ethernet: myri10ge: Fix a use after free in myri10ge_sw_tso
	gianfar: Handle error code at MAC address change
	net: dsa: Fix type was not set for devlink port
	cxgb4: avoid collecting SGE_QBASE regs during traffic
	net:tipc: Fix a double free in tipc_sk_mcast_rcv
	ARM: dts: imx6: pbab01: Set vmmc supply for both SD interfaces
	net/ncsi: Avoid channel_monitor hrtimer deadlock
	net: qrtr: Fix memory leak on qrtr_tx_wait failure
	nfp: flower: ignore duplicate merge hints from FW
	net: phy: broadcom: Only advertise EEE for supported modes
	I2C: JZ4780: Fix bug for Ingenic X1000.
	ASoC: sunxi: sun4i-codec: fill ASoC card owner
	net/mlx5e: Fix mapping of ct_label zero
	net/mlx5e: Fix ethtool indication of connector type
	net/mlx5: Don't request more than supported EQs
	net/rds: Fix a use after free in rds_message_map_pages
	xdp: fix xdp_return_frame() kernel BUG throw for page_pool memory model
	soc/fsl: qbman: fix conflicting alignment attributes
	i40e: Fix display statistics for veb_tc
	RDMA/rtrs-clt: Close rtrs client conn before destroying rtrs clt session files
	drm/msm: Set drvdata to NULL when msm_drm_init() fails
	net: udp: Add support for getsockopt(..., ..., UDP_GRO, ..., ...);
	mptcp: forbit mcast-related sockopt on MPTCP sockets
	scsi: ufs: core: Fix task management request completion timeout
	scsi: ufs: core: Fix wrong Task Tag used in task management request UPIUs
	net: cls_api: Fix uninitialised struct field bo->unlocked_driver_cb
	net: macb: restore cmp registers on resume path
	clk: fix invalid usage of list cursor in register
	clk: fix invalid usage of list cursor in unregister
	workqueue: Move the position of debug_work_activate() in __queue_work()
	s390/cpcmd: fix inline assembly register clobbering
	perf inject: Fix repipe usage
	net: openvswitch: conntrack: simplify the return expression of ovs_ct_limit_get_default_limit()
	openvswitch: fix send of uninitialized stack memory in ct limit reply
	i2c: designware: Adjust bus_freq_hz when refuse high speed mode set
	iwlwifi: fix 11ax disabled bit in the regulatory capability flags
	can: mcp251x: fix support for half duplex SPI host controllers
	tipc: increment the tmp aead refcnt before attaching it
	net: hns3: clear VF down state bit before request link status
	net/mlx5: Fix placement of log_max_flow_counter
	net/mlx5: Fix PPLM register mapping
	net/mlx5: Fix PBMC register mapping
	RDMA/cxgb4: check for ipv6 address properly while destroying listener
	perf report: Fix wrong LBR block sorting
	RDMA/qedr: Fix kernel panic when trying to access recv_cq
	drm/vc4: crtc: Reduce PV fifo threshold on hvs4
	i40e: Fix parameters in aq_get_phy_register()
	RDMA/addr: Be strict with gid size
	vdpa/mlx5: should exclude header length and fcs from mtu
	vdpa/mlx5: Fix wrong use of bit numbers
	RAS/CEC: Correct ce_add_elem()'s returned values
	clk: socfpga: fix iomem pointer cast on 64-bit
	lockdep: Address clang -Wformat warning printing for %hd
	dt-bindings: net: ethernet-controller: fix typo in NVMEM
	net: sched: bump refcount for new action in ACT replace mode
	gpiolib: Read "gpio-line-names" from a firmware node
	cfg80211: remove WARN_ON() in cfg80211_sme_connect
	net: tun: set tun->dev->addr_len during TUNSETLINK processing
	drivers: net: fix memory leak in atusb_probe
	drivers: net: fix memory leak in peak_usb_create_dev
	net: mac802154: Fix general protection fault
	net: ieee802154: nl-mac: fix check on panid
	net: ieee802154: fix nl802154 del llsec key
	net: ieee802154: fix nl802154 del llsec dev
	net: ieee802154: fix nl802154 add llsec key
	net: ieee802154: fix nl802154 del llsec devkey
	net: ieee802154: forbid monitor for set llsec params
	net: ieee802154: forbid monitor for del llsec seclevel
	net: ieee802154: stop dump llsec params for monitors
	Revert "net: sched: bump refcount for new action in ACT replace mode"
	Linux 5.10.30

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie8754a2e4dfef03bf1f2b878843cde19a4adab21
Greg Kroah-Hartman, 2021-04-14 12:17:35 +02:00
commit 9a705f0463
192 changed files with 1809 additions and 852 deletions

diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -49,7 +49,7 @@ properties:
     description:
       Reference to an nvmem node for the MAC address

-  nvmem-cells-names:
+  nvmem-cell-names:
     const: mac-address

   phy-connection-type:

diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 29
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = Dare mighty things

diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -236,6 +236,7 @@ phy1: phy@1 {
 			status = "okay";
 			compatible = "ethernet-phy-id0141.0DD1", "ethernet-phy-ieee802.3-c22";
 			reg = <1>;
+			marvell,reg-init = <3 18 0 0x4985>;

 			/* irq is connected to &pcawan pin 7 */
 		};

diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi
@@ -432,6 +432,7 @@ &usdhc2 {
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+	vmmc-supply = <&vdd_sd1_reg>;
 	status = "disabled";
 };

@@ -441,5 +442,6 @@ &usdhc3 {
 		     &pinctrl_usdhc3_cdwp>;
 	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+	vmmc-supply = <&vdd_sd0_reg>;
 	status = "disabled";
 };

diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
@@ -9,6 +9,7 @@
  */

 #include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,6 +21,7 @@

 #include "common.h"
 #include "omap-secure.h"
+#include "soc.h"

 static phys_addr_t omap_secure_memblock_base;
@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
 {
 	omap_optee_init_check();
 }
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address after MMU has been re-enabled after CPU1 has been woken up again.
+ * Otherwise the ROM code will attempt to use the earlier physical return
+ * address that got set with MMU off when waking up CPU1. Only used on secure
+ * devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+	switch (cmd) {
+	case CPU_CLUSTER_PM_EXIT:
+		omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+				       FLAG_START_CRITICAL,
+				       0, 0, 0, 0, 0);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+	.notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+	if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+		return 0;
+
+	cpu_pm_register_notifier(&secure_notifier_block);
+	return 0;
+}
+omap_arch_initcall(secure_pm_init);

diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
@@ -50,6 +50,7 @@
 #define OMAP5_DRA7_MON_SET_ACR_INDEX	0x107

 /* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0		0x21
 #define OMAP4_PPA_L2_POR_INDEX		0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX	0x25

diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
 	omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);

 	if (of_machine_is_compatible("motorola,droid-bionic")) {
-		voltdm = voltdm_lookup("mpu");
+		voltdm = voltdm_lookup("core");
 		omap_voltage_register_pmic(voltdm, &omap_cpcap_core);

-		voltdm = voltdm_lookup("mpu");
+		voltdm = voltdm_lookup("iva");
 		omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
 	} else {
 		voltdm = voltdm_lookup("core");

diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -124,7 +124,7 @@
 #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD		0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1		0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0	0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2	0x0A8 0x31 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2	0x0A8 0x310 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1	0x0AC 0x314 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3	0x0AC 0x314 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2	0x0B0 0x318 0x000 0x0 0x0

diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
@@ -130,7 +130,7 @@
 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD		0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1		0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0	0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2	0x0A8 0x31 0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2	0x0A8 0x310 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1	0x0AC 0x314 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3	0x0AC 0x314 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2	0x0B0 0x318 0x000 0x0 0x0

diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
@@ -54,8 +54,7 @@
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-	/* FIXME: should this be bspstore + nr_dirty regs? */
-	return regs->ar_bspstore;
+	return regs->r12;
 }

 static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
 	unsigned long __ip = instruction_pointer(regs);			\
 	(__ip & ~3UL) + ((__ip & 3UL) << 2);				\
 })
-
-/*
- * Why not default? Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)

 /* given a pointer to a task_struct, return the user's pt_regs */
 # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)

diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping;

-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else {

diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 #endif
 	case 4: return __cmpxchg_u32((unsigned int *)ptr,
 				     (unsigned int)old, (unsigned int)new_);
-	case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+	case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
 	}
 	__cmpxchg_called_with_bad_pointer();
 	return old;

diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)

 static int diag8_response(int cmdlen, char *response, int *rlen)
 {
+	unsigned long _cmdlen = cmdlen | 0x40000000L;
+	unsigned long _rlen = *rlen;
 	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
 	register unsigned long reg3 asm ("3") = (addr_t) response;
-	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
-	register unsigned long reg5 asm ("5") = *rlen;
+	register unsigned long reg4 asm ("4") = _cmdlen;
+	register unsigned long reg5 asm ("5") = _rlen;

 	asm volatile(
 		"	diag	%2,%0,0x8\n"

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
@@ -132,7 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
-bool wakeup_cpu0(void);
+void cond_wakeup_cpu0(void);

 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
@@ -1655,13 +1655,17 @@ void play_dead_common(void)
 	local_irq_disable();
 }

-bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
 {
 	if (smp_processor_id() == 0 && enable_start_cpu0)
-		return true;
-
-	return false;
+		start_cpu0();
 }
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);

 /*
  * We need to flush the caches before going to sleep, lest we have
@@ -1730,11 +1734,8 @@ static inline void mwait_play_dead(void)
 		__monitor(mwait_ptr, 0, 0);
 		mb();
 		__mwait(eax, 0);
-		/*
-		 * If NMI wants to wake up CPU0, start CPU0.
-		 */
-		if (wakeup_cpu0())
-			start_cpu0();
+
+		cond_wakeup_cpu0();
 	}
 }
@@ -1745,11 +1746,8 @@ void hlt_play_dead(void)
 	while (1) {
 		native_halt();
-		/*
-		 * If NMI wants to wake up CPU0, start CPU0.
-		 */
-		if (wakeup_cpu0())
-			start_cpu0();
+
+		cond_wakeup_cpu0();
 	}
 }

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -5972,6 +5972,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 	struct kvm_mmu_page *sp;
 	unsigned int ratio;
 	LIST_HEAD(invalid_list);
+	bool flush = false;
 	ulong to_zap;

 	rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5992,20 +5993,20 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 				      struct kvm_mmu_page,
 				      lpage_disallowed_link);
 		WARN_ON_ONCE(!sp->lpage_disallowed);
-		if (sp->tdp_mmu_page)
-			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
-		else {
+		if (sp->tdp_mmu_page) {
+			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
+		} else {
 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 			WARN_ON_ONCE(sp->lpage_disallowed);
 		}

 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 			cond_resched_lock(&kvm->mmu_lock);
+			flush = false;
 		}
 	}
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, rcu_idx);

diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
@@ -22,21 +22,22 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)

 /*
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
- * rooted at root_pt, starting with the walk to translate goal_gfn.
+ * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
  */
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
-		    int min_level, gfn_t goal_gfn)
+		    int min_level, gfn_t next_last_level_gfn)
 {
 	WARN_ON(root_level < 1);
 	WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);

-	iter->goal_gfn = goal_gfn;
+	iter->next_last_level_gfn = next_last_level_gfn;
+	iter->yielded_gfn = iter->next_last_level_gfn;
 	iter->root_level = root_level;
 	iter->min_level = min_level;
 	iter->level = root_level;
 	iter->pt_path[iter->level - 1] = root_pt;

-	iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
 	tdp_iter_refresh_sptep(iter);

 	iter->valid = true;
@@ -82,7 +83,7 @@ static bool try_step_down(struct tdp_iter *iter)

 	iter->level--;
 	iter->pt_path[iter->level - 1] = child_pt;
-	iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);
+	iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
 	tdp_iter_refresh_sptep(iter);

 	return true;
@@ -106,7 +107,7 @@ static bool try_step_side(struct tdp_iter *iter)
 		return false;

 	iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);
-	iter->goal_gfn = iter->gfn;
+	iter->next_last_level_gfn = iter->gfn;
 	iter->sptep++;
 	iter->old_spte = READ_ONCE(*iter->sptep);

@@ -158,23 +159,6 @@ void tdp_iter_next(struct tdp_iter *iter)
 	iter->valid = false;
 }

-/*
- * Restart the walk over the paging structure from the root, starting from the
- * highest gfn the iterator had previously reached. Assumes that the entire
- * paging structure, except the root page, may have been completely torn down
- * and rebuilt.
- */
-void tdp_iter_refresh_walk(struct tdp_iter *iter)
-{
-	gfn_t goal_gfn = iter->goal_gfn;
-
-	if (iter->gfn > goal_gfn)
-		goal_gfn = iter->gfn;
-
-	tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
-		       iter->root_level, iter->min_level, goal_gfn);
-}
-
 u64 *tdp_iter_root_pt(struct tdp_iter *iter)
 {
 	return iter->pt_path[iter->root_level - 1];

diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
@@ -15,7 +15,13 @@ struct tdp_iter {
 	 * The iterator will traverse the paging structure towards the mapping
 	 * for this GFN.
 	 */
-	gfn_t goal_gfn;
+	gfn_t next_last_level_gfn;
+	/*
+	 * The next_last_level_gfn at the time when the thread last
+	 * yielded. Only yielding when the next_last_level_gfn !=
+	 * yielded_gfn helps ensure forward progress.
+	 */
+	gfn_t yielded_gfn;
 	/* Pointers to the page tables traversed to reach the current SPTE */
 	u64 *pt_path[PT64_ROOT_MAX_LEVEL];
 	/* A pointer to the current SPTE */
@@ -52,9 +58,8 @@ struct tdp_iter {
 u64 *spte_to_child_pt(u64 pte, int level);

 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
-		    int min_level, gfn_t goal_gfn);
+		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
-void tdp_iter_refresh_walk(struct tdp_iter *iter);
 u64 *tdp_iter_root_pt(struct tdp_iter *iter);

 #endif /* __KVM_X86_MMU_TDP_ITER_H */

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -103,7 +103,7 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 }

 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield);
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);

 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -116,7 +116,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)

 	list_del(&root->link);

-	zap_gfn_range(kvm, root, 0, max_gfn, false);
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);

 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
@@ -405,27 +405,43 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
 			 _mmu->shadow_root_level, _start, _end)

 /*
- * Flush the TLB if the process should drop kvm->mmu_lock.
- * Return whether the caller still needs to flush the tlb.
+ * Yield if the MMU lock is contended or this thread needs to return control
+ * to the scheduler.
+ *
+ * If this function should yield and flush is set, it will perform a remote
+ * TLB flush before yielding.
+ *
+ * If this function yields, it will also reset the tdp_iter's walk over the
+ * paging structure and the calling function should skip to the next
+ * iteration to allow the iterator to continue its traversal from the
+ * paging structure root.
+ *
+ * Return true if this function yielded and the iterator's traversal was reset.
+ * Return false if a yield was not needed.
  */
-static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
-{
-	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-		kvm_flush_remote_tlbs(kvm);
-		cond_resched_lock(&kvm->mmu_lock);
-		tdp_iter_refresh_walk(iter);
-		return false;
-	} else {
-		return true;
-	}
-}
-
-static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
-{
-	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-		cond_resched_lock(&kvm->mmu_lock);
-		tdp_iter_refresh_walk(iter);
-	}
-}
+static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
+					     struct tdp_iter *iter, bool flush)
+{
+	/* Ensure forward progress has been made before yielding. */
+	if (iter->next_last_level_gfn == iter->yielded_gfn)
+		return false;
+
+	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (flush)
+			kvm_flush_remote_tlbs(kvm);
+
+		cond_resched_lock(&kvm->mmu_lock);
+
+		WARN_ON(iter->gfn > iter->next_last_level_gfn);
+
+		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
+			       iter->root_level, iter->min_level,
+			       iter->next_last_level_gfn);
+
+		return true;
+	}
+
+	return false;
+}
@@ -437,15 +453,22 @@ static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup. Note, in some use cases a flush may be
+ * required by prior actions. Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield)
+			  gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
 	struct tdp_iter iter;
-	bool flush_needed = false;

 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (can_yield &&
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+			flush = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte))
 			continue;
@@ -460,13 +483,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;

 		tdp_mmu_set_spte(kvm, &iter, 0);
-
-		if (can_yield)
-			flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
-		else
-			flush_needed = true;
+		flush = true;
 	}
-	return flush_needed;
+
+	return flush;
 }
@@ -475,13 +495,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+				 bool can_yield)
 {
 	struct kvm_mmu_page *root;
 	bool flush = false;

 	for_each_tdp_mmu_root_yield_safe(kvm, root)
-		flush |= zap_gfn_range(kvm, root, start, end, true);
+		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);

 	return flush;
 }
@@ -673,7 +694,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
 				     struct kvm_mmu_page *root, gfn_t start,
 				     gfn_t end, unsigned long unused)
 {
-	return zap_gfn_range(kvm, root, start, end, false);
+	return zap_gfn_range(kvm, root, start, end, false, false);
 }

 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
@@ -824,6 +845,9 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
 				   min_level, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -832,8 +856,6 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter);
 	}
 	return spte_set;
 }
@@ -877,6 +899,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;

 	tdp_root_for_each_leaf_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (spte_ad_need_write_protect(iter.old_spte)) {
 			if (is_writable_pte(iter.old_spte))
 				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
@@ -891,8 +916,6 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter);
 	}
 	return spte_set;
 }
@@ -1000,6 +1023,9 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 	bool spte_set = false;

 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+			continue;
+
 		if (!is_shadow_present_pte(iter.old_spte))
 			continue;
@@ -1007,8 +1033,6 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte(kvm, &iter, new_spte);
 		spte_set = true;
-
-		tdp_mmu_iter_cond_resched(kvm, &iter);
 	}

 	return spte_set;
@@ -1049,6 +1073,11 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	bool spte_set = false;

 	tdp_root_for_each_pte(iter, root, start, end) {
+		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
+			spte_set = false;
+			continue;
+		}
+
 		if (!is_shadow_present_pte(iter.old_spte) ||
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
@@ -1061,7 +1090,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		tdp_mmu_set_spte(kvm, &iter, 0);

-		spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+		spte_set = true;
 	}

 	if (spte_set)

diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -12,7 +12,23 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t root);
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);

-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+				 bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+					     gfn_t end)
+{
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+	/*
+	 * Don't allow yielding, as the caller may have pending pages to zap
+	 * on the shadow MMU.
+	 */
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);

 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
@@ -545,9 +545,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 			return -ENODEV;

 #if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
-		/* If NMI wants to wake up CPU0, start CPU0. */
-		if (wakeup_cpu0())
-			start_cpu0();
+		cond_wakeup_cpu0();
 #endif
 	}

diff --git a/drivers/base/dd.c b/drivers/base/dd.c
@@ -292,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)

 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
-	struct device_private *private, *p;
+	struct device_private *p;

 	driver_deferred_probe_timeout = 0;
 	driver_deferred_probe_trigger();
 	flush_work(&deferred_probe_work);

-	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
-		dev_info(private->device, "deferred probe pending\n");
+	mutex_lock(&deferred_probe_mutex);
+	list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+		dev_info(p->device, "deferred probe pending\n");
+	mutex_unlock(&deferred_probe_mutex);
 	wake_up_all(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);

diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
@@ -125,7 +125,7 @@ config AGP_HP_ZX1

 config AGP_PARISC
 	tristate "HP Quicksilver AGP support"
-	depends on AGP && PARISC && 64BIT
+	depends on AGP && PARISC && 64BIT && IOMMU_SBA
 	help
 	  This option gives you AGP GART support for the HP Quicksilver
 	  AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
@@ -4406,20 +4406,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 	/* search the list of notifiers for this clk */
 	list_for_each_entry(cn, &clk_notifier_list, node)
 		if (cn->clk == clk)
-			break;
+			goto found;

 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
-	if (cn->clk != clk) {
-		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
-		if (!cn)
-			goto out;
+	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+	if (!cn)
+		goto out;

-		cn->clk = clk;
-		srcu_init_notifier_head(&cn->notifier_head);
+	cn->clk = clk;
+	srcu_init_notifier_head(&cn->notifier_head);

-		list_add(&cn->node, &clk_notifier_list);
-	}
+	list_add(&cn->node, &clk_notifier_list);

+found:
 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

 	clk->core->notifier_count++;
@@ -4444,32 +4443,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
  */
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 {
-	struct clk_notifier *cn = NULL;
-	int ret = -EINVAL;
+	struct clk_notifier *cn;
+	int ret = -ENOENT;

 	if (!clk || !nb)
 		return -EINVAL;

 	clk_prepare_lock();

-	list_for_each_entry(cn, &clk_notifier_list, node)
-		if (cn->clk == clk)
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+
+			clk->core->notifier_count--;
+
+			/* XXX the notifier code should handle this better */
+			if (!cn->notifier_head.head) {
+				srcu_cleanup_notifier_head(&cn->notifier_head);
+				list_del(&cn->node);
+				kfree(cn);
+			}
 			break;
-
-	if (cn->clk == clk) {
-		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
-
-		clk->core->notifier_count--;
-
-		/* XXX the notifier code should handle this better */
-		if (!cn->notifier_head.head) {
-			srcu_cleanup_notifier_head(&cn->notifier_head);
-			list_del(&cn->node);
-			kfree(cn);
 		}
-	} else {
-		ret = -ENOENT;
 	}

 	clk_prepare_unlock();

diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
 		val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
 		val &= GENMASK(socfpgaclk->width - 1, 0);
 		/* Check for GPIO_DB_CLK by its offset */
-		if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+		if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
 			div = val + 1;
 		else
 			div = (1 << val);

diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
@@ -368,22 +368,18 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
  *
  * Looks for device property "gpio-line-names" and if it exists assigns
  * GPIO line names for the chip. The memory allocated for the assigned
- * names belong to the underlying software node and should not be released
+ * names belong to the underlying firmware node and should not be released
  * by the caller.
  */
 static int devprop_gpiochip_set_names(struct gpio_chip *chip)
 {
 	struct gpio_device *gdev = chip->gpiodev;
-	struct device *dev = chip->parent;
+	struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev);
 	const char **names;
 	int ret, i;
 	int count;

-	/* GPIO chip may not have a parent device whose properties we inspect. */
-	if (!dev)
-		return 0;
-
-	count = device_property_string_array_count(dev, "gpio-line-names");
+	count = fwnode_property_string_array_count(fwnode, "gpio-line-names");
 	if (count < 0)
 		return 0;

@@ -397,7 +393,7 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
 	if (!names)
 		return -ENOMEM;

-	ret = device_property_read_string_array(dev, "gpio-line-names",
+	ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
 						names, count);
 	if (ret < 0) {
 		dev_warn(&gdev->dev, "failed to read GPIO line names\n");

diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
 		return;
 	}

+	if (!pkg->package.count) {
+		DRM_DEBUG_DRIVER("no connection in _DSM\n");
+		return;
+	}
+
 	connector_count = &pkg->package.elements[0];
 	DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
 		  (unsigned long long)connector_count->integer.value);
 	for (i = 1; i < pkg->package.count; i++) {
 		union acpi_object *obj = &pkg->package.elements[i];
-		union acpi_object *connector_id = &obj->package.elements[0];
-		union acpi_object *info = &obj->package.elements[1];
+		union acpi_object *connector_id;
+		union acpi_object *info;
+
+		if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+			DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+			continue;
+		}
+
+		connector_id = &obj->package.elements[0];
+		info = &obj->package.elements[1];
+		if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+			DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+			continue;
+		}
+
 		DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
 			  (unsigned long long)connector_id->integer.value);
 		DRM_DEBUG_DRIVER("  port id: %s\n",

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
@@ -557,6 +557,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	kfree(priv);
 err_put_drm_dev:
 	drm_dev_put(ddev);
+	platform_set_drvdata(pdev, NULL);
 	return ret;
 }

diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
 {
 	const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
 	const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+	struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
 	u32 fifo_len_bytes = pv_data->fifo_depth;

 	/*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
 		if (crtc_data->hvs_output == 5)
 			return 32;

+		/*
+		 * It looks like in some situations, we will overflow
+		 * the PixelValve FIFO (with the bit 10 of PV stat being
+		 * set) and stall the HVS / PV, eventually resulting in
+		 * a page flip timeout.
+		 *
+		 * Displaying the video overlay during a playback with
+		 * Kodi on an RPi3 seems to be a great solution with a
+		 * failure rate around 50%.
+		 *
+		 * Removing 1 from the FIFO full level however
+		 * seems to completely remove that issue.
+		 */
+		if (!vc4->hvs->hvs5)
+			return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
 		return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
 	}
 }

diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
 			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
 			dev_err(dev->dev, "High Speed not supported!\n");
+			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
 			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
 			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
 			dev->hs_hcnt = 0;

diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
@@ -526,8 +526,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
 			i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
 			data = *i2c->wbuf;
 			data &= ~JZ4780_I2C_DC_READ;
-			if ((!i2c->stop_hold) && (i2c->cdata->version >=
-					ID_X1000))
+			if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
+			    (i2c->cdata->version >= ID_X1000))
 				data |= X1000_I2C_DC_STOP;
 			jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
 			i2c->wbuf++;

diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
 static int i2c_init_recovery(struct i2c_adapter *adap)
 {
 	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-	char *err_str;
+	char *err_str, *err_level = KERN_ERR;

 	if (!bri)
 		return 0;
@@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
 		return -EPROBE_DEFER;

 	if (!bri->recover_bus) {
-		err_str = "no recover_bus() found";
+		err_str = "no suitable method provided";
+		err_level = KERN_DEBUG;
 		goto err;
 	}
@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
 	return 0;
 err:
-	dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+	dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
 	adap->bus_recovery_info = NULL;

 	return -EINVAL;

diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;

 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
 	[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
-		.len = sizeof(struct rdma_nla_ls_gid)},
+		.len = sizeof(struct rdma_nla_ls_gid),
+		.validation_type = NLA_VALIDATE_MIN,
+		.min = sizeof(struct rdma_nla_ls_gid)},
 };

 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)

diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 		c4iw_init_wr_wait(ep->com.wr_waitp);
 		err = cxgb4_remove_server(
 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
-				ep->com.dev->rdev.lldi.rxq_ids[0], true);
+				ep->com.dev->rdev.lldi.rxq_ids[0],
+				ep->com.local_addr.ss_family == AF_INET6);
 		if (err)
 			goto done;
 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,

diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
  */
 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
-	int node = pcibus_to_node(dd->pcidev->bus);
 	struct hfi1_affinity_node *entry;
 	const struct cpumask *local_mask;
 	int curr_cpu, possible, i, ret;
 	bool new_entry = false;

-	/*
-	 * If the BIOS does not have the NUMA node information set, select
-	 * NUMA 0 so we get consistent performance.
-	 */
-	if (node < 0) {
-		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
-		node = 0;
-	}
-	dd->node = node;
-
 	local_mask = cpumask_of_node(dd->node);
 	if (cpumask_first(local_mask) >= nr_cpu_ids)
 		local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 	 * create an entry in the global affinity structure and initialize it.
 	 */
 	if (!entry) {
-		entry = node_affinity_allocate(node);
+		entry = node_affinity_allocate(dd->node);
 		if (!entry) {
 			dd_dev_err(dd,
 				   "Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 	if (new_entry)
 		node_affinity_add_tail(entry);

+	dd->affinity_entry = entry;
 	mutex_unlock(&node_affinity.lock);

 	return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 {
 	struct hfi1_affinity_node *entry;

-	if (dd->node < 0)
-		return;
-
 	mutex_lock(&node_affinity.lock);
+	if (!dd->affinity_entry)
+		goto unlock;
 	entry = node_affinity_lookup(dd->node);
 	if (!entry)
 		goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 	 */
 	_dev_comp_vect_cpu_mask_clean_up(dd, entry);
 unlock:
+	dd->affinity_entry = NULL;
 	mutex_unlock(&node_affinity.lock);
-	dd->node = NUMA_NO_NODE;
 }

 /*

diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
 	spinlock_t irq_src_lock;
 	int vnic_num_vports;
 	struct net_device *dummy_netdev;
+	struct hfi1_affinity_node *affinity_entry;

 	/* Keeps track of IPoIB RSM rule users */
 	atomic_t ipoib_rsm_usr_num;

diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
 	dd->pcidev = pdev;
 	pci_set_drvdata(pdev, dd);
-	dd->node = NUMA_NO_NODE;

 	ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
 			   GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
 		goto bail;
 	}
 	rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+	/*
+	 * If the BIOS does not have the NUMA node information set, select
+	 * NUMA 0 so we get consistent performance.
+	 */
+	dd->node = pcibus_to_node(pdev->bus);
+	if (dd->node == NUMA_NO_NODE) {
+		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+		dd->node = 0;
+	}

 	/*
 	 * Initialize all locks for the device. This needs to be as early as

diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
 		return 0;
 	}

-	cpumask_and(node_cpu_mask, cpu_mask,
-		    cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));

 	available_cpus = cpumask_weight(node_cpu_mask);

diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
@@ -1241,7 +1241,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
 	 * TGT QP isn't associated with RQ/SQ
 	 */
 	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
-	    (attrs->qp_type != IB_QPT_XRC_TGT)) {
+	    (attrs->qp_type != IB_QPT_XRC_TGT) &&
+	    (attrs->qp_type != IB_QPT_XRC_INI)) {
 		struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
 		struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);

diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2739,8 +2739,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)

 	/* Now it is safe to iterate over all paths without locks */
 	list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
-		rtrs_clt_destroy_sess_files(sess, NULL);
 		rtrs_clt_close_conns(sess, true);
+		rtrs_clt_destroy_sess_files(sess, NULL);
 		kobject_put(&sess->kobj);
 	}

 	free_clt(clt);

View File

@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
 	return ret;
 }
 
+static int mcp251x_spi_write(struct spi_device *spi, int len)
+{
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
+	int ret;
+
+	ret = spi_write(spi, priv->spi_tx_buf, len);
+	if (ret)
+		dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
+
+	return ret;
+}
+
 static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
 {
 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
 	priv->spi_tx_buf[1] = reg;
 	priv->spi_tx_buf[2] = val;
 
-	mcp251x_spi_trans(spi, 3);
+	mcp251x_spi_write(spi, 3);
 }
 
 static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
 	priv->spi_tx_buf[2] = v1;
 	priv->spi_tx_buf[3] = v2;
 
-	mcp251x_spi_trans(spi, 4);
+	mcp251x_spi_write(spi, 4);
 }
 
 static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
 	priv->spi_tx_buf[2] = mask;
 	priv->spi_tx_buf[3] = val;
 
-	mcp251x_spi_trans(spi, 4);
+	mcp251x_spi_write(spi, 4);
 }
 
 static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
 				buf[i]);
 	} else {
 		memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
-		mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+		mcp251x_spi_write(spi, TXBDAT_OFF + len);
 	}
 }
 
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
 	/* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
 	priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
-	mcp251x_spi_trans(priv->spi, 1);
+	mcp251x_spi_write(priv->spi, 1);
 }
 
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
 	mdelay(MCP251X_OST_DELAY_MS);
 
 	priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-	ret = mcp251x_spi_trans(spi, 1);
+	ret = mcp251x_spi_write(spi, 1);
 	if (ret)
 		return ret;
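Note: every converted call site only clocks data out; mcp251x_spi_trans() performs a full-duplex spi_sync() transfer with both tx_buf and rx_buf attached, which half-duplex SPI controllers cannot service. A hedged kernel-style sketch of the distinction, using only the core spi_sync()/spi_write() API (the helper names here are illustrative):

#include <linux/spi/spi.h>

/* Full-duplex: drive MOSI from tx_buf while simultaneously capturing
 * MISO into rx_buf.  A half-duplex-only controller cannot do both in
 * one transfer, which is why the write-only paths above stop going
 * through mcp251x_spi_trans(). */
static int example_duplex_xfer(struct spi_device *spi, void *tx, void *rx,
			       unsigned int len)
{
        struct spi_transfer t = {
                .tx_buf = tx,
                .rx_buf = rx,
                .len = len,
        };
        struct spi_message m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spi_sync(spi, &m);
}

/* Write-only: no rx_buf is attached, so the SPI core never asks the
 * controller to receive -- all a register write or the RTS/RESET
 * instructions need. */
static int example_write_only(struct spi_device *spi, const void *tx,
			      size_t len)
{
        return spi_write(spi, tx, len);
}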


@@ -856,7 +856,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 	if (dev->adapter->dev_set_bus) {
 		err = dev->adapter->dev_set_bus(dev, 0);
 		if (err)
-			goto lbl_unregister_candev;
+			goto adap_dev_free;
 	}
 
 	/* get device number early */
@@ -868,6 +868,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 
 	return 0;
 
+adap_dev_free:
+	if (dev->adapter->dev_free)
+		dev->adapter->dev_free(dev);
+
 lbl_unregister_candev:
 	unregister_candev(netdev);
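Note: the new label follows the usual kernel unwind idiom: each failure jumps to the label that releases everything allocated so far, and the labels fall through in reverse allocation order. A compilable toy version of the ladder (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

static int create_dev(int fail_step)
{
        void *priv = NULL, *candev = NULL;
        int err = -1;

        priv = malloc(16);              /* like allocating the netdev */
        if (!priv)
                return err;

        candev = malloc(16);            /* like register_candev() */
        if (!candev)
                goto free_priv;

        if (fail_step == 3)             /* like dev_set_bus() failing */
                goto free_candev;

        free(candev);                   /* toy only: release so the */
        free(priv);                     /* demo does not leak */
        return 0;

free_candev:                            /* like the new adap_dev_free: */
        free(candev);
free_priv:                              /* like lbl_unregister_candev: */
        free(priv);
        return err;
}

int main(void)
{
        printf("ok path: %d, failing path: %d\n",
               create_dev(0), create_dev(3));
        return 0;
}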


@@ -93,8 +93,12 @@
 
 /* GSWIP MII Registers */
 #define GSWIP_MII_CFGp(p)		(0x2 * (p))
+#define GSWIP_MII_CFG_RESET		BIT(15)
 #define GSWIP_MII_CFG_EN		BIT(14)
+#define GSWIP_MII_CFG_ISOLATE		BIT(13)
 #define GSWIP_MII_CFG_LDCLKDIS		BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS		BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK		BIT(7)
 #define GSWIP_MII_CFG_MODE_MIIP		0x0
 #define GSWIP_MII_CFG_MODE_MIIM		0x1
 #define GSWIP_MII_CFG_MODE_RMIIP	0x2
@@ -190,6 +194,23 @@
 #define GSWIP_PCE_DEFPVID(p)		(0x486 + ((p) * 0xA))
 
 #define GSWIP_MAC_FLEN			0x8C5
+#define GSWIP_MAC_CTRL_0p(p)		(0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN		BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN		BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK	0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO	0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX	0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX	0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX	0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE	0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK	0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO	0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN	0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS	0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK	0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO	0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII	0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII	0x0002
 #define GSWIP_MAC_CTRL_2p(p)		(0x905 + ((p) * 0xC))
 #define GSWIP_MAC_CTRL_2_MLEN		BIT(3) /* Maximum Untagged Frame Lnegth */
@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
 			  GSWIP_SDMA_PCTRLp(port));
 
 	if (!dsa_is_cpu_port(ds, port)) {
-		u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
-			      GSWIP_MDIO_PHY_SPEED_AUTO |
-			      GSWIP_MDIO_PHY_FDUP_AUTO |
-			      GSWIP_MDIO_PHY_FCONTX_AUTO |
-			      GSWIP_MDIO_PHY_FCONRX_AUTO |
-			      (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
+		u32 mdio_phy = 0;
 
-		gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
-		/* Activate MDIO auto polling */
-		gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+		if (phydev)
+			mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+				GSWIP_MDIO_PHYp(port));
 	}
 
 	return 0;
@@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
 	if (!dsa_is_user_port(ds, port))
 		return;
 
-	if (!dsa_is_cpu_port(ds, port)) {
-		gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
-				GSWIP_MDIO_PHY_LINK_MASK,
-				GSWIP_MDIO_PHYp(port));
-		/* Deactivate MDIO auto polling */
-		gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
-	}
-
 	gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
 			  GSWIP_FDMA_PCTRLp(port));
 	gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -806,14 +816,32 @@ static int gswip_setup(struct dsa_switch *ds)
 	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
 	gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
 
-	/* disable PHY auto polling */
+	/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
+	 * interoperability problem with this auto polling mechanism because
+	 * their status registers think that the link is in a different state
+	 * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
+	 * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
+	 * auto polling state machine consider the link being negotiated with
+	 * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
+	 * to the switch port being completely dead (RX and TX are both not
+	 * working).
+	 * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
+	 * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
+	 * it would work fine for a few minutes to hours and then stop, on
+	 * other device it would no traffic could be sent or received at all.
+	 * Testing shows that when PHY auto polling is disabled these problems
+	 * go away.
+	 */
 	gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
 
 	/* Configure the MDIO Clock 2.5 MHz */
 	gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
-	/* Disable the xMII link */
+	/* Disable the xMII interface and clear it's isolation bit */
 	for (i = 0; i < priv->hw_info->max_ports; i++)
-		gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
+		gswip_mii_mask_cfg(priv,
+				   GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+				   0, i);
 
 	/* enable special tag insertion on cpu port */
 	gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1464,6 +1492,112 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
 	return;
 }
 
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+	u32 mdio_phy;
+
+	if (link)
+		mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+	else
+		mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+			GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+				 phy_interface_t interface)
+{
+	u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+		if (interface == PHY_INTERFACE_MODE_RMII)
+			mii_cfg = GSWIP_MII_CFG_RATE_M50;
+		else
+			mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+		break;
+
+	case SPEED_100:
+		mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+		if (interface == PHY_INTERFACE_MODE_RMII)
+			mii_cfg = GSWIP_MII_CFG_RATE_M50;
+		else
+			mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+		break;
+
+	case SPEED_1000:
+		mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+		mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+		break;
+	}
+
+	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+			GSWIP_MDIO_PHYp(port));
+	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+			  GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+	u32 mac_ctrl_0, mdio_phy;
+
+	if (duplex == DUPLEX_FULL) {
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+		mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+	} else {
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+		mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+	}
+
+	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+			  GSWIP_MAC_CTRL_0p(port));
+	gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+			GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+				 bool tx_pause, bool rx_pause)
+{
+	u32 mac_ctrl_0, mdio_phy;
+
+	if (tx_pause && rx_pause) {
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+			   GSWIP_MDIO_PHY_FCONRX_EN;
+	} else if (tx_pause) {
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+		mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+			   GSWIP_MDIO_PHY_FCONRX_DIS;
+	} else if (rx_pause) {
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+			   GSWIP_MDIO_PHY_FCONRX_EN;
+	} else {
+		mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+		mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+			   GSWIP_MDIO_PHY_FCONRX_DIS;
+	}
+
+	gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+			  mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+	gswip_mdio_mask(priv,
+			GSWIP_MDIO_PHY_FCONTX_MASK |
+			GSWIP_MDIO_PHY_FCONRX_MASK,
+			mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
 static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
 				     unsigned int mode,
 				     const struct phylink_link_state *state)
@@ -1483,6 +1617,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
 		break;
 	case PHY_INTERFACE_MODE_RMII:
 		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+
+		/* Configure the RMII clock as output: */
+		miicfg |= GSWIP_MII_CFG_RMII_CLK;
 		break;
 	case PHY_INTERFACE_MODE_RGMII:
 	case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1495,7 +1632,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
 			"Unsupported interface: %d\n", state->interface);
 		return;
 	}
-	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
+	gswip_mii_mask_cfg(priv,
+			   GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+			   GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+			   miicfg, port);
 
 	switch (state->interface) {
 	case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1520,6 +1661,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
 	struct gswip_priv *priv = ds->priv;
 
 	gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+	if (!dsa_is_cpu_port(ds, port))
+		gswip_port_set_link(priv, port, false);
 }
 
 static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1531,6 +1675,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
 	struct gswip_priv *priv = ds->priv;
 
+	if (!dsa_is_cpu_port(ds, port)) {
+		gswip_port_set_link(priv, port, true);
+		gswip_port_set_speed(priv, port, speed, interface);
+		gswip_port_set_duplex(priv, port, duplex);
+		gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+	}
+
 	gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
 }


@@ -180,9 +180,9 @@
 #define XGBE_DMA_SYS_AWCR	0x30303030
 
 /* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR	0x00000003
-#define XGBE_DMA_PCI_AWCR	0x13131313
-#define XGBE_DMA_PCI_AWARCR	0x00000313
+#define XGBE_DMA_PCI_ARCR	0x000f0f0f
+#define XGBE_DMA_PCI_AWCR	0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR	0x00000f0f
 
 /* DMA channel interrupt modes */
 #define XGBE_IRQ_MODE_EDGE	0


@@ -3111,6 +3111,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
 	bool cmp_b = false;
 	bool cmp_c = false;
 
+	if (!macb_is_gem(bp))
+		return;
+
 	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
 	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
@@ -3479,6 +3482,7 @@ static void macb_restore_features(struct macb *bp)
 {
 	struct net_device *netdev = bp->dev;
 	netdev_features_t features = netdev->features;
+	struct ethtool_rx_fs_item *item;
 
 	/* TX checksum offload */
 	macb_set_txcsum_feature(bp, features);
@@ -3487,6 +3491,9 @@ static void macb_restore_features(struct macb *bp)
 	macb_set_rxcsum_feature(bp, features);
 
 	/* RX Flow Filters */
+	list_for_each_entry(item, &bp->rx_fs_list.list, list)
+		gem_prog_cmp_regs(bp, &item->fs);
+
 	macb_set_rxflow_feature(bp, features);
 }
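Note: a reset wipes the GEM screener registers while the driver's software rx_fs_list survives, so restoring the filters is just a replay of the saved rules. A small standalone sketch of that replay idiom (types and values hypothetical):

#include <stdio.h>

struct rule { int loc; unsigned int match; };

/* Software copy of the filters, kept across resets (stands in for
 * bp->rx_fs_list). */
static const struct rule saved[] = {
        { 0, 0xc0a80001 },
        { 1, 0xc0a80002 },
};

static void prog_cmp_regs(const struct rule *r)
{
        /* Hardware write elided; in macb this is gem_prog_cmp_regs(). */
        printf("reprogram screener %d -> %#x\n", r->loc, r->match);
}

int main(void)
{
        size_t i;

        /* After a controller reset the registers are blank, but the
         * software list still holds the rules: replay them all. */
        for (i = 0; i < sizeof(saved) / sizeof(saved[0]); i++)
                prog_cmp_regs(&saved[i]);
        return 0;
}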


@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
 	struct cudbg_buffer temp_buff = { 0 };
 	struct sge_qbase_reg_field *sge_qbase;
 	struct ireg_buf *ch_sge_dbg;
+	u8 padap_running = 0;
 	int i, rc;
+	u32 size;
 
-	rc = cudbg_get_buff(pdbg_init, dbg_buff,
-			    sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
-			    &temp_buff);
+	/* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+	 * lead to SGE missing doorbells under heavy traffic. So, only
+	 * collect them when adapter is idle.
+	 */
+	for_each_port(padap, i) {
+		padap_running = netif_running(padap->port[i]);
+		if (padap_running)
+			break;
+	}
+
+	size = sizeof(*ch_sge_dbg) * 2;
+	if (!padap_running)
+		size += sizeof(*sge_qbase);
+
+	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 	if (rc)
 		return rc;
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
 		ch_sge_dbg++;
 	}
 
-	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+	if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+	    !padap_running) {
 		sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
 		/* 1 addr reg SGE_QBASE_INDEX and 4 data reg
 		 * SGE_QBASE_MAP[0-3]


@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 		0x1190, 0x1194,
 		0x11a0, 0x11a4,
 		0x11b0, 0x11b4,
-		0x11fc, 0x1274,
+		0x11fc, 0x123c,
+		0x1254, 0x1274,
 		0x1280, 0x133c,
 		0x1800, 0x18fc,
 		0x3000, 0x302c,


@@ -364,7 +364,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
-	eth_mac_addr(dev, p);
+	int ret;
+
+	ret = eth_mac_addr(dev, p);
+	if (ret)
+		return ret;
 
 	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);


@@ -2554,14 +2554,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
+	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
 	hclgevf_reset_tqp_stats(handle);
 
 	hclgevf_request_link_info(hdev);
 
 	hclgevf_update_link_mode(hdev);
 
-	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
 	return 0;
 }


@@ -142,6 +142,7 @@ enum i40e_state_t {
 	__I40E_VIRTCHNL_OP_PENDING,
 	__I40E_RECOVERY_MODE,
 	__I40E_VF_RESETS_DISABLED,	/* disable resets during i40e_remove */
+	__I40E_VFS_RELEASING,
 	/* This must be last as it determines the size of the BITMAP */
 	__I40E_STATE_SIZE__,
 };


@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 	case RING_TYPE_XDP:
 		ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
 		break;
+	default:
+		ring = NULL;
+		break;
 	}
 	if (!ring)
 		return;


@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
 	I40E_STAT(struct i40e_vsi, _name, _stat)
 #define I40E_VEB_STAT(_name, _stat) \
 	I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+	I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
 #define I40E_PFC_STAT(_name, _stat) \
 	I40E_STAT(struct i40e_pfc_stats, _name, _stat)
 #define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
 	I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
 };
 
+struct i40e_cp_veb_tc_stats {
+	u64 tc_rx_packets;
+	u64 tc_rx_bytes;
+	u64 tc_tx_packets;
+	u64 tc_tx_bytes;
+};
+
 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
-	I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
-	I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
-	I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
-	I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+	I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+	I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+	I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+	I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
 };
 
 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
 
 	/* Set flow control settings */
 	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+	ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
 
 	switch (hw->fc.requested_mode) {
 	case I40E_FC_FULL:
@@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
 	}
 }
 
+/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
+ * one dimensional structure i40e_cp_veb_tc_stats.
+ * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
+ * statistics for the given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+	struct i40e_cp_veb_tc_stats veb_tc = {
+		.tc_rx_packets = tc->tc_rx_packets[i],
+		.tc_rx_bytes = tc->tc_rx_bytes[i],
+		.tc_tx_packets = tc->tc_tx_packets[i],
+		.tc_tx_bytes = tc->tc_tx_bytes[i],
+	};
+
+	return veb_tc;
+}
+
 /**
  * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
  * @pf: the PF device structure
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 			       i40e_gstrings_veb_stats);
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
-		i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
-				       i40e_gstrings_veb_tc_stats);
+		if (veb_stats) {
+			struct i40e_cp_veb_tc_stats veb_tc =
+				i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+			i40e_add_ethtool_stats(&data, &veb_tc,
+					       i40e_gstrings_veb_tc_stats);
+		} else {
+			i40e_add_ethtool_stats(&data, NULL,
+					       i40e_gstrings_veb_tc_stats);
+		}
 
 	i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
@@ -5244,7 +5285,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
 
 		status = i40e_aq_get_phy_register(hw,
 				I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-				true, addr, offset, &value, NULL);
+				addr, true, offset, &value, NULL);
 		if (status)
 			return -EIO;
 		data[i] = value;
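Note: veb->tc_stats is laid out as a structure of arrays (one array per counter, indexed by TC), while the offset-based stat walker wants one flat struct per TC; the new helper transposes a single TC column. A self-contained sketch of that structure-of-arrays to per-item copy (names and sizes hypothetical):

#include <stdio.h>

#define MAX_TC 4

/* Structure-of-arrays layout, as the driver accumulates it
 * (stands in for struct i40e_veb_tc_stats). */
struct tc_stats_soa {
        unsigned long long rx_packets[MAX_TC];
        unsigned long long tx_packets[MAX_TC];
};

/* Flat per-TC view that an offset-based formatter expects
 * (stands in for struct i40e_cp_veb_tc_stats). */
struct tc_stats_flat {
        unsigned long long rx_packets;
        unsigned long long tx_packets;
};

static struct tc_stats_flat pick_tc(const struct tc_stats_soa *s, int i)
{
        struct tc_stats_flat f = {
                .rx_packets = s->rx_packets[i],
                .tx_packets = s->tx_packets[i],
        };
        return f;
}

int main(void)
{
        struct tc_stats_soa s = { .rx_packets = { 1, 2, 3, 4 } };
        int i;

        for (i = 0; i < MAX_TC; i++) {
                struct tc_stats_flat f = pick_tc(&s, i);

                printf("tc%d: rx %llu tx %llu\n", i, f.rx_packets,
                       f.tx_packets);
        }
        return 0;
}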


@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 				 i40e_stat_str(hw, aq_ret),
 				 i40e_aq_str(hw, hw->aq.asq_last_status));
 		} else {
-			dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
-				 vsi->netdev->name,
+			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
 				 cur_multipromisc ? "entering" : "leaving");
 		}
 	}
@@ -14647,12 +14646,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
 	 * in order to register the netdev
 	 */
 	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
-	if (v_idx < 0)
+	if (v_idx < 0) {
+		err = v_idx;
 		goto err_switch_setup;
+	}
 	pf->lan_vsi = v_idx;
 	vsi = pf->vsi[v_idx];
-	if (!vsi)
+	if (!vsi) {
+		err = -EFAULT;
 		goto err_switch_setup;
+	}
 	vsi->alloc_queue_pairs = 1;
 	err = i40e_config_netdev(vsi);
 	if (err)


@@ -2187,8 +2187,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
  * @rx_ring: Rx ring being processed
  * @xdp: XDP buffer containing the frame
  **/
-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
-				    struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 {
 	int err, result = I40E_XDP_PASS;
 	struct i40e_ring *xdp_ring;
@@ -2227,7 +2226,7 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 	}
 xdp_out:
 	rcu_read_unlock();
-	return ERR_PTR(-result);
+	return result;
 }
 
 /**
@@ -2339,6 +2338,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	unsigned int xdp_xmit = 0;
 	bool failure = false;
 	struct xdp_buff xdp;
+	int xdp_res = 0;
 
 #if (PAGE_SIZE < 8192)
 	xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
@@ -2405,12 +2405,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 			/* At larger PAGE_SIZE, frame_sz depend on len size */
 			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
 #endif
-			skb = i40e_run_xdp(rx_ring, &xdp);
+			xdp_res = i40e_run_xdp(rx_ring, &xdp);
 		}
 
-		if (IS_ERR(skb)) {
-			unsigned int xdp_res = -PTR_ERR(skb);
-
+		if (xdp_res) {
 			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
 				xdp_xmit |= xdp_res;
 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
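Note: the old return path abused ERR_PTR(): I40E_XDP_TX and I40E_XDP_REDIR are small positive bit flags, not errnos, so encoding them as a fake error pointer and decoding with -PTR_ERR() worked only because the values happened to fall in the errno window. A userspace sketch re-stating the err.h helpers to show both schemes (MAX_ERRNO as in linux/err.h; the flag values are illustrative):

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

#define XDP_TX_FLAG    2        /* like I40E_XDP_TX */
#define XDP_REDIR_FLAG 4        /* like I40E_XDP_REDIR */

int main(void)
{
        /* Old scheme: stuff a positive flag mask into a fake pointer. */
        void *skb = ERR_PTR(-(XDP_TX_FLAG | XDP_REDIR_FLAG));

        if (IS_ERR(skb))
                printf("decoded flags: %ld\n", -PTR_ERR(skb));

        /* New scheme: just return the int and test it directly. */
        int xdp_res = XDP_TX_FLAG | XDP_REDIR_FLAG;

        if (xdp_res & (XDP_TX_FLAG | XDP_REDIR_FLAG))
                printf("flags direct: %d\n", xdp_res);
        return 0;
}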


@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 {
+	struct i40e_pf *pf = vf->pf;
 	int i;
 
 	i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 	 * ensure a reset.
 	 */
 	for (i = 0; i < 20; i++) {
+		/* If PF is in VFs releasing state reset VF is impossible,
+		 * so leave it.
+		 */
+		if (test_bit(__I40E_VFS_RELEASING, pf->state))
+			return;
 		if (i40e_reset_vf(vf, false))
 			return;
 		usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
 	if (!pf->vf)
 		return;
 
+	set_bit(__I40E_VFS_RELEASING, pf->state);
+
 	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
 		usleep_range(1000, 2000);
 
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
 		}
 	}
 	clear_bit(__I40E_VF_DISABLE, pf->state);
+	clear_bit(__I40E_VFS_RELEASING, pf->state);
 }
 
 #ifdef CONFIG_PCI_IOV


@@ -194,7 +194,6 @@ enum ice_state {
 	__ICE_NEEDS_RESTART,
 	__ICE_PREPARED_FOR_RESET,	/* set by driver when prepared */
 	__ICE_RESET_OICR_RECV,		/* set by driver after rcv reset OICR */
-	__ICE_DCBNL_DEVRESET,		/* set by dcbnl devreset */
 	__ICE_PFR_REQ,			/* set by driver and peers */
 	__ICE_CORER_REQ,		/* set by driver and peers */
 	__ICE_GLOBR_REQ,		/* set by driver and peers */
@@ -587,7 +586,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 const char *ice_stat_str(enum ice_status stat_err);
 const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
 		    bool is_tun);
@@ -605,6 +604,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
 			  struct ice_rq_event_info *event);
 int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);


@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 		if (!data) {
 			data = devm_kcalloc(ice_hw_to_dev(hw),
-					    sizeof(*data),
 					    ICE_AQC_FW_LOG_ID_MAX,
+					    sizeof(*data),
 					    GFP_KERNEL);
 			if (!data)
 				return ICE_ERR_NO_MEMORY;
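Note: devm_kcalloc() takes the element count before the element size, so the old call had the arguments swapped; since the allocation is n * size bytes either way, this is a signature-conformance fix rather than a behavior change. A tiny userspace analogue of the count-before-size convention:

#include <stdio.h>
#include <stdlib.h>

struct entry { int id; int level; };

int main(void)
{
        /* calloc-style APIs take (count, element size); the hunk above
         * reorders the kernel call to match devm_kcalloc(dev, n, size,
         * flags). */
        struct entry *data = calloc(64, sizeof(*data));

        if (!data)
                return 1;
        printf("allocated %zu bytes\n", 64 * sizeof(*data));
        free(data);
        return 0;
}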


@@ -31,8 +31,8 @@ enum ice_ctl_q {
 	ICE_CTL_Q_MAILBOX,
 };
 
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT	2500  /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT	10000 /* Count 10000 times */
 #define ICE_CTL_Q_SQ_CMD_USEC		100   /* Check every 100usec */
 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT	10    /* Count 10 times */
 #define ICE_CTL_Q_ADMIN_INIT_MSEC	100   /* Check every 100msec */
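Worked out: the poll budget is ICE_CTL_Q_SQ_CMD_TIMEOUT iterations of ICE_CTL_Q_SQ_CMD_USEC each, so the old ceiling was 2500 * 100 us = 250 ms and the new one is 10000 * 100 us = 1 s, matching the updated comment.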


@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
 /**
  * ice_cee_to_dcb_cfg
  * @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
  *
  * Convert CEE configuration from firmware to DCB configuration
  */
 static void
 ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
-		   struct ice_dcbx_cfg *dcbcfg)
+		   struct ice_port_info *pi)
 {
 	u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
 	u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+	u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
 	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
-	u8 i, err, sync, oper, app_index, ice_app_sel_type;
 	u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+	struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
 	u16 ice_app_prot_id_type;
 
-	/* CEE PG data to ETS config */
+	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+	dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+	dcbcfg->tlv_status = tlv_status;
+
+	/* CEE PG data */
 	dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
 	/* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
 		}
 	}
 
-	/* CEE PFC data to ETS config */
+	/* CEE PFC data */
 	dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
 	dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
 
+	/* CEE APP TLV data */
+	if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+		cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+	else
+		cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
 	app_index = 0;
 	for (i = 0; i < 3; i++) {
 		if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
 			ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
 			ice_app_sel_type = ICE_APP_SEL_TCPIP;
 			ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+			for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+				u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+				u8 sel = cmp_dcbcfg->app[j].selector;
+
+				if (sel == ICE_APP_SEL_TCPIP &&
+				    (prot_id == ICE_APP_PROT_ID_ISCSI ||
+				     prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+					ice_app_prot_id_type = prot_id;
+					break;
+				}
+			}
 		} else {
 			/* FIP APP */
 			ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -850,9 +873,9 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
 		return ICE_ERR_PARAM;
 
 	if (dcbx_mode == ICE_DCBX_MODE_IEEE)
-		dcbx_cfg = &pi->local_dcbx_cfg;
+		dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
 	else if (dcbx_mode == ICE_DCBX_MODE_CEE)
-		dcbx_cfg = &pi->desired_dcbx_cfg;
+		dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
 
 	/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
 	 * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
@@ -863,7 +886,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
 		goto out;
 
 	/* Get Remote DCB Config */
-	dcbx_cfg = &pi->remote_dcbx_cfg;
+	dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
 	ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
 				 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
 	/* Don't treat ENOENT as an error for Remote MIBs */
@@ -892,14 +915,11 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
 	ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
 	if (!ret) {
 		/* CEE mode */
-		dcbx_cfg = &pi->local_dcbx_cfg;
-		dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
-		dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
-		ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
 		ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+		ice_cee_to_dcb_cfg(&cee_cfg, pi);
 	} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
 		/* CEE mode not enabled try querying IEEE data */
-		dcbx_cfg = &pi->local_dcbx_cfg;
+		dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
 		dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
 		ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
 	}
@@ -916,26 +936,26 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
 */
 enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
 {
-	struct ice_port_info *pi = hw->port_info;
+	struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
 	enum ice_status ret = 0;
 
 	if (!hw->func_caps.common_cap.dcb)
 		return ICE_ERR_NOT_SUPPORTED;
 
-	pi->is_sw_lldp = true;
+	qos_cfg->is_sw_lldp = true;
 
 	/* Get DCBX status */
-	pi->dcbx_status = ice_get_dcbx_status(hw);
+	qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
 
-	if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
-	    pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
-	    pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+	if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE ||
+	    qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+	    qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
 		/* Get current DCBX configuration */
-		ret = ice_get_dcb_cfg(pi);
+		ret = ice_get_dcb_cfg(hw->port_info);
 		if (ret)
 			return ret;
-		pi->is_sw_lldp = false;
-	} else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+		qos_cfg->is_sw_lldp = false;
+	} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
 		return ICE_ERR_NOT_READY;
 	}
 
@@ -943,7 +963,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
 	if (enable_mib_change) {
 		ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
 		if (ret)
-			pi->is_sw_lldp = true;
+			qos_cfg->is_sw_lldp = true;
 	}
 
 	return ret;
@@ -958,21 +978,21 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
 */
 enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
 {
-	struct ice_port_info *pi = hw->port_info;
+	struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
 	enum ice_status ret;
 
 	if (!hw->func_caps.common_cap.dcb)
 		return ICE_ERR_NOT_SUPPORTED;
 
 	/* Get DCBX status */
-	pi->dcbx_status = ice_get_dcbx_status(hw);
+	qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
 
-	if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
+	if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
 		return ICE_ERR_NOT_READY;
 
 	ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
 	if (!ret)
-		pi->is_sw_lldp = !ena_mib;
+		qos_cfg->is_sw_lldp = !ena_mib;
 
 	return ret;
 }
@@ -1270,7 +1290,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
 	hw = pi->hw;
 
 	/* update the HW local config */
-	dcbcfg = &pi->local_dcbx_cfg;
+	dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
 	/* Allocate the LLDPDU */
 	lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
 	if (!lldpmib)
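Note: every hunk in this file swaps pi->{local,desired,remote}_dcbx_cfg, pi->dcbx_status and pi->is_sw_lldp for the same fields behind pi->qos_cfg. The container those accesses imply looks roughly like the following (a sketch inferred from usage, not quoted from ice_type.h; field order and bitfield packing may differ):

struct ice_qos_cfg {
        struct ice_dcbx_cfg local_dcbx_cfg;   /* operational config */
        struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE desired config */
        struct ice_dcbx_cfg remote_dcbx_cfg;  /* peer's advertised config */
        u8 dcbx_status;
        u8 is_sw_lldp;
};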


@@ -28,7 +28,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
 	if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
 		return;
 
-	dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 
 	ice_for_each_traffic_class(i)
 		if (vsi->tc_cfg.ena_tc & BIT(i))
@@ -134,7 +134,7 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
 	else
 		mode = DCB_CAP_DCBX_LLD_MANAGED;
 
-	if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+	if (port_info->qos_cfg.local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
 		return mode | DCB_CAP_DCBX_VER_CEE;
 	else
 		return mode | DCB_CAP_DCBX_VER_IEEE;
@@ -277,10 +277,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	int ret = ICE_DCB_NO_HW_CHG;
 	struct ice_vsi *pf_vsi;
 
-	curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+	curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 
 	/* FW does not care if change happened */
-	if (!pf->hw.port_info->is_sw_lldp)
+	if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
 		ret = ICE_DCB_HW_CHG_RST;
 
 	/* Enable DCB tagging only when more than one TC */
@@ -327,7 +327,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
 	 * the new config came from the HW in the first place.
 	 */
-	if (pf->hw.port_info->is_sw_lldp) {
+	if (pf->hw.port_info->qos_cfg.is_sw_lldp) {
 		ret = ice_set_dcb_cfg(pf->hw.port_info);
 		if (ret) {
 			dev_err(dev, "Set DCB Config failed\n");
@@ -360,7 +360,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 */
 static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
 {
-	struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
+	struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
 	u8 i;
 
 	/* Ensure ETS recommended DCB configuration is not already set */
@@ -446,7 +446,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 
 	mutex_lock(&pf->tc_mutex);
 
-	if (!pf->hw.port_info->is_sw_lldp)
+	if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
 		ice_cfg_etsrec_defaults(pf->hw.port_info);
 
 	ret = ice_set_dcb_cfg(pf->hw.port_info);
@@ -455,9 +455,9 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 		goto dcb_error;
 	}
 
-	if (!pf->hw.port_info->is_sw_lldp) {
+	if (!pf->hw.port_info->qos_cfg.is_sw_lldp) {
 		ret = ice_cfg_lldp_mib_change(&pf->hw, true);
-		if (ret && !pf->hw.port_info->is_sw_lldp) {
+		if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) {
 			dev_err(dev, "Failed to register for MIB changes\n");
 			goto dcb_error;
 		}
@@ -510,11 +510,12 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
 	int ret = 0;
 
 	pi = pf->hw.port_info;
-	newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
+	newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg),
+			 GFP_KERNEL);
 	if (!newcfg)
 		return -ENOMEM;
 
-	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+	memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*newcfg));
 
 	dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
 	if (ice_pf_dcb_cfg(pf, newcfg, locked))
@@ -545,7 +546,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
 	if (!dcbcfg)
 		return -ENOMEM;
 
-	memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+	memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*dcbcfg));
 
 	dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
 	dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
@@ -608,7 +609,7 @@ static bool ice_dcb_tc_contig(u8 *prio_table)
 */
 static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
 {
-	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 	struct device *dev = ice_pf_to_dev(pf);
 	int ret;
 
@@ -638,7 +639,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
 */
 void ice_pf_dcb_recfg(struct ice_pf *pf)
 {
-	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 	u8 tc_map = 0;
 	int v, ret;
 
@@ -691,7 +692,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 	port_info = hw->port_info;
 
 	err = ice_init_dcb(hw, false);
-	if (err && !port_info->is_sw_lldp) {
+	if (err && !port_info->qos_cfg.is_sw_lldp) {
 		dev_err(dev, "Error initializing DCB %d\n", err);
 		goto dcb_init_err;
 	}
@@ -858,7 +859,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 		/* Update the remote cached instance and return */
 		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
 					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
-					 &pi->remote_dcbx_cfg);
+					 &pi->qos_cfg.remote_dcbx_cfg);
 		if (ret) {
 			dev_err(dev, "Failed to get remote DCB config\n");
 			return;
@@ -868,10 +869,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	mutex_lock(&pf->tc_mutex);
 
 	/* store the old configuration */
-	tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
+	tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 
 	/* Reset the old DCBX configuration data */
-	memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
+	memset(&pi->qos_cfg.local_dcbx_cfg, 0,
+	       sizeof(pi->qos_cfg.local_dcbx_cfg));
 
 	/* Get updated DCBX data from firmware */
 	ret = ice_get_dcb_cfg(pf->hw.port_info);
@@ -881,7 +883,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	}
 
 	/* No change detected in DCBX configs */
-	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+	if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg,
+		    sizeof(tmp_dcbx_cfg))) {
 		dev_dbg(dev, "No change detected in DCBX configuration.\n");
 		goto out;
 	}
@@ -889,13 +892,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	pf->dcbx_cap = ice_dcb_get_mode(pi, false);
 
 	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
-					   &pi->local_dcbx_cfg);
+					   &pi->qos_cfg.local_dcbx_cfg);
 
-	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
+	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg);
 	if (!need_reconfig)
 		goto out;
 
 	/* Enable DCB tagging only when more than one TC */
-	if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
+	if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
 		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
 	} else {


@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
 	while (ice_is_reset_in_progress(pf->state))
 		usleep_range(1000, 2000);
 
-	set_bit(__ICE_DCBNL_DEVRESET, pf->state);
 	dev_close(netdev);
 	netdev_state_change(netdev);
 	dev_open(netdev, NULL);
 	netdev_state_change(netdev);
-	clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
 }
 
 /**
@@ -34,12 +32,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
 static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
 {
 	struct ice_dcbx_cfg *dcbxcfg;
-	struct ice_port_info *pi;
 	struct ice_pf *pf;
 
 	pf = ice_netdev_to_pf(netdev);
-	pi = pf->hw.port_info;
-	dcbxcfg = &pi->local_dcbx_cfg;
+	dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 
 	ets->willing = dcbxcfg->etscfg.willing;
 	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
@@ -74,7 +70,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
 		return -EINVAL;
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	mutex_lock(&pf->tc_mutex);
 
@@ -159,6 +155,7 @@ static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
 static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
 {
 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
+	struct ice_qos_cfg *qos_cfg;
 
 	/* if FW LLDP agent is running, DCBNL not allowed to change mode */
 	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
@@ -175,10 +172,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
 		return ICE_DCB_NO_HW_CHG;
 
 	pf->dcbx_cap = mode;
+	qos_cfg = &pf->hw.port_info->qos_cfg;
 	if (mode & DCB_CAP_DCBX_VER_CEE)
-		pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
 	else
-		pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
 
 	dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
 	return ICE_DCB_HW_CHG_RST;
@@ -229,7 +227,7 @@ static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
 	struct ice_dcbx_cfg *dcbxcfg;
 	int i;
 
-	dcbxcfg = &pi->local_dcbx_cfg;
+	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
 	pfc->pfc_cap = dcbxcfg->pfc.pfccap;
 	pfc->pfc_en = dcbxcfg->pfc.pfcena;
 	pfc->mbc = dcbxcfg->pfc.mbc;
@@ -260,7 +258,7 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
 
 	mutex_lock(&pf->tc_mutex);
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	if (pfc->pfc_cap)
 		new_cfg->pfc.pfccap = pfc->pfc_cap;
@@ -297,9 +295,9 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
 	if (prio >= ICE_MAX_USER_PRIORITY)
 		return;
 
-	*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+	*setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
 	dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
-		prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
+		prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
 }
 
 /**
@@ -320,7 +318,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
 	if (prio >= ICE_MAX_USER_PRIORITY)
 		return;
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
 	if (set)
@@ -342,7 +340,7 @@ static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
 	struct ice_port_info *pi = pf->hw.port_info;
 
 	/* Return enabled if any UP enabled for PFC */
-	if (pi->local_dcbx_cfg.pfc.pfcena)
+	if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
 		return 1;
 
 	return 0;
@@ -382,8 +380,8 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
 
 	if (state) {
 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
-		memcpy(&pf->hw.port_info->desired_dcbx_cfg,
-		       &pf->hw.port_info->local_dcbx_cfg,
+		memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
+		       &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
 		       sizeof(struct ice_dcbx_cfg));
 	} else {
 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
@@ -417,7 +415,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
 	if (prio >= ICE_MAX_USER_PRIORITY)
 		return;
 
-	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
 	dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
 		*pgid);
 }
@@ -448,7 +446,7 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
 		return;
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	/* prio_type, bwg_id and bw_pct per UP are not supported */
 
@@ -478,7 +476,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
 	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
 		return;
 
-	*bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
+	*bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
 	dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
 		pgid, *bw_pct);
 }
@@ -502,7 +500,7 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
 	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
 		return;
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
 }
@@ -532,7 +530,7 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
 	if (prio >= ICE_MAX_USER_PRIORITY)
 		return;
 
-	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
 }
 
 /**
@@ -703,9 +701,9 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
 
 	mutex_lock(&pf->tc_mutex);
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
-	old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 
 	if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
 		ret = -EINVAL;
@@ -755,7 +753,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
 		return -EINVAL;
 
 	mutex_lock(&pf->tc_mutex);
-	old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
 
 	if (old_cfg->numapps <= 1)
 		goto delapp_out;
@@ -764,7 +762,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
 	if (ret)
 		goto delapp_out;
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	for (i = 1; i < new_cfg->numapps; i++) {
 		if (app->selector == new_cfg->app[i].selector &&
@@ -817,7 +815,7 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
 		return ICE_DCB_NO_HW_CHG;
 
-	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
 
 	mutex_lock(&pf->tc_mutex);
 
@@ -888,7 +886,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
 	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
 		return;
 
-	dcbxcfg = &pi->local_dcbx_cfg;
+	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
 
 	for (i = 0; i < dcbxcfg->numapps; i++) {
 		u8 prio, tc_map;


@@ -2986,7 +2986,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 	pause->rx_pause = 0;
 	pause->tx_pause = 0;
 
-	dcbx_cfg = &pi->local_dcbx_cfg;
+	dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
 
 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 	if (!pcaps)
@@ -3038,7 +3038,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 
 	pi = vsi->port_info;
 	hw_link_info = &pi->phy.link_info;
-	dcbx_cfg = &pi->local_dcbx_cfg;
+	dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
 	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
 
 	/* Changing the port's flow control is not supported if this isn't the
@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
 
 	/* Get WoL settings based on the HW capability */
-	if (ice_is_wol_supported(pf)) {
+	if (ice_is_wol_supported(&pf->hw)) {
 		wol->supported = WAKE_MAGIC;
 		wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
 	} else {
@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
 
-	if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+	if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
 		return -EOPNOTSUPP;
 
 	/* only magic packet is supported */


@@ -2078,7 +2078,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
 {
-	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
+	struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
 
 	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
 	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
@@ -2489,7 +2489,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 		if (!locked)
 			rtnl_lock();
 
-		err = ice_open(vsi->netdev);
+		err = ice_open_internal(vsi->netdev);
 
 		if (!locked)
 			rtnl_unlock();
@@ -2518,7 +2518,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 		if (!locked)
 			rtnl_lock();
 
-		ice_stop(vsi->netdev);
+		ice_vsi_close(vsi);
 
 		if (!locked)
 			rtnl_unlock();
@@ -2944,7 +2944,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 bool ice_is_reset_in_progress(unsigned long *state)
 {
 	return test_bit(__ICE_RESET_OICR_RECV, state) ||
-	       test_bit(__ICE_DCBNL_DEVRESET, state) ||
 	       test_bit(__ICE_PFR_REQ, state) ||
 	       test_bit(__ICE_CORER_REQ, state) ||
 	       test_bit(__ICE_GLOBR_REQ, state);


@@ -3515,15 +3515,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 }
 
 /**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
  *
  * Check if WoL is supported based on the HW configuration.
  * Returns true if NVM supports and enables WoL for this port, false otherwise
  */
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
 {
-	struct ice_hw *hw = &pf->hw;
 	u16 wol_ctrl;
 
 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3532,7 +3531,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
 		return false;
 
-	return !(BIT(hw->pf_id) & wol_ctrl);
+	return !(BIT(hw->port_info->lport) & wol_ctrl);
 }
 
 /**
@@ -4170,28 +4169,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 		goto err_send_version_unroll;
 	}
 
+	/* not a fatal error if this fails */
 	err = ice_init_nvm_phy_type(pf->hw.port_info);
-	if (err) {
+	if (err)
 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
-		goto err_send_version_unroll;
-	}
 
+	/* not a fatal error if this fails */
 	err = ice_update_link_info(pf->hw.port_info);
-	if (err) {
+	if (err)
 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
-		goto err_send_version_unroll;
-	}
 
 	ice_init_link_dflt_override(pf->hw.port_info);
 
 	/* if media available, initialize PHY settings */
 	if (pf->hw.port_info->phy.link_info.link_info &
 	    ICE_AQ_MEDIA_AVAILABLE) {
+		/* not a fatal error if this fails */
 		err = ice_init_phy_user_cfg(pf->hw.port_info);
-		if (err) {
+		if (err)
 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
-			goto err_send_version_unroll;
-		}
 
 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -4542,6 +4538,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
 			continue;
 		ice_vsi_free_q_vectors(pf->vsi[v]);
 	}
+	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
 	ice_clear_interrupt_scheme(pf);
 
 	pci_save_state(pdev);
@@ -6616,6 +6613,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  * Returns 0 on success, negative value on failure
  */
 int ice_open(struct net_device *netdev)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_pf *pf = np->vsi->back;
+
+	if (ice_is_reset_in_progress(pf->state)) {
+		netdev_err(netdev, "can't open net device while reset is in progress");
+		return -EBUSY;
+	}
+
+	return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be used directly except for ice_open and reset
+ * handling routine
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
@@ -6696,6 +6715,12 @@ int ice_stop(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
+	struct ice_pf *pf = vsi->back;
+
+	if (ice_is_reset_in_progress(pf->state)) {
+		netdev_err(netdev, "can't stop net device while reset is in progress");
+		return -EBUSY;
+	}
 
 	ice_vsi_close(vsi);
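The WoL change above keys the NVM WoL-control word by the absolute port number (hw->port_info->lport) rather than the PF ID, which can differ. A minimal userspace sketch of the bit test the fixed helper performs; the BIT macro and the control-word value here are local stand-ins, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* A bit set to 1 in the NVM word *disables* WoL for that port,
 * so WoL is supported when the port's bit is clear.
 */
static bool wol_supported(uint8_t lport, uint16_t wol_ctrl)
{
	return !(BIT(lport) & wol_ctrl);
}

int main(void)
{
	uint16_t wol_ctrl = 0x0002;	/* example: WoL disabled on port 1 only */

	printf("port 0: %d\n", wol_supported(0, wol_ctrl));	/* prints 1 */
	printf("port 1: %d\n", wol_supported(1, wol_ctrl));	/* prints 0 */
	return 0;
}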


@@ -1239,6 +1239,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
 			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
 						vsi_list_id);
 
+		if (!m_entry->vsi_list_info)
+			return ICE_ERR_NO_MEMORY;
+
 		/* If this entry was large action then the large action needs
 		 * to be updated to point to FWD to VSI list
 		 */
@@ -2224,6 +2227,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
 	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
 		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
 		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+		 fm_entry->vsi_list_info &&
 		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 }
 
@@ -2296,14 +2300,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
 		return ICE_ERR_PARAM;
 
 	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
-		struct ice_fltr_info *fi;
-
-		fi = &fm_entry->fltr_info;
-		if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
 			continue;
 
 		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
-							vsi_list_head, fi);
+							vsi_list_head,
+							&fm_entry->fltr_info);
 		if (status)
 			return status;
 	}
@@ -2626,7 +2628,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
 					  &remove_list_head);
 	mutex_unlock(rule_lock);
 	if (status)
-		return;
+		goto free_fltr_list;
 
 	switch (lkup) {
 	case ICE_SW_LKUP_MAC:
@@ -2649,6 +2651,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
 		break;
 	}
 
+free_fltr_list:
 	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
 		list_del(&fm_entry->list_entry);
 		devm_kfree(ice_hw_to_dev(hw), fm_entry);


@@ -2421,7 +2421,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
 	if (unlikely(skb->priority == TC_PRIO_CONTROL &&
 		     vsi->type == ICE_VSI_PF &&
-		     vsi->port_info->is_sw_lldp))
+		     vsi->port_info->qos_cfg.is_sw_lldp))
 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
 					ICE_TXD_CTX_QW1_CMD_S);


@@ -493,6 +493,7 @@ struct ice_dcb_app_priority_table {
 #define ICE_TLV_STATUS_ERR	0x4
 #define ICE_APP_PROT_ID_FCOE	0x8906
 #define ICE_APP_PROT_ID_ISCSI	0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
 #define ICE_APP_PROT_ID_FIP	0x8914
 #define ICE_APP_SEL_ETHTYPE	0x1
 #define ICE_APP_SEL_TCPIP	0x2
@@ -514,6 +515,14 @@ struct ice_dcbx_cfg {
 #define ICE_DCBX_APPS_NON_WILLING	0x1
 };
 
+struct ice_qos_cfg {
+	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
+	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
+	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
+	u8 dcbx_status : 3;			/* see ICE_DCBX_STATUS_DIS */
+	u8 is_sw_lldp : 1;
+};
+
 struct ice_port_info {
 	struct ice_sched_node *root;	/* Root Node per Port */
 	struct ice_hw *hw;		/* back pointer to HW instance */
@@ -537,13 +546,7 @@ struct ice_port_info {
 		sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
 	/* List contain profile ID(s) and other params per layer */
 	struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
-	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
-	/* DCBX info */
-	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
-	struct ice_dcbx_cfg desired_dcbx_cfg;	/* CEE Desired Cfg */
-	/* LLDP/DCBX Status */
-	u8 dcbx_status:3;		/* see ICE_DCBX_STATUS_DIS */
-	u8 is_sw_lldp:1;
+	struct ice_qos_cfg qos_cfg;
 	u8 is_vf:1;
 };


@@ -184,6 +184,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
 	return !!(entry->tuple_nat_node.next);
 }
 
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+		       u32 *labels, u32 *id)
+{
+	if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+		*id = 0;
+		return 0;
+	}
+
+	if (mapping_add(ct_priv->labels_mapping, labels, id))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+	if (id)
+		mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -435,7 +457,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
 	mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
 	mlx5e_mod_hdr_detach(ct_priv->dev,
 			     ct_priv->mod_hdr_tbl, zone_rule->mh);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 	kfree(attr);
 }
 
@@ -638,8 +660,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 	if (!meta)
 		return -EOPNOTSUPP;
 
-	err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
-			  &attr->ct_attr.ct_labels_id);
+	err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+				     &attr->ct_attr.ct_labels_id);
 	if (err)
 		return -EOPNOTSUPP;
 	if (nat) {
@@ -675,7 +697,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 err_mapping:
 	dealloc_mod_hdr_actions(&mod_acts);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 	return err;
 }
 
@@ -743,7 +765,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
 	mlx5e_mod_hdr_detach(ct_priv->dev,
 			     ct_priv->mod_hdr_tbl, zone_rule->mh);
-	mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
 	kfree(attr);
 err_attr:
@@ -1198,7 +1220,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
 	if (!priv || !ct_attr->ct_labels_id)
 		return;
 
-	mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+	mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
 }
 
 int
@@ -1276,7 +1298,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
 		ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
 		ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
 		ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-		if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+		if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
 			return -EOPNOTSUPP;
 		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
 					    MLX5_CT_LABELS_MASK);


@@ -744,11 +744,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 	return 0;
 }
 
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
-						   u32 eth_proto_cap,
-						   u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+						   struct ethtool_link_ksettings *link_ksettings,
+						   u32 eth_proto_cap, u8 connector_type)
 {
-	if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+	if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
 		if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
 				   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
 				   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -884,9 +884,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
 		[MLX5E_PORT_OTHER]              = PORT_OTHER,
 	};
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
 {
-	if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+	if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
 		return ptys2connector_type[connector_type];
 
 	if (eth_proto &
@@ -987,11 +987,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 			     data_rate_oper, link_ksettings);
 
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
 
-	link_ksettings->base.port = get_connector_port(eth_proto_oper,
-						       connector_type, ext);
-	ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-					       connector_type, ext);
+	connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+			 connector_type : MLX5E_PORT_UNKNOWN;
+	link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+	ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+					       connector_type);
 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
 	if (an_status == MLX5_AN_COMPLETE)


@@ -926,13 +926,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
 	mutex_unlock(&table->lock);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+		      MLX5_CAP_GEN(dev, max_num_eqs) :
+		      1 << MLX5_CAP_GEN(dev, log_max_eq);
 	int err;
 
 	eq_table->num_comp_eqs =
-		mlx5_irq_get_num_comp(eq_table->irq_table);
+		min_t(int,
+		      mlx5_irq_get_num_comp(eq_table->irq_table),
+		      num_eqs - MLX5_MAX_ASYNC_EQS);
 
 	err = create_async_eqs(dev);
 	if (err) {
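The sizing change reserves a fixed budget for async EQs and clamps the completion-EQ count to whatever the device has left, so a device with few EQs no longer over-commits. A self-contained sketch of the arithmetic under assumed example numbers (min_int stands in for the kernel's min_t):

#include <stdio.h>

#define MAX_ASYNC_EQS 4	/* example: on-demand paging enabled */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int device_max_eqs = 8;		/* example device capability */
	int irq_comp_vectors = 12;	/* example IRQ vector count */
	int num_comp_eqs = min_int(irq_comp_vectors,
				   device_max_eqs - MAX_ASYNC_EQS);

	printf("completion EQs: %d\n", num_comp_eqs);	/* prints 4 */
	return 0;
}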


@@ -20,6 +20,7 @@
 #include <net/red.h>
 #include <net/vxlan.h>
 #include <net/flow_offload.h>
+#include <net/inet_ecn.h>
 
 #include "port.h"
 #include "core.h"
@@ -345,6 +346,20 @@ struct mlxsw_sp_port_type_speed_ops {
 	u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
 };
 
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+					   bool *trap_en)
+{
+	bool set_ce = false;
+
+	*trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+	if (set_ce)
+		return INET_ECN_CE;
+	else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+		return INET_ECN_ECT_1;
+	else
+		return inner_ecn;
+}
+
 static inline struct net_device *
 mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
 {
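The new mlxsw_sp_tunnel_ecn_decap() helper centralizes the RFC 6040 decapsulation remap that the two callers below previously open-coded. A simplified userspace sketch of the same policy; the codepoint macros are local copies and the trap condition is reduced to the invalid outer-CE/inner-not-ECT case:

#include <stdbool.h>
#include <stdio.h>

/* Two-bit ECN codepoints as carried in the IP header (local copies) */
#define ECN_NOT_ECT 0
#define ECN_ECT_1   1
#define ECN_ECT_0   2
#define ECN_CE      3

/* Propagate CE into the inner header, preserve outer ECT(1) over
 * inner ECT(0), otherwise keep the inner codepoint; trap_en flags
 * the invalid combination for software handling.
 */
static unsigned char ecn_decap(unsigned char outer, unsigned char inner,
			       bool *trap_en)
{
	*trap_en = (outer == ECN_CE && inner == ECN_NOT_ECT);
	if (outer == ECN_CE && inner != ECN_NOT_ECT)
		return ECN_CE;
	if (outer == ECN_ECT_1 && inner == ECN_ECT_0)
		return ECN_ECT_1;
	return inner;
}

int main(void)
{
	bool trap;
	unsigned char ce = ecn_decap(ECN_CE, ECN_ECT_0, &trap);

	printf("new inner=%u trap=%d\n", ce, trap);	/* 3 (CE), trap=0 */
	return 0;
}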


@@ -371,12 +371,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
 					    u8 inner_ecn, u8 outer_ecn)
 {
 	char tidem_pl[MLXSW_REG_TIDEM_LEN];
-	bool trap_en, set_ce = false;
 	u8 new_inner_ecn;
+	bool trap_en;
 
-	trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-	new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+	new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+						  &trap_en);
 	mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
 			     trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);


@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
 					 u8 inner_ecn, u8 outer_ecn)
 {
 	char tndem_pl[MLXSW_REG_TNDEM_LEN];
-	bool trap_en, set_ce = false;
 	u8 new_inner_ecn;
+	bool trap_en;
 
-	trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-	new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
+	new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+						  &trap_en);
 	mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
 			     trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);


@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
 			dev_kfree_skb_any(curr);
 			if (segs != NULL) {
 				curr = segs;
-				segs = segs->next;
+				segs = next;
 				curr->next = NULL;
 				dev_kfree_skb_any(segs);
 			}


@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 			dev_consume_skb_any(skb);
 		else
 			dev_kfree_skb_any(skb);
+		return;
 	}
 
 	nfp_ccm_rx(&bpf->ccm, skb);


@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
  * @qos_rate_limiters:	Current active qos rate limiters
  * @qos_stats_lock:	Lock on qos stats updates
  * @pre_tun_rule_cnt:	Number of pre-tunnel rules offloaded
+ * @merge_table:	Hash table to store merged flows
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -223,6 +224,7 @@ struct nfp_flower_priv {
 	unsigned int qos_rate_limiters;
 	spinlock_t qos_stats_lock; /* Protect the qos stats */
 	int pre_tun_rule_cnt;
+	struct rhashtable merge_table;
 };
 
 /**
@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+	u64 parent_ctx;
+	struct rhash_head ht_node;
+};
 
 struct nfp_fl_stats_frame {
 	__be32 stats_con_id;


@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
 	.automatic_shrinking	= true,
 };
 
+const struct rhashtable_params merge_table_params = {
+	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
+	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
+	.key_len	= sizeof(u64),
+};
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 			     unsigned int host_num_mems)
 {
@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	if (err)
 		goto err_free_flow_table;
 
+	err = rhashtable_init(&priv->merge_table, &merge_table_params);
+	if (err)
+		goto err_free_stats_ctx_table;
+
 	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
 	/* Init ring buffer and unallocated mask_ids. */
@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
 			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
 	if (!priv->mask_ids.mask_id_free_list.buf)
-		goto err_free_stats_ctx_table;
+		goto err_free_merge_table;
 
 	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -550,6 +560,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	kfree(priv->mask_ids.last_used);
 err_free_mask_id:
 	kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+	rhashtable_destroy(&priv->merge_table);
 err_free_stats_ctx_table:
 	rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
 				    nfp_check_rhashtable_empty, NULL);
 	rhashtable_free_and_destroy(&priv->stats_ctx_table,
 				    nfp_check_rhashtable_empty, NULL);
+	rhashtable_free_and_destroy(&priv->merge_table,
+				    nfp_check_rhashtable_empty, NULL);
 	kvfree(priv->stats);
 	kfree(priv->mask_ids.mask_id_free_list.buf);
 	kfree(priv->mask_ids.last_used);


@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 	struct netlink_ext_ack *extack = NULL;
 	struct nfp_fl_payload *merge_flow;
 	struct nfp_fl_key_ls merge_key_ls;
+	struct nfp_merge_info *merge_info;
+	u64 parent_ctx = 0;
 	int err;
 
 	ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 	    nfp_flower_is_merge_flow(sub_flow2))
 		return -EINVAL;
 
+	/* check if the two flows are already merged */
+	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+	if (rhashtable_lookup_fast(&priv->merge_table,
+				   &parent_ctx, merge_table_params)) {
+		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+		return 0;
+	}
+
 	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
 	if (err)
 		return err;
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 	if (err)
 		goto err_release_metadata;
 
+	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+	if (!merge_info) {
+		err = -ENOMEM;
+		goto err_remove_rhash;
+	}
+	merge_info->parent_ctx = parent_ctx;
+	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+				     merge_table_params);
+	if (err)
+		goto err_destroy_merge_info;
+
 	err = nfp_flower_xmit_flow(app, merge_flow,
 				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
 	if (err)
-		goto err_remove_rhash;
+		goto err_remove_merge_info;
 
 	merge_flow->in_hw = true;
 	sub_flow1->in_hw = false;
 
 	return 0;
 
+err_remove_merge_info:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+					    &merge_info->ht_node,
+					    merge_table_params));
+err_destroy_merge_info:
+	kfree(merge_info);
+
 err_remove_rhash:
 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
 					    &merge_flow->fl_node,
@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 {
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload_link *link, *temp;
+	struct nfp_merge_info *merge_info;
 	struct nfp_fl_payload *origin;
+	u64 parent_ctx = 0;
 	bool mod = false;
 	int err;
 
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 err_free_links:
 	/* Clean any links connected with the merged flow. */
 	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
-				 merge_flow.list)
+				 merge_flow.list) {
+		u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+		parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
 		nfp_flower_unlink_flow(link);
+	}
+
+	merge_info = rhashtable_lookup_fast(&priv->merge_table,
+					    &parent_ctx,
+					    merge_table_params);
+	if (merge_info) {
+		WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+						    &merge_info->ht_node,
+						    merge_table_params));
+		kfree(merge_info);
+	}
 
 	kfree(merge_flow->action_data);
 	kfree(merge_flow->mask_data);
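The merge table above is keyed by a single u64 built from the two sub-flows' 32-bit host context IDs, so a repeated merge request for the same pair is detected with one hash lookup. A minimal sketch of the key construction (IDs are example values):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack two 32-bit context IDs into one 64-bit hash-table key, so a
 * given (flow1, flow2) pair maps to exactly one entry.
 */
static uint64_t make_parent_ctx(uint32_t ctx_id1, uint32_t ctx_id2)
{
	return ((uint64_t)ctx_id1 << 32) | (uint64_t)ctx_id2;
}

int main(void)
{
	printf("key=%016" PRIx64 "\n", make_parent_ctx(0x10, 0x20));
	return 0;
}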


@@ -907,8 +907,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
 	info = skb_tunnel_info(skb);
 	if (info) {
-		info->key.u.ipv4.dst = fl4.saddr;
-		info->key.u.ipv4.src = fl4.daddr;
+		struct ip_tunnel_info *unclone;
+
+		unclone = skb_tunnel_info_unclone(skb);
+		if (unlikely(!unclone)) {
+			dst_release(&rt->dst);
+			return -ENOMEM;
+		}
+
+		unclone->key.u.ipv4.dst = fl4.saddr;
+		unclone->key.u.ipv4.src = fl4.daddr;
 	}
 
 	if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -992,8 +1000,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
 	if (info) {
-		info->key.u.ipv6.dst = fl6.saddr;
-		info->key.u.ipv6.src = fl6.daddr;
+		struct ip_tunnel_info *unclone;
+
+		unclone = skb_tunnel_info_unclone(skb);
+		if (unlikely(!unclone)) {
+			dst_release(dst);
+			return -ENOMEM;
+		}
+
+		unclone->key.u.ipv6.dst = fl6.saddr;
+		unclone->key.u.ipv6.src = fl6.daddr;
 	}
 
 	if (!pskb_may_pull(skb, ETH_HLEN)) {


@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
 			return -ENOMEM;
 		}
 		usb_anchor_urb(urb, &atusb->idle_urbs);
+		usb_free_urb(urb);
 		n--;
 	}
 	return 0;


@@ -328,7 +328,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
 
 int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
 {
-	int val;
+	int val, mask = 0;
 
 	/* Enable EEE at PHY level */
 	val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -347,10 +347,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
 	if (val < 0)
 		return val;
 
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			      phydev->supported))
+		mask |= MDIO_EEE_1000T;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+			      phydev->supported))
+		mask |= MDIO_EEE_100TX;
+
 	if (enable)
-		val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+		val |= mask;
 	else
-		val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+		val &= ~mask;
 
 	phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
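The EEE fix advertises only the modes the PHY actually supports instead of unconditionally setting both the 100TX and 1000T bits. A hedged userspace sketch of the mask construction; the register bit values match the MDIO definitions, but the two bools stand in for the linkmode_test_bit() checks on phydev->supported:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MDIO EEE advertisement bits (register 7.60) */
#define MDIO_EEE_100TX 0x0002
#define MDIO_EEE_1000T 0x0004

static uint16_t eee_adv(uint16_t val, bool enable,
			bool has_100tx, bool has_1000t)
{
	uint16_t mask = 0;

	if (has_1000t)
		mask |= MDIO_EEE_1000T;
	if (has_100tx)
		mask |= MDIO_EEE_100TX;

	return enable ? (val | mask) : (val & ~mask);
}

int main(void)
{
	/* PHY supports 1000T only: enabling must not set the 100TX bit */
	printf("adv=0x%04x\n", eee_adv(0, true, false, true));	/* 0x0004 */
	return 0;
}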


@@ -69,6 +69,14 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
 
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
@@ -2978,6 +2986,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
 	return __tun_set_ebpf(tun, prog_p, prog);
 }
 
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+	switch (type) {
+	case ARPHRD_IP6GRE:
+	case ARPHRD_TUNNEL6:
+		return sizeof(struct in6_addr);
+	case ARPHRD_IPGRE:
+	case ARPHRD_TUNNEL:
+	case ARPHRD_SIT:
+		return 4;
+	case ARPHRD_ETHER:
+		return ETH_ALEN;
+	case ARPHRD_IEEE802154:
+	case ARPHRD_IEEE802154_MONITOR:
+		return IEEE802154_EXTENDED_ADDR_LEN;
+	case ARPHRD_PHONET_PIPE:
+	case ARPHRD_PPP:
+	case ARPHRD_NONE:
+		return 0;
+	case ARPHRD_6LOWPAN:
+		return EUI64_ADDR_LEN;
+	case ARPHRD_FDDI:
+		return FDDI_K_ALEN;
+	case ARPHRD_HIPPI:
+		return HIPPI_ALEN;
+	case ARPHRD_IEEE802:
+		return FC_ALEN;
+	case ARPHRD_ROSE:
+		return ROSE_ADDR_LEN;
+	case ARPHRD_NETROM:
+		return AX25_ADDR_LEN;
+	case ARPHRD_LOCALTLK:
+		return LTALK_ALEN;
+	default:
+		return 0;
+	}
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg, int ifreq_len)
 {
@@ -3133,6 +3180,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			ret = -EBUSY;
 		} else {
 			tun->dev->type = (int) arg;
+			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
 				   tun->dev->type);
 			ret = 0;


@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
 	return serial;
 }
 
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
 {
 	int index;
 	unsigned long flags;
@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
 	spin_lock_irqsave(&serial_table_lock, flags);
 	for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
 		if (serial_table[index] == NULL) {
+			serial_table[index] = serial->parent;
+			serial->minor = index;
 			spin_unlock_irqrestore(&serial_table_lock, flags);
-			return index;
+			return 0;
 		}
 	}
 	spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
 	return -1;
 }
 
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&serial_table_lock, flags);
-	if (serial)
-		serial_table[index] = serial->parent;
-	else
-		serial_table[index] = NULL;
+	serial_table[serial->minor] = NULL;
 	spin_unlock_irqrestore(&serial_table_lock, flags);
 }
 
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 static void hso_serial_tty_unregister(struct hso_serial *serial)
 {
 	tty_unregister_device(tty_drv, serial->minor);
+	release_minor(serial);
 }
 
 static void hso_serial_common_free(struct hso_serial *serial)
@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
 static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
 				    int rx_size, int tx_size)
 {
-	int minor;
 	int i;
 
 	tty_port_init(&serial->port);
 
-	minor = get_free_serial_index();
-	if (minor < 0)
+	if (obtain_minor(serial))
 		goto exit2;
 
 	/* register our minor number */
 	serial->parent->dev = tty_port_register_device_attr(&serial->port,
-			tty_drv, minor, &serial->parent->interface->dev,
+			tty_drv, serial->minor, &serial->parent->interface->dev,
 			serial->parent, hso_serial_dev_groups);
-	if (IS_ERR(serial->parent->dev))
+	if (IS_ERR(serial->parent->dev)) {
+		release_minor(serial);
 		goto exit2;
+	}
 
-	/* fill in specific data for later use */
-	serial->minor = minor;
 	serial->magic = HSO_SERIAL_MAGIC;
 	spin_lock_init(&serial->serial_lock);
 	serial->num_rx_urbs = num_urbs;
@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
 		serial->write_data = hso_std_serial_write_data;
 
-	/* and record this serial */
-	set_serial_by_index(serial->minor, serial);
-
 	/* setup the proc dirs and files if needed */
 	hso_log_port(hso_dev);
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
 	serial->shared_int->ref_count++;
 	mutex_unlock(&serial->shared_int->shared_int_lock);
 
-	/* and record this serial */
-	set_serial_by_index(serial->minor, serial);
-
 	/* setup the proc dirs and files if needed */
 	hso_log_port(hso_dev);
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
 			cancel_work_sync(&serial_table[i]->async_get_intf);
 			hso_serial_tty_unregister(serial);
 			kref_put(&serial_table[i]->ref, hso_serial_ref_free);
-			set_serial_by_index(i, NULL);
 		}
 	}


@@ -2724,12 +2724,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 				goto tx_error;
 		} else if (err) {
 			if (info) {
+				struct ip_tunnel_info *unclone;
 				struct in_addr src, dst;
 
+				unclone = skb_tunnel_info_unclone(skb);
+				if (unlikely(!unclone))
+					goto tx_error;
+
 				src = remote_ip.sin.sin_addr;
 				dst = local_ip.sin.sin_addr;
-				info->key.u.ipv4.src = src.s_addr;
-				info->key.u.ipv4.dst = dst.s_addr;
+				unclone->key.u.ipv4.src = src.s_addr;
+				unclone->key.u.ipv4.dst = dst.s_addr;
 			}
 			vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
 			dst_release(ndst);
@@ -2780,12 +2785,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 				goto tx_error;
 		} else if (err) {
 			if (info) {
+				struct ip_tunnel_info *unclone;
 				struct in6_addr src, dst;
 
+				unclone = skb_tunnel_info_unclone(skb);
+				if (unlikely(!unclone))
+					goto tx_error;
+
 				src = remote_ip.sin6.sin6_addr;
 				dst = local_ip.sin6.sin6_addr;
-				info->key.u.ipv6.src = src;
-				info->key.u.ipv6.dst = dst;
+				unclone->key.u.ipv6.src = src;
+				unclone->key.u.ipv6.dst = dst;
 			}
 
 			vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);


@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		if (pad > 0) { /* Pad the frame with zeros */
 			if (__skb_pad(skb, pad, false))
-				goto drop;
+				goto out;
 			skb_put(skb, pad);
 		}
 	}
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 drop:
-	dev->stats.tx_dropped++;
 	kfree_skb(skb);
+out:
+	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
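This fix matters because __skb_pad() already frees the skb on failure, so jumping to the old drop label would have freed it a second time; the new out label only bumps the drop counter. A simplified stand-alone sketch of that label split, with plain malloc/free in place of skb handling:

#include <stdio.h>
#include <stdlib.h>

static unsigned long tx_dropped;

/* One failure path has already consumed the buffer (as __skb_pad()
 * does on error), so it jumps past the free; only the counter is
 * shared between the two paths.
 */
static void xmit(char *buf, int pad_fails, int other_fails)
{
	if (pad_fails) {
		free(buf);	/* models __skb_pad() freeing on failure */
		goto out;	/* must not free again below */
	}
	if (other_fails)
		goto drop;

	/* ... transmit ... */
	free(buf);
	return;
drop:
	free(buf);
out:
	tx_dropped++;
}

int main(void)
{
	xmit(malloc(16), 1, 0);	/* pad failure: freed once, counted once */
	printf("tx_dropped=%lu\n", tx_dropped);
	return 0;
}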


@@ -285,7 +285,7 @@ enum iwl_reg_capa_flags_v2 {
 	REG_CAPA_V2_MCS_9_ALLOWED	= BIT(6),
 	REG_CAPA_V2_WEATHER_DISABLED	= BIT(7),
 	REG_CAPA_V2_40MHZ_ALLOWED	= BIT(8),
-	REG_CAPA_V2_11AX_DISABLED	= BIT(13),
+	REG_CAPA_V2_11AX_DISABLED	= BIT(10),
 };
 
 /*


@@ -5,7 +5,7 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2018 - 2020 Intel Corporation
+ * Copyright(c) 2018 - 2021 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -122,15 +122,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 				 const struct fw_img *fw)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
-		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
-		      u32_encode_bits(250,
-				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
-		      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
-		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-				      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
-		      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
 	struct iwl_context_info_gen3 *ctxt_info_gen3;
 	struct iwl_prph_scratch *prph_scratch;
 	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -264,26 +255,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
 		    CSR_AUTO_FUNC_BOOT_ENA);
 
-	/*
-	 * To workaround hardware latency issues during the boot process,
-	 * initialize the LTR to ~250 usec (see ltr_val above).
-	 * The firmware initializes this again later (to a smaller value).
-	 */
-	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
-	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
-	    !trans->trans_cfg->integrated) {
-		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
-	} else if (trans->trans_cfg->integrated &&
-		   trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
-		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
-		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
-	}
-
-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
-	else
-		iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
-
 	return 0;
 
 err_free_ctxt_info:


@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2020 Intel Corporation
+ * Copyright(c) 2018 - 2021 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -288,7 +288,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 
 	/* kick FW self load */
 	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
-	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
 
 	/* Context info will be released upon alive or failure to get one */


@@ -281,6 +281,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 	mutex_unlock(&trans_pcie->mutex);
 }
 
+static void iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+	u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+		      u32_encode_bits(250,
+				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+		      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+				      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+		      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+	/*
+	 * To workaround hardware latency issues during the boot process,
+	 * initialize the LTR to ~250 usec (see ltr_val above).
+	 * The firmware initializes this again later (to a smaller value).
+	 */
+	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+	    !trans->trans_cfg->integrated) {
+		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+	} else if (trans->trans_cfg->integrated &&
+		   trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+	}
+}
+
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 				 const struct fw_img *fw, bool run_in_rfkill)
 {
@@ -347,6 +375,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 	if (ret)
 		goto out;
 
+	iwl_pcie_set_ltr(trans);
+
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+	else
+		iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
 	/* re-check RF-Kill state since we may have missed the interrupt */
 	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
 	if (hw_rfkill && !run_in_rfkill)


@@ -1262,7 +1262,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
 DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_gpios(struct device_node *np,
+				       const char *prop_name, int index)
+{
+	if (!strcmp_suffix(prop_name, ",nr-gpios"))
+		return NULL;
+
+	return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+				       "#gpio-cells");
+}
 
 static struct device_node *parse_iommu_maps(struct device_node *np,
 					    const char *prop_name, int index)


@@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
 	return ret;
 }
 
+/**
+ * cec_add_elem - Add an element to the CEC array.
+ * @pfn:	page frame number to insert
+ *
+ * Return values:
+ * - <0:	on error
+ * -  0:	on success
+ * - >0:	when the inserted pfn was offlined
+ */
 static int cec_add_elem(u64 pfn)
 {
 	struct ce_array *ca = &ce_arr;
+	int count, err, ret = 0;
 	unsigned int to = 0;
-	int count, ret = 0;
 
 	/*
 	 * We can be called very early on the identify_cpu() path where we are
@@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn)
 	if (ca->n == MAX_ELEMS)
 		WARN_ON(!del_lru_elem_unlocked(ca));
 
-	ret = find_elem(ca, pfn, &to);
-	if (ret < 0) {
+	err = find_elem(ca, pfn, &to);
+	if (err < 0) {
 		/*
 		 * Shift range [to-end] to make room for one more element.
 		 */
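The cleanup keeps find_elem()'s status in a separate err so it cannot clobber ret, whose documented return values (<0 error, 0 success, >0 pfn offlined) are produced later in the function. A minimal sketch of the pattern; lookup() is a hypothetical stand-in for find_elem():

#include <stdio.h>

static int lookup(unsigned long pfn, unsigned int *to)
{
	(void)pfn;
	*to = 0;
	return -1;	/* pretend the pfn was not found */
}

static int add_elem(unsigned long pfn)
{
	int err, ret = 0;
	unsigned int to;

	err = lookup(pfn, &to);
	if (err < 0) {
		/* not found: would shift and insert at 'to' */
	}

	/* ret is computed separately, e.g. set >0 if the pfn gets offlined */
	return ret;
}

int main(void)
{
	printf("%d\n", add_elem(0x1234));	/* prints 0, not -1 */
	return 0;
}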


@@ -124,7 +124,7 @@ static const struct regulator_ops vid_ops = {
 
 static const struct regulator_desc regulators[] = {
 	BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
-		      0x80, 600000, 10000, 0x3c),
+		      0x6f, 600000, 10000, 0x3c),
 	BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
 		      16, 1625000, 25000, 0),
 	BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -133,7 +133,7 @@ static const struct regulator_desc regulators[] = {
 		      11, 2800000, 100000, 0),
 	BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
 		      BD9571MWV_DVFS_MONIVDAC, 0x7f,
-		      0x80, 600000, 10000, 0x3c),
+		      0x6f, 600000, 10000, 0x3c),
 };
 
 #ifdef CONFIG_PM_SLEEP


@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
 	memset_io(base, 0, resource_size(&imem));
 
 	_reloc.base = base;
-	_reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+	_reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
 
 	return 0;
 }


@@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 		PM8001_EVENT_LOG_SIZE;
 	pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01;
 	pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
-	for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+	for (i = 0; i < pm8001_ha->max_q_num; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
 			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
@@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 		pm8001_ha->inbnd_q_tbl[i].producer_idx		= 0;
 		pm8001_ha->inbnd_q_tbl[i].consumer_index	= 0;
 	}
-	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+	for (i = 0; i < pm8001_ha->max_q_num; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
 			PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
@@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 	read_outbnd_queue_table(pm8001_ha);
 	/* update main config table ,inbound table and outbound table */
 	update_main_config_table(pm8001_ha);
-	for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+	for (i = 0; i < pm8001_ha->max_q_num; i++)
 		update_inbnd_queue_table(pm8001_ha, i);
-	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+	for (i = 0; i < pm8001_ha->max_q_num; i++)
 		update_outbnd_queue_table(pm8001_ha, i);
 	/* 8081 controller donot require these operations */
 	if (deviceid != 0x8081 && deviceid != 0x0042) {


@@ -6343,37 +6343,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request *req;
 	unsigned long flags;
-	int free_slot, task_tag, err;
+	int task_tag, err;
 
 	/*
-	 * Get free slot, sleep if slots are unavailable.
-	 * Even though we use wait_event() which sleeps indefinitely,
-	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+	 * blk_get_request() is used here only to get a free tag.
 	 */
 	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
 	req->end_io_data = &wait;
-	free_slot = req->tag;
-	WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
 	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
-	task_tag = hba->nutrs + free_slot;
+	blk_mq_start_request(req);
 
+	task_tag = req->tag;
 	treq->req_header.dword_0 |= cpu_to_be32(task_tag);
 
-	memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
-	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
 
 	/* send command to the controller */
-	__set_bit(free_slot, &hba->outstanding_tasks);
+	__set_bit(task_tag, &hba->outstanding_tasks);
 
 	/* Make sure descriptors are ready before ringing the task doorbell */
 	wmb();
 
-	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
 	/* Make sure that doorbell is committed immediately */
 	wmb();
 
@@ -6393,24 +6390,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
 		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
 				__func__, tm_function);
-		if (ufshcd_clear_tm_cmd(hba, free_slot))
-			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
-					__func__, free_slot);
+		if (ufshcd_clear_tm_cmd(hba, task_tag))
+			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+					__func__, task_tag);
 		err = -ETIMEDOUT;
 	} else {
 		err = 0;
-		memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
 
 		ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
 	}
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	__clear_bit(free_slot, &hba->outstanding_tasks);
+	__clear_bit(task_tag, &hba->outstanding_tasks);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ufshcd_release(hba);
 	blk_put_request(req);
+	ufshcd_release(hba);
 
 	return err;
 }


@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
 	__be32 tag;
 	struct qm_fd fd;
 	u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
 #define QM_EQCR_VERB_VBIT		0x80
 #define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
 #define QM_EQCR_VERB_CMD_ENQUEUE	0x01


@@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
 	target_get_sess_cmd(&cmd->se_cmd, true);
 
+	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
 	cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
 	if (cmd->sense_reason) {
 		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (cmd->sense_reason)
 		goto attach_cmd;
 
-	/* only used for printks or comparing with ->ref_task_tag */
-	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
 	cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
 	if (cmd->sense_reason)
 		goto attach_cmd;


@@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
 	ret = tb_retimer_nvm_add(rt);
 	if (ret) {
 		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
-		device_del(&rt->dev);
+		device_unregister(&rt->dev);
 		return ret;
 	}
 
@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
  */
 int tb_retimer_scan(struct tb_port *port)
 {
-	u32 status[TB_MAX_RETIMER_INDEX] = {};
+	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
 	int ret, i, last_idx = 0;
 
 	if (!port->cap_usb4)


@@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 
 		dev_info(dev, "stub up\n");
 
+		mutex_lock(&sdev->ud.sysfs_lock);
 		spin_lock_irq(&sdev->ud.lock);
 
 		if (sdev->ud.status != SDEV_ST_AVAILABLE) {
@@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 		tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
 		if (IS_ERR(tcp_rx)) {
 			sockfd_put(socket);
-			return -EINVAL;
+			goto unlock_mutex;
 		}
 		tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
 		if (IS_ERR(tcp_tx)) {
 			kthread_stop(tcp_rx);
 			sockfd_put(socket);
-			return -EINVAL;
+			goto unlock_mutex;
 		}
 
 		/* get task structs now */
@@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 		wake_up_process(sdev->ud.tcp_rx);
 		wake_up_process(sdev->ud.tcp_tx);
 
+		mutex_unlock(&sdev->ud.sysfs_lock);
+
 	} else {
 		dev_info(dev, "stub down\n");
 
@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 		spin_unlock_irq(&sdev->ud.lock);
 
 		usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
+		mutex_unlock(&sdev->ud.sysfs_lock);
 	}
 
 	return count;
@@ -130,6 +134,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 	sockfd_put(socket);
 err:
 	spin_unlock_irq(&sdev->ud.lock);
+unlock_mutex:
+	mutex_unlock(&sdev->ud.sysfs_lock);
 	return -EINVAL;
 }
 static DEVICE_ATTR_WO(usbip_sockfd);
@@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
 	sdev->ud.side		= USBIP_STUB;
 	sdev->ud.status		= SDEV_ST_AVAILABLE;
 	spin_lock_init(&sdev->ud.lock);
+	mutex_init(&sdev->ud.sysfs_lock);
 	sdev->ud.tcp_socket	= NULL;
 	sdev->ud.sockfd		= -1;


@@ -263,6 +263,9 @@ struct usbip_device {
 	/* lock for status */
 	spinlock_t lock;
 
+	/* mutex for synchronizing sysfs store paths */
+	struct mutex sysfs_lock;
+
 	int sockfd;
 	struct socket *tcp_socket;
