Merge android12-5.10.7+ (43edfc8) into msm-5.10

* refs/heads/tmp-43edfc8:
  FROMLIST: fs/buffer.c: Revoke LRU when trying to drop buffers
  ANDROID: dm-user: fix typo in channel_free
  ANDROID: PM / Domains: add vendor_hook to disallow domain idle state
  ANDROID: sched: Export available_idle_cpu
  ANDROID: sched: Export sched_domains_mutex for lockdep
  ANDROID: iommu/dma: Add support for DMA_ATTR_SYS_CACHE_ONLY_NWA
  ANDROID: iommu/dma: Add support for DMA_ATTR_SYS_CACHE_ONLY
  ANDROID: arm64: Add support for system cache memory type
  ANDROID: ftrace: vendor hook for ftrace dump on oops
  ANDROID: irqdesc: Export symbol for vendor modules
  ANDROID: fix 0-day bot build-break in fair.c
  ANDROID: cpuidle: export cpuidle_driver_state_disabled
  Linux 5.10.7
  scsi: target: Fix XCOPY NAA identifier lookup
  rtlwifi: rise completion at the last step of firmware callback
  xsk: Fix memory leak for failed bind
  KVM: x86: fix shift out of bounds reported by UBSAN
  x86/mtrr: Correct the range check before performing MTRR type lookups
  dmaengine: idxd: off by one in cleanup code
  netfilter: nft_dynset: report EOPNOTSUPP on missing set feature
  netfilter: xt_RATEEST: reject non-null terminated string from userspace
  netfilter: ipset: fix shift-out-of-bounds in htable_bits()
  netfilter: x_tables: Update remaining dereference to RCU
  ARM: dts: OMAP3: disable AES on N950/N9
  net/mlx5e: Fix SWP offsets when vlan inserted by driver
  bcache: introduce BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE for large bucket
  bcache: check unsupported feature sets for bcache register
  bcache: fix typo from SUUP to SUPP in features.h
  drm/i915: clear the gpu reloc batch
  drm/i915: clear the shadow batch
  arm64: link with -z norelro for LLD or aarch64-elf
  dmabuf: fix use-after-free of dmabuf's file->f_inode
  Revert "device property: Keep secondary firmware node secondary by type"
  btrfs: send: fix wrong file path when there is an inode with a pending rmdir
  btrfs: qgroup: don't try to wait flushing if we're already holding a transaction
  iommu/vt-d: Move intel_iommu info from struct intel_svm to struct intel_svm_dev
  ALSA: hda/realtek: Add two "Intel Reference board" SSID in the ALC256.
  ALSA: hda/realtek: Enable mute and micmute LED on HP EliteBook 850 G7
  ALSA: hda/realtek: Add mute LED quirk for more HP laptops
  ALSA: hda/realtek - Fix speaker volume control on Lenovo C940
  ALSA: hda/conexant: add a new hda codec CX11970
  ALSA: hda/via: Fix runtime PM for Clevo W35xSS
  blk-iocost: fix NULL iocg deref from racing against initialization
  x86/resctrl: Don't move a task to the same resource group
  x86/resctrl: Use an IPI instead of task_work_add() to update PQR_ASSOC MSR
  KVM: x86/mmu: Ensure TDP MMU roots are freed after yield
  kvm: check tlbs_dirty directly
  KVM: x86/mmu: Get root level from walkers when retrieving MMIO SPTE
  KVM: x86/mmu: Use -1 to flag an undefined spte in get_mmio_spte()
  x86/mm: Fix leak of pmd ptlock
  mm: make wait_on_page_writeback() wait for multiple pending writebacks
  hwmon: (amd_energy) fix allocation of hwmon_channel_info config
  USB: serial: keyspan_pda: remove unused variable
  usb: gadget: configfs: Fix use-after-free issue with udc_name
  usb: gadget: configfs: Preserve function ordering after bind failure
  usb: gadget: Fix spinlock lockup on usb_function_deactivate
  USB: gadget: legacy: fix return error code in acm_ms_bind()
  usb: gadget: u_ether: Fix MTU size mismatch with RX packet size
  usb: gadget: function: printer: Fix a memory leak for interface descriptor
  usb: gadget: f_uac2: reset wMaxPacketSize
  USB: Gadget: dummy-hcd: Fix shift-out-of-bounds bug
  usb: gadget: select CONFIG_CRC32
  ALSA: usb-audio: Fix UBSAN warnings for MIDI jacks
  USB: usblp: fix DMA to stack
  USB: yurex: fix control-URB timeout handling
  USB: serial: option: add Quectel EM160R-GL
  USB: serial: option: add LongSung M5710 module support
  USB: serial: iuu_phoenix: fix DMA from stack
  usb: uas: Add PNY USB Portable SSD to unusual_uas
  usb: usbip: vhci_hcd: protect shift size
  USB: xhci: fix U1/U2 handling for hardware with XHCI_INTEL_HOST quirk set
  usb: chipidea: ci_hdrc_imx: add missing put_device() call in usbmisc_get_init_data()
  usb: dwc3: ulpi: Fix USB2.0 HS/FS/LS PHY suspend regression
  usb: dwc3: ulpi: Replace CPU-based busyloop with Protocol-based one
  usb: dwc3: ulpi: Use VStsDone to detect PHY regs access completion
  usb: dwc3: gadget: Clear wait flag on dequeue
  usb: dwc3: gadget: Restart DWC3 gadget when enabling pullup
  usb: dwc3: meson-g12a: disable clk on error handling path in probe
  usb: typec: intel_pmc_mux: Configure HPD first for HPD+IRQ request
  USB: cdc-wdm: Fix use after free in service_outstanding_interrupt().
  USB: cdc-acm: blacklist another IR Droid device
  usb: gadget: enable super speed plus
  staging: mt7621-dma: Fix a resource leak in an error handling path
  Staging: comedi: Return -EFAULT if copy_to_user() fails
  powerpc: Handle .text.{hot,unlikely}.* in linker script
  crypto: asym_tpm: correct zero out potential secrets
  crypto: ecdh - avoid buffer overflow in ecdh_set_secret()
  scsi: block: Do not accept any requests while suspended
  scsi: block: Remove RQF_PREEMPT and BLK_MQ_REQ_PREEMPT
  Bluetooth: revert: hci_h5: close serdev device and free hu in h5_close
  kbuild: don't hardcode depmod path
  scsi: ufs: Clear UAC for FFU and RPMB LUNs
  depmod: handle the case of /sbin/depmod without /sbin in PATH
  lib/genalloc: fix the overflow when size is too big
  local64.h: make <asm/local64.h> mandatory
  scsi: core: Only process PM requests if rpm_status != RPM_ACTIVE
  scsi: scsi_transport_spi: Set RQF_PM for domain validation commands
  scsi: ide: Mark power management requests with RQF_PM instead of RQF_PREEMPT
  scsi: ide: Do not set the RQF_PREEMPT flag for sense requests
  scsi: block: Introduce BLK_MQ_REQ_PM
  scsi: ufs-pci: Enable UFSHCD_CAP_RPM_AUTOSUSPEND for Intel controllers
  scsi: ufs-pci: Fix recovery from hibernate exit errors for Intel controllers
  scsi: ufs-pci: Ensure UFS device is in PowerDown mode for suspend-to-disk ->poweroff()
  scsi: ufs-pci: Fix restore from S4 for Intel controllers
  scsi: ufs: Fix wrong print message in dev_err()
  workqueue: Kick a worker based on the actual activation of delayed works
  block: add debugfs stanza for QUEUE_FLAG_NOWAIT
  selftests/vm: fix building protection keys test
  stmmac: intel: Add PCI IDs for TGL-H platform
  selftests: mlxsw: Set headroom size of correct port
  net: usb: qmi_wwan: add Quectel EM160R-GL
  ibmvnic: fix: NULL pointer dereference.
  CDC-NCM: remove "connected" log message
  net: dsa: lantiq_gswip: Fix GSWIP_MII_CFG(p) register access
  net: dsa: lantiq_gswip: Enable GSWIP_MII_CFG_EN also for internal PHYs
  r8169: work around power-saving bug on some chip versions
  vhost_net: fix ubuf refcount incorrectly when sendmsg fails
  bareudp: Fix use of incorrect min_headroom size
  bareudp: set NETIF_F_LLTX flag
  net: hdlc_ppp: Fix issues when mod_timer is called while timer is running
  erspan: fix version 1 check in gre_parse_header()
  net: hns: fix return value check in __lb_other_process()
  net: sched: prevent invalid Scell_log shift count
  ipv4: Ignore ECN bits for fib lookups in fib_compute_spec_dst()
  bnxt_en: Fix AER recovery.
  net: mvpp2: fix pkt coalescing int-threshold configuration
  bnxt_en: Check TQM rings for maximum supported value.
  e1000e: Export S0ix flags to ethtool
  Revert "e1000e: disable s0ix entry and exit flows for ME systems"
  e1000e: bump up timeout to wait when ME un-configures ULP mode
  e1000e: Only run S0ix flows if shutdown succeeded
  tun: fix return value when the number of iovs exceeds MAX_SKB_FRAGS
  net: ethernet: ti: cpts: fix ethtool output when no ptp_clock registered
  net-sysfs: take the rtnl lock when accessing xps_rxqs_map and num_tc
  net-sysfs: take the rtnl lock when storing xps_rxqs
  net-sysfs: take the rtnl lock when accessing xps_cpus_map and num_tc
  net-sysfs: take the rtnl lock when storing xps_cpus
  net: ethernet: Fix memleak in ethoc_probe
  net/ncsi: Use real net-device for response handler
  virtio_net: Fix recursive call to cpus_read_lock()
  qede: fix offload for IPIP tunnel packets
  net: ethernet: mvneta: Fix error handling in mvneta_probe
  ibmvnic: continue fatal error reset after passive init
  ibmvnic: fix login buffer memory leak
  net: stmmac: dwmac-meson8b: ignore the second clock input
  net: mvpp2: Fix GoP port 3 Networking Complex Control configurations
  atm: idt77252: call pci_disable_device() on error path
  ionic: account for vlan tag len in rx buffer len
  ethernet: ucc_geth: set dev->max_mtu to 1518
  ethernet: ucc_geth: fix use-after-free in ucc_geth_remove()
  net: systemport: set dev->max_mtu to UMAC_MAX_MTU_SIZE
  net: mvpp2: prs: fix PPPoE with ipv6 packet parse
  net: mvpp2: Add TCAM entry to drop flow control pause frames
  net/sched: sch_taprio: ensure to reset/destroy all child qdiscs
  iavf: fix double-release of rtnl_lock
  i40e: Fix Error I40E_AQ_RC_EINVAL when removing VFs
  Linux 5.10.6
  mwifiex: Fix possible buffer overflows in mwifiex_cmd_802_11_ad_hoc_start
  exec: Transform exec_update_mutex into a rw_semaphore
  rwsem: Implement down_read_interruptible
  rwsem: Implement down_read_killable_nested
  perf: Break deadlock involving exec_update_mutex
  fuse: fix bad inode
  RDMA/siw,rxe: Make emulated devices virtual in the device tree
  RDMA/core: remove use of dma_virt_ops
  scsi: ufs: Re-enable WriteBooster after device reset
  scsi: ufs: Allow an error return value from ->device_reset()
  drm/i915/tgl: Fix Combo PHY DPLL fractional divider for 38.4MHz ref clock
  ALSA: hda/hdmi: Fix incorrect mutex unlock in silent_stream_disable()
  ALSA: hda/realtek - Modify Dell platform name
  Bluetooth: Fix attempting to set RPA timeout when unsupported
  kdev_t: always inline major/minor helper functions
  dt-bindings: rtc: add reset-source property
  rtc: pcf2127: only use watchdog when explicitly available
  rtc: pcf2127: move watchdog initialisation to a separate function
  Revert "mtd: spinand: Fix OOB read"
  Revert "drm/amd/display: Fix memory leaks in S3 resume"

Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/rtc/rtc.yaml
	drivers/usb/dwc3/gadget.c
	drivers/usb/gadget/function/f_uac2.c
	fs/buffer.c
	include/linux/dma-mapping.h

Change-Id: Ia31d8db4ad2181df65e2c455c0c252563b4c004e
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
commit 9eeca3e9c5
Author: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Date:   2021-01-14 09:46:10 -08:00

223 changed files with 13459 additions and 12644 deletions

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 5
SUBLEVEL = 7
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@@ -451,7 +451,7 @@ LEX = flex
YACC = bison
AWK = awk
INSTALLKERNEL := installkernel
DEPMOD = /sbin/depmod
DEPMOD = depmod
PERL = perl
PYTHON = python
PYTHON3 = python3

[File diff suppressed because it is too large]

@@ -1 +0,0 @@
#include <asm-generic/local64.h>

@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h

@@ -494,3 +494,11 @@ bluetooth {
clock-names = "sysclk";
};
};
&aes1_target {
status = "disabled";
};
&aes2_target {
status = "disabled";
};

@@ -2,7 +2,6 @@
generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += local64.h
generic-y += parport.h
generated-y += mach-types.h

@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=--no-undefined -X -z norelro
LDFLAGS_vmlinux :=--no-undefined -X
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -110,16 +110,20 @@ KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE := aarch64
endif
ifeq ($(CONFIG_LD_IS_LLD), y)
KBUILD_LDFLAGS += -z norelro
endif
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)

@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h

@@ -138,6 +138,7 @@
#define MT_DEVICE_nGnRnE 4
#define MT_DEVICE_nGnRE 5
#define MT_DEVICE_GRE 6
#define MT_NORMAL_iNC_oWB 7
/*
* Memory types for Stage-2 translation

@@ -496,6 +496,15 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
/*
* Mark the prot value as outer cacheable and inner non-cacheable. Non-coherent
* devices on a system with support for a system or last level cache use these
* attributes to cache allocations in the system cache.
*/
#define pgprot_syscached(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
PTE_ATTRINDX(MT_NORMAL_iNC_oWB) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,

@@ -631,6 +631,7 @@
#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
#define MAIR_ATTR_NORMAL UL(0xff)
#define MAIR_ATTR_MASK UL(0xff)
#define MAIR_ATTR_NORMAL_iNC_oWB UL(0xf4)
/* Position the attr at the correct index */
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))

@@ -56,7 +56,8 @@
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_iNC_oWB, MT_NORMAL_iNC_oWB))
#ifdef CONFIG_CPU_PM
/**

@@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += qrwlock.h
generic-y += user.h
generic-y += vmlinux.lds.h

@@ -2,7 +2,6 @@
generic-y += asm-offsets.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += spinlock.h

@@ -2,5 +2,4 @@
generic-y += extable.h
generic-y += iomap.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h

@@ -1 +0,0 @@
#include <asm-generic/local64.h>

@@ -2,6 +2,5 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += spinlock.h

@@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += syscalls.h

@@ -6,7 +6,6 @@ generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += qrwlock.h

@@ -4,6 +4,5 @@ generic-y += cmpxchg.h
generic-y += export.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += parport.h
generic-y += user.h

@@ -3,6 +3,5 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += user.h

@@ -5,7 +5,6 @@ generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += vtime.h

@@ -85,7 +85,7 @@ SECTIONS
ALIGN_FUNCTION();
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
*(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
*(.tramp.ftrace.text);
#endif

@@ -3,6 +3,5 @@ generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += user.h
generic-y += vmlinux.lds.h

@@ -7,5 +7,4 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h

@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += parport.h

@@ -6,5 +6,4 @@ generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h

@@ -1 +0,0 @@
#include <asm-generic/local64.h>

@@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
*repeat = 0;
*uniform = 1;
/* Make end inclusive instead of exclusive */
end--;
prev_match = MTRR_TYPE_INVALID;
for (i = 0; i < num_var_ranges; ++i) {
unsigned short start_state, end_state, inclusive;
@@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
int repeat;
u64 partial_end;
/* Make end inclusive instead of exclusive */
end--;
if (!mtrr_state_set)
return MTRR_TYPE_INVALID;
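
The hunks above move the end-- conversion out of the variable-range walker and into mtrr_type_lookup() itself, so every range check sees an inclusive end. A minimal user-space illustration (not the kernel code) of the off-by-one the up-front conversion avoids:

#include <stdio.h>
#include <stdbool.h>

/* Does [start, end] lie entirely inside [lo, hi] (all bounds inclusive)? */
static bool range_inside(unsigned long start, unsigned long end,
                         unsigned long lo, unsigned long hi)
{
    return start >= lo && end <= hi;
}

int main(void)
{
    unsigned long start = 0xf0000, end_excl = 0x100000; /* [0xf0000, 0x100000) */

    /* Exclusive end compared against an inclusive limit: false negative. */
    printf("exclusive: %d\n", range_inside(start, end_excl, 0, 0xfffff));
    /* Converting once, up front, keeps every later check consistent. */
    printf("inclusive: %d\n", range_inside(start, end_excl - 1, 0, 0xfffff));
    return 0;
}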

@@ -525,89 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
kfree(rdtgrp);
}
struct task_move_callback {
struct callback_head work;
struct rdtgroup *rdtgrp;
};
static void move_myself(struct callback_head *head)
static void _update_task_closid_rmid(void *task)
{
struct task_move_callback *callback;
struct rdtgroup *rdtgrp;
callback = container_of(head, struct task_move_callback, work);
rdtgrp = callback->rdtgrp;
/*
* If resource group was deleted before this task work callback
* was invoked, then assign the task to root group and free the
* resource group.
* If the task is still current on this CPU, update PQR_ASSOC MSR.
* Otherwise, the MSR is updated when the task is scheduled in.
*/
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
current->closid = 0;
current->rmid = 0;
rdtgroup_remove(rdtgrp);
}
if (task == current)
resctrl_sched_in();
}
if (unlikely(current->flags & PF_EXITING))
goto out;
preempt_disable();
/* update PQR_ASSOC MSR to make resource group go into effect */
resctrl_sched_in();
preempt_enable();
out:
kfree(callback);
static void update_task_closid_rmid(struct task_struct *t)
{
if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
else
_update_task_closid_rmid(t);
}
static int __rdtgroup_move_task(struct task_struct *tsk,
struct rdtgroup *rdtgrp)
{
struct task_move_callback *callback;
int ret;
callback = kzalloc(sizeof(*callback), GFP_KERNEL);
if (!callback)
return -ENOMEM;
callback->work.func = move_myself;
callback->rdtgrp = rdtgrp;
/* If the task is already in rdtgrp, no need to move the task. */
if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
tsk->rmid == rdtgrp->mon.rmid) ||
(rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
tsk->closid == rdtgrp->mon.parent->closid))
return 0;
/*
* Take a refcount, so rdtgrp cannot be freed before the
* callback has been invoked.
* Set the task's closid/rmid before the PQR_ASSOC MSR can be
* updated by them.
*
* For ctrl_mon groups, move both closid and rmid.
* For monitor groups, can move the tasks only from
* their parent CTRL group.
*/
atomic_inc(&rdtgrp->waitcount);
ret = task_work_add(tsk, &callback->work, TWA_RESUME);
if (ret) {
/*
* Task is exiting. Drop the refcount and free the callback.
* No need to check the refcount as the group cannot be
* deleted before the write function unlocks rdtgroup_mutex.
*/
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
rdt_last_cmd_puts("Task exited\n");
} else {
/*
* For ctrl_mon groups move both closid and rmid.
* For monitor groups, can move the tasks only from
* their parent CTRL group.
*/
if (rdtgrp->type == RDTCTRL_GROUP) {
tsk->closid = rdtgrp->closid;
if (rdtgrp->type == RDTCTRL_GROUP) {
tsk->closid = rdtgrp->closid;
tsk->rmid = rdtgrp->mon.rmid;
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
} else if (rdtgrp->type == RDTMON_GROUP) {
if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk->rmid = rdtgrp->mon.rmid;
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
ret = -EINVAL;
}
} else {
rdt_last_cmd_puts("Can't move task to different control group\n");
return -EINVAL;
}
}
return ret;
/*
* Ensure the task's closid and rmid are written before determining if
* the task is current that will decide if it will be interrupted.
*/
barrier();
/*
* By now, the task's closid and rmid are set. If the task is current
* on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
* group go into effect. If the task is not current, the MSR will be
* updated when the task is scheduled in.
*/
update_task_closid_rmid(tsk);
return 0;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
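
The rewrite above replaces deferred task_work with a direct IPI: write the task's closid/rmid first, then interrupt only a CPU where the task is currently running. A compressed user-space sketch of that control flow, with the kernel primitives (task_curr(), smp_call_function_single(), resctrl_sched_in()) stubbed out for illustration:

#include <stdbool.h>
#include <stdio.h>

struct task { int closid; int rmid; bool running; };

/* Stub standing in for resctrl_sched_in(), which rewrites PQR_ASSOC. */
static void update_pqr_assoc(struct task *t)
{
    printf("PQR_ASSOC <- closid=%d rmid=%d\n", t->closid, t->rmid);
}

static void move_task(struct task *t, int closid, int rmid)
{
    /* Publish the new IDs first ... */
    t->closid = closid;
    t->rmid = rmid;
    /* ... then interrupt only a currently running task. In the kernel
     * this is smp_call_function_single(task_cpu(t), ..., 1); a task that
     * is not running picks the IDs up at its next context switch. */
    if (t->running)
        update_pqr_assoc(t);
}

int main(void)
{
    struct task t = { 0, 0, true };
    move_task(&t, 1, 7);
    return 0;
}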

@@ -49,7 +49,7 @@ static inline u64 rsvd_bits(int s, int e)
if (e < s)
return 0;
return ((1ULL << (e - s + 1)) - 1) << s;
return ((2ULL << (e - s)) - 1) << s;
}
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
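
Why the small rewrite matters: a minimal user-space check (not kernel code) of the new mask helper:

#include <assert.h>
#include <stdint.h>

/* New form from the hunk above. */
static uint64_t rsvd_bits(int s, int e)
{
    if (e < s)
        return 0;
    return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
    /* For s = 0, e = 63 the old form evaluated 1ULL << 64: undefined
     * behaviour in C, flagged by UBSAN. The rewrite shifts by at most 63
     * and still yields the intended all-ones mask. */
    assert(rsvd_bits(0, 63) == ~0ULL);
    assert(rsvd_bits(8, 11) == 0xf00);
    return 0;
}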

@@ -3485,16 +3485,16 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
struct kvm_shadow_walk_iterator iterator;
int leaf = vcpu->arch.mmu->root_level;
int leaf = -1;
u64 spte;
walk_shadow_page_lockless_begin(vcpu);
for (shadow_walk_init(&iterator, vcpu, addr);
for (shadow_walk_init(&iterator, vcpu, addr),
*root_level = iterator.level;
shadow_walk_okay(&iterator);
__shadow_walk_next(&iterator, spte)) {
leaf = iterator.level;
@@ -3504,7 +3504,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
if (!is_shadow_present_pte(spte))
break;
}
walk_shadow_page_lockless_end(vcpu);
@@ -3517,9 +3516,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
u64 sptes[PT64_ROOT_MAX_LEVEL];
struct rsvd_bits_validate *rsvd_check;
int root = vcpu->arch.mmu->shadow_root_level;
int leaf;
int level;
int root, leaf, level;
bool reserved = false;
if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
@@ -3528,9 +3525,14 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
}
if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
leaf = get_walk(vcpu, addr, sptes);
leaf = get_walk(vcpu, addr, sptes, &root);
if (unlikely(leaf < 0)) {
*sptep = 0ull;
return reserved;
}
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

@@ -42,7 +42,48 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}
#define for_each_tdp_mmu_root(_kvm, _root) \
static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
if (kvm_mmu_put_root(kvm, root))
kvm_tdp_mmu_free_root(kvm, root);
}
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
struct kvm_mmu_page *root)
{
lockdep_assert_held(&kvm->mmu_lock);
if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
return false;
kvm_mmu_get_root(kvm, root);
return true;
}
static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
struct kvm_mmu_page *root)
{
struct kvm_mmu_page *next_root;
next_root = list_next_entry(root, link);
tdp_mmu_put_root(kvm, root);
return next_root;
}
/*
* Note: this iterator gets and puts references to the roots it iterates over.
* This makes it safe to release the MMU lock and yield within the loop, but
* if exiting the loop early, the caller must drop the reference to the most
* recent root. (Unless keeping a live reference is desirable.)
*/
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \
typeof(*_root), link); \
tdp_mmu_next_root_valid(_kvm, _root); \
_root = tdp_mmu_next_root(_kvm, _root))
#define for_each_tdp_mmu_root(_kvm, _root) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
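
A minimal user-space sketch (plain refcounts instead of kvm_mmu_get_root()/kvm_tdp_mmu_free_root(), no locking) of the pin-before-visit, advance-before-unpin discipline the yield-safe iterator above implements:

#include <stdio.h>

struct root { int refcount; int id; struct root *next; };

static struct root c = { 1, 3, NULL }, b = { 1, 2, &c }, a = { 1, 1, &b };

static void get(struct root *r) { r->refcount++; }
static void put(struct root *r)
{
    if (--r->refcount == 0)
        printf("freeing root %d\n", r->id);
}

int main(void)
{
    struct root *r = &a;

    while (r) {
        struct root *next;

        get(r);                           /* pin the current root */
        printf("visit root %d\n", r->id); /* body may drop the lock here */
        next = r->next;                   /* pick the successor first ... */
        put(r);                           /* ... then drop the pin */
        r = next;
    }
    return 0;
}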
@@ -439,18 +480,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
struct kvm_mmu_page *root;
bool flush = false;
for_each_tdp_mmu_root(kvm, root) {
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
for_each_tdp_mmu_root_yield_safe(kvm, root)
flush |= zap_gfn_range(kvm, root, start, end, true);
kvm_mmu_put_root(kvm, root);
}
return flush;
}
@@ -609,13 +641,7 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
int ret = 0;
int as_id;
for_each_tdp_mmu_root(kvm, root) {
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
for_each_tdp_mmu_root_yield_safe(kvm, root) {
as_id = kvm_mmu_page_as_id(root);
slots = __kvm_memslots(kvm, as_id);
kvm_for_each_memslot(memslot, slots) {
@@ -637,8 +663,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
ret |= handler(kvm, memslot, root, gfn_start,
gfn_end, data);
}
kvm_mmu_put_root(kvm, root);
}
return ret;
@@ -826,21 +850,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
int root_as_id;
bool spte_set = false;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
kvm_mmu_put_root(kvm, root);
}
return spte_set;
@@ -894,21 +910,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
kvm_mmu_put_root(kvm, root);
}
return spte_set;
@@ -1017,21 +1025,13 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
int root_as_id;
bool spte_set = false;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
kvm_mmu_put_root(kvm, root);
}
return spte_set;
}
@@ -1077,21 +1077,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
struct kvm_mmu_page *root;
int root_as_id;
for_each_tdp_mmu_root(kvm, root) {
for_each_tdp_mmu_root_yield_safe(kvm, root) {
root_as_id = kvm_mmu_page_as_id(root);
if (root_as_id != slot->as_id)
continue;
/*
* Take a reference on the root so that it cannot be freed if
* this thread releases the MMU lock and yields in this loop.
*/
kvm_mmu_get_root(kvm, root);
zap_collapsible_spte_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
kvm_mmu_put_root(kvm, root);
}
}
@@ -1148,12 +1140,15 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
* Return the level of the lowest level SPTE added to sptes.
* That SPTE may be non-present.
*/
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level)
{
struct tdp_iter iter;
struct kvm_mmu *mmu = vcpu->arch.mmu;
int leaf = vcpu->arch.mmu->shadow_root_level;
gfn_t gfn = addr >> PAGE_SHIFT;
int leaf = -1;
*root_level = vcpu->arch.mmu->shadow_root_level;
tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
leaf = iter.level;

@@ -44,5 +44,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
#endif /* __KVM_X86_MMU_TDP_MMU_H */

@@ -829,6 +829,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
}
free_page((unsigned long)pmd_sv);
pgtable_pmd_page_dtor(virt_to_page(pmd));
free_page((unsigned long)pmd);
return 1;

@@ -2,7 +2,6 @@
generated-y += syscall_table.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += param.h
generic-y += qrwlock.h

@@ -18,6 +18,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -424,11 +425,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
* @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
* @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
const bool pm = flags & BLK_MQ_REQ_PREEMPT;
const bool pm = flags & BLK_MQ_REQ_PM;
while (true) {
bool success = false;
@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
* responsible for ensuring that that counter is
* globally visible before the queue is unfrozen.
*/
if (pm || !blk_queue_pm_only(q)) {
if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
!blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
(pm || (blk_pm_request_resume(q),
!blk_queue_pm_only(q)))) ||
blk_pm_resume_queue(pm, q)) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
@@ -630,7 +631,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
struct request *req;
WARN_ON_ONCE(op & REQ_NOWAIT);
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
req = blk_mq_alloc_request(q, op, flags);
if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)

@@ -2525,8 +2525,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
bool use_debt, ioc_locked;
unsigned long flags;
/* bypass IOs if disabled or for root cgroup */
if (!ioc->enabled || !iocg->level)
/* bypass IOs if disabled, still initializing, or for root cgroup */
if (!ioc->enabled || !iocg || !iocg->level)
return;
/* calculate the absolute vtime cost */
@@ -2653,14 +2653,14 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
struct bio *bio)
{
struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
struct ioc *ioc = iocg->ioc;
struct ioc *ioc = rqos_to_ioc(rqos);
sector_t bio_end = bio_end_sector(bio);
struct ioc_now now;
u64 vtime, abs_cost, cost;
unsigned long flags;
/* bypass if disabled or for root cgroup */
if (!ioc->enabled || !iocg->level)
/* bypass if disabled, still initializing, or for root cgroup */
if (!ioc->enabled || !iocg || !iocg->level)
return;
abs_cost = calc_vtime_cost(bio, iocg, true);
@@ -2837,6 +2837,12 @@ static int blk_iocost_init(struct request_queue *q)
ioc_refresh_params(ioc, true);
spin_unlock_irq(&ioc->lock);
/*
* rqos must be added before activation to allow iocg_pd_init() to
* lookup the ioc from q. This means that the rqos methods may get
* called before policy activation completion, can't assume that the
* target bio has an iocg associated and need to test for NULL iocg.
*/
rq_qos_add(q, rqos);
ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
if (ret) {

@@ -129,6 +129,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(PCI_P2PDMA),
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME
@@ -297,7 +298,6 @@ static const char *const rqf_name[] = {
RQF_NAME(MIXED_MERGE),
RQF_NAME(MQ_INFLIGHT),
RQF_NAME(DONTPREP),
RQF_NAME(PREEMPT),
RQF_NAME(FAILED),
RQF_NAME(QUIET),
RQF_NAME(ELVPRIV),

@@ -292,8 +292,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->mq_hctx = data->hctx;
rq->rq_flags = 0;
rq->cmd_flags = data->cmd_flags;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
if (data->flags & BLK_MQ_REQ_PM)
rq->rq_flags |= RQF_PM;
if (blk_queue_io_stat(data->q))
rq->rq_flags |= RQF_IO_STAT;
INIT_LIST_HEAD(&rq->queuelist);

@@ -6,11 +6,14 @@
#include <linux/pm_runtime.h>
#ifdef CONFIG_PM
static inline void blk_pm_request_resume(struct request_queue *q)
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
if (!q->dev || !blk_queue_pm_only(q))
return 1; /* Nothing to do */
if (pm && q->rpm_status != RPM_SUSPENDED)
return 1; /* Request allowed */
pm_request_resume(q->dev);
return 0;
}
static inline void blk_pm_mark_last_busy(struct request *rq)
@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
--rq->q->nr_pending;
}
#else
static inline void blk_pm_request_resume(struct request_queue *q)
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
{
return 1;
}
static inline void blk_pm_mark_last_busy(struct request *rq)
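
blk_pm_resume_queue() above folds the old "resume if suspended" side effect and the new RPM-status gate into one predicate. A user-space truth-table sketch (queue assumed to have a device, rpm states reduced to an enum, pm_request_resume() stubbed; illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum rpm_status { RPM_ACTIVE, RPM_RESUMING, RPM_SUSPENDING, RPM_SUSPENDED };

/* Mirrors the kernel logic; nonzero means the caller may enter the queue. */
static int resume_queue(bool pm, bool pm_only, enum rpm_status rpm)
{
    if (!pm_only)
        return 1;                    /* nothing restricts the queue */
    if (pm && rpm != RPM_SUSPENDED)
        return 1;                    /* PM request, device not suspended */
    printf("pm_request_resume()\n"); /* kick a resume, caller keeps waiting */
    return 0;
}

int main(void)
{
    printf("%d\n", resume_queue(true, true, RPM_SUSPENDING)); /* 1 */
    printf("%d\n", resume_queue(true, true, RPM_SUSPENDED));  /* 0 */
    printf("%d\n", resume_queue(false, true, RPM_ACTIVE));    /* 0 */
    return 0;
}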

@@ -354,7 +354,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
memcpy(cur, e, sizeof(e));
cur += sizeof(e);
/* Zero parameters to satisfy set_pub_key ABI. */
memset(cur, 0, SETKEY_PARAMS_SIZE);
memzero_explicit(cur, SETKEY_PARAMS_SIZE);
return cur - buf;
}

@@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
struct ecdh params;
unsigned int ndigits;
if (crypto_ecdh_decode_key(buf, len, &params) < 0)
if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
params.key_size > sizeof(ctx->private_key))
return -EINVAL;
ndigits = ecdh_supported_curve(params.curve_id);
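
The added check rejects a decoded key_size larger than the fixed private_key buffer before anything is copied. A user-space sketch of the same validate-before-copy pattern (struct and sizes are illustrative, not the kernel crypto API):

#include <stdio.h>
#include <string.h>

struct ctx { unsigned char private_key[32]; };

static int set_secret(struct ctx *ctx, const unsigned char *key, size_t key_size)
{
    /* Reject oversized input up front instead of overflowing the buffer. */
    if (key_size > sizeof(ctx->private_key))
        return -1;
    memcpy(ctx->private_key, key, key_size);
    return 0;
}

int main(void)
{
    struct ctx c;
    unsigned char big[64] = { 0 };

    printf("%d\n", set_secret(&c, big, sizeof(big))); /* -1, rejected */
    printf("%d\n", set_secret(&c, big, 32));          /*  0, fits     */
    return 0;
}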

@@ -27,6 +27,8 @@
#include <trace/hooks/cpufreq.h>
#include <trace/hooks/mm.h>
#include <trace/hooks/preemptirq.h>
#include <trace/hooks/ftrace_dump.h>
#include <trace/hooks/pm_domain.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -120,3 +122,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ttwu_cond);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule_bug);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_exec);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_oops_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_size_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_format_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ftrace_dump_buffer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_allow_domain_state);

@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
return err;
goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);

@@ -4283,7 +4283,7 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
if (!(parent && fn == parent->fwnode))
fn->secondary = ERR_PTR(-ENODEV);
fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}

@@ -12,6 +12,8 @@
#include <linux/cpumask.h>
#include <linux/ktime.h>
#include <trace/hooks/pm_domain.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
@@ -174,6 +176,11 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
struct pm_domain_data *pdd;
s64 min_off_time_ns;
s64 off_on_time_ns;
bool allow = true;
trace_android_vh_allow_domain_state(genpd, state, &allow);
if (!allow)
return false;
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;

@@ -251,12 +251,8 @@ static int h5_close(struct hci_uart *hu)
if (h5->vnd && h5->vnd->close)
h5->vnd->close(h5);
if (hu->serdev)
serdev_device_close(hu->serdev);
kfree_skb(h5->rx_skb);
kfree(h5);
h5 = NULL;
if (!hu->serdev)
kfree(h5);
return 0;
}

@@ -381,3 +381,4 @@ void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_driver_state_disabled);

@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
struct dma_buf *dmabuf;
if (!is_dma_buf_file(file))
return -EINVAL;
dmabuf = file->private_data;
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
return 0;
}
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
.release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,

@@ -379,7 +379,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
for (; i > 0; i--)
while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
@@ -1639,7 +1639,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
for (; i > 0; i--)
while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
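
Both loops above fix the same off-by-one: with for (; i > 0; i--), entry i (the one whose registration just failed, so never registered) gets unregistered and entry 0 never does. A tiny user-space demonstration of the corrected unwind:

#include <stdio.h>

int main(void)
{
    /* Suppose registration succeeded for entries 0 and 1 and failed at
     * i == 2. The old unwind touched entry 2 and skipped entry 0. */
    int i = 2;

    while (--i >= 0)
        printf("unregister %d\n", i);  /* prints 1, then 0 */
    return 0;
}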

@@ -2278,8 +2278,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
drm_connector_list_update(connector);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,

@@ -2622,11 +2622,22 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
/*
* Display WA #22010492432: tgl
* Program half of the nominal DCO divider fraction value.
*/
static bool
tgl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
return IS_TIGERLAKE(i915) && i915->dpll.ref_clks.nssc == 38400;
}
static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
const struct intel_shared_dpll *pll,
int ref_clock)
{
const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
u32 dco_fraction;
u32 p0, p1, p2, dco_freq;
p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
@@ -2669,8 +2680,13 @@ static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
ref_clock;
dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT;
if (tgl_combo_pll_div_frac_wa_needed(dev_priv))
dco_fraction *= 2;
dco_freq += (dco_fraction * ref_clock) / 0x8000;
if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
return 0;
@@ -2948,16 +2964,6 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
/* the following params are unused */
};
/*
* Display WA #22010492432: tgl
* Divide the nominal .dco_fraction value by 2.
*/
static const struct skl_wrpll_params tgl_tbt_pll_38_4MHz_values = {
.dco_integer = 0x54, .dco_fraction = 0x1800,
/* the following params are unused */
.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
@@ -2991,14 +2997,12 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
*pll_params = tgl_tbt_pll_19_2MHz_values;
break;
case 24000:
*pll_params = tgl_tbt_pll_24MHz_values;
break;
case 38400:
*pll_params = tgl_tbt_pll_38_4MHz_values;
break;
}
} else {
switch (dev_priv->dpll.ref_clks.nssc) {
@@ -3065,9 +3069,14 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
const struct skl_wrpll_params *pll_params,
struct intel_dpll_hw_state *pll_state)
{
u32 dco_fraction = pll_params->dco_fraction;
memset(pll_state, 0, sizeof(*pll_state));
pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params->dco_fraction) |
if (tgl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
pll_params->dco_integer;
pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
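
The workaround now lives in two places that cancel out: icl_calc_dpll_state() halves the nominal DCO fraction before programming CFGCR0 on affected 38.4 MHz parts, and __cnl_ddi_wrpll_get_freq() doubles the read-back value, so the computed frequency matches the nominal parameters again. A quick arithmetic check (the 0x3000 value is illustrative):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
    unsigned int nominal = 0x3000;                           /* nominal fraction  */
    unsigned int programmed = DIV_ROUND_CLOSEST(nominal, 2); /* written to CFGCR0 */
    unsigned int readback = programmed * 2;                  /* get_freq doubles  */

    printf("nominal=0x%x programmed=0x%x readback=0x%x\n",
           nominal, programmed, readback);                   /* readback == nominal */
    return 0;
}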

@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);

@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
unsigned long x, n;
unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
length = round_up(length,
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
for (n = offset >> PAGE_SHIFT; length; n++) {
int len = min(length, PAGE_SIZE - x);
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
length -= len;
remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1539,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
if (shadow_needs_clflush(shadow->obj))
drm_clflush_virt_range(batch_end, 8);
}
if (shadow_needs_clflush(shadow->obj)) {
void *ptr = page_mask_bits(shadow->obj->mm.mapping);
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
}
i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);

@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
s_config = devm_kcalloc(dev, cpus + sockets,
s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
s_config[i] = 0;
return 0;
}
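
The config array behind a hwmon_channel_info is walked up to a zero terminator, so the allocation needs one extra slot for the sentinel that the added s_config[i] = 0; writes. A user-space sketch of the pattern (counts and the 0x1234 value are illustrative):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t cpus = 4, sockets = 1;

    /* One u32 per channel, plus one extra slot for the 0 terminator. */
    unsigned int *s_config = calloc(cpus + sockets + 1, sizeof(*s_config));
    if (!s_config)
        return 1;

    for (size_t i = 0; i < cpus + sockets; i++)
        s_config[i] = 0x1234;        /* per-channel config bits */
    s_config[cpus + sockets] = 0;    /* sentinel the consumer walks to */

    for (size_t i = 0; s_config[i]; i++)
        printf("channel %zu: 0x%x\n", i, s_config[i]);
    free(s_config);
    return 0;
}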

@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;

@@ -515,15 +515,10 @@ blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
*
* We let requests forced at head of queue with ide-preempt
* though. I hope that doesn't happen too much, hopefully not
* unless the subdriver triggers such a thing in its own PM
* state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
(rq->rq_flags & RQF_PREEMPT) == 0) {
(rq->rq_flags & RQF_PM) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;

@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;

@@ -1177,25 +1177,6 @@ static int assign_name(struct ib_device *device, const char *name)
return ret;
}
static void setup_dma_device(struct ib_device *device,
struct device *dma_device)
{
/*
* If the caller does not provide a DMA capable device then the IB
* device will be used. In this case the caller should fully setup the
* ibdev for DMA. This usually means using dma_virt_ops.
*/
#ifdef CONFIG_DMA_VIRT_OPS
if (!dma_device) {
device->dev.dma_ops = &dma_virt_ops;
dma_device = &device->dev;
}
#endif
WARN_ON(!dma_device);
device->dma_device = dma_device;
WARN_ON(!device->dma_device->dma_parms);
}
/*
* setup_device() allocates memory and sets up data that requires calling the
* device ops, this is the only reason these actions are not done during
@@ -1341,7 +1322,14 @@ int ib_register_device(struct ib_device *device, const char *name,
if (ret)
return ret;
setup_dma_device(device, dma_device);
/*
* If the caller does not provide a DMA capable device then the IB core
* will set up ib_sge and scatterlist structures that stash the kernel
* virtual address into the address field.
*/
WARN_ON(dma_device && !dma_device->dma_parms);
device->dma_device = dma_device;
ret = setup_device(device);
if (ret)
return ret;
@@ -2676,6 +2664,21 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
}
EXPORT_SYMBOL(ib_set_device_ops);
#ifdef CONFIG_INFINIBAND_VIRT_DMA
int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
sg_dma_address(s) = (uintptr_t)sg_virt(s);
sg_dma_len(s) = s->length;
}
return nents;
}
EXPORT_SYMBOL(ib_dma_virt_map_sg);
#endif /* CONFIG_INFINIBAND_VIRT_DMA */
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,

@@ -285,8 +285,11 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
u32 sg_cnt, enum dma_data_direction dir)
{
if (is_pci_p2pdma_page(sg_page(sg)))
if (is_pci_p2pdma_page(sg_page(sg))) {
if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
return 0;
return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
}
return ib_dma_map_sg(dev, sg, sg_cnt, dir);
}

@@ -4,6 +4,5 @@ config INFINIBAND_RDMAVT
depends on INFINIBAND_VIRT_DMA
depends on X86_64
depends on PCI
select DMA_VIRT_OPS
help
This is a common software verbs provider for RDMA networks.

@@ -324,8 +324,6 @@ static void __rvt_free_mr(struct rvt_mr *mr)
* @acc: access flags
*
* Return: the memory region on success, otherwise returns an errno.
* Note that all DMA addresses should be created via the functions in
* struct dma_virt_ops.
*/
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
@@ -766,7 +764,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
/*
* We use LKEY == zero for kernel virtual addresses
* (see rvt_get_dma_mr() and dma_virt_ops).
* (see rvt_get_dma_mr()).
*/
if (sge->lkey == 0) {
struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
@@ -877,7 +875,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
/*
* We use RKEY == zero for kernel virtual addresses
* (see rvt_get_dma_mr() and dma_virt_ops).
* (see rvt_get_dma_mr()).
*/
rcu_read_lock();
if (rkey == 0) {

@@ -524,7 +524,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
int rvt_register_device(struct rvt_dev_info *rdi)
{
int ret = 0, i;
u64 dma_mask;
if (!rdi)
return -EINVAL;
@@ -579,13 +578,6 @@ int rvt_register_device(struct rvt_dev_info *rdi)
/* Completion queues */
spin_lock_init(&rdi->n_cqs_lock);
/* DMA Operations */
rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms;
dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask);
if (ret)
goto bail_wss;
/* Protection Domain */
spin_lock_init(&rdi->n_pds_lock);
rdi->n_pds_allocated = 0;

@@ -5,7 +5,6 @@ config RDMA_RXE
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
select CRYPTO_CRC32
select DMA_VIRT_OPS
help
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a

@@ -20,18 +20,6 @@
static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
struct net_device *ndev;
ndev = rxe->ndev;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
return ndev->dev.parent;
}
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
int err;

@@ -1118,23 +1118,15 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
int err;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
u64 dma_mask;
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->num_comp_vectors = num_possible_cpus();
dev->dev.parent = rxe_dma_device(rxe);
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_parms = &rxe->dma_parms;
dma_set_max_seg_size(&dev->dev, UINT_MAX);
dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask);
if (err)
return err;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
| BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)

@@ -352,7 +352,6 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;

@@ -2,7 +2,6 @@ config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
depends on INET && INFINIBAND && LIBCRC32C
depends on INFINIBAND_VIRT_DMA
select DMA_VIRT_OPS
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a

@@ -69,7 +69,6 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;

@@ -305,25 +305,8 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
{
struct siw_device *sdev = NULL;
struct ib_device *base_dev;
struct device *parent = netdev->dev.parent;
u64 dma_mask;
int rv;
if (!parent) {
/*
* The loopback device has no parent device,
* so it appears as a top-level device. To support
* loopback device connectivity, take this device
* as the parent device. Skip all other devices
* w/o parent device.
*/
if (netdev->type != ARPHRD_LOOPBACK) {
pr_warn("siw: device %s error: no parent device\n",
netdev->name);
return NULL;
}
parent = &netdev->dev;
}
sdev = ib_alloc_device(siw_device, base_dev);
if (!sdev)
return NULL;
@@ -382,13 +365,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
* per physical port.
*/
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
base_dev->dev.dma_parms = &sdev->dma_parms;
dma_set_max_seg_size(&base_dev->dev, UINT_MAX);
dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask))
goto error;
base_dev->num_comp_vectors = num_possible_cpus();
xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
@@ -430,7 +406,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
atomic_set(&sdev->num_mr, 0);
atomic_set(&sdev->num_pd, 0);
sdev->numa_node = dev_to_node(parent);
sdev->numa_node = dev_to_node(&netdev->dev);
spin_lock_init(&sdev->lock);
return sdev;

@@ -432,6 +432,10 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
prot |= IOMMU_SYS_CACHE_ONLY;
if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
prot |= IOMMU_SYS_CACHE_ONLY_NWA;
switch (dir) {
case DMA_BIDIRECTIONAL:

@@ -142,7 +142,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev
}
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(svm->iommu, &desc, 1, 0);
qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +166,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev
}
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(svm->iommu, &desc, 1, 0);
qi_submit_sync(sdev->iommu, &desc, 1, 0);
}
}
@@ -211,7 +211,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -363,6 +363,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -546,6 +547,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +577,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;

@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
{BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
{BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};

@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
/* 32bit bucket size, obsoleted */
#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
/* real bucket size is (1 << bucket_size) */
#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
- #define BCH_FEATURE_COMPAT_SUUP 0
- #define BCH_FEATURE_RO_COMPAT_SUUP 0
- #define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+ #define BCH_FEATURE_COMPAT_SUPP 0
+ #define BCH_FEATURE_RO_COMPAT_SUPP 0
+ #define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
- BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+ BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+ BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+ static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+ {
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+ }
+ static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+ {
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+ }
+ static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+ {
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+ }
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);

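For reference, the three *_SUPP masks above feed a standard unknown-feature test: any feature bit present on disk but absent from the supported mask is unknown, and the superblock is rejected. A standalone sketch of that test, with an illustrative mask value (not the kernel code):

/* Sketch only: SUPP_MASK is illustrative, not a kernel value. */
#include <stdbool.h>
#include <stdint.h>

#define SUPP_MASK 0x0003ULL	/* feature bits this build understands */

static bool has_unknown_features(uint64_t on_disk)
{
	/* any on-disk bit outside the supported mask is unknown */
	return (on_disk & ~SUPP_MASK) != 0;
}

With SUPP_MASK = 0x0003, an on-disk value of 0x0006 fails the check, because bit 2 is not understood by this build.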

@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * bcache tool will make sure the overflow won't
+ * happen, an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
err = read_super_common(sb, bdev, s);
if (err)
goto err;

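The two feature flags above interpret the same on-disk field in incompatible ways, which is why the old encoding survives only as an "obso" flag. A standalone sketch with illustrative values (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int lo = 15;	/* s->bucket_size as stored on disk */
	unsigned int hi = 1;	/* old (obso_)bucket_size_hi word */

	/* obsoleted LARGE_BUCKET: 32-bit size split across two 16-bit words */
	unsigned int obso = lo + (hi << 16);	/* 65551 */

	/* LOG_LARGE_BUCKET_SIZE: the field stores log2 of the bucket size */
	unsigned int log2_sized = 1u << lo;	/* 32768 */

	printf("obso: %u, log: %u\n", obso, log2_sized);
	return 0;
}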

@@ -617,7 +617,7 @@ void channel_free(struct channel *c)
if (c->cur_from_user != &c->scratch_message_from_user)
message_kill(c->cur_from_user, &c->target->message_pool);
list_for_each (cur, &c->from_user)
- message_kill(list_entry(cur, struct message, to_user),
+ message_kill(list_entry(cur, struct message, from_user),
&c->target->message_pool);
mutex_lock(&c->target->lock);


@@ -318,10 +318,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
buf += ret;
}
- if (req->ooblen)
- memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
- req->ooblen);
return 0;
}


@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;


@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
- #define GSWIP_MII_CFG0 0x00
- #define GSWIP_MII_CFG1 0x02
- #define GSWIP_MII_CFG5 0x04
+ #define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1541,9 +1530,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,

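A note on the register arithmetic behind the GSWIP change above; the "port 2" observation is inferred from the offsets, not stated in the diff:

/* Sketch: offsets produced by GSWIP_MII_CFGp(p) = 0x2 * p
 * port 0 -> 0x00 (matches the old GSWIP_MII_CFG0)
 * port 1 -> 0x02 (matches the old GSWIP_MII_CFG1)
 * port 5 -> 0x0A, whereas the old GSWIP_MII_CFG5 value (0x04)
 * would have addressed port 2's register instead.
 */
#define GSWIP_MII_CFGp(p) (0x2 * (p))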

@@ -2577,6 +2577,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;


@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
- }
rtnl_unlock();
@@ -12952,10 +12941,21 @@ static void bnxt_io_resume(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}


@@ -1435,6 +1435,11 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+ #define BNXT_MAX_TQM_SP_RINGS 1
+ #define BNXT_MAX_TQM_FP_RINGS 8
+ #define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1473,7 +1478,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {

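The constants above preserve the old behavior while bounding firmware input: 1 slow-path ring plus 8 fast-path rings gives the 9 that was previously hardcoded as tqm_mem[9] and as the `i < 9` loop bound. A one-line sketch of the arithmetic:

/* Sketch: BNXT_MAX_TQM_RINGS = 1 (SP) + 8 (FP) = 9; the firmware-reported
 * tqm_fp_rings_count is now clamped to BNXT_MAX_TQM_FP_RINGS (8). */
enum { BNXT_SKETCH_TQM_RINGS = 1 + 8 };	/* == 9 */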

@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ static int ethoc_probe(struct platform_device *pdev)
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+ free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);


@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ free_netdev(dev);
return 0;
}


@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
/* for mutl buffer*/
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;


@@ -932,6 +932,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
release_login_buffer(adapter);
+ release_login_rsp_buffer(adapter);
}
@@ -2247,8 +2248,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2869,9 +2869,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3768,7 +3766,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
+ release_login_rsp_buffer(adapter);
client_data_len = vnic_client_data_len(adapter);
buffer_size =


@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+ #define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))


@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+ static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ #define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+ };
+ #define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+ static u32 e1000e_get_priv_flags(struct net_device *netdev)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+ return priv_flags;
+ }
+ static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+ return 0;
+ }
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)


@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (firmware_bug)
e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
else
e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);

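The new poll budget above works out as follows: each iteration sleeps roughly 10 ms (usleep_range(10000, 11000)), so 250 iterations give the 2.5 s timeout and the 100-iteration mark is the 1 s firmware-bug warning threshold. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	int step_ms = 10;	/* ~10 ms sleep per poll iteration */

	printf("timeout: %d ms\n", 250 * step_ms);	/* 2500 ms */
	printf("warn at: %d ms\n", 100 * step_ms);	/* 1000 ms */
	return 0;
}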

@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
- struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
- };
- static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
- };
- static bool e1000e_check_me(u16 device_id)
- {
- struct e1000e_me_supported *id;
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
- return false;
- }
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)


@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+ #define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {


@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets PF and reinitializes PFs VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;


@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;


@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)


@@ -5232,7 +5232,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu


@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)


@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+ /* Drop flow control pause frames */
+ static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+ {
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+ memset(&pe, 0, sizeof(pe));
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to DIP of IPV6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,

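The new shift above is the byte distance from the EtherType to the IPv6 destination address. Assuming the usual mvpp2 values MVPP2_ETH_TYPE_LEN = 2 and MVPP2_MAX_L3_ADDR_SIZE = 16:

/* Sketch: bytes skipped to land on the IPv6 DIP
 *    2  EtherType                (MVPP2_ETH_TYPE_LEN)
 * +  8  IPv6 fixed header fields before the source address
 * + 16  IPv6 source address      (MVPP2_MAX_L3_ADDR_SIZE)
 * = 26, versus the old 2 + 4, which stopped inside the header.
 */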

@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
- /* reserved */
+ #define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)


@@ -366,6 +366,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+ static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+ {
+ /* SWP offsets are in 2-bytes words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+ }
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)

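As the new helper's comment notes, SWP offsets are expressed in 2-byte words, so a 4-byte VLAN tag inserted by the driver shifts every offset by VLAN_HLEN / 2 = 2 words. A standalone sketch of that adjustment:

/* Sketch: VLAN_HLEN is 4 bytes; SWP offsets count 2-byte words,
 * so each offset grows by 4 / 2 = 2 words after VLAN insertion. */
#define VLAN_HLEN 4

static unsigned char swp_offset_after_vlan(unsigned char off_words)
{
	return off_words + VLAN_HLEN / 2;
}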
Some files were not shown because too many files have changed in this diff.