This is the 5.10.95 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmH1CJEACgkQONu9yGCS
 aT6lBxAArR4bNBRo/PYVKUjbw/SYUz75iKuet2JORXQdMhDCGZHcKYtAuZX25IKt
 RMr613Rp3AACXNE7km6zGTNvX7hvOuBqOg2+PE18UG8JZEaqOGtht6mqMIH8gDtl
 xf3YaiqXdnVExZRZghIyPzg2lzzHgagl3U0HyB5ChvOg0k9YjA/TkEGE0haT14X+
 1PY63ZPIoQaGtdGngd92zOLDeKoxLHCkFDulhQ7ge3mm7SUYPjkonONz7j/R3QF8
 PzFTT5Gjd/qmh3NGTknCbLFXZAWATaSswE5fcrx70WxQ+XUUQ1QArpxXZgNd1TMP
 xIxO/R0v/lejaw7JNHxGp8lxXOaqjRRMPjtDAwWwjLwJi2G+UExHV8+BOC0V3M24
 +F1XWwnyZoIdE3L9xL8qNheHwROSNj3+PD4elpllmWUe2VEDlreZbMOcr/2wJiIT
 Z73ZPJPf05z/3+IzFhMuSXoeyq+K3A7XwJ51jQIfiOGaBsNDOxorQV8HcEgcUcHL
 QfmIXIVSA32mS5tHLyCNxGy70p4mtKgLyDNdtKMt+kueQBy+VYyfX8Y+YN/XdPIn
 k2cT0aTt9+fDFUT3EZTFvddelN5kF1+a3XeM049VaCWZ6GpxBND5IshlyUdS9Dmr
 RpSfkUcjS6OfgxEG2pxL4W8Yu9Ae1m0ubqWx8cCDLvG8cO9Tw2s=
 =KGpp
 -----END PGP SIGNATURE-----

Merge 5.10.95 into android12-5.10-lts

Changes in 5.10.95
	drm/i915: Flush TLBs before releasing backing store
	bnx2x: Utilize firmware 7.13.21.0
	bnx2x: Invalidate fastpath HSI version for VFs
	rcu: Tighten rcu_advance_cbs_nowake() checks
	KVM: x86/mmu: Fix write-protection of PTs mapped by the TDP MMU
	select: Fix indefinitely sleeping task in poll_schedule_timeout()
	drm/vmwgfx: Fix stale file descriptors on failed usercopy
	Linux 5.10.95

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I2df6cec74b4d7c8f0539182c547db61a45cd093f
commit a9839858b5
Author: Greg Kroah-Hartman
Date:   2022-02-02 14:44:20 +01:00

23 files changed, 297 insertions(+), 92 deletions(-)

diff --git a/Makefile b/Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 94
+SUBLEVEL = 95
 EXTRAVERSION =
 NAME = Dare mighty things

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c

@@ -1130,12 +1130,12 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
     bool spte_set = false;
 
     tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
-        if (!is_writable_pte(iter.old_spte))
-            break;
-
         new_spte = iter.old_spte &
             ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
 
+        if (new_spte == iter.old_spte)
+            break;
+
         tdp_mmu_set_spte(kvm, &iter, new_spte);
         spte_set = true;
     }
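
Moving the check works because clearing PT_WRITABLE_MASK and SPTE_MMU_WRITEABLE from an SPTE that is already write-protected leaves it unchanged, so new_spte == iter.old_spte both preserves the early exit and covers cases the old is_writable_pte() test missed. A standalone sketch of the mask arithmetic (illustrative bit positions, not the kernel's real SPTE layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions only; the real layout lives in KVM's spte code. */
    #define PRESENT            (1ull << 0)
    #define PT_WRITABLE_MASK   (1ull << 1)
    #define SPTE_MMU_WRITEABLE (1ull << 58)

    int main(void)
    {
        uint64_t old_spte = PRESENT | PT_WRITABLE_MASK;
        uint64_t new_spte = old_spte & ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

        if (new_spte == old_spte)
            puts("already write-protected: stop the walk");
        else
            printf("write-protect %#llx -> %#llx\n",
                   (unsigned long long)old_spte, (unsigned long long)new_spte);
        return 0;
    }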

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h

@@ -159,6 +159,7 @@ struct drm_i915_gem_object {
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
 #define I915_BO_READONLY         BIT(2)
+#define I915_BO_WAS_BOUND_BIT    3
 
     /*
      * Is the object to be mapped as read-only to the GPU

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c

@@ -10,6 +10,8 @@
 #include "i915_gem_lmem.h"
 #include "i915_gem_mman.h"
 
+#include "gt/intel_gt.h"
+
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                  struct sg_table *pages,
                  unsigned int sg_page_sizes)
@@ -186,6 +188,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 
     __i915_gem_object_reset_page_iter(obj);
     obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
 
+    if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+        struct drm_i915_private *i915 = to_i915(obj->base.dev);
+        intel_wakeref_t wakeref;
+
+        with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
+            intel_gt_invalidate_tlbs(&i915->gt);
+    }
+
     return pages;
 }

diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c

@@ -26,6 +26,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 
     spin_lock_init(&gt->irq_lock);
 
+    mutex_init(&gt->tlb_invalidate_lock);
+
     INIT_LIST_HEAD(&gt->closed_vma);
     spin_lock_init(&gt->closed_lock);
@@ -661,3 +663,103 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 
     intel_sseu_dump(&info->sseu, p);
 }
+
+struct reg_and_bit {
+    i915_reg_t reg;
+    u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+        const i915_reg_t *regs, const unsigned int num)
+{
+    const unsigned int class = engine->class;
+    struct reg_and_bit rb = { };
+
+    if (drm_WARN_ON_ONCE(&engine->i915->drm,
+                 class >= num || !regs[class].reg))
+        return rb;
+
+    rb.reg = regs[class];
+    if (gen8 && class == VIDEO_DECODE_CLASS)
+        rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+    else
+        rb.bit = engine->instance;
+
+    rb.bit = BIT(rb.bit);
+
+    return rb;
+}
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+{
+    static const i915_reg_t gen8_regs[] = {
+        [RENDER_CLASS]              = GEN8_RTCR,
+        [VIDEO_DECODE_CLASS]        = GEN8_M1TCR, /* , GEN8_M2TCR */
+        [VIDEO_ENHANCEMENT_CLASS]   = GEN8_VTCR,
+        [COPY_ENGINE_CLASS]         = GEN8_BTCR,
+    };
+    static const i915_reg_t gen12_regs[] = {
+        [RENDER_CLASS]              = GEN12_GFX_TLB_INV_CR,
+        [VIDEO_DECODE_CLASS]        = GEN12_VD_TLB_INV_CR,
+        [VIDEO_ENHANCEMENT_CLASS]   = GEN12_VE_TLB_INV_CR,
+        [COPY_ENGINE_CLASS]         = GEN12_BLT_TLB_INV_CR,
+    };
+    struct drm_i915_private *i915 = gt->i915;
+    struct intel_uncore *uncore = gt->uncore;
+    struct intel_engine_cs *engine;
+    enum intel_engine_id id;
+    const i915_reg_t *regs;
+    unsigned int num = 0;
+
+    if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+        return;
+
+    if (INTEL_GEN(i915) == 12) {
+        regs = gen12_regs;
+        num = ARRAY_SIZE(gen12_regs);
+    } else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) {
+        regs = gen8_regs;
+        num = ARRAY_SIZE(gen8_regs);
+    } else if (INTEL_GEN(i915) < 8) {
+        return;
+    }
+
+    if (drm_WARN_ONCE(&i915->drm, !num,
+              "Platform does not implement TLB invalidation!"))
+        return;
+
+    GEM_TRACE("\n");
+
+    assert_rpm_wakelock_held(&i915->runtime_pm);
+
+    mutex_lock(&gt->tlb_invalidate_lock);
+    intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+    for_each_engine(engine, gt, id) {
+        /*
+         * HW architecture suggest typical invalidation time at 40us,
+         * with pessimistic cases up to 100us and a recommendation to
+         * cap at 1ms. We go a bit higher just in case.
+         */
+        const unsigned int timeout_us = 100;
+        const unsigned int timeout_ms = 4;
+        struct reg_and_bit rb;
+
+        rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+        if (!i915_mmio_reg_offset(rb.reg))
+            continue;
+
+        intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+        if (__intel_wait_for_register_fw(uncore,
+                         rb.reg, rb.bit, 0,
+                         timeout_us, timeout_ms,
+                         NULL))
+            drm_err_ratelimited(&gt->i915->drm,
+                        "%s TLB invalidation did not complete in %ums!\n",
+                        engine->name, timeout_ms);
+    }
+
+    intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+    mutex_unlock(&gt->tlb_invalidate_lock);
+}
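
intel_gt_invalidate_tlbs() drives a write-one-to-request, poll-until-clear handshake: the driver sets the engine's bit in the invalidation register and hardware clears it when the flush completes, hence the bounded wait above. A self-contained sketch of that MMIO pattern (all names here are hypothetical stand-ins, not i915 API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for an invalidation register the hardware clears on completion. */
    static volatile uint32_t fake_tlb_inv_cr;

    static bool tlb_invalidate_and_wait(uint32_t bit, unsigned int spins)
    {
        fake_tlb_inv_cr |= bit;              /* request: set the engine's bit */
        while (spins--) {
            if (!(fake_tlb_inv_cr & bit))    /* ack: hardware cleared the bit */
                return true;
            /* a real driver would delay/relax here instead of spinning hot */
        }
        return false;                        /* timed out, like the drm_err above */
    }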

diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h

@@ -77,4 +77,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
 void intel_gt_info_print(const struct intel_gt_info *info,
              struct drm_printer *p);
 
+void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+
 #endif /* __INTEL_GT_H__ */

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h

@@ -36,6 +36,8 @@ struct intel_gt {
 
     struct intel_uc uc;
 
+    struct mutex tlb_invalidate_lock;
+
     struct intel_gt_timelines {
         spinlock_t lock; /* protects active_list */
         struct list_head active_list;

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h

@@ -2639,6 +2639,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING    (1 << 28)
 #define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT      (1 << 24)
 
+#define GEN8_RTCR    _MMIO(0x4260)
+#define GEN8_M1TCR   _MMIO(0x4264)
+#define GEN8_M2TCR   _MMIO(0x4268)
+#define GEN8_BTCR    _MMIO(0x426c)
+#define GEN8_VTCR    _MMIO(0x4270)
+
 #if 0
 #define PRB0_TAIL    _MMIO(0x2030)
 #define PRB0_HEAD    _MMIO(0x2034)
@@ -2728,6 +2734,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   FAULT_VA_HIGH_BITS    (0xf << 0)
 #define   FAULT_GTT_SEL         (1 << 4)
 
+#define GEN12_GFX_TLB_INV_CR    _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR     _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR     _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR    _MMIO(0xcee4)
+
 #define GEN12_AUX_ERR_DBG       _MMIO(0x43f4)
 
 #define FPGA_DBG                _MMIO(0x42300)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c

@@ -439,6 +439,9 @@ int i915_vma_bind(struct i915_vma *vma,
         vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
     }
 
+    if (vma->obj)
+        set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
     atomic_or(bind_flags, &vma->flags);
     return 0;
 }
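
I915_BO_WAS_BOUND_BIT acts as a one-shot latch: every bind sets it, and __i915_gem_object_unset_pages() consumes it with test_and_clear_bit(), so one TLB flush covers any number of binds. The same protocol in portable C11 atomics (a sketch, not the kernel's bitops):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WAS_BOUND (1u << 3)

    static _Atomic unsigned int obj_flags;

    static void on_bind(void)
    {
        atomic_fetch_or(&obj_flags, WAS_BOUND);      /* like set_bit() */
    }

    static bool need_tlb_flush_on_unset(void)
    {
        /* like test_and_clear_bit(): true for the first caller only */
        return atomic_fetch_and(&obj_flags, ~WAS_BOUND) & WAS_BOUND;
    }

    int main(void)
    {
        on_bind();
        on_bind();                                   /* repeated binds set the same bit */
        printf("%d\n", need_tlb_flush_on_unset());   /* 1: flush once */
        printf("%d\n", need_tlb_flush_on_unset());   /* 0: nothing to do */
        return 0;
    }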

diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c

@@ -694,7 +694,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
 }
 
 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
-                     enum forcewake_domains fw_domains)
+                     enum forcewake_domains fw_domains,
+                     bool delayed)
 {
     struct intel_uncore_forcewake_domain *domain;
     unsigned int tmp;
@@ -709,7 +710,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
             continue;
         }
 
-        uncore->funcs.force_wake_put(uncore, domain->mask);
+        if (delayed &&
+            !(domain->uncore->fw_domains_timer & domain->mask))
+            fw_domain_arm_timer(domain);
+        else
+            uncore->funcs.force_wake_put(uncore, domain->mask);
     }
 }
 
@@ -730,7 +735,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
         return;
 
     spin_lock_irqsave(&uncore->lock, irqflags);
-    __intel_uncore_forcewake_put(uncore, fw_domains);
+    __intel_uncore_forcewake_put(uncore, fw_domains, false);
+    spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+                    enum forcewake_domains fw_domains)
+{
+    unsigned long irqflags;
+
+    if (!uncore->funcs.force_wake_put)
+        return;
+
+    spin_lock_irqsave(&uncore->lock, irqflags);
+    __intel_uncore_forcewake_put(uncore, fw_domains, true);
     spin_unlock_irqrestore(&uncore->lock, irqflags);
 }
 
@@ -772,7 +790,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
     if (!uncore->funcs.force_wake_put)
         return;
 
-    __intel_uncore_forcewake_put(uncore, fw_domains);
+    __intel_uncore_forcewake_put(uncore, fw_domains, false);
 }
 
 void assert_forcewakes_inactive(struct intel_uncore *uncore)

diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h

@@ -211,6 +211,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
                 enum forcewake_domains domains);
 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
                 enum forcewake_domains domains);
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+                    enum forcewake_domains domains);
 void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
                   enum forcewake_domains fw_domains);

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -1088,15 +1088,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                       struct vmw_private *dev_priv,
                       struct vmw_fence_obj **p_fence,
                       uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                     struct vmw_fpriv *vmw_fp,
                     int ret,
                     struct drm_vmw_fence_rep __user
                     *user_fence_rep,
                     struct vmw_fence_obj *fence,
                     uint32_t fence_handle,
-                    int32_t out_fence_fd,
-                    struct sync_file *sync_file);
+                    int32_t out_fence_fd);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -3816,17 +3816,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * Also if copying fails, user-space will be unable to signal the fence object
  * so we wait for it immediately, and then unreference the user-space reference.
  */
-void
+int
 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
                 struct vmw_fpriv *vmw_fp, int ret,
                 struct drm_vmw_fence_rep __user *user_fence_rep,
                 struct vmw_fence_obj *fence, uint32_t fence_handle,
-                int32_t out_fence_fd, struct sync_file *sync_file)
+                int32_t out_fence_fd)
 {
     struct drm_vmw_fence_rep fence_rep;
 
     if (user_fence_rep == NULL)
-        return;
+        return 0;
 
     memset(&fence_rep, 0, sizeof(fence_rep));
@@ -3854,20 +3854,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
      * handle.
      */
     if (unlikely(ret != 0) && (fence_rep.error == 0)) {
-        if (sync_file)
-            fput(sync_file->file);
-
-        if (fence_rep.fd != -1) {
-            put_unused_fd(fence_rep.fd);
-            fence_rep.fd = -1;
-        }
-
         ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
                       TTM_REF_USAGE);
         VMW_DEBUG_USER("Fence copy error. Syncing.\n");
         (void) vmw_fence_obj_wait(fence, false, false,
                       VMW_FENCE_WAIT_TIMEOUT);
     }
+
+    return ret ? -EFAULT : 0;
 }
 
 /**
@@ -4209,16 +4203,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 
             (void) vmw_fence_obj_wait(fence, false, false,
                           VMW_FENCE_WAIT_TIMEOUT);
+        }
+    }
+
+    ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+                      user_fence_rep, fence, handle, out_fence_fd);
+
+    if (sync_file) {
+        if (ret) {
+            /* usercopy of fence failed, put the file object */
+            fput(sync_file->file);
+            put_unused_fd(out_fence_fd);
         } else {
             /* Link the fence with the FD created earlier */
             fd_install(out_fence_fd, sync_file->file);
         }
     }
 
-    vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
-                    user_fence_rep, fence, handle, out_fence_fd,
-                    sync_file);
-
     /* Don't unreference when handing fence out */
     if (unlikely(out_fence != NULL)) {
         *out_fence = fence;
@@ -4236,7 +4237,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
      */
     vmw_validation_unref_lists(&val_ctx);
 
-    return 0;
+    return ret;
 
 out_unlock_binding:
     mutex_unlock(&dev_priv->binding_mutex);
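
The execbuf fix restores the usual fd publication order: reserve a descriptor number, attempt everything that can fail (here the usercopy of the fence rep), and only then fd_install() the file, since an installed fd is visible to userspace and cannot be revoked. A distilled kernel-style sketch of that ordering (a generic illustration, not the vmwgfx code itself):

    /* Kernel-style sketch: reserve, then copy, then publish. */
    int publish_fd(struct file *filp, int __user *ufd)
    {
        int fd = get_unused_fd_flags(O_CLOEXEC);   /* reserve a number only */

        if (fd < 0)
            return fd;

        if (copy_to_user(ufd, &fd, sizeof(fd))) {
            put_unused_fd(fd);   /* fd was never visible; safe to take back */
            fput(filp);
            return -EFAULT;
        }

        fd_install(fd, filp);    /* point of no return: fd is now live */
        return 0;
    }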

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

@@ -1171,7 +1171,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
     }
 
     vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
-                    handle, -1, NULL);
+                    handle, -1);
     vmw_fence_obj_unreference(&fence);
     return 0;
 out_no_create:

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

@@ -2479,7 +2479,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
     if (file_priv)
         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                         ret, user_fence_rep, fence,
-                        handle, -1, NULL);
+                        handle, -1);
     if (out_fence)
         *out_fence = fence;
     else

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h

@@ -1850,6 +1850,14 @@ struct bnx2x {
 
     /* Vxlan/Geneve related information */
     u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI    BIT(0)
+    u32 fw_cap;
+
+    u32 fw_major;
+    u32 fw_minor;
+    u32 fw_rev;
+    u32 fw_eng;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2526,5 +2534,6 @@ void bnx2x_register_phc(struct bnx2x *bp);
  * Meant for implicit re-load flows.
  */
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-
+int bnx2x_init_firmware(struct bnx2x *bp);
+void bnx2x_release_firmware(struct bnx2x *bp);
 #endif /* bnx2x.h */

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c

@@ -2364,10 +2364,8 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
     if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
         load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
         /* build my FW version dword */
-        u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
-            (BCM_5710_FW_MINOR_VERSION << 8) +
-            (BCM_5710_FW_REVISION_VERSION << 16) +
-            (BCM_5710_FW_ENGINEERING_VERSION << 24);
+        u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) +
+                (bp->fw_rev << 16) + (bp->fw_eng << 24);
 
         /* read loaded FW from chip */
         u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
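
The version dword packs the four version bytes little-end first: major in bits 7:0, minor in 15:8, revision in 23:16, engineering in 31:24, so firmware 7.13.21.0 packs to 0x00150d07. A quick standalone check (hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fw_ver_dword(uint32_t major, uint32_t minor,
                                 uint32_t rev, uint32_t eng)
    {
        return major + (minor << 8) + (rev << 16) + (eng << 24);
    }

    int main(void)
    {
        printf("%#010x\n", fw_ver_dword(7, 13, 21, 0));   /* 0x00150d07 */
        printf("%#010x\n", fw_ver_dword(7, 13, 15, 0));   /* 0x000f0d07 (old 7.13.15.0) */
        return 0;
    }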

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h

@@ -241,6 +241,8 @@
     IRO[221].m2))
 #define XSTORM_VF_TO_PF_OFFSET(funcId) \
     (IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+    (IRO[386].base + ((fid) * IRO[386].m1))
 #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
 
 /* eth hsi version */

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h

@@ -3024,7 +3024,8 @@ struct afex_stats {
 
 #define BCM_5710_FW_MAJOR_VERSION        7
 #define BCM_5710_FW_MINOR_VERSION        13
-#define BCM_5710_FW_REVISION_VERSION     15
+#define BCM_5710_FW_REVISION_VERSION     21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
 #define BCM_5710_FW_ENGINEERING_VERSION  0
 #define BCM_5710_FW_COMPILE_FLAGS        1
 

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

@@ -74,9 +74,19 @@
     __stringify(BCM_5710_FW_MINOR_VERSION) "." \
     __stringify(BCM_5710_FW_REVISION_VERSION) "." \
     __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+    __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+    __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+    __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+    __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
 #define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
 #define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
 #define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15     "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15    "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15     "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
 
 /* Time in jiffies before concluding the transmitter is hung */
 #define TX_TIMEOUT        (5*HZ)
@@ -747,9 +757,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
           CHIP_IS_E1(bp) ? "everest1" :
           CHIP_IS_E1H(bp) ? "everest1h" :
           CHIP_IS_E2(bp) ? "everest2" : "everest3",
-          BCM_5710_FW_MAJOR_VERSION,
-          BCM_5710_FW_MINOR_VERSION,
-          BCM_5710_FW_REVISION_VERSION);
+          bp->fw_major, bp->fw_minor, bp->fw_rev);
 
     return rc;
 }
@@ -12355,6 +12363,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
     bnx2x_read_fwinfo(bp);
 
+    if (IS_PF(bp)) {
+        rc = bnx2x_init_firmware(bp);
+
+        if (rc) {
+            bnx2x_free_mem_bp(bp);
+            return rc;
+        }
+    }
+
     func = BP_FUNC(bp);
 
     /* need to reset chip if undi was active */
@@ -12367,6 +12384,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
         rc = bnx2x_prev_unload(bp);
         if (rc) {
+            bnx2x_release_firmware(bp);
             bnx2x_free_mem_bp(bp);
             return rc;
         }
@@ -13366,16 +13384,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
     /* Check FW version */
     offset = be32_to_cpu(fw_hdr->fw_version.offset);
     fw_ver = firmware->data + offset;
-    if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
-        (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
-        (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
-        (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+    if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+        fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
         BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
-              fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
-              BCM_5710_FW_MAJOR_VERSION,
-              BCM_5710_FW_MINOR_VERSION,
-              BCM_5710_FW_REVISION_VERSION,
-              BCM_5710_FW_ENGINEERING_VERSION);
+              fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+              bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
         return -EINVAL;
     }
@@ -13453,34 +13466,51 @@ do { \
          (u8 *)bp->arr, len); \
 } while (0)
 
-static int bnx2x_init_firmware(struct bnx2x *bp)
+int bnx2x_init_firmware(struct bnx2x *bp)
 {
-    const char *fw_file_name;
+    const char *fw_file_name, *fw_file_name_v15;
     struct bnx2x_fw_file_hdr *fw_hdr;
     int rc;
 
     if (bp->firmware)
         return 0;
 
-    if (CHIP_IS_E1(bp))
+    if (CHIP_IS_E1(bp)) {
         fw_file_name = FW_FILE_NAME_E1;
-    else if (CHIP_IS_E1H(bp))
+        fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+    } else if (CHIP_IS_E1H(bp)) {
         fw_file_name = FW_FILE_NAME_E1H;
-    else if (!CHIP_IS_E1x(bp))
+        fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+    } else if (!CHIP_IS_E1x(bp)) {
         fw_file_name = FW_FILE_NAME_E2;
-    else {
+        fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+    } else {
         BNX2X_ERR("Unsupported chip revision\n");
         return -EINVAL;
     }
+
     BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
 
     rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
     if (rc) {
-        BNX2X_ERR("Can't load firmware file %s\n",
-              fw_file_name);
-        goto request_firmware_exit;
+        BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+        /* try to load prev version */
+        rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+        if (rc)
+            goto request_firmware_exit;
+
+        bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+    } else {
+        bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+        bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
     }
 
+    bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+    bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+    bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
     rc = bnx2x_check_firmware(bp);
     if (rc) {
         BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13536,7 +13566,7 @@ static int bnx2x_init_firmware(struct bnx2x *bp)
     return rc;
 }
 
-static void bnx2x_release_firmware(struct bnx2x *bp)
+void bnx2x_release_firmware(struct bnx2x *bp)
 {
     kfree(bp->init_ops_offsets);
     kfree(bp->init_ops);
@@ -14053,6 +14083,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
     return 0;
 
 init_one_freemem:
+    bnx2x_release_firmware(bp);
     bnx2x_free_mem_bp(bp);
 
 init_one_exit:

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c

@@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
 {
+    u16 abs_fid;
+
+    abs_fid = FW_VF_HANDLE(abs_vfid);
+
     /* set the VF-PF association in the FW */
-    storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
-    storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+    storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+    storm_memset_func_en(bp, abs_fid, 1);
+
+    /* Invalidate fp_hsi version for vfs */
+    if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+        REG_WR8(bp, BAR_XSTRORM_INTMEM +
+                XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
 
     /* clear vf errors*/
     bnx2x_vf_semi_clear_err(bp, abs_vfid);

diff --git a/fs/select.c b/fs/select.c

@@ -458,9 +458,11 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
     return max;
 }
 
-#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
-#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
-#define POLLEX_SET (EPOLLPRI)
+#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\
+            EPOLLNVAL)
+#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\
+             EPOLLNVAL)
+#define POLLEX_SET (EPOLLPRI | EPOLLNVAL)
 
 static inline void wait_key_set(poll_table *wait, unsigned long in,
                 unsigned long out, unsigned long bit,
@@ -527,6 +529,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
                 break;
             if (!(bit & all_bits))
                 continue;
+            mask = EPOLLNVAL;
             f = fdget(i);
             if (f.file) {
                 wait_key_set(wait, in, out, bit,
@@ -534,34 +537,34 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
                 mask = vfs_poll(f.file, wait);
 
                 fdput(f);
-                if ((mask & POLLIN_SET) && (in & bit)) {
-                    res_in |= bit;
-                    retval++;
-                    wait->_qproc = NULL;
-                }
-                if ((mask & POLLOUT_SET) && (out & bit)) {
-                    res_out |= bit;
-                    retval++;
-                    wait->_qproc = NULL;
-                }
-                if ((mask & POLLEX_SET) && (ex & bit)) {
-                    res_ex |= bit;
-                    retval++;
-                    wait->_qproc = NULL;
-                }
-                /* got something, stop busy polling */
-                if (retval) {
-                    can_busy_loop = false;
-                    busy_flag = 0;
-
-                /*
-                 * only remember a returned
-                 * POLL_BUSY_LOOP if we asked for it
-                 */
-                } else if (busy_flag & mask)
-                    can_busy_loop = true;
-
             }
+
+            if ((mask & POLLIN_SET) && (in & bit)) {
+                res_in |= bit;
+                retval++;
+                wait->_qproc = NULL;
+            }
+            if ((mask & POLLOUT_SET) && (out & bit)) {
+                res_out |= bit;
+                retval++;
+                wait->_qproc = NULL;
+            }
+            if ((mask & POLLEX_SET) && (ex & bit)) {
+                res_ex |= bit;
+                retval++;
+                wait->_qproc = NULL;
+            }
+            /* got something, stop busy polling */
+            if (retval) {
+                can_busy_loop = false;
+                busy_flag = 0;
+
+            /*
+             * only remember a returned
+             * POLL_BUSY_LOOP if we asked for it
+             */
+            } else if (busy_flag & mask)
+                can_busy_loop = true;
 
         }
         if (res_in)
             *rinp = res_in;
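
Previously a file that vanished between the fd-set scan and fdget(), for example one closed by another thread, contributed mask == 0, so a task select()ing only on that fd could sleep in poll_schedule_timeout() indefinitely; seeding mask with EPOLLNVAL makes a vanished fd count as ready in every set. A userspace sketch of the race (hypothetical reproducer; exact behavior depends on kernel version):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/select.h>
    #include <unistd.h>

    static int pfd[2];

    static void *closer(void *arg)
    {
        (void)arg;
        sleep(1);
        close(pfd[0]);   /* yank the watched fd away mid-select() */
        return NULL;
    }

    int main(void)
    {
        fd_set rfds;
        pthread_t t;

        pipe(pfd);
        pthread_create(&t, NULL, closer, NULL);

        FD_ZERO(&rfds);
        FD_SET(pfd[0], &rfds);
        select(pfd[0] + 1, &rfds, NULL, NULL, NULL);   /* no timeout */
        puts("select() returned");   /* may never print on unpatched kernels */
        pthread_join(t, NULL);
        return 0;
    }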

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c

@@ -1581,10 +1581,11 @@ static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
                           struct rcu_data *rdp)
 {
     rcu_lockdep_assert_cblist_protected(rdp);
-    if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
-        !raw_spin_trylock_rcu_node(rnp))
+    if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp))
         return;
-    WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
+    // The grace period cannot end while we hold the rcu_node lock.
+    if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
+        WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
     raw_spin_unlock_rcu_node(rnp);
 }
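
The re-check is cheap because rcu_seq counters keep the grace-period phase in their low-order bits: rcu_seq_state() is nonzero only while a grace period is in progress, and the first, lockless test can go stale before the lock is acquired. A minimal sketch of the encoding (shift and mask as in kernel/rcu/rcu.h):

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

    static unsigned long rcu_seq_state(unsigned long s)
    {
        return s & RCU_SEQ_STATE_MASK;   /* nonzero: grace period in flight */
    }

    int main(void)
    {
        unsigned long seq = (5UL << RCU_SEQ_CTR_SHIFT) | 1;   /* GP #5, started */

        printf("in progress: %lu\n", rcu_seq_state(seq));     /* 1 */
        seq = 6UL << RCU_SEQ_CTR_SHIFT;                       /* GP #6, idle */
        printf("in progress: %lu\n", rcu_seq_state(seq));     /* 0 */
        return 0;
    }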
}