Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "First RDMA subsystem updates for 5.5-rc. A very small set of fixes,
  most people seem to still be recovering from December!

  Five small driver fixes:

   - Fix error flow with MR allocation in bnxt_re

   - An errata workaround for bnxt_re

   - Misuse of the workqueue API in hfi1

   - Protocol error in hfi1

   - Regression in 5.5 related to the mmap rework with i40iw"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  i40iw: Remove setting of VMA private data and use rdma_user_mmap_io
  IB/hfi1: Adjust flow PSN with the correct resync_psn
  IB/hfi1: Don't cancel unused work item
  RDMA/bnxt_re: Fix Send Work Entry state check while polling completions
  RDMA/bnxt_re: Avoid freeing MR resources if dereg fails
commit 5e7c1b75bd
Author: Linus Torvalds
Date:   2020-01-09 21:03:54 -08:00

 5 files changed, 27 insertions(+), 16 deletions(-)

diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3305,8 +3305,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
         int rc;
 
         rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-        if (rc)
+        if (rc) {
                 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
+                return rc;
+        }
 
         if (mr->pages) {
                 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
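
The fix converts a log-and-continue into a log-and-return: if the firmware
refuses to destroy the MR, the driver must not go on to free the backing
resources. A minimal userspace sketch of that error-flow pattern; struct mr,
fw_free_mr() and the error code are invented stand-ins, not bnxt_re's real
types:

  #include <stdio.h>
  #include <stdlib.h>

  struct mr {
          void *pages;            /* backing page list */
  };

  /* Invented stand-in for the firmware dereg verb. */
  static int fw_free_mr(struct mr *mr)
  {
          (void)mr;
          return 0x5;             /* pretend firmware reported an error */
  }

  static int dereg_mr(struct mr *mr)
  {
          int rc = fw_free_mr(mr);

          if (rc) {
                  fprintf(stderr, "Dereg MR failed: %#x\n", rc);
                  return rc;      /* bail out; do NOT free resources */
          }
          /* Reached only on success: now it is safe to free. */
          free(mr->pages);
          mr->pages = NULL;
          return 0;
  }

  int main(void)
  {
          struct mr mr = { .pages = malloc(64) };

          if (dereg_mr(&mr))
                  printf("MR resources kept intact after failed dereg\n");
          free(mr.pages);         /* demo-only cleanup */
          return 0;
  }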

diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
                 /* Add qp to flush list of the CQ */
                 bnxt_qplib_add_flush_qp(qp);
         } else {
-                if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
-                        /* Before we complete, do WA 9060 */
-                        if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
-                                      cqe_sq_cons)) {
-                                *lib_qp = qp;
-                                goto out;
-                        }
+                /* Before we complete, do WA 9060 */
+                if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+                              cqe_sq_cons)) {
+                        *lib_qp = qp;
+                        goto out;
+                }
+                if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
                         cqe->status = CQ_REQ_STATUS_OK;
                         cqe++;
                         (*budget)--;
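
The bug, in miniature: the WA 9060 errata check was gated behind the
signaled-completion test, so unsignaled work entries never ran the
workaround. A hedged, self-contained sketch of the corrected polling order;
poll_one(), errata_wa() and struct swqe are invented analogues, not the
driver's code:

  #include <stdbool.h>
  #include <stdio.h>

  struct swqe {
          bool signal_comp;       /* analogue of SQ_SEND_FLAGS_SIGNAL_COMP */
  };

  /* Invented stand-in for do_wa9060(); true means "stop polling here". */
  static bool errata_wa(const struct swqe *swq)
  {
          (void)swq;
          return true;            /* pretend the workaround triggers */
  }

  static int poll_one(const struct swqe *swq)
  {
          /* Fixed flow: the workaround runs for EVERY completed entry... */
          if (errata_wa(swq))
                  return -1;      /* analogue of "*lib_qp = qp; goto out;" */
          /* ...and only then do we decide whether to emit a CQE. */
          if (swq->signal_comp)
                  puts("CQE: OK");
          return 0;
  }

  int main(void)
  {
          /* Unsignaled entry: the pre-fix code gated errata_wa() behind
           * signal_comp and would have skipped the workaround entirely. */
          struct swqe unsignaled = { .signal_comp = false };

          return poll_one(&unsignaled) ? 1 : 0;
  }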

diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
@@ -81,6 +81,8 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
 
 void iowait_cancel_work(struct iowait *w)
 {
         cancel_work_sync(&iowait_get_ib_work(w)->iowork);
-        cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+        /* Make sure that the iowork for TID RDMA is used */
+        if (iowait_get_tid_work(w)->iowork.func)
+                cancel_work_sync(&iowait_get_tid_work(w)->iowork);
 }
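
The guard exists because a work_struct that was never passed through
INIT_WORK() has a NULL ->func and must not be handed to cancel_work_sync();
the TID RDMA work item is only initialized on QPs that use it. A userspace
analogue under those assumptions (work_item and cancel_sync() are stand-ins,
not kernel API):

  #include <stdio.h>

  /* Stand-in for struct work_struct: ->func stays NULL until "INIT_WORK". */
  struct work_item {
          void (*func)(struct work_item *);
  };

  static void handler(struct work_item *w) { (void)w; }

  /* Stand-in for cancel_work_sync(); the real helper may only be called
   * on a work item that was actually initialized. */
  static void cancel_sync(struct work_item *w)
  {
          printf("cancel %s\n",
                 w->func ? "initialized work" : "BUG: uninitialized work");
  }

  static void iowait_cancel_sketch(struct work_item *ib, struct work_item *tid)
  {
          cancel_sync(ib);                /* IB work is always set up */
          if (tid->func)                  /* TID RDMA work is optional... */
                  cancel_sync(tid);       /* ...so cancel only if set up */
  }

  int main(void)
  {
          struct work_item ib = { .func = handler };  /* initialized */
          struct work_item tid = { 0 };               /* never INIT_WORK-ed */

          iowait_cancel_sketch(&ib, &tid);
          return 0;
  }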

diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -4633,6 +4633,15 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
                  */
                 fpsn = full_flow_psn(flow, flow->flow_state.spsn);
                 req->r_ack_psn = psn;
+                /*
+                 * If resync_psn points to the last flow PSN for a
+                 * segment and the new segment (likely from a new
+                 * request) starts with a new generation number, we
+                 * need to adjust resync_psn accordingly.
+                 */
+                if (flow->flow_state.generation !=
+                    (resync_psn >> HFI1_KDETH_BTH_SEQ_SHIFT))
+                        resync_psn = mask_psn(fpsn - 1);
                 flow->resync_npkts +=
                         delta_psn(mask_psn(resync_psn + 1), fpsn);
                 /*
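
A worked example of the adjustment, assuming the driver's
HFI1_KDETH_BTH_SEQ_SHIFT of 11 (the low 11 bits of a flow PSN are the
sequence and the bits above are the generation); the PSN mask width here is
illustrative, and plain subtraction stands in for delta_psn():

  #include <stdint.h>
  #include <stdio.h>

  #define SEQ_SHIFT       11              /* HFI1_KDETH_BTH_SEQ_SHIFT */
  #define PSN_MASK        0x7fffffffu     /* illustrative width */
  #define mask_psn(x)     ((x) & PSN_MASK)

  int main(void)
  {
          /* New segment: generation 8, first flow PSN (fpsn) at sequence 0. */
          uint32_t generation = 8;
          uint32_t fpsn = generation << SEQ_SHIFT;

          /* Stale resync_psn still carries the previous generation (7). */
          uint32_t resync_psn = (7u << SEQ_SHIFT) | 0x3ff;

          /* Without the fix the packet-count delta wraps to a huge value. */
          uint32_t bad = mask_psn(resync_psn + 1) - fpsn;

          if (generation != (resync_psn >> SEQ_SHIFT))
                  resync_psn = mask_psn(fpsn - 1);    /* re-anchor */

          uint32_t good = mask_psn(resync_psn + 1) - fpsn;    /* 0 */

          printf("npkts delta: unadjusted %u, adjusted %u\n", bad, good);
          return 0;
  }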

diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -169,8 +169,7 @@ static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 {
         struct i40iw_ucontext *ucontext;
-        u64 db_addr_offset;
-        u64 push_offset;
+        u64 db_addr_offset, push_offset, pfn;
 
         ucontext = to_ucontext(context);
         if (ucontext->iwdev->sc_dev.is_pf) {
@@ -189,7 +188,6 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
         if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-                vma->vm_private_data = ucontext;
         } else {
                 if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
                         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -197,12 +195,12 @@ static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
                         vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
         }
 
-        if (io_remap_pfn_range(vma, vma->vm_start,
-                               vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
-                               PAGE_SIZE, vma->vm_page_prot))
-                return -EAGAIN;
+        pfn = vma->vm_pgoff +
+              (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
+               PAGE_SHIFT);
 
-        return 0;
+        return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+                                 vma->vm_page_prot, NULL);
 }
 
 /**
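
rdma_user_mmap_io() performs the same io_remap_pfn_range() internally, but
lets the RDMA core track the mapping and zap it when the device is
disassociated, which the removed vm_private_data bookkeeping could not do.
The pfn arithmetic itself is unchanged; a small standalone demo of it, with
an invented BAR 0 base address:

  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT 12

  int main(void)
  {
          /* Invented BAR 0 physical base and a user offset of 3 pages. */
          uint64_t bar0_start = 0xf0000000ull;    /* pci_resource_start() */
          uint64_t vm_pgoff   = 3;                /* vma->vm_pgoff */

          /* Same arithmetic as the hunk: page offset into BAR 0 -> pfn. */
          uint64_t pfn = vm_pgoff + (bar0_start >> PAGE_SHIFT);

          printf("pfn = 0x%llx -> phys 0x%llx\n",
                 (unsigned long long)pfn,
                 (unsigned long long)(pfn << PAGE_SHIFT));
          return 0;
  }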