RDMA/rxe: Split rxe_run_task() into two subroutines
[ Upstream commit dccb23f6c312e4480fe32ccbc2afac1a5cac7e5e ]

Split rxe_run_task(task, sched) into rxe_run_task(task) and
rxe_sched_task(task).

Link: https://lore.kernel.org/r/20221021200118.2163-5-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Stable-dep-of: 5d122db2ff80 ("RDMA/rxe: Fix incomplete state save in rxe_requester")
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 59a4f61fec
parent 0ad56bf59d
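In short: the old helper took a "sched" flag that chose between running the
task inline and scheduling its tasklet; after this patch each behaviour has
its own entry point. A sketch of the API before and after the split, matching
the rxe_task.c/rxe_task.h hunks below:

	/* Before: one helper, behaviour selected by the 'sched' argument. */
	void rxe_run_task(struct rxe_task *task, int sched);

	/* After: two helpers, each with a fixed behaviour. */
	void rxe_run_task(struct rxe_task *task);	/* run inline via rxe_do_task() */
	void rxe_sched_task(struct rxe_task *task);	/* defer via tasklet_schedule() */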
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -118,7 +118,7 @@ void retransmit_timer(struct timer_list *t)
 
 	if (qp->valid) {
 		qp->comp.timeout = 1;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 }
 
@@ -132,7 +132,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	if (must_sched != 0)
 		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
 
-	rxe_run_task(&qp->comp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->comp.task);
+	else
+		rxe_run_task(&qp->comp.task);
 }
 
 static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -305,7 +308,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
 			qp->comp.psn = pkt->psn;
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 0);
+				rxe_run_task(&qp->req.task);
 			}
 		}
 		return COMPST_ERROR_RETRY;
@@ -452,7 +455,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 */
 	if (qp->req.wait_fence) {
 		qp->req.wait_fence = 0;
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 	}
 }
 
@@ -466,7 +469,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 		if (qp->req.need_rd_atomic) {
 			qp->comp.timeout_retry = 0;
 			qp->req.need_rd_atomic = 0;
-			rxe_run_task(&qp->req.task, 0);
+			rxe_run_task(&qp->req.task);
 		}
 	}
 
@@ -512,7 +515,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
 
 		if (qp->req.wait_psn) {
 			qp->req.wait_psn = 0;
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
 
@@ -646,7 +649,7 @@ int rxe_completer(void *arg)
 
 			if (qp->req.wait_psn) {
 				qp->req.wait_psn = 0;
-				rxe_run_task(&qp->req.task, 1);
+				rxe_sched_task(&qp->req.task);
 			}
 
 			state = COMPST_DONE;
@@ -714,7 +717,7 @@ int rxe_completer(void *arg)
 						RXE_CNT_COMP_RETRY);
 				qp->req.need_retry = 1;
 				qp->comp.started_retry = 1;
-				rxe_run_task(&qp->req.task, 0);
+				rxe_run_task(&qp->req.task);
 			}
 			goto done;
 
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -348,7 +348,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
 
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
-		rxe_run_task(&qp->req.task, 1);
+		rxe_sched_task(&qp->req.task);
 
 	rxe_put(qp);
 }
@@ -435,7 +435,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	if ((qp_type(qp) != IB_QPT_RC) &&
 	    (pkt->mask & RXE_END_MASK)) {
 		pkt->wqe->state = wqe_state_done;
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	}
 
 	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -539,10 +539,10 @@ static void rxe_qp_drain(struct rxe_qp *qp)
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
 			if (qp_type(qp) == IB_QPT_RC)
-				rxe_run_task(&qp->comp.task, 1);
+				rxe_sched_task(&qp->comp.task);
 			else
 				__rxe_do_task(&qp->comp.task);
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 		}
 	}
 }
@@ -556,13 +556,13 @@ void rxe_qp_error(struct rxe_qp *qp)
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
-	rxe_run_task(&qp->resp.task, 1);
+	rxe_sched_task(&qp->resp.task);
 
 	if (qp_type(qp) == IB_QPT_RC)
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 	else
 		__rxe_do_task(&qp->comp.task);
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 /* called by the modify qp verb */
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -105,7 +105,7 @@ void rnr_nak_timer(struct timer_list *t)
 	/* request a send queue retry */
 	qp->req.need_retry = 1;
 	qp->req.wait_for_rnr_timer = 0;
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 }
 
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
@@ -608,7 +608,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	 * which can lead to a deadlock. So go ahead and complete
 	 * it now.
 	 */
-	rxe_run_task(&qp->comp.task, 1);
+	rxe_sched_task(&qp->comp.task);
 
 	return 0;
 }
@@ -733,7 +733,7 @@ int rxe_requester(void *arg)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			rxe_run_task(&qp->comp.task, 0);
+			rxe_run_task(&qp->comp.task);
 			goto done;
 		}
 		payload = mtu;
@@ -795,7 +795,7 @@ int rxe_requester(void *arg)
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (err == -EAGAIN) {
-			rxe_run_task(&qp->req.task, 1);
+			rxe_sched_task(&qp->req.task);
 			goto exit;
 		}
 
@@ -817,7 +817,7 @@ int rxe_requester(void *arg)
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
 	wqe->state = wqe_state_error;
 	qp->req.state = QP_STATE_ERROR;
-	rxe_run_task(&qp->comp.task, 0);
+	rxe_run_task(&qp->comp.task);
 exit:
 	ret = -EAGAIN;
 out:
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -91,7 +91,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
 			(skb_queue_len(&qp->req_pkts) > 1);
 
-	rxe_run_task(&qp->resp.task, must_sched);
+	if (must_sched)
+		rxe_sched_task(&qp->resp.task);
+	else
+		rxe_run_task(&qp->resp.task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -127,15 +127,20 @@ void rxe_cleanup_task(struct rxe_task *task)
 	tasklet_kill(&task->tasklet);
 }
 
-void rxe_run_task(struct rxe_task *task, int sched)
+void rxe_run_task(struct rxe_task *task)
 {
 	if (task->destroyed)
 		return;
 
-	if (sched)
-		tasklet_schedule(&task->tasklet);
-	else
-		rxe_do_task(&task->tasklet);
+	rxe_do_task(&task->tasklet);
+}
+
+void rxe_sched_task(struct rxe_task *task)
+{
+	if (task->destroyed)
+		return;
+
+	tasklet_schedule(&task->tasklet);
 }
 
 void rxe_disable_task(struct rxe_task *task)
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -52,10 +52,9 @@ int __rxe_do_task(struct rxe_task *task);
  */
 void rxe_do_task(struct tasklet_struct *t);
 
-/* run a task, else schedule it to run as a tasklet, The decision
- * to run or schedule tasklet is based on the parameter sched.
- */
-void rxe_run_task(struct rxe_task *task, int sched);
+void rxe_run_task(struct rxe_task *task);
+
+void rxe_sched_task(struct rxe_task *task);
 
 /* keep a task from scheduling */
 void rxe_disable_task(struct rxe_task *task);
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -678,9 +678,9 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
 		wr = next;
 	}
 
-	rxe_run_task(&qp->req.task, 1);
+	rxe_sched_task(&qp->req.task);
 	if (unlikely(qp->req.state == QP_STATE_ERROR))
-		rxe_run_task(&qp->comp.task, 1);
+		rxe_sched_task(&qp->comp.task);
 
 	return err;
 }
@@ -702,7 +702,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->req.task, 0);
+		rxe_run_task(&qp->req.task);
 		return 0;
 	} else
 		return rxe_post_send_kernel(qp, wr, bad_wr);
@@ -740,7 +740,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&rq->producer_lock, flags);
 
 	if (qp->resp.state == QP_STATE_ERROR)
-		rxe_run_task(&qp->resp.task, 1);
+		rxe_sched_task(&qp->resp.task);
 
 	return err;
 }
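The call-site conversion above is mechanical: callers that passed sched = 0
keep using rxe_run_task(), callers that passed sched = 1 switch to
rxe_sched_task(), and the two paths with a runtime must_sched value
(rxe_comp_queue_pkt() and rxe_resp_queue_pkt()) open-code the choice, as
condensed here from the rxe_comp.c hunk:

	/* Decide at run time whether completion work is deferred to the tasklet. */
	if (must_sched)
		rxe_sched_task(&qp->comp.task);
	else
		rxe_run_task(&qp->comp.task);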