scsi: fnic: Use scsi_host_busy_iter() to traverse commands
Use scsi_host_busy_iter() to traverse commands instead of hand-crafted routines walking the command list.

Link: https://lore.kernel.org/r/20210429122517.39659-3-hare@suse.de
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 35ffbb60bd (parent 3ba1eeff00)
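For context, the conversion pattern used throughout this patch is: bundle the per-walk state into a small iter_data structure, move the per-command loop body into a bool callback, and let scsi_host_busy_iter() drive the walk over all busy commands of the Scsi_Host (returning false from the callback stops the iteration early). Below is a minimal sketch of that shape; the example_iter_data structure and example_* functions are hypothetical illustrations, not part of the fnic driver.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-walk state, analogous to fnic_rport_abort_io_iter_data. */
struct example_iter_data {
	struct scsi_device *sdev;	/* only count commands on this device */
	int count;
};

/* Callback invoked once per busy command; return false to stop early. */
static bool example_count_iter(struct scsi_cmnd *sc, void *data, bool reserved)
{
	struct example_iter_data *iter_data = data;

	if (reserved)
		return true;		/* skip reserved commands */
	if (iter_data->sdev && sc->device != iter_data->sdev)
		return true;		/* not ours, keep iterating */

	iter_data->count++;
	return true;
}

/* Drive the walk over all busy commands of a host. */
static int example_count_busy(struct Scsi_Host *shost, struct scsi_device *sdev)
{
	struct example_iter_data iter_data = {
		.sdev = sdev,
		.count = 0,
	};

	scsi_host_busy_iter(shost, example_count_iter, &iter_data);
	return iter_data.count;
}

The diff below applies exactly this conversion to fnic_cleanup_io(), fnic_rport_exch_reset(), fnic_clean_pending_aborts() and fnic_is_abts_pending().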
@@ -1361,90 +1361,90 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
return wq_work_done;
}

static void fnic_cleanup_io(struct fnic *fnic)
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
bool reserved)
{
int i;
struct fnic *fnic = data;
struct fnic_io_req *io_req;
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;

for (i = 0; i < fnic->fnic_max_tag_id; i++) {
io_lock = fnic_io_lock_tag(fnic, i);
spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, i);
if (!sc) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

io_req = (struct fnic_io_req *)CMD_SP(sc);
if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
*/
CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
if (io_req && io_req->dr_done)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
continue;
} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

CMD_SP(sc) = NULL;

spin_unlock_irqrestore(io_lock, flags);
io_lock = fnic_io_lock_tag(fnic, sc->request->tag);
spin_lock_irqsave(io_lock, flags);

io_req = (struct fnic_io_req *)CMD_SP(sc);
if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
/*
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
*/
start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);

sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
__func__, sc->request->tag, sc,
(jiffies - start_time));

if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);

/* Complete the command to SCSI */
if (sc->scsi_done) {
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
sc->request->tag, sc);

FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, i, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

sc->scsi_done(sc);
}
CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
if (io_req && io_req->dr_done)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
return true;
} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
}

CMD_SP(sc) = NULL;

spin_unlock_irqrestore(io_lock, flags);

/*
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
sc->request->tag, sc, (jiffies - start_time));

if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);

/* Complete the command to SCSI */
if (sc->scsi_done) {
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
sc->request->tag, sc);

FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, sc->request->tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
(((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

sc->scsi_done(sc);
}
return true;
}

static void fnic_cleanup_io(struct fnic *fnic)
{
scsi_host_busy_iter(fnic->lport->host,
fnic_cleanup_io_iter, fnic);
}

void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
@@ -1555,20 +1555,121 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
return 0;
}

static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
struct fnic_rport_abort_io_iter_data {
struct fnic *fnic;
u32 port_id;
int term_cnt;
};

static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
bool reserved)
{
int tag;
int abt_tag;
int term_cnt = 0;
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
int abt_tag = sc->request->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_cmnd *sc;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;

io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);

io_req = (struct fnic_io_req *)CMD_SP(sc);

if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}

if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
sc);
spin_unlock_irqrestore(io_lock, flags);
return true;
}

/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if (io_req->abts_done) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_rport_exch_reset: io_req->abts_done is set "
"state is %s\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
}

if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
shost_printk(KERN_ERR, fnic->lport->host,
"rport_exch_reset "
"IO not yet issued %p tag 0x%x flags "
"%x state %d\n",
sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc));
}
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
}
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
BUG_ON(io_req->abts_done);

FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_reset_exch: Issuing abts\n");

spin_unlock_irqrestore(io_lock, flags);

/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);

if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
* Revert the cmd state back to old state, if
* it hasn't changed in between. This cmd will get
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
} else {
spin_lock_irqsave(io_lock, flags);
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
return true;
}

static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct fnic_rport_abort_io_iter_data iter_data = {
.fnic = fnic,
.port_id = port_id,
.term_cnt = 0,
};

FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"fnic_rport_exch_reset called portid 0x%06x\n",
@@ -1577,121 +1678,18 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
if (fnic->in_remove)
return;

for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

io_req = (struct fnic_io_req *)CMD_SP(sc);

if (!io_req || io_req->port_id != port_id) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
sc);
spin_unlock_irqrestore(io_lock, flags);
continue;
}

/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
if (io_req->abts_done) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_rport_exch_reset: io_req->abts_done is set "
"state is %s\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
}

if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
shost_printk(KERN_ERR, fnic->lport->host,
"rport_exch_reset "
"IO not yet issued %p tag 0x%x flags "
"%x state %d\n",
sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
}
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n",
sc);
}

BUG_ON(io_req->abts_done);

FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_reset_exch: Issuing abts\n");

spin_unlock_irqrestore(io_lock, flags);

/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);

if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
* Revert the cmd state back to old state, if
* it hasn't changed in between. This cmd will get
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
} else {
spin_lock_irqsave(io_lock, flags);
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
term_cnt++;
}
}
if (term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, term_cnt);
scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
&iter_data);
if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);

}

void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
int abt_tag;
int term_cnt = 0;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_cmnd *sc;
struct scsi_lun fc_lun;
struct fc_rport_libfc_priv *rdata;
struct fc_lport *lport;
struct fnic *fnic;
struct fc_rport *cmd_rport;
struct reset_stats *reset_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;

if (!rport) {
printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
@@ -1719,108 +1717,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
if (fnic->in_remove)
return;

reset_stats = &fnic->fnic_stats.reset_stats;
term_stats = &fnic->fnic_stats.term_stats;

for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
abt_tag = tag;
io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

cmd_rport = starget_to_rport(scsi_target(sc->device));
if (rport != cmd_rport) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
sc);
spin_unlock_irqrestore(io_lock, flags);
continue;
}
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
if (io_req->abts_done) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_terminate_rport_io: io_req->abts_done is set "
"state is %s\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
}
if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"fnic_terminate_rport_io "
"IO not yet issued %p tag 0x%x flags "
"%x state %d\n",
sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
}
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag = (tag | FNIC_TAG_DEV_RST);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
}

BUG_ON(io_req->abts_done);

FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"fnic_terminate_rport_io: Issuing abts\n");

spin_unlock_irqrestore(io_lock, flags);

/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);

if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
* Revert the cmd state back to old state, if
* it hasn't changed in between. This cmd will get
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
} else {
spin_lock_irqsave(io_lock, flags);
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
else
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
term_cnt++;
}
}
if (term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, term_cnt);

fnic_rport_exch_reset(fnic, rport->port_id);
}

/*
@@ -2115,6 +2012,156 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
return ret;
}

struct fnic_pending_aborts_iter_data {
struct fnic *fnic;
struct scsi_cmnd *lr_sc;
struct scsi_device *lun_dev;
int ret;
};

static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
void *data, bool reserved)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
struct scsi_device *lun_dev = iter_data->lun_dev;
int abt_tag = sc->request->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_lun fc_lun;
DECLARE_COMPLETION_ONSTACK(tm_done);
enum fnic_ioreq_state old_ioreq_state;

if (sc == iter_data->lr_sc || sc->device != lun_dev)
return true;
if (reserved)
return true;

io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}

/*
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));

if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s dev rst not pending sc 0x%p\n", __func__,
sc);
spin_unlock_irqrestore(io_lock, flags);
return true;
}

if (io_req->abts_done)
shost_printk(KERN_ERR, fnic->lport->host,
"%s: io_req->abts_done is set state is %s\n",
__func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
old_ioreq_state = CMD_STATE(sc);
/*
* Any pending IO issued prior to reset is expected to be
* in abts pending state, if not we need to set
* FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
* When IO is completed, the IO will be handed over and
* handled in this function.
*/
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

BUG_ON(io_req->abts_done);

if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
abt_tag |= FNIC_TAG_DEV_RST;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s: dev rst sc 0x%p\n", __func__, sc);
}

CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);

/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);

if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
iter_data->ret = FAILED;
return false;
} else {
spin_lock_irqsave(io_lock, flags);
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
}
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

wait_for_completion_timeout(&tm_done, msecs_to_jiffies
(fnic->config.ed_tov));

/* Recheck cmd state to check if it is now aborted */
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
return true;
}

io_req->abts_done = NULL;

/* if abort is still pending with fw, fail */
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
iter_data->ret = FAILED;
return false;
}
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc)
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);

/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc) {
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}

/*
* Any IO is returned during reset, it needs to call scsi_done
* to return the scsi_cmnd to upper layer.
*/
if (sc->scsi_done) {
/* Set result to let upper SCSI layer retry */
sc->result = DID_RESET << 16;
sc->scsi_done(sc);
}
return true;
}

/*
* Clean up any pending aborts on the lun
* For each outstanding IO on this lun, whose abort is not completed by fw,
@@ -2123,157 +2170,25 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
*/
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc,
bool new_sc)
bool new_sc)

{
int tag, abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
int ret = 0;
struct scsi_cmnd *sc;
struct scsi_lun fc_lun;
struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
enum fnic_ioreq_state old_ioreq_state;
int ret = SUCCESS;
struct fnic_pending_aborts_iter_data iter_data = {
.fnic = fnic,
.lun_dev = lr_sc->device,
.ret = SUCCESS,
};

for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
* ignore this lun reset cmd if issued using new SC
* or cmds that do not belong to this lun
*/
if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
if (new_sc)
iter_data.lr_sc = lr_sc;

io_req = (struct fnic_io_req *)CMD_SP(sc);

if (!io_req || sc->device != lun_dev) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

/*
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));

if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s dev rst not pending sc 0x%p\n", __func__,
sc);
spin_unlock_irqrestore(io_lock, flags);
continue;
}

if (io_req->abts_done)
shost_printk(KERN_ERR, fnic->lport->host,
"%s: io_req->abts_done is set state is %s\n",
__func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
old_ioreq_state = CMD_STATE(sc);
/*
* Any pending IO issued prior to reset is expected to be
* in abts pending state, if not we need to set
* FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
* When IO is completed, the IO will be handed over and
* handled in this function.
*/
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

BUG_ON(io_req->abts_done);

abt_tag = tag;
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
abt_tag |= FNIC_TAG_DEV_RST;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s: dev rst sc 0x%p\n", __func__, sc);
}

CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);

/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);

if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
ret = 1;
goto clean_pending_aborts_end;
} else {
spin_lock_irqsave(io_lock, flags);
if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
}
CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
(fnic->config.ed_tov));

/* Recheck cmd state to check if it is now aborted */
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
continue;
}

io_req->abts_done = NULL;

/* if abort is still pending with fw, fail */
if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;

/* original sc used for lr is handled by dev reset code */
if (sc != lr_sc)
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);

/* original sc used for lr is handled by dev reset code */
if (sc != lr_sc) {
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}

/*
* Any IO is returned during reset, it needs to call scsi_done
* to return the scsi_cmnd to upper layer.
*/
if (sc->scsi_done) {
/* Set result to let upper SCSI layer retry */
sc->result = DID_RESET << 16;
sc->scsi_done(sc);
}
scsi_host_busy_iter(fnic->lport->host,
fnic_pending_aborts_iter, &iter_data);
if (iter_data.ret == FAILED) {
ret = iter_data.ret;
goto clean_pending_aborts_end;
}

schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

/* walk again to check, if IOs are still pending in fw */
@@ -2772,6 +2687,49 @@ void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)

}

static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
bool reserved)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
int cmd_state;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;

/*
* ignore this lun reset cmd or cmds that do not belong to
* this lun
*/
if (iter_data->lr_sc && sc == iter_data->lr_sc)
return true;
if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
return true;

io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);

io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}

/*
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
cmd_state = CMD_STATE(sc);
spin_unlock_irqrestore(io_lock, flags);
if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
iter_data->ret = 1;

return iter_data->ret ? false : true;
}

/*
* fnic_is_abts_pending() is a helper function that
* walks through tag map to check if there is any IOs pending,if there is one,
@@ -2781,49 +2739,20 @@ void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
*/
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
int tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
int ret = 0;
struct scsi_cmnd *sc;
struct scsi_device *lun_dev = NULL;
struct fnic_pending_aborts_iter_data iter_data = {
.fnic = fnic,
.lun_dev = NULL,
.ret = 0,
};

if (lr_sc)
lun_dev = lr_sc->device;

/* walk again to check, if IOs are still pending in fw */
for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
* ignore this lun reset cmd or cmds that do not belong to
* this lun
*/
if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
continue;

io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);

io_req = (struct fnic_io_req *)CMD_SP(sc);

if (!io_req || sc->device != lun_dev) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}

/*
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));

if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
ret = 1;
spin_unlock_irqrestore(io_lock, flags);
if (lr_sc) {
iter_data.lun_dev = lr_sc->device;
iter_data.lr_sc = lr_sc;
}

return ret;
/* walk again to check, if IOs are still pending in fw */
scsi_host_busy_iter(fnic->lport->host,
fnic_abts_pending_iter, &iter_data);

return iter_data.ret;
}