Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/scsi/NCR5380.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 2
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 4
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 2
-rw-r--r--  drivers/scsi/arm/eesox.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 2
-rw-r--r--  drivers/scsi/atari_scsi.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.c | 15
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 2
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 18
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 21
-rw-r--r--  drivers/scsi/esas2r/esas2r_flash.c | 1
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 3
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 20
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 17
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 6
-rw-r--r--  drivers/scsi/ipr.c | 3
-rw-r--r--  drivers/scsi/ipr.h | 1
-rw-r--r--  drivers/scsi/iscsi_boot_sysfs.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 12
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 8
-rw-r--r--  drivers/scsi/libiscsi.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 88
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 41
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 74
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 205
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 191
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 21
-rw-r--r--  drivers/scsi/mac_scsi.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 3
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 15
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 1
-rw-r--r--  drivers/scsi/mesh.c | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 3
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 27
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 35
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_dsd.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 231
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 68
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 35
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 3
-rw-r--r--  drivers/scsi/scsi_debug.c | 11
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_dh.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 35
-rw-r--r--  drivers/scsi/scsi_pm.c | 9
-rw-r--r--  drivers/scsi/scsi_trace.c | 124
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 37
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 9
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 25
-rw-r--r--  drivers/scsi/sg.c | 8
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 10
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 225
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 2
-rw-r--r--  drivers/scsi/sni_53c710.c | 4
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/scsi/sun3_scsi.c | 4
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 11
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 9
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 15
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c | 10
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 219
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 24
-rw-r--r--  drivers/scsi/zorro_esp.c | 11
96 files changed, 1498 insertions(+), 751 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 61da513fc0ed..9f9671ab83cc 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -885,7 +885,7 @@ config SCSI_SNI_53C710
config 53C700_LE_ON_BE
bool
- depends on SCSI_LASI700
+ depends on SCSI_LASI700 || SCSI_SNI_53C710
default y
config SCSI_STEX
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d9fa9cf2fd8b..184f4eddd0ef 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -945,7 +948,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
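
The hunk above gates disconnect/reselect per target through a module parameter bitmask. A minimal sketch of that pattern, with illustrative names (my_disconnect_mask and my_can_disconnect are not part of the driver):

        #include <linux/module.h>
        #include <linux/bits.h>

        /* All targets may disconnect by default; clearing bit N disables
         * disconnect/reselect for target N. */
        static unsigned int my_disconnect_mask = ~0;
        module_param(my_disconnect_mask, int, 0444);    /* read-only via sysfs */
        MODULE_PARM_DESC(my_disconnect_mask,
                         "bitmask of targets allowed to disconnect");

        static bool my_can_disconnect(int target_id)
        {
                return my_disconnect_mask & BIT(target_id);
        }
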
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index a9d40d3b90ef..4190a025381a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
* At some speeds, we only support
* ST transfers.
*/
- if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+ if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
break;
}
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d12dd89538df..deab66598910 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2911,8 +2911,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
- if (!ashost->base || !ashost->fast)
+ if (!ashost->base || !ashost->fast) {
+ ret = -ENOMEM;
goto out_put;
+ }
host->irq = ec->irq;
ashost->host = host;
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index a1f3e9ee4e63..14e1d001253c 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -450,7 +450,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_release:
fas216_release(host);
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 134f040d58e2..f441ec8eb93d 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -571,7 +571,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_remove:
fas216_remove(host);
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index c795537a671c..2dc0df005cb3 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -378,7 +378,7 @@ static int powertecscsi_probe(struct expansion_card *ec,
if (info->info.scsi.dma != NO_DMA)
free_dma(info->info.scsi.dma);
- free_irq(ec->irq, host);
+ free_irq(ec->irq, info);
out_release:
fas216_release(host);
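
The three arm/ fixes above all correct the same bug: free_irq() must receive the exact dev_id cookie that was passed to request_irq(), otherwise the registration is never matched and the IRQ line stays claimed. A sketch of the pairing, assuming a driver-private struct my_info:

        static irqreturn_t my_irq_handler(int irq, void *dev_id)
        {
                struct my_info *info = dev_id;  /* the cookie comes back here */
                /* ... service the device ... */
                return IRQ_HANDLED;
        }

        /* probe: register with the private data as cookie */
        ret = request_irq(ec->irq, my_irq_handler, 0, "mydrv", info);

        /* error path / remove: same cookie, not the Scsi_Host */
        free_irq(ec->irq, info);
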
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index e809493d0d06..a82b63a66635 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c5fa5f3b00e9..0b28d44d3573 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
- pci_dev_put(hba->pcidev);
if (hba->regview) {
pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
+ pci_dev_put(hba->pcidev);
bnx2i_free_mp_bdt(hba);
bnx2i_release_free_cid_que(hba);
iscsi_host_free(shost);
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 66e58f0a75dc..23cbe4cda760 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/**
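
These csio_lnode.c conversions exist because the FDMI completion callbacks can be invoked with interrupts already disabled; spin_unlock_irq() would re-enable them unconditionally, while the irqsave/irqrestore pair preserves the caller's interrupt state. The shape, in isolation:

        unsigned long flags;

        spin_lock_irqsave(&hw->lock, flags);       /* saves current IRQ state */
        /* ... submit the next FDMI request under the lock ... */
        spin_unlock_irqrestore(&hw->lock, flags);  /* restores, never force-enables */
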
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 469d0bc9f5fe..00cf33573136 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev,
return -EINVAL;
/* Delete NPIV lnodes */
- csio_lnodes_exit(hw, 1);
+ csio_lnodes_exit(hw, 1);
/* Block upper IOs */
csio_lnodes_block_request(hw);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index b8dd9e648dd0..ce7f0364a62a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
struct sk_buff *skb = NULL;
+ int ret;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
@@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_sock;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
if (!skb) {
- cxgb3_free_atid(t3dev, csk->atid);
- cxgbi_sock_put(csk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_atid;
}
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
@@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
+
+free_atid:
+ cxgb3_free_atid(t3dev, csk->atid);
+put_sock:
+ cxgbi_sock_put(csk);
+ l2t_release(t3dev, csk->l2t);
+ csk->l2t = NULL;
+
+ return ret;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
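
init_act_open() is reshaped above into the standard kernel goto-unwind ladder: each acquisition gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A generic sketch of the pattern (acquire_a/release_a and friends are placeholders):

        int my_setup(struct my_ctx *ctx)
        {
                int ret;

                ctx->a = acquire_a();
                if (!ctx->a)
                        return -ENOMEM;         /* nothing to unwind yet */

                ctx->b = acquire_b();
                if (!ctx->b) {
                        ret = -ENOMEM;
                        goto free_a;
                }

                ret = start(ctx);
                if (ret)
                        goto free_b;
                return 0;

        free_b:
                release_b(ctx->b);
        free_a:
                release_a(ctx->a);
                return ret;
        }
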
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 7d43e014bd21..1d3305416075 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
cxgbi_hbas_remove(cdev);
cxgbi_device_portmap_cleanup(cdev);
- cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+ if (cdev->cdev2ppm)
+ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
cxgbi_free_big_mem(cdev->pmap.port_csk);
kfree(cdev);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4971104b1817..f32da0ca529e 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
unsigned long flags;
+ bool transitioning_sense = false;
if (!pg->expiry) {
unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
goto retry;
}
/*
- * Retry on ALUA state transition or if any
- * UNIT ATTENTION occurred.
+ * If the array returns with 'ALUA state transition'
+ * sense code here it cannot return RTPG data during
+ * transition. So set the state to 'transitioning' directly.
*/
if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
- err = SCSI_DH_RETRY;
- else if (sense_hdr.sense_key == UNIT_ATTENTION)
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+ transitioning_sense = true;
+ goto skip_rtpg;
+ }
+ /*
+ * Retry on any other UNIT ATTENTION occurred.
+ */
+ if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
if (err == SCSI_DH_RETRY &&
pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
off = 8 + (desc[7] * 4);
}
+ skip_rtpg:
spin_lock_irqsave(&pg->lock, flags);
+ if (transitioning_sense)
+ pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
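
The ALUA change keys on the standard sense triple NOT READY / ASC 0x04 / ASCQ 0x0a (asymmetric access state transition): an array mid-transition cannot return RTPG data, so the handler now sets the transitioning state directly instead of retrying. Testing a decoded sense header for that triple looks roughly like this sketch:

        struct scsi_sense_hdr sshdr;

        if (scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE, &sshdr) &&
            sshdr.sense_key == NOT_READY &&
            sshdr.asc == 0x04 && sshdr.ascq == 0x0a) {
                /* mid-transition: mark the port group, skip the RTPG parse */
        }
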
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 7bd376d95ed5..b02ac389e6c6 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
return false;
}
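
The esas2r fix adds the missing up() on the early-error return; every path leaving a section entered via down() must release the semaphore. A sketch, with my_read_flash standing in for the real helper:

        if (down_interruptible(&a->nvram_semaphore))
                return false;

        if (!my_read_flash(a)) {
                up(&a->nvram_semaphore);        /* release on error paths too */
                return false;
        }

        /* ... use the NVRAM contents ... */
        up(&a->nvram_semaphore);
        return true;
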
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 80608b53897b..e3f5c91d5e4f 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 78af9cc2009b..522636e94628 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1;
+ u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
- err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = ((u8 *)&a0)[i];
+ mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
@@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
- u64 a0 = 0, a1 = 0;
+ u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
- ((u8 *)&a0)[i] = addr[i];
+ ((u8 *)&a)[i] = addr[i];
- err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
+ err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
pr_err("Can't del addr [%pM], %d\n", addr, err);
}
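
The vnic_dev changes do two things: zero-initialize the u64 argument pair so no stack garbage is handed to firmware, and switch the MAC helpers to a two-element array so the six address bytes are written through one object instead of byte-aliasing a lone u64. The fixed shape, in miniature:

        u64 a[2] = {};          /* zeroed: nothing uninitialized reaches fw */
        int wait = 1000;
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                ((u8 *)&a)[i] = addr[i];        /* bytes land inside a[] */

        err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
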
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5879771d82b2..d8553c855194 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -623,7 +623,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (in_softirq())
+ /*
+ * For IOs from upper layer, it may already disable preempt
+ * in the IO path, if disable preempt again in down(),
+ * function schedule() will report schedule_bug(), so check
+ * preemptible() before goto down().
+ */
+ if (!preemptible())
return -EINVAL;
down(&hisi_hba->sem);
@@ -930,8 +936,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ if (phy->phy_attached)
+ return;
+
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
add_timer(&phy->timer);
}
@@ -1000,12 +1009,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_port *port;
unsigned long flags;
if (!sas_port)
return;
+ port = to_hisi_sas_port(sas_port);
spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
@@ -2704,6 +2714,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
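
The hisi_sas guard changes from in_softirq() to !preemptible() because down() may sleep, and sleeping is forbidden not only in softirq context but whenever preemption is disabled, which the upper-layer I/O path may already have done. The check in isolation:

        if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
                if (!preemptible())             /* down() below may sleep */
                        return -EINVAL;
                down(&hisi_hba->sem);           /* wait out the reset */
        }
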
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0efd55baacd3..78004bc86158 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3106,6 +3106,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 4aea97ee4b24..4744d0c6da1f 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -415,6 +415,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
int rc = 0;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
+ set_adapter_info(hostdata);
+
/* Re-enable the CRQ */
do {
if (rc)
@@ -2281,16 +2283,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
static int ibmvscsi_remove(struct vio_dev *vdev)
{
struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
- unsigned long flags;
srp_remove_host(hostdata->host);
scsi_remove_host(hostdata->host);
purge_requests(hostdata, DID_ERROR);
-
- spin_lock_irqsave(hostdata->host->host_lock, flags);
release_event_pool(&hostdata->pool, hostdata);
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
max_events);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d06bc1a817a1..a3e81f427609 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9944,6 +9944,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->max_devs_supported = ipr_max_devs;
if (ioa_cfg->sis64) {
+ host->max_channel = IPR_MAX_SIS64_BUSES;
host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
@@ -9952,6 +9953,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+ ((sizeof(struct ipr_config_table_entry64)
* ioa_cfg->max_devs_supported)));
} else {
+ host->max_channel = IPR_VSET_BUS;
host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
@@ -9961,7 +9963,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
* ioa_cfg->max_devs_supported)));
}
- host->max_channel = IPR_VSET_BUS;
host->unique_id = host->host_no;
host->max_cmd_len = IPR_MAX_CDB_LEN;
host->can_queue = ioa_cfg->max_cmds;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index a67baeb36d1f..b97aa9ac2ffe 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1300,6 +1300,7 @@ struct ipr_resource_entry {
#define IPR_ARRAY_VIRTUAL_BUS 0x1
#define IPR_VSET_VIRTUAL_BUS 0x2
#define IPR_IOAFP_VIRTUAL_BUS 0x3
+#define IPR_MAX_SIS64_BUSES 0x4
#define IPR_GET_RES_PHYS_LOC(res) \
(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index e4857b728033..a64abe38db2d 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
boot_kobj->kobj.kset = boot_kset->kset;
if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
NULL, name, index)) {
- kfree(boot_kobj);
+ kobject_put(&boot_kobj->kobj);
return NULL;
}
boot_kobj->data = data;
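
The iscsi_boot_sysfs fix follows the kobject rule: once kobject_init_and_add() has run, the object belongs to the refcounting machinery, and even on failure it must be dropped with kobject_put() so the ktype's release() fires; a bare kfree() bypasses release() and leaks the allocated name. The documented error idiom is roughly:

        if (kobject_init_and_add(&obj->kobj, &my_ktype, parent,
                                 "name%d", id)) {
                kobject_put(&obj->kobj);        /* release() frees obj */
                return NULL;
        }
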
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7bedbe877704..b5dd1caae5e9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
@@ -879,6 +887,10 @@ free_host:
static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+ struct iscsi_session *session = cls_session->dd_data;
+
+ if (WARN_ON_ONCE(session->leadconn))
+ return;
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 9c5f7c9178c6..2b865c6423e2 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -628,6 +628,8 @@ redisc:
}
out:
kref_put(&rdata->kref, fc_rport_destroy);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index da6e97d8dc3b..6bb8917b99a1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1208,9 +1208,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PRLI bad response\n");
- else
+ else {
FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
+ if (rjt->er_reason == ELS_RJT_UNAB &&
+ rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
+ fc_rport_enter_plogi(rdata);
+ goto out;
+ }
+ }
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ebd47c0cf9e9..70b99c0e2e67 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
- spin_lock(&session->frwd_lock);
+ spin_lock_bh(&session->frwd_lock);
task = (struct iscsi_task *)sc->SCp.ptr;
if (!task) {
/*
@@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
done:
if (task)
task->last_timeout = jiffies;
- spin_unlock(&session->frwd_lock);
+ spin_unlock_bh(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "shutdown or nh");
return rc;
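
The libiscsi switch to the _bh lock variants reflects that the command-timeout handler runs in process context while frwd_lock is also taken from softirq (network receive) paths; without disabling bottom halves, a softirq landing on the same CPU while the lock is held would deadlock. The rule, in miniature:

        /* process context, lock shared with softirq users: */
        spin_lock_bh(&session->frwd_lock);
        /* ... inspect or modify task state ... */
        spin_unlock_bh(&session->frwd_lock);
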
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 726ada9b8c79..f06792bd6ef6 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -97,12 +97,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
else
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
- } else {
+ } else if (port->oob_mode == SAS_OOB_MODE) {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
dev->dev_type = id->dev_type;
dev->iproto = id->initiator_bits;
dev->tproto = id->target_bits;
+ } else {
+ /* If the oob mode is OOB_NOT_CONNECTED, the port is
+ * disconnected due to race with PHY down. We cannot
+ * continue to discover this port
+ */
+ sas_put_device(dev);
+ pr_warn("Port %016llx is disconnected when discovering\n",
+ SAS_ADDR(port->attached_sas_addr));
+ return -ENODEV;
}
sas_init_dev(dev);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4604e1bc334c..273426dfdd9a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -740,6 +740,7 @@ struct lpfc_hba {
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
+#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1204,6 +1205,15 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+
+ struct hlist_node cpuhp; /* used for cpuhp per hba callback */
+ struct timer_list cpuhp_poll_timer;
+ struct list_head poll_list; /* slowpath eq polling list */
+#define LPFC_POLL_HB 1 /* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
+#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
+
+ char os_host_name[MAXHOSTNAMELEN];
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b0202bc0aa62..ef7f62b68413 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -4492,12 +4492,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
phba->mbox_ext_buf_ctx.seqNum++;
nemb_tp = phba->mbox_ext_buf_ctx.nembType;
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- rc = -ENOMEM;
- goto job_error;
- }
-
pbuf = (uint8_t *)dmabuf->virt;
size = job->request_payload.payload_len;
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -4534,6 +4528,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
"2968 SLI_CONFIG ext-buffer wr all %d "
"ebuffers received\n",
phba->mbox_ext_buf_ctx.numBuf);
+
+ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+ if (!dd_data) {
+ rc = -ENOMEM;
+ goto job_error;
+ }
+
/* mailbox command structure for base driver */
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmboxq) {
@@ -4582,6 +4583,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
return SLI_CONFIG_HANDLED;
job_error:
+ if (pmboxq)
+ mempool_free(pmboxq, phba->mbox_mem_pool);
lpfc_bsg_dma_page_free(phba, dmabuf);
kfree(dd_data);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index e0b14d791b8c..4aa91d0b6291 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -179,7 +179,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_fdmi_change_check(struct lpfc_vport *vport);
void lpfc_delayed_disc_tmo(struct timer_list *);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
@@ -214,6 +214,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 4812bbbf43cc..6bc53d01d49e 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1439,33 +1439,35 @@ int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
- char fwrev[FW_REV_STR_SIZE];
- int n;
+ char fwrev[FW_REV_STR_SIZE] = {0};
+ char tmp[MAXHOSTNAMELEN] = {0};
- lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ memset(symbol, 0, size);
- n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
- if (size < n)
- return n;
+ lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+ scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " DV%s.",
- lpfc_release_version);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
- n += scnprintf(symbol + n, size - n, " HN:%s.",
- init_utsname()->nodename);
- if (size < n)
- return n;
+ scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+ if (strlcat(symbol, tmp, size) >= size)
+ goto buffer_done;
/* Note :- OS name is "Linux" */
- n += scnprintf(symbol + n, size - n, " OS:%s",
- init_utsname()->sysname);
- return n;
+ scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
+ strlcat(symbol, tmp, size);
+
+buffer_done:
+ return strnlen(symbol, size);
+
}
static uint32_t
@@ -1938,14 +1940,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
* @vport: pointer to a host virtual N_Port data structure.
*
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register thar port Attribute.
+ * Check how many mapped NPorts we are connected to
+ * Check if our hostname changed
+ * Called from hbeat timeout routine to check if any FDMI parameters
+ * changed. If so, re-register those Attributes.
*/
void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
@@ -1958,17 +1962,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
if (!(vport->fc_flag & FC_FABRIC))
return;
+ ndlp = lpfc_findnode_did(vport, FDMI_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ return;
+
+ /* Check if system hostname changed */
+ if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
+ lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+ /* Since this effects multiple HBA and PORT attributes, we need
+ * de-register and go thru the whole FDMI registration cycle.
+ * DHBA -> DPRT -> RHBA -> RPA (physical port)
+ * DPRT -> RPRT (vports)
+ */
+ if (vport->port_type == LPFC_PHYSICAL_PORT)
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+ else
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+ /* Since this code path registers all the port attributes
+ * we can just return without further checking.
+ */
+ return;
+ }
+
if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
return;
+ /* Check if the number of mapped NPorts changed */
cnt = lpfc_find_map_node(vport);
if (cnt == vport->fdmi_num_disc)
return;
- ndlp = lpfc_findnode_did(vport, FDMI_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
- return;
-
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
@@ -2551,8 +2579,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- init_utsname()->nodename);
+ scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+ vport->phba->os_host_name);
len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
len += (len & 3) ? (4 - (len & 3)) : 4;
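
The rewritten symbolic-name builder above formats each fragment into a scratch buffer with scnprintf() and appends it with strlcat(), bailing out on truncation: strlcat() returns the length it tried to create, so a result >= the buffer size means the destination is full. The pattern reduced to two fragments (the model/fwrev arguments are illustrative):

        static size_t build_symbol(char *symbol, size_t size,
                                   const char *model, const char *fwrev)
        {
                char tmp[64];

                memset(symbol, 0, size);

                scnprintf(tmp, sizeof(tmp), "Emulex %s", model);
                if (strlcat(symbol, tmp, size) >= size)
                        goto done;              /* truncated: stop appending */

                scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
                strlcat(symbol, tmp, size);
        done:
                return strnlen(symbol, size);
        }
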
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 5ac4f8d76b91..c958e550911f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2200,6 +2200,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ char *mode;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2237,8 +2238,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* If we don't send GFT_ID to Fabric, a PRLI error
+ * could be expected.
+ */
+ if ((vport->fc_flag & FC_FABRIC) ||
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ mode = KERN_ERR;
+ else
+ mode = KERN_INFO;
+
/* PRLI failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
@@ -4279,7 +4289,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -6298,7 +6308,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
- int i;
+ int i, tmo;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6387,6 +6397,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
+
+ /* Restart disctmo if its already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies + msecs_to_jiffies(1000 * tmo));
+ }
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
@@ -7812,20 +7829,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
+ unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
+
/*
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
* ultimately grabs the ring_lock, the driver must splice the list into
* a working list and release the locks before calling the abort.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring = lpfc_phba_elsring(phba);
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
if (unlikely(!pring)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -7842,20 +7861,20 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
}
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Abort each iocb on the aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@@ -7881,7 +7900,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
}
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Cancell all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
@@ -8162,6 +8181,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
spin_unlock_irq(shost->host_lock);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
goto dropit;
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c43852f97f25..120d2844f3cb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
+#include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1131,7 +1132,6 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1156,18 +1156,15 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/* Start discovery by sending a FLOGI. port_state is identically
- * LPFC_FLOGI while waiting for FLOGI cmpl
+ * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
+ * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
*/
if (vport->port_state != LPFC_FLOGI) {
- if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
- bbscn = bf_get(lpfc_bbscn_def,
- &phba->sli4_hba.bbscn_params);
- vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
- vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
- }
- lpfc_initial_flogi(vport);
- } else if (vport->fc_flag & FC_PT2PT) {
- lpfc_disc_start(vport);
+ if (!(phba->hba_flag & HBA_DEFER_FLOGI))
+ lpfc_initial_flogi(vport);
+ } else {
+ if (vport->fc_flag & FC_PT2PT)
+ lpfc_disc_start(vport);
}
return;
@@ -3093,6 +3090,14 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mempool_free(pmb, phba->mbox_mem_pool);
+
+ /* Check if sending the FLOGI is being deferred to after we get
+ * up to date CSPs from MBX_READ_SPARAM.
+ */
+ if (phba->hba_flag & HBA_DEFER_FLOGI) {
+ lpfc_initial_flogi(vport);
+ phba->hba_flag &= ~HBA_DEFER_FLOGI;
+ }
return;
out:
@@ -3222,6 +3227,23 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
lpfc_linkup(phba);
+ sparam_mbox = NULL;
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!cfglink_mbox)
+ goto out;
+ vport->port_state = LPFC_LOCAL_CFG_LINK;
+ lpfc_config_link(phba, cfglink_mbox);
+ cfglink_mbox->vport = vport;
+ cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+ goto out;
+ }
+ }
+
sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!sparam_mbox)
goto out;
@@ -3242,20 +3264,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!cfglink_mbox)
- goto out;
- vport->port_state = LPFC_LOCAL_CFG_LINK;
- lpfc_config_link(phba, cfglink_mbox);
- cfglink_mbox->vport = vport;
- cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(cfglink_mbox, phba->mbox_mem_pool);
- goto out;
- }
- } else {
+ if (phba->hba_flag & HBA_FCOE_MODE) {
vport->port_state = LPFC_VPORT_UNKNOWN;
/*
* Add the driver's default FCF record at FCF index 0 now. This
@@ -3312,8 +3321,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
}
/* Reset FCF roundrobin bmask for new discovery */
lpfc_sli4_clear_fcf_rr_bmask(phba);
+ } else {
+ if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
+ !(phba->link_flag & LS_LOOPBACK_MODE))
+ phba->hba_flag |= HBA_DEFER_FLOGI;
}
+ /* Prepare for LINK up registrations */
+ memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+ scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+ init_utsname()->nodename);
return;
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
@@ -5333,9 +5350,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2fd8f15f9997..546cdc345e6d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -39,6 +39,7 @@
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -71,9 +72,13 @@ char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
+static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
uint32_t lpfc_present_cpu;
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -1376,7 +1381,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
if (vports != NULL)
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_rcv_seq_check_edtov(vports[i]);
- lpfc_fdmi_num_disc_check(vports[i]);
+ lpfc_fdmi_change_check(vports[i]);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3401,6 +3406,8 @@ lpfc_online(struct lpfc_hba *phba)
if (phba->cfg_xri_rebalancing)
lpfc_create_multixri_pools(phba);
+ lpfc_cpuhp_add(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3559,6 +3566,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -9172,6 +9180,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_cleanup_poll_list(phba);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -10726,6 +10736,170 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
}
/**
+ * lpfc_cpuhp_get_eq
+ *
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist:
+ */
+static void
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+ struct list_head *eqlist)
+{
+ struct lpfc_vector_map_info *map;
+ const struct cpumask *maskp;
+ struct lpfc_queue *eq;
+ unsigned int i;
+ cpumask_t tmp;
+ u16 idx;
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+ /*
+ * if irq is not affinitized to the cpu going
+ * then we don't need to poll the eq attached
+ * to it.
+ */
+ if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ continue;
+ /* get the cpus that are online and are affini-
+ * tized to this irq vector. If the count is
+ * more than 1 then cpuhp is not going to shut-
+ * down this vector. Since this cpu has not
+ * gone offline yet, we need >1.
+ */
+ cpumask_and(&tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(&tmp) > 1)
+ continue;
+
+ /* Now that we have an irq to shutdown, get the eq
+ * mapped to this irq. Note: multiple hdwq's in
+ * the software can share an eq, but eventually
+ * only eq will be mapped to this vector
+ */
+ for_each_possible_cpu(i) {
+ map = &phba->sli4_hba.cpu_map[i];
+ if (!(map->irq == pci_irq_vector(phba->pcidev, idx)))
+ continue;
+ eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq;
+ list_add(&eq->_poll_list, eqlist);
+ /* 1 is good enough. others will be a copy of this */
+ break;
+ }
+ }
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+ /*
+ * unregistering the instance doesn't stop the polling
+ * timer. Wait for the poll timer to retire.
+ */
+ synchronize_rcu();
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ __lpfc_cpuhp_remove(phba);
+}
+
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ rcu_read_lock();
+
+ if (!list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ rcu_read_unlock();
+
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+}
+
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
+{
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ *retval = -EAGAIN;
+ return true;
+ }
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ *retval = 0;
+ return true;
+ }
+
+ /* proceed with the hotplug */
+ return false;
+}
+
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ LIST_HEAD(eqlist);
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+
+ /* start polling on these eq's */
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
+ list_del_init(&eq->_poll_list);
+ lpfc_sli4_start_polling(eq);
+ }
+
+ return 0;
+}
+
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ unsigned int n;
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
+ if (n == cpu)
+ lpfc_sli4_stop_polling(eq);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
@@ -11155,6 +11329,10 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Wait for completion of device XRI exchange busy */
lpfc_sli4_xri_exchange_busy_wait(phba);
+ /* per-phba callback de-registration for hotplug event */
+ if (phba->pport)
+ lpfc_cpuhp_remove(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -12380,6 +12558,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
+ INIT_LIST_HEAD(&phba->poll_list);
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+
return 0;
out_free_sysfs_attr:
@@ -13206,11 +13387,24 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "lpfc/sli4:online",
+ lpfc_cpu_online, lpfc_cpu_offline);
+ if (error < 0)
+ goto cpuhp_failure;
+ lpfc_cpuhp_state = error;
+
error = pci_register_driver(&lpfc_driver);
- if (error) {
- fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
- }
+ if (error)
+ goto unwind;
+
+ return error;
+
+unwind:
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
+cpuhp_failure:
+ fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
return error;
}
@@ -13227,6 +13421,7 @@ lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
if (_dump_buf_data) {
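
The lpfc_init.c additions hook the driver into the multi-instance CPU hotplug API: one dynamic state is registered at module init, each HBA adds its hlist_node as an instance, and the online/offline callbacks get that node back. Stripped to the API skeleton (the my_* names are placeholders):

        static enum cpuhp_state my_cpuhp_state;

        static int my_cpu_offline(unsigned int cpu, struct hlist_node *node)
        {
                struct my_hba *hba = hlist_entry_safe(node, struct my_hba, cpuhp);

                /* begin polling any EQ whose last online CPU is leaving */
                return 0;
        }

        static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
        {
                /* stop polling EQs that this returning CPU can service */
                return 0;
        }

        /* module init: a dynamic state id is returned on success */
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                      my_cpu_online, my_cpu_offline);
        if (ret < 0)
                return ret;
        my_cpuhp_state = ret;

        /* per device */
        cpuhp_state_add_instance_nocalls(my_cpuhp_state, &hba->cpuhp);

        /* teardown mirrors setup */
        cpuhp_state_remove_instance_nocalls(my_cpuhp_state, &hba->cpuhp);
        cpuhp_remove_multi_state(my_cpuhp_state);
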
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 59252bfca14e..b3d968303145 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -484,8 +484,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -845,9 +847,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!(vport->fc_flag & FC_PT2PT)) {
/* Check config parameter use-adisc or FCP-2 */
- if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+ if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
- (ndlp->nlp_type & NLP_FCP_TARGET))) {
+ (ndlp->nlp_type & NLP_FCP_TARGET)))) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_ADISC;
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 9d99cb915390..532d44d3055e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -342,13 +342,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- }
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&vport->phba->hbalock);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
- */
- lpfc_nlp_put(ndlp);
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
+ lpfc_nlp_put(ndlp);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
+ }
rport_err:
return;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index d74bfd264495..8a93dfa79297 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1723,7 +1723,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+ if (!wait_for_completion_timeout(&tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6179 Unreg targetport %p timeout "
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ba996fbde89b..31989b8863a1 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -583,7 +583,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del(&psb->list);
qp->abts_scsi_io_bufs--;
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
spin_unlock(
&qp->abts_scsi_buf_list_lock);
@@ -615,7 +615,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
if (iocbq->sli4_xritag != xri)
continue;
psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
@@ -834,7 +834,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
psb->prot_seg_cnt = 0;
qp = psb->hdwq;
- if (psb->exch_busy) {
+ if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
@@ -3679,7 +3679,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exhange busy status from HBA */
- lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -4682,20 +4685,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
}
- /* no longer need the lock after this point */
- spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ /* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
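
The lpfc_scsi.c hunks fold the old exch_busy word into the shared flags field: the completion path mirrors the HBA's exchange-busy bit into LPFC_SBUF_XBUSY, and the release path tests it. A standalone sketch of that bitmask pattern; the surrounding logic is illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

#define LPFC_SBUF_XBUSY		0x1	/* values from the lpfc_sli.h hunk */
#define LPFC_SBUF_BUMP_QDEPTH	0x2

int main(void)
{
	uint16_t flags = LPFC_SBUF_BUMP_QDEPTH;
	int hba_reported_xb = 1;	/* stand-in for LPFC_EXCHANGE_BUSY */

	/* completion path: mirror the HBA's XB bit into flags */
	if (hba_reported_xb)
		flags |= LPFC_SBUF_XBUSY;
	else
		flags &= ~LPFC_SBUF_XBUSY;

	/* release path: route the buffer by testing the bit; the other
	 * bits in the word are untouched by the XB updates above
	 */
	if (flags & LPFC_SBUF_XBUSY)
		printf("park buffer on the aborted list\n");
	else
		printf("return buffer to the free list\n");
	return 0;
}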
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d1512e4f9791..6e97d85c698d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -485,7 +485,8 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
}
static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint8_t rearm)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -519,8 +520,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
eq->queue_claimed = 0;
rearm_and_exit:
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+ /* Always clear the EQ. */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
return count;
}
@@ -2463,6 +2464,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -3991,6 +3994,11 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH ||
+ !phba->sli4_hba.hdwq) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -7915,7 +7923,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -10068,10 +10076,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *eq;
unsigned long iflags;
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+
pring = lpfc_sli4_calc_ring(phba, piocb);
if (unlikely(pring == NULL))
return IOCB_ERROR;
@@ -10079,6 +10090,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -11758,7 +11771,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
}
pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13180,13 +13196,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -14262,7 +14284,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
/* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq);
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
@@ -14322,6 +14344,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_queue *eq;
+ int i = 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ if (!list_empty(&phba->poll_list))
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+
+ rcu_read_unlock();
+}
+
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+ struct lpfc_hba *phba = eq->phba;
+ int i = 0;
+
+ /*
+ * Unlocking an irq is one of the entry points to check
+ * for a re-schedule, but we are fine on the io submission
+ * path as the midlayer does a get_cpu to glue us in. Flush
+ * out the invalidate queue so we can see the updated
+ * value for the flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+ /* We will likely not get the completion for the caller
+ * during this iteration, but I guess that's fine.
+ * Future io's coming on this eq should be able to
+ * pick it up. As for the case of single io's, they
+ * will be handled through a sched from polling timer
+ * function which is currently triggered every 1msec.
+ */
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+
+ return i;
+}
+
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ if (list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ /* kickstart slowpath processing for this eq */
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /* Disable slowpath processing for this eq. Kick start the eq
+ * by RE-ARMING it ASAP.
+ */
+ list_del_rcu(&eq->_poll_list);
+ synchronize_rcu();
+
+ if (list_empty(&phba->poll_list))
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *next;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+ list_del(&eq->_poll_list);
+
+ INIT_LIST_HEAD(&phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+ if (mode == eq->mode)
+ return;
+ /*
+ * Currently this function is only called during a hotplug
+ * event and the cpu on which this function is executing
+ * is going offline. By now the hotplug has instructed
+ * the scheduler to remove this cpu from the cpu active mask.
+ * So we don't need to worry about being put aside by the
+ * scheduler for a high priority process. Yes, interrupts
+ * could come but they are known to retire ASAP.
+ */
+
+ /* Disable polling in the fastpath */
+ WRITE_ONCE(eq->mode, mode);
+ /* flush out the store buffer */
+ smp_wmb();
+
+ /*
+ * Add this eq to the polling list and start polling. For
+ * a grace period both the interrupt handler and the poller
+ * will try to process the eq _but_ that's fine. We have a
+ * synchronization mechanism in place (queue_claimed) to
+ * deal with it. This is just a draining phase for the
+ * interrupt handler (not the eq's) as we have guaranteed
+ * through the barrier that all the CPUs have seen the new
+ * CQ_POLLED state, which will effectively disable the
+ * REARMING of the EQ. The whole idea is that the eq's die
+ * off eventually as we are not rearming them anymore.
+ */
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
+ lpfc_sli4_remove_from_poll_list(eq);
+}
+
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+
+ /* Kick start the pending io's in h/w.
+ * Once we switch back to interrupt processing on an eq
+ * the io path completion will only arm eq's when it
+ * receives a completion. But since the eq's are in a
+ * disarmed state they don't receive completions. This
+ * creates a deadlock scenario.
+ */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
+
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
@@ -14396,6 +14559,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
return NULL;
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->_poll_list);
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
@@ -18193,6 +18357,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
@@ -19691,6 +19862,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19711,6 +19884,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19739,6 +19914,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
return WQE_ERROR;
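
The new polling code flips eq->mode with WRITE_ONCE plus a write barrier and reads it on the fast path with a read barrier plus READ_ONCE. A userspace analogy of that handshake using C11 release/acquire atomics in place of the kernel barriers; all names here are hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { EQ_INTERRUPT, EQ_POLL };		/* mirrors LPFC_EQ_* */

static _Atomic int eq_mode = EQ_INTERRUPT;

static void *io_fast_path(void *arg)
{
	/* acquire pairs with the release in switch_eqmode() */
	if (atomic_load_explicit(&eq_mode, memory_order_acquire) == EQ_POLL)
		printf("fast path: process the eq, no rearm\n");
	else
		printf("fast path: leave completions to the irq\n");
	return NULL;
}

static void switch_eqmode(int mode)
{
	/* release publishes the new mode to every later acquire load */
	atomic_store_explicit(&eq_mode, mode, memory_order_release);
}

int main(void)
{
	pthread_t t;

	switch_eqmode(EQ_POLL);		/* e.g. the cpu is going offline */
	pthread_create(&t, NULL, io_fast_path, NULL);
	pthread_join(t, NULL);
	return 0;
}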
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 467b8270f7fd..9449236c231d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -375,14 +375,13 @@ struct lpfc_io_buf {
struct lpfc_nodelist *ndlp;
uint32_t timeout;
- uint16_t flags; /* TBD convert exch_busy to flags */
+ uint16_t flags;
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 986594ec40e2..9c3100246375 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -137,6 +137,23 @@ struct lpfc_rqb {
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
+
+ /*
+ * If interrupts are in effect on _all_ the eq's the footprint
+ * of the polling code is zero (except mode). This memory is
+ * checked for every io to see if the io needs to be polled,
+ * and on completion to check if the eq's need to be rearmed.
+ * Keep it in the same cacheline as the queue ptr to avoid cpu
+ * fetch stalls. Using 1 byte of memory leaves a 7-byte hole;
+ * fill it with other frequently used members.
+ */
+ uint16_t last_cpu; /* most recent cpu */
+ uint16_t hdwq;
+ uint8_t qe_valid;
+ uint8_t mode; /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT 0
+#define LPFC_EQ_POLL 1
+
struct list_head wqfull_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
@@ -241,10 +258,8 @@ struct lpfc_queue {
struct delayed_work sched_spwork;
uint64_t isr_timestamp;
- uint16_t hdwq;
- uint16_t last_cpu; /* most recent cpu */
- uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
+ struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
};
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index c5169d31c966..3c65d4354fed 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -430,7 +430,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5f30016e9b64..f63f9bb6e7d3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4197,7 +4197,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
- test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
+ &instance->reset_flags))
return IGNORE_TIMEOUT;
else
return INITIATE_OCR;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 4dfa0685a86c..74df9edc6cda 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -523,7 +523,8 @@ retry_alloc:
fusion->io_request_frames =
dma_pool_alloc(fusion->io_request_frames_pool,
- GFP_KERNEL, &fusion->io_request_frames_phys);
+ GFP_KERNEL | __GFP_NOWARN,
+ &fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
@@ -561,7 +562,7 @@ retry_alloc:
fusion->io_request_frames =
dma_pool_alloc(fusion->io_request_frames_pool,
- GFP_KERNEL,
+ GFP_KERNEL | __GFP_NOWARN,
&fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
@@ -3996,6 +3997,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
bool refire_cmd = 0;
u8 result;
@@ -4046,6 +4048,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
break;
}
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
+ result = RETURN_CMD;
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4243,7 +4250,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);
@@ -4608,6 +4614,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
megasas_sync_irqs((unsigned long)instance);
@@ -4803,7 +4810,7 @@ fail_kill_adapter:
atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
}
out:
- clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
mutex_unlock(&instance->reset_mutex);
return retval;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 7fa73eaca1a8..c2fb21fc099c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
#define THRESHOLD_REPLY_COUNT 50
+#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 74fb50644678..4dd50db90677 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1045,6 +1045,8 @@ static void handle_error(struct mesh_state *ms)
while ((in_8(&mr->bus_status1) & BS1_RST) != 0)
udelay(1);
printk("done\n");
+ if (ms->dma_started)
+ halt_dma(ms);
handle_reset(ms);
/* request_q is empty, no point in mesh_start() */
return;
@@ -1357,7 +1359,8 @@ static void halt_dma(struct mesh_state *ms)
ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
ms->tgts[ms->conn_tgt].data_goes_out);
}
- scsi_dma_unmap(cmd);
+ if (cmd)
+ scsi_dma_unmap(cmd);
ms->dma_started = 0;
}
@@ -1712,6 +1715,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd)
spin_lock_irqsave(ms->host->host_lock, flags);
+ if (ms->dma_started)
+ halt_dma(ms);
+
/* Reset the controller & dbdma channel */
out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */
out_8(&mr->exception, 0xff); /* clear all exception bits */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index f2d61d023bcb..eeb6d58ae867 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4380,7 +4380,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
kfree(ioc->hpr_lookup);
+ ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup);
+ ioc->internal_lookup = NULL;
if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
@@ -4836,7 +4838,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
- _base_release_memory_pools(ioc);
goto out;
}
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5181c03e82a6..10f8e1791a5a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -770,6 +770,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ if (!ioc->pcie_sg_lookup) {
+ dtmprintk(ioc, ioc_info(ioc,
+ "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
+ ));
+
+ if (ioc->logging_level & MPT_DEBUG_TM)
+ _debug_dump_mf(nvme_encap_request,
+ ioc->request_sz/4);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
/*
* Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense
@@ -1550,7 +1562,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
__func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
@@ -2880,19 +2893,18 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (!ioc->is_warpdrive) {
ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
__func__);
- goto out;
+ return 0;
}
/* pci_access_mutex lock acquired by sysfs show path */
mutex_lock(&ioc->pci_access_mutex);
- if (ioc->pci_error_recovery || ioc->remove_host) {
- mutex_unlock(&ioc->pci_access_mutex);
- return 0;
- }
+ if (ioc->pci_error_recovery || ioc->remove_host)
+ goto out;
/* allocate up to GPIOVal 36 entries */
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
if (!io_unit_pg3) {
+ rc = -ENOMEM;
ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
__func__, sz);
goto out;
@@ -2902,6 +2914,7 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
0) {
ioc_err(ioc, "%s: failed reading iounit_pg3\n",
__func__);
+ rc = -EINVAL;
goto out;
}
@@ -2909,12 +2922,14 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
__func__, ioc_status);
+ rc = -EINVAL;
goto out;
}
if (io_unit_pg3->GPIOCount < 25) {
ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
__func__, io_unit_pg3->GPIOCount);
+ rc = -EINVAL;
goto out;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1ccfbc7eebe0..4c5a8e460b50 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9673,8 +9673,8 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
@@ -9750,8 +9750,8 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 301de40eb708..f628164bf757 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2382,6 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 5a021217bfc9..1fe4aa3befd5 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -354,6 +354,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
+#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 6ef0f741bf89..7b24c21edc37 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3101,7 +3101,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -3131,6 +3131,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3154,9 +3155,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3453,6 +3457,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3478,6 +3486,11 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
+ if (qedf) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+ }
return rc;
}
@@ -3627,11 +3640,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8829880a54c3..0f57c8073406 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -997,7 +997,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- flush_work(&qedi_ep->offload_work);
+ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
+ flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
@@ -1214,6 +1215,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
}
iscsi_cid = (u32)path_data->handle;
+ if (iscsi_cid >= qedi->max_active_conns) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6b7b390b2e52..6954520f6ac6 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -176,6 +176,7 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
faddr = ha->flt_region_nvram;
if (IS_QLA28XX(ha)) {
+ qla28xx_get_aux_images(vha, &active_regions);
if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
faddr = ha->flt_region_nvram_sec;
}
@@ -441,9 +442,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
valid = 0;
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
- else if (start == (ha->flt_region_boot * 4) ||
- start == (ha->flt_region_fw * 4))
- valid = 1;
else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
valid = 1;
if (!valid) {
@@ -491,8 +489,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"Writing flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size);
- ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+ rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
+ if (rval)
+ rval = -EIO;
break;
default:
rval = -EINVAL;
@@ -1775,9 +1775,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ql_log(ql_log_info, vha, 0x70d6,
- "port speed:%d\n", ha->link_data_rate);
-
return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
}
@@ -2924,11 +2921,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
msleep(1000);
- qla_nvme_delete(vha);
qla24xx_disable_vp(vha);
qla2x00_wait_for_sess_deletion(vha);
+ qla_nvme_delete(vha);
vha->flags.delete_progress = 1;
qlt_remove_target(ha, vha);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 5441557b424b..89cd77068c76 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -257,7 +257,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
srb_t *sp;
const char *type;
int req_sg_cnt, rsp_sg_cnt;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
uint16_t nextlid = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
@@ -432,7 +432,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
int req_sg_cnt, rsp_sg_cnt;
uint16_t loop_id;
struct fc_port *fcport;
@@ -1951,7 +1951,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
- int rval = (DRIVER_ERROR << 16);
+ int rval = (DID_ERROR << 16);
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
srb_t *sp;
int req_sg_cnt = 0, rsp_sg_cnt = 0;
@@ -2400,7 +2400,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
struct qla_active_regions regions = { };
struct active_regions active_regions = { };
- qla28xx_get_aux_images(vha, &active_regions);
+ qla27xx_get_active_image(vha, &active_regions);
regions.global_image = active_regions.global;
if (IS_QLA28XX(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9e80646722e2..d31c8657c894 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2519,12 +2519,6 @@ qla83xx_fw_dump_failed:
/* Driver Debug Functions. */
/****************************************************************************/
-static inline int
-ql_mask_match(uint level)
-{
- return (level & ql2xextended_error_logging) == level;
-}
-
/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index bb01b680ce9f..433e95502808 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
struct qla_hw_data *);
extern int qla24xx_soft_reset(struct qla_hw_data *);
+
+static inline int
+ql_mask_match(uint level)
+{
+ return (level & ql2xextended_error_logging) == level;
+}
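
ql_mask_match() moves verbatim into qla_dbg.h so other units can inline it; note that it requires every bit of the requested level to be enabled, not just one. A standalone demo of the predicate with made-up mask values:

#include <stdio.h>

static unsigned int ql2xextended_error_logging;

static inline int
ql_mask_match(unsigned int level)
{
	return (level & ql2xextended_error_logging) == level;
}

int main(void)
{
	ql2xextended_error_logging = 0x30;	/* two classes enabled */

	printf("%d\n", ql_mask_match(0x10));	/* 1: subset of enabled bits */
	printf("%d\n", ql_mask_match(0x30));	/* 1: exactly the enabled bits */
	printf("%d\n", ql_mask_match(0x50));	/* 0: 0x40 is not enabled */
	return 0;
}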
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1a4095c56eee..b3dd0e980156 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2335,6 +2335,7 @@ typedef struct fc_port {
unsigned int query:1;
unsigned int id_changed:1;
unsigned int scan_needed:1;
+ unsigned int explicit_logout:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
index 7479924ba422..20788054b91b 100644
--- a/drivers/scsi/qla2xxx/qla_dsd.h
+++ b/drivers/scsi/qla2xxx/qla_dsd.h
@@ -1,6 +1,8 @@
#ifndef _QLA_DSD_H_
#define _QLA_DSD_H_
+#include <asm/unaligned.h>
+
/* 32-bit data segment descriptor (8 bytes) */
struct dsd32 {
__le32 address;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index df079a8c2b33..070b847937fb 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1523,6 +1523,10 @@ struct qla_flt_header {
#define FLT_REG_NVRAM_SEC_28XX_1 0x10F
#define FLT_REG_NVRAM_SEC_28XX_2 0x111
#define FLT_REG_NVRAM_SEC_28XX_3 0x113
+#define FLT_REG_MPI_PRI_28XX 0xD3
+#define FLT_REG_MPI_SEC_28XX 0xF0
+#define FLT_REG_PEP_PRI_28XX 0xD1
+#define FLT_REG_PEP_SEC_28XX 0xF1
struct qla_flt_region {
uint16_t code;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 11b2f2281eed..670259fe08bb 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -513,6 +513,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
e->u.fcport.fcport = fcport;
fcport->flags |= FCF_ASYNC_ACTIVE;
+ fcport->disc_state = DSC_LOGIN_PEND;
return qla2x00_post_work(vha, e);
}
@@ -3095,103 +3096,113 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
}
static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+qla2x00_init_fce_trace(scsi_qla_host_t *vha)
{
int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- if (ha->eft) {
+ if (!IS_FWI2_CAPABLE(ha))
+ return;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return;
+
+ if (ha->fce) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "%s: Offload Mem is already allocated.\n",
- __func__);
+ "%s: FCE Mem is already allocated.\n",
+ __func__);
return;
}
- if (IS_FWI2_CAPABLE(ha)) {
- /* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- goto try_eft;
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00be,
+ "Unable to allocate (%d KB) for FCE.\n",
+ FCE_SIZE / 1024);
+ return;
+ }
- if (ha->fce)
- dma_free_coherent(&ha->pdev->dev,
- FCE_SIZE, ha->fce, ha->fce_dma);
+ rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+ ha->fce_mb, &ha->fce_bufs);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00bf,
+ "Unable to initialize FCE (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
+ return;
+ }
- /* Allocate memory for Fibre Channel Event Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- ql_log(ql_log_warn, vha, 0x00be,
- "Unable to allocate (%d KB) for FCE.\n",
- FCE_SIZE / 1024);
- goto try_eft;
- }
-
- rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
- ha->fce_mb, &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00bf,
- "Unable to initialize FCE (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
- tc_dma);
- ha->flags.fce_enabled = 0;
- goto try_eft;
- }
- ql_dbg(ql_dbg_init, vha, 0x00c0,
- "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
-
- ha->flags.fce_enabled = 1;
- ha->fce_dma = tc_dma;
- ha->fce = tc;
-
-try_eft:
- if (ha->eft)
- dma_free_coherent(&ha->pdev->dev,
- EFT_SIZE, ha->eft, ha->eft_dma);
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
+ "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
- /* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- ql_log(ql_log_warn, vha, 0x00c1,
- "Unable to allocate (%d KB) for EFT.\n",
- EFT_SIZE / 1024);
- goto eft_err;
- }
+ ha->flags.fce_enabled = 1;
+ ha->fce_dma = tc_dma;
+ ha->fce = tc;
+}
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
- tc_dma);
- goto eft_err;
- }
- ql_dbg(ql_dbg_init, vha, 0x00c3,
- "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+static void
+qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ dma_addr_t tc_dma;
+ void *tc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return;
+
+ if (ha->eft) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "%s: EFT Mem is already allocated.\n",
+ __func__);
+ return;
+ }
- ha->eft_dma = tc_dma;
- ha->eft = tc;
+ /* Allocate memory for Extended Trace Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00c1,
+ "Unable to allocate (%d KB) for EFT.\n",
+ EFT_SIZE / 1024);
+ return;
}
-eft_err:
- return;
+ rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00c2,
+ "Unable to initialize EFT (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
+ return;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+
+ ha->eft_dma = tc_dma;
+ ha->eft = tc;
+}
+
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+{
+ qla2x00_init_fce_trace(vha);
+ qla2x00_init_eft_trace(vha);
}
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
- int rval;
uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
eft_size, fce_size, mq_size;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
- dma_addr_t tc_dma;
- void *tc;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -3229,37 +3240,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
}
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- /* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- goto try_eft;
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
-try_eft:
+ qla2x00_init_fce_trace(vha);
+ if (ha->fce)
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+ qla2x00_init_eft_trace(vha);
if (ha->eft)
- dma_free_coherent(&ha->pdev->dev,
- EFT_SIZE, ha->eft, ha->eft_dma);
-
- /* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- ql_log(ql_log_warn, vha, 0x00c1,
- "Unable to allocate (%d KB) for EFT.\n",
- EFT_SIZE / 1024);
- goto allocate;
- }
-
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
- tc_dma);
- }
- ql_dbg(ql_dbg_init, vha, 0x00c3,
- "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
+ eft_size = EFT_SIZE;
}
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -3281,24 +3268,22 @@ try_eft:
j, fwdt->dump_size);
dump_size += fwdt->dump_size;
}
- goto allocate;
+ } else {
+ req_q_size = req->length * sizeof(request_t);
+ rsp_q_size = rsp->length * sizeof(response_t);
+ dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+ dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
+ + eft_size;
+ ha->chain_offset = dump_size;
+ dump_size += mq_size + fce_size;
+ if (ha->exchoffld_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exchoffld_size;
+ if (ha->exlogin_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exlogin_size;
}
- req_q_size = req->length * sizeof(request_t);
- rsp_q_size = rsp->length * sizeof(response_t);
- dump_size = offsetof(struct qla2xxx_fw_dump, isp);
- dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
- ha->chain_offset = dump_size;
- dump_size += mq_size + fce_size;
-
- if (ha->exchoffld_buf)
- dump_size += sizeof(struct qla2xxx_offld_chain) +
- ha->exchoffld_size;
- if (ha->exlogin_buf)
- dump_size += sizeof(struct qla2xxx_offld_chain) +
- ha->exlogin_size;
-
-allocate:
if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
ql_dbg(ql_dbg_init, vha, 0x00c5,
@@ -4894,6 +4879,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_WORK(&fcport->free_work, qlt_free_session_done);
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
@@ -4972,14 +4958,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
set_bit(RSCN_UPDATE, &flags);
clear_bit(LOCAL_LOOP_UPDATE, &flags);
- } else if (ha->current_topology == ISP_CFG_N) {
- clear_bit(RSCN_UPDATE, &flags);
- if (qla_tgt_mode_enabled(vha)) {
- /* allow the other side to start the login */
- clear_bit(LOCAL_LOOP_UPDATE, &flags);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- }
- } else if (ha->current_topology == ISP_CFG_NL) {
+ } else if (ha->current_topology == ISP_CFG_NL ||
+ ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
} else if (!vha->flags.online ||
@@ -5094,7 +5074,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
sizeof(ha->plogi_els_payld.data));
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
} else {
ql_dbg(ql_dbg_init, vha, 0x00d1,
"PLOGI ELS param read fail.\n");
@@ -5927,8 +5906,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
if (fcport->scan_state == QLA_FCPORT_SCAN) {
@@ -5951,7 +5929,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
}
}
- if (fcport->scan_state == QLA_FCPORT_FOUND)
+ if (fcport->scan_state == QLA_FCPORT_FOUND &&
+ (fcport->flags & FCF_LOGIN_NEEDED) != 0)
qla24xx_fcport_handle_login(vha, fcport);
}
return (rval);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9312b19ed708..4ab44f8fb285 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2441,11 +2441,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
+ u16 control_flags = LCF_COMMAND_LOGO;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
- logio->control_flags =
- cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->keep_nport_handle)
- logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+
+ if (sp->fcport->explicit_logout) {
+ control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
+ } else {
+ control_flags |= LCF_IMPL_LOGO;
+
+ if (!sp->fcport->keep_nport_handle)
+ control_flags |= LCF_FREE_NPORT;
+ }
+
+ logio->control_flags = cpu_to_le16(control_flags);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2715,7 +2723,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
- (uint8_t *)els_iocb, 0x70);
+ (uint8_t *)els_iocb,
+ sizeof(*els_iocb));
} else {
els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -2876,7 +2885,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
- (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
+ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
+ sizeof(*elsio->u.els_plogi.els_plogi_pyld));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 78aec50abe0f..18a70859a0b7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1062,8 +1062,6 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
-
- qlt_async_event(mb[0], vha, mb);
break;
}
@@ -1080,8 +1078,6 @@ global_port_update:
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
-
- qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -1903,6 +1899,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr;
iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ if (unlikely(iocb->u.nvme.rsp_pyld_len >
+ sizeof(struct nvme_fc_ersp_iu))) {
+ if (ql_mask_match(ql_dbg_io)) {
+ WARN_ONCE(1, "Unexpected response payload length %u.\n",
+ iocb->u.nvme.rsp_pyld_len);
+ ql_log(ql_log_warn, fcport->vha, 0x5100,
+ "Unexpected response payload length %u.\n",
+ iocb->u.nvme.rsp_pyld_len);
+ }
+ iocb->u.nvme.rsp_pyld_len =
+ sizeof(struct nvme_fc_ersp_iu);
+ }
iter = iocb->u.nvme.rsp_pyld_len >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
@@ -3633,7 +3641,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
ql_log(ql_log_info, vha, 0x0037,
- "Falling back-to MSI mode -%d.\n", ret);
+ "Falling back-to MSI mode -- ret=%d.\n", ret);
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
!IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
@@ -3641,13 +3649,13 @@ skip_msix:
goto skip_msi;
ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
- if (!ret) {
+ if (ret > 0) {
ql_dbg(ql_dbg_init, vha, 0x0038,
"MSI: Enabled.\n");
ha->flags.msi_enabled = 1;
} else
ql_log(ql_log_warn, vha, 0x0039,
- "Falling back-to INTa mode -- %d.\n", ret);
+ "Falling back-to INTa mode -- ret=%d.\n", ret);
skip_msi:
/* Skip INTx on ISP82xx. */
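
The qla_isr.c hunk above clamps the firmware-reported NVMe response payload length to the ersp buffer before the swab/copy loop runs. A generic sketch of that clamp-before-copy pattern; the size constant and helper are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ERSP_IU_SIZE 32u	/* invented; the driver uses
				 * sizeof(struct nvme_fc_ersp_iu) */

static void copy_rsp(uint8_t *dst, const uint8_t *src, uint32_t rsp_len)
{
	if (rsp_len > ERSP_IU_SIZE) {
		fprintf(stderr, "unexpected response payload length %u\n",
			rsp_len);
		rsp_len = ERSP_IU_SIZE;	/* clamp before copying */
	}
	memcpy(dst, src, rsp_len);
}

int main(void)
{
	uint8_t src[64] = { 0 }, dst[ERSP_IU_SIZE];

	copy_rsp(dst, src, sizeof(src));  /* oversized; clamped to 32 */
	return 0;
}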
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 133f5f6270ff..2774c81fefb7 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -710,6 +710,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mcp->mb[2] = LSW(risc_addr);
mcp->mb[3] = 0;
mcp->mb[4] = 0;
+ mcp->mb[11] = 0;
ha->flags.using_lr_setting = 0;
if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -754,7 +755,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
if (ha->flags.exchoffld_enabled)
mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
- mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
+ mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
} else {
mcp->mb[1] = LSW(risc_addr);
@@ -3124,7 +3125,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
- if (vha->flags.qpairs_available && sp->qpair)
+ if (sp->qpair)
req = sp->qpair->req;
else
return QLA_FUNCTION_FAILED;
@@ -3917,6 +3918,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
vha->d_id.b24 = 0;
vha->d_id.b.al_pa = 1;
ha->flags.n2n_bigger = 1;
+ ha->flags.n2n_ae = 0;
id.b.al_pa = 2;
ql_dbg(ql_dbg_async, vha, 0x5075,
@@ -3927,6 +3929,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Format 1: Remote login - Waiting for WWPN %8phC.\n",
rptid_entry->u.f1.port_name);
ha->flags.n2n_bigger = 0;
+ ha->flags.n2n_ae = 1;
}
qla24xx_post_newsess_work(vha, &id,
rptid_entry->u.f1.port_name,
@@ -3938,7 +3941,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
/* if our portname is higher then initiate N2N login */
set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
- ha->flags.n2n_ae = 1;
return;
break;
case TOPO_FL:
@@ -6138,9 +6140,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mcp->mb[7] = LSW(MSD(req_dma));
mcp->mb[8] = MSW(addr);
/* Setting RAM ID to valid */
- mcp->mb[10] |= BIT_7;
/* For MCTP RAM ID is 0x40 */
- mcp->mb[10] |= 0x40;
+ mcp->mb[10] = BIT_7 | 0x40;
mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index c760ae354174..55df42933c4b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
return (u8 *)&ha->hablob->fw->data[offset];
}
-static __le32
-qla82xx_get_fw_size(struct qla_hw_data *ha)
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
{
struct qla82xx_uri_data_desc *uri_desc = NULL;
@@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha)
return cpu_to_le32(uri_desc->size);
}
- return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+ return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}
static u8 *
@@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
}
flashaddr = FLASH_ADDR_START;
- size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+ size = qla82xx_get_fw_size(ha) / 8;
ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
for (i = 0; i < size; i++) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b5ef1148eea8..4af35179475f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1269,6 +1269,7 @@ static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ DECLARE_COMPLETION_ONSTACK(comp);
srb_t *sp;
int ret;
unsigned int id;
@@ -1304,6 +1305,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return SUCCESS;
}
+ /* Get a reference to the sp and drop the lock. */
if (sp_get(sp)){
/* ref_count is already 0 */
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
@@ -1331,6 +1333,23 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
sp->done(sp, DID_ABORT << 16);
ret = SUCCESS;
break;
+ case QLA_FUNCTION_PARAMETER_ERROR: {
+ /* Wait for the command completion. */
+ uint32_t ratov = ha->r_a_tov/10;
+ uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
+
+ WARN_ON_ONCE(sp->comp);
+ sp->comp = &comp;
+ if (!wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+ __func__, ha->r_a_tov);
+ ret = FAILED;
+ } else {
+ ret = SUCCESS;
+ }
+ break;
+ }
default:
/*
* Either abort failed or abort and completion raced. Let
@@ -1340,6 +1359,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
break;
}
+ sp->comp = NULL;
+ atomic_dec(&sp->ref_count);
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
@@ -1746,6 +1767,8 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
sp->comp = NULL;
}
+
+ atomic_dec(&sp->ref_count);
}
static void
@@ -3232,6 +3255,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+ if (unlikely(!ha->wq)) {
+ ret = -ENOMEM;
+ goto probe_failed;
+ }
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
@@ -3538,6 +3565,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
qla2x00_try_to_stop_firmware(vha);
}
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
/* Turn adapter off line */
vha->flags.online = 0;
@@ -3687,6 +3718,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ /*
+ * If the UNLOADING flag is already set, bail out and let
+ * the path that set it first finish the unload.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
if (ha->flags.fw_started)
@@ -3705,15 +3743,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_wait_for_sess_deletion(base_vha);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla_nvme_delete(base_vha);
dma_free_coherent(&ha->pdev->dev,
@@ -4598,6 +4627,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->fce = NULL;
ha->fce_dma = 0;
+ ha->flags.fce_enabled = 0;
ha->eft = NULL;
ha->eft_dma = 0;
ha->fw_dumped = 0;
@@ -4846,6 +4876,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
struct qla_work_evt *e;
uint8_t bail;
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return NULL;
+
QLA_VHA_MARK_BUSY(vha, bail);
if (bail)
return NULL;
@@ -6052,13 +6085,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
@@ -6069,9 +6095,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
return;
}
- qla2x00_wait_for_sess_deletion(base_vha);
+ /*
+ * If the UNLOADING flag is already set, bail out and let
+ * the path that set it first finish the unload.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
- set_bit(UNLOADING, &base_vha->dpc_flags);
+ qla2x00_wait_for_sess_deletion(base_vha);
qla2x00_delete_all_vps(ha, base_vha);
@@ -6295,6 +6326,7 @@ qla2x00_do_dpc(void *data)
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
+ base_vha->flags.online = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
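
Both qla_os.c teardown paths now claim the UNLOADING bit with test_and_set_bit, so exactly one caller performs the unload and any later caller returns early. A userspace analogy using C11 atomic fetch-or in place of the kernel bitop; function names are hypothetical:

#include <stdatomic.h>
#include <stdio.h>

#define UNLOADING (1u << 0)

static _Atomic unsigned int dpc_flags;

static int test_and_set_unloading(void)
{
	/* returns nonzero if the bit was already set, like the bitop */
	return atomic_fetch_or(&dpc_flags, UNLOADING) & UNLOADING;
}

static void remove_one(const char *who)
{
	if (test_and_set_unloading()) {
		printf("%s: unload already running, bail out\n", who);
		return;
	}
	printf("%s: performing the one and only teardown\n", who);
}

int main(void)
{
	remove_one("remove_path_a");
	remove_one("error_path_b");
	return 0;
}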
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1eb82384d933..ee32ef70eabd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -845,15 +845,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_img_status_sec = start;
break;
case FLT_REG_FW_SEC_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_fw_sec = start;
break;
case FLT_REG_BOOTLOAD_SEC_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_boot_sec = start;
break;
case FLT_REG_AUX_IMG_PRI_28XX:
@@ -2723,8 +2723,11 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Region %x is secure\n", region.code);
- if (region.code == FLT_REG_FW ||
- region.code == FLT_REG_FW_SEC_27XX) {
+ switch (region.code) {
+ case FLT_REG_FW:
+ case FLT_REG_FW_SEC_27XX:
+ case FLT_REG_MPI_PRI_28XX:
+ case FLT_REG_MPI_SEC_28XX:
fw_array = dwptr;
/* 1st fw array */
@@ -2755,9 +2758,23 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
buf_size_without_sfub += risc_size;
fw_array += risc_size;
}
- } else {
- ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
- "Secure region %x not supported\n",
+ break;
+
+ case FLT_REG_PEP_PRI_28XX:
+ case FLT_REG_PEP_SEC_28XX:
+ fw_array = dwptr;
+
+ /* 1st fw array */
+ risc_size = be32_to_cpu(fw_array[3]);
+ risc_attr = be32_to_cpu(fw_array[9]);
+
+ buf_size_without_sfub = risc_size;
+ fw_array += risc_size;
+ break;
+
+ default:
+ ql_log(ql_log_warn + ql_dbg_verbose, vha,
+ 0xffff, "Secure region %x not supported\n",
region.code);
rval = QLA_COMMAND_ERROR;
goto done;
@@ -2878,7 +2895,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
"Sending Secure Flash MB Cmd\n");
rval = qla28xx_secure_flash_update(vha, 0, region.code,
buf_size_without_sfub, sfub_dma,
- sizeof(struct secure_flash_update_block));
+ sizeof(struct secure_flash_update_block) >> 2);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0xffff,
"Secure Flash MB Cmd failed %x.", rval);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b511af33832c..51f16346e3e4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1099,6 +1099,7 @@ void qlt_free_session_done(struct work_struct *work)
}
}
+ sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1164,7 +1165,6 @@ void qlt_unreg_sess(struct fc_port *sess)
sess->nvme_flag |= NVME_FLAG_DELETING;
schedule_work(&sess->nvme_del_work);
} else {
- INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
}
@@ -1270,7 +1270,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
"Scheduling sess %p for deletion %8phC\n",
sess, sess->port_name);
- INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
@@ -4839,6 +4838,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
switch (sess->disc_state) {
case DSC_DELETED:
+ case DSC_LOGIN_PEND:
qlt_plogi_ack_unref(vha, pla);
break;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d15412d3d9bd..8fb8fb8a48ad 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
*/
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
+ if (!mcmd)
+ return;
INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}
@@ -348,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
target_sess_cmd_list_set_waiting(se_sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sess->explicit_logout = 1;
tcm_qla2xxx_put_sess(sess);
}
@@ -919,6 +922,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1081,6 +1085,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index dac9a7013208..02636b4785c5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
- dma_free_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 8c674eca09f1..5504ab11decc 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
- if (ha->fw_dump)
+ if (ha->fw_dump)
vfree(ha->fw_dump);
ha->queues_len = 0;
@@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
return QLA_SUCCESS;
mem_alloc_error_exit:
- qla4xxx_mem_free(ha);
return QLA_ERROR;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d323523f5f9d..44181a2cbf18 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
@@ -5291,6 +5296,12 @@ static int __init scsi_debug_init(void)
pr_err("submit_queues must be 1 or more\n");
return -EINVAL;
}
+
+ if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
+ pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
+ return -EINVAL;
+ }
+
sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
GFP_KERNEL);
if (sdebug_q_arr == NULL)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a08ff3bd6310..d16d8b6aaccc 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -239,6 +239,7 @@ static struct {
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 42f0550d6b11..6f41e4b5a2b8 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
{"LENOVO", "DE_Series", "rdac", },
+ {"FUJITSU", "ETERNUS_AHB", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 506062081d8e..65646497295f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -539,7 +539,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+static void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table, true);
@@ -549,11 +549,20 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
scsi_del_cmd_from_list(cmd);
}
+static void scsi_run_queue_async(struct scsi_device *sdev)
+{
+ if (scsi_target(sdev)->single_lun ||
+ !list_empty(&sdev->host->starved_list))
+ kblockd_schedule_work(&sdev->requeue_work);
+ else
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+}
+
/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes)
@@ -598,11 +607,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
__blk_mq_end_request(req, error);
- if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
- kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(q, true);
+ scsi_run_queue_async(sdev);
percpu_ref_put(&q->q_usage_counter);
return false;
@@ -1047,7 +1052,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
return BLK_STS_OK;
out_free_sgtables:
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
return ret;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1198,6 +1203,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
@@ -1207,9 +1213,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
cmd->sc_data_direction = DMA_FROM_DEVICE;
if (blk_rq_is_scsi(req))
- return scsi_setup_scsi_cmnd(sdev, req);
+ ret = scsi_setup_scsi_cmnd(sdev, req);
else
- return scsi_setup_fs_cmnd(sdev, req);
+ ret = scsi_setup_fs_cmnd(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ scsi_free_sgtables(cmd);
+
+ return ret;
}
static blk_status_t
@@ -1706,6 +1717,7 @@ out_put_budget:
*/
if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
+ scsi_run_queue_async(sdev);
break;
}
return ret;
@@ -1833,7 +1845,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
- sgl_size = scsi_mq_sgl_size(shost);
+ sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
+ scsi_mq_sgl_size(shost));
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 48ee68059fe6..453f320e85ec 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev,
dev_dbg(dev, "scsi resume: %d\n", err);
if (err == 0) {
+ bool was_runtime_suspended;
+
+ was_runtime_suspended = pm_runtime_suspended(dev);
+
pm_runtime_disable(dev);
err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -93,8 +97,9 @@ static int scsi_dev_type_resume(struct device *dev,
*/
if (!err && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
-
- if (sdev->request_queue->dev)
+ if (was_runtime_suspended)
+ blk_post_runtime_resume(sdev->request_queue, 0);
+ else
blk_set_runtime_active(sdev->request_queue);
}
}
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0f17e7dac1b0..ac35c301c792 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -9,7 +9,7 @@
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8]))
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
@@ -18,15 +18,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
@@ -36,17 +39,12 @@ static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba, txlen;
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be16(&cdb[7]);
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
@@ -61,19 +59,12 @@ static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u32 lba, txlen;
+
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
@@ -84,23 +75,13 @@ static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u64 lba;
+ u32 txlen;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
@@ -115,8 +96,8 @@ static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
+ u64 lba;
+ u32 ei_lbrt, txlen;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
@@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+ lba = get_unaligned_be64(&cdb[12]);
+ ei_lbrt = get_unaligned_be32(&cdb[20]);
+ txlen = get_unaligned_be32(&cdb[28]);
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
+ cmd, lba, txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
@@ -170,7 +137,7 @@ static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- unsigned int regions = cdb[7] << 8 | cdb[8];
+ unsigned int regions = get_unaligned_be16(&cdb[7]);
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
@@ -182,8 +149,8 @@ static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
+ u64 lba;
+ u32 alloc_len;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
@@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
+ lba = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
out:
trace_seq_putc(p, 0);
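
The scsi_trace.c hunks above replace open-coded big-endian CDB field parsing with the get_unaligned_be16/32/64() helpers from <asm/unaligned.h>. As a reference sketch (not part of the patch), a helper of this kind is equivalent to:

	/* Open-coded big-endian 32-bit load; the explicit u32 casts are
	 * what the removed code lacked. */
	static inline u32 be32_load(const unsigned char *p)
	{
		return ((u32)p[0] << 24) | ((u32)p[1] << 16) |
		       ((u32)p[2] << 8) | (u32)p[3];
	}

Without the casts, an expression such as lba |= (cdb[6] << 24) promotes cdb[6] to a signed int, so a byte of 0x80 or above sign-extends when OR'd into a 64-bit sector_t and corrupts the upper 32 bits; the helpers avoid that hazard while shortening the code.
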
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 417b868d8735..a5c78b38d302 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -24,6 +24,8 @@
#define ISCSI_TRANSPORT_VERSION "2.0-870"
+#define ISCSI_SEND_MAX_ALLOWED 10
+
#define CREATE_TRACE_POINTS
#include <trace/events/iscsi.h>
@@ -2010,7 +2012,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- return;
+ goto unbind_session_exit;
}
target_id = session->target_id;
@@ -2022,6 +2024,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
ida_simple_remove(&iscsi_sess_ida, target_id);
scsi_remove_target(&session->dev);
+
+unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
@@ -2945,6 +2949,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
return err;
}
+static int iscsi_session_has_conns(int sid)
+{
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&connlock, flags);
+ list_for_each_entry(conn, &connlist, conn_list) {
+ if (iscsi_conn_get_sid(conn) == sid) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&connlock, flags);
+
+ return found;
+}
+
static int
iscsi_set_iface_params(struct iscsi_transport *transport,
struct iscsi_uevent *ev, uint32_t len)
@@ -3522,10 +3544,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
case ISCSI_UEVENT_DESTROY_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
- if (session)
- transport->destroy_session(session);
- else
+ if (!session)
err = -EINVAL;
+ else if (iscsi_session_has_conns(ev->u.d_session.sid))
+ err = -EBUSY;
+ else
+ transport->destroy_session(session);
break;
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
@@ -3682,6 +3706,7 @@ iscsi_if_rx(struct sk_buff *skb)
struct nlmsghdr *nlh;
struct iscsi_uevent *ev;
uint32_t group;
+ int retries = ISCSI_SEND_MAX_ALLOWED;
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
@@ -3712,6 +3737,10 @@ iscsi_if_rx(struct sk_buff *skb)
break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
+ if (err == -EAGAIN && --retries < 0) {
+ printk(KERN_WARNING "Send reply failed, error %d\n", err);
+ break;
+ }
} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
skb_pull(skb, rlen);
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ef138c57e2a6..182fd25c7c43 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1391,9 +1391,6 @@ static void sas_expander_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_expander_device *edev = rphy_to_expander_device(rphy);
- if (rphy->q)
- blk_cleanup_queue(rphy->q);
-
put_device(dev->parent);
kfree(edev);
}
@@ -1403,9 +1400,6 @@ static void sas_end_device_release(struct device *dev)
struct sas_rphy *rphy = dev_to_rphy(dev);
struct sas_end_device *edev = rphy_to_end_device(rphy);
- if (rphy->q)
- blk_cleanup_queue(rphy->q);
-
put_device(dev->parent);
kfree(edev);
}
@@ -1634,8 +1628,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
}
sas_rphy_unlink(rphy);
- if (rphy->q)
- bsg_unregister_queue(rphy->q);
+ bsg_remove_queue(rphy->q);
transport_remove_device(dev);
device_del(dev);
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f8661062ef95..f3d5b1bbd5aa 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev, \
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
- if (i->f->set_##field) \
+ if (!i->f->set_##field) \
return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 19b53ce14822..d84dfa286d0a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1165,11 +1165,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
- bool dif, dix;
unsigned int mask = logical_to_sectors(sdp, 1) - 1;
bool write = rq_data_dir(rq) == WRITE;
unsigned char protect, fua;
blk_status_t ret;
+ unsigned int dif;
+ bool dix;
ret = scsi_init_io(cmd);
if (ret != BLK_STS_OK)
@@ -1693,20 +1694,30 @@ static void sd_rescan(struct device *dev)
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct gendisk *disk = bdev->bd_disk;
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdev = sdkp->device;
+ void __user *p = compat_ptr(arg);
int error;
+ error = scsi_verify_blk_ioctl(bdev, cmd);
+ if (error < 0)
+ return error;
+
error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
if (error)
return error;
+
+ if (is_sed_ioctl(cmd))
+ return sed_ioctl(sdkp->opal_dev, cmd, p);
/*
* Let the static ioctl translation table take care of it.
*/
if (!sdev->host->hostt->compat_ioctl)
return -ENOIOCTLCMD;
- return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
}
#endif
@@ -2194,8 +2205,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
u8 type;
int ret = 0;
- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
+ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
+ sdkp->protection_type = 0;
return ret;
+ }
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
@@ -3161,9 +3174,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else
+ } else {
+ q->limits.io_opt = 0;
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
+ }
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cce757506383..9c6bf13daaee 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -689,8 +689,10 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (__copy_from_user(cmnd, buf, cmd_size))
+ if (__copy_from_user(cmnd, buf, cmd_size)) {
+ sg_remove_request(sfp, srp);
return -EFAULT;
+ }
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
* but it is possible that the app intended SG_DXFER_TO_DEV, because there
@@ -803,8 +805,10 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e8e768849c70..2cc9c858beee 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -906,7 +906,6 @@ struct pqi_scsi_dev {
u8 scsi3addr[8];
__be64 wwid;
u8 volume_id[16];
- u8 unique_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
u8 is_expander_smp_device : 1;
@@ -1123,8 +1122,9 @@ struct pqi_ctrl_info {
struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool in_shutdown;
+ bool block_device_reset;
bool in_ofa;
+ bool in_shutdown;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
@@ -1166,6 +1166,7 @@ struct pqi_ctrl_info {
struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
dma_addr_t pqi_ofa_mem_dma_handle;
void **pqi_ofa_chunk_virt_addr;
+ atomic_t sync_cmds_outstanding;
};
enum pqi_ctrl_mode {
@@ -1407,6 +1408,11 @@ static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
return ctrl_info->block_requests;
}
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_device_reset;
+}
+
void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8fd5ffc55792..55cc7ede3f6e 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -237,6 +237,11 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
scsi_unblock_requests(ctrl_info->scsi_host);
}
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_device_reset = true;
+}
+
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
unsigned long timeout_msecs)
{
@@ -319,6 +324,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
return device->in_remove && !ctrl_info->in_shutdown;
}
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->in_shutdown;
+}
+
static inline void pqi_schedule_rescan_worker_with_delay(
struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
@@ -348,6 +363,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_work_sync(&ctrl_info->event_work);
+}
+
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -607,79 +627,6 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u16 vpd_page)
-{
- int rc;
- int i;
- int pages;
- unsigned char *buf, bufsize;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (!buf)
- return false;
-
- /* Get the size of the page list first */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, SCSI_VPD_HEADER_SZ);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
- bufsize = pages + SCSI_VPD_HEADER_SZ;
- else
- bufsize = 255;
-
- /* Get the whole VPD page list */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, bufsize);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- for (i = 1; i <= pages; i++)
- if (buf[3 + i] == vpd_page)
- goto exit_supported;
-
-exit_unsupported:
- kfree(buf);
- return false;
-
-exit_supported:
- kfree(buf);
- return true;
-}
-
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u8 *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
- return 1; /* function not supported */
-
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_DEVICE_ID,
- buf, 64);
- if (rc == 0) {
- if (buflen > 16)
- buflen = 16;
- memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
- }
-
- kfree(buf);
-
- return rc;
-}
-
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *buffer,
@@ -1364,14 +1311,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
- if (pqi_get_device_id(ctrl_info, device->scsi3addr,
- device->unique_id, sizeof(device->unique_id)) < 0)
- dev_warn(&ctrl_info->pci_dev->dev,
- "Can't get device id for scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target,
- device->lun);
-
out:
kfree(buffer);
@@ -4068,6 +4007,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
goto out;
}
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4114,6 +4055,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
up(&ctrl_info->sync_request_sem);
@@ -5348,7 +5290,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
pqi_ctrl_busy(ctrl_info);
if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info)) {
+ pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5596,6 +5538,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5733,17 +5687,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
shost->host_no, device->bus, device->target, device->lun);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "controller %u offlined - cannot send device reset\n",
- ctrl_info->ctrl_id);
+ if (pqi_ctrl_offline(ctrl_info) ||
+ pqi_device_reset_blocked(ctrl_info)) {
rc = FAILED;
goto out;
}
pqi_wait_until_ofa_finished(ctrl_info);
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
rc = pqi_device_reset(ctrl_info, device);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
dev_err(&ctrl_info->pci_dev->dev,
@@ -6065,7 +6019,8 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info))
+ if (pqi_ctrl_in_ofa(ctrl_info) ||
+ pqi_ctrl_in_shutdown(ctrl_info))
return -EBUSY;
switch (cmd) {
@@ -6179,7 +6134,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned char uid[16];
+ u8 unique_id[16];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6192,16 +6147,22 @@ static ssize_t pqi_unique_id_show(struct device *dev,
flags);
return -ENODEV;
}
- memcpy(uid, device->unique_id, sizeof(uid));
+
+ if (device->is_physical_device) {
+ memset(unique_id, 0, 8);
+ memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+ } else {
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return snprintf(buffer, PAGE_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
- uid[0], uid[1], uid[2], uid[3],
- uid[4], uid[5], uid[6], uid[7],
- uid[8], uid[9], uid[10], uid[11],
- uid[12], uid[13], uid[14], uid[15]);
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
@@ -6939,13 +6900,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- rc = pqi_force_sis_mode(ctrl_info);
- if (rc)
- return rc;
+ if (reset_devices) {
+ sis_soft_reset(ctrl_info);
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ } else {
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+ }
/*
* Wait until the controller is ready to start accepting SIS
@@ -7372,6 +7340,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
+ atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7645,8 +7614,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
0, NULL, NO_TIMEOUT);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
@@ -7814,28 +7781,74 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
pqi_remove_ctrl(ctrl_info);
}
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+ scmd = io_request->scmd;
+ WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer */
+ WARN_ON(scmd == NULL); /* non-I/O or driver-initiated command */
+ }
+}
+
static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
ctrl_info = pci_get_drvdata(pci_dev);
- if (!ctrl_info)
- goto error;
+ if (!ctrl_info) {
+ dev_err(&pci_dev->dev,
+ "cache could not be flushed\n");
+ return;
+ }
+
+ pqi_disable_events(ctrl_info);
+ pqi_wait_until_ofa_finished(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_event_worker(ctrl_info);
+
+ pqi_ctrl_shutdown_start(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending I/O failed\n");
+ return;
+ }
+
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
- pqi_free_interrupts(ctrl_info);
- pqi_reset(ctrl_info);
- if (rc == 0)
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache\n");
+
+ pqi_ctrl_block_requests(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending sync cmds failed\n");
return;
+ }
+
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_reset(ctrl_info);
-error:
- dev_warn(&pci_dev->dev,
- "unable to flush controller cache\n");
}
static void pqi_process_lockup_action_param(void)
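
For readability, the reordered pqi_shutdown() above condenses to the following sequence (a summary of the hunk with error handling and log messages trimmed, not additional driver code):

	pqi_disable_events(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_event_worker(ctrl_info);
	pqi_ctrl_shutdown_start(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SHUTDOWN);	/* battery-backed cache */
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
	pqi_crash_if_pending_command(ctrl_info);	/* WARNs on stragglers */
	pqi_reset(ctrl_info);

Worker threads and outstanding I/O are quiesced before the battery-backed cache is flushed, and synchronous commands are drained before the final controller reset.
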
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 5cca1b9ef1f1..977655d622e4 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (pqi_sas_phy->added_to_port)
list_del(&pqi_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(pqi_sas_phy);
}
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index aef4881d8e21..a85d52b5dc32 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -66,10 +66,8 @@ static int snirm710_probe(struct platform_device *dev)
base = res->start;
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
- if (!hostdata) {
- dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+ if (!hostdata)
return -ENOMEM;
- }
hostdata->dev = &dev->dev;
dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 4664fdf75c0f..70a28f6fb1d0 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -750,7 +750,7 @@ static int sr_probe(struct device *dev)
cd->cdi.disk = disk;
if (register_cdrom(&cd->cdi))
- goto fail_put;
+ goto fail_minor;
/*
* Initialize block layer runtime PM stuffs before the
@@ -768,6 +768,10 @@ static int sr_probe(struct device *dev)
return 0;
+fail_minor:
+ spin_lock(&sr_index_lock);
+ clear_bit(minor, sr_index_bits);
+ spin_unlock(&sr_index_lock);
fail_put:
put_disk(disk);
fail_free:
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3d80ab67a626..b19c4855377f 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -499,7 +499,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
@@ -521,7 +521,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0f6ff33ce52e..d4a8be5ffd52 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -13,6 +13,7 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"
@@ -286,6 +287,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
return 0;
}
+static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba,
+ struct ufs_dev_desc *card)
+{
+ if (card->wmanufacturerid == UFS_VENDOR_SAMSUNG)
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
+
+ return 0;
+}
+
/**
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -298,6 +308,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.setup_clocks = ufs_mtk_setup_clocks,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
+ .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
};
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index b4d1b5c22987..6aa8d2fb16a9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -903,7 +903,8 @@ out:
return err;
}
-static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba,
+ struct ufs_dev_desc *card)
{
int err = 0;
@@ -1530,11 +1531,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 8d9332bb7d0c..9ef211cc540e 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit == ahit)
- goto out_unlock;
- hba->ahit = ahit;
- if (!pm_runtime_suspended(hba->dev))
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+ if (hba->ahit != ahit)
+ hba->ahit = ahit;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!pm_runtime_suspended(hba->dev)) {
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ufshcd_auto_hibern8_enable(hba);
+ ufshcd_release(hba);
+ pm_runtime_put(hba->dev);
+ }
}
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 869e71f861d6..5e1bbf87f482 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -98,14 +98,18 @@ static int ufs_bsg_request(struct bsg_job *job)
bsg_reply->reply_payload_rcv_len = 0;
+ pm_runtime_get_sync(hba->dev);
+
msgcode = bsg_request->msgcode;
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
&desc_len, desc_op);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync(hba->dev);
goto out;
+ }
/* fall through */
case UPIU_TRANSACTION_NOP_OUT:
@@ -135,6 +139,8 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
+ pm_runtime_put_sync(hba->dev);
+
if (!desc_buff)
goto out;
@@ -196,7 +202,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
bsg_dev->parent = get_device(parent);
bsg_dev->release = ufs_bsg_node_release;
- dev_set_name(bsg_dev, "ufs-bsg");
+ dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
ret = device_add(bsg_dev);
if (ret)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d485eaa90731..aa7d02d8c440 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -492,8 +492,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -1268,6 +1268,24 @@ out:
return ret;
}
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+ int *busy = priv;
+
+ WARN_ON_ONCE(reserved);
+ (*busy)++;
+ return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+ struct request_queue *q = hba->cmd_queue;
+ int busy = 0;
+
+ blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+ return busy;
+}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
@@ -1537,6 +1555,11 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -1614,7 +1637,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1668,7 +1691,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || hba->lrb_in_use || hba->outstanding_tasks
+ || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done
|| ufshcd_eh_in_progress(hba))
return;
@@ -2438,22 +2461,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- /* acquire the tag to make sure device cmds don't use it */
- if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
- /*
- * Dev manage command in progress, requeue the command.
- * Requeuing the command helps in cases where the request *may*
- * find different tag instead of waiting for dev manage command
- * completion.
- */
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
- clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2474,7 +2484,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_map_sg(hba, lrbp);
if (err) {
lrbp->cmd = NULL;
- clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release(hba);
goto out;
}
/* Make sure descriptors are ready before ringing the doorbell */
@@ -2622,44 +2632,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
}
/**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
- int tag;
- bool ret = false;
- unsigned long tmp;
-
- if (!tag_out)
- goto out;
-
- do {
- tmp = ~hba->lrb_in_use;
- tag = find_last_bit(&tmp, hba->nutrs);
- if (tag >= hba->nutrs)
- goto out;
- } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
- *tag_out = tag;
- ret = true;
-out:
- return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
- clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
-/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
@@ -2671,6 +2643,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err;
int tag;
@@ -2684,7 +2658,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -2709,8 +2687,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err ? "query_complete_err" : "query_complete");
out_put_tag:
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -2984,10 +2961,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -3848,15 +3825,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
+ int err;
+
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+ * If link recovery fails, return the error code returned by
+ * ufshcd_link_recovery().
+ * If link recovery succeeds, return -EAGAIN so that the hibern8
+ * enter is retried.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@@ -3870,7 +3856,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret || ret == -ENOLINK)
+ if (!ret)
goto out;
}
out:
@@ -3904,7 +3890,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
@@ -4734,7 +4720,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
break;
} /* end of switch */
- if (host_byte(result) != DID_OK)
+ if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
return result;
}
@@ -4781,7 +4767,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
cmd->result = result;
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
- clear_bit_unlock(index, &hba->lrb_in_use);
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
@@ -4803,9 +4788,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
hba->outstanding_reqs ^= completed_reqs;
ufshcd_clk_scaling_update_busy(hba);
-
- /* we might have free'd some tags above */
- wake_up(&hba->dev_cmd.tag_wq);
}
/**
@@ -4969,6 +4951,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
}
@@ -4993,6 +4976,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
ufshcd_disable_auto_bkops(hba);
}
+ hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -5263,8 +5247,8 @@ static void ufshcd_err_handler(struct work_struct *work)
/*
* if host reset is required then skip clearing the pending
- * transfers forcefully because they will automatically get
- * cleared after link startup.
+ * transfers forcefully because they will get cleared during
+ * host reset and restore.
*/
if (needs_reset)
goto skip_pending_xfer_clear;
@@ -5708,6 +5692,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
int cmd_type,
enum query_opcode desc_op)
{
+ struct request_queue *q = hba->cmd_queue;
+ struct request *req;
struct ufshcd_lrb *lrbp;
int err = 0;
int tag;
@@ -5717,7 +5703,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
- wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+ req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ tag = req->tag;
+ WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
init_completion(&wait);
lrbp = &hba->lrb[tag];
@@ -5791,8 +5781,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
}
}
- ufshcd_put_dev_cmd_tag(hba, tag);
- wake_up(&hba->dev_cmd.tag_wq);
+ blk_put_request(req);
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -6085,9 +6074,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
- clear_bit_unlock(tag, &hba->lrb_in_use);
- wake_up(&hba->dev_cmd.tag_wq);
-
out:
if (!err) {
err = SUCCESS;
@@ -6120,9 +6106,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
int err;
unsigned long flags;
- /* Reset the host controller */
+ /*
+ * Stop the host controller and complete the requests that the
+ * hardware has already cleared.
+ */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
+ hba->silence_err_logs = true;
+ ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
@@ -6156,22 +6148,15 @@ out:
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
int err = 0;
- unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
do {
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
- /*
- * After reset the door-bell might be cleared, complete
- * outstanding requests in s/w here.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
return err;
}
@@ -6633,7 +6618,8 @@ out:
return ret;
}
-static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba,
+ struct ufs_dev_desc *card)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
ufshcd_tune_pa_tactivate(hba);
@@ -6647,7 +6633,7 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
- ufshcd_vops_apply_dev_quirks(hba);
+ ufshcd_vops_apply_dev_quirks(hba, card);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -6699,23 +6685,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
&hba->desc_size.hlth_desc);
if (err)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
-{
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6810,9 +6786,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
@@ -6832,7 +6805,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
}
ufs_fixup_device_setup(hba, &card);
- ufshcd_tune_unipro_params(hba);
+ ufshcd_tune_unipro_params(hba, &card);
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
@@ -6861,6 +6834,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -6878,7 +6854,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_icc_levels(hba);
/* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
goto out;
/* Initialize devfreq after UFS device is detected */
@@ -7874,12 +7851,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
goto out;
set_old_link_state:
@@ -8095,6 +8072,7 @@ void ufshcd_remove(struct ufs_hba *hba)
{
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
+ blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8196,9 +8174,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- /* Set descriptor lengths to specification defaults */
- ufshcd_def_desc_sizes(hba);
-
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -8261,9 +8236,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_rwsem(&hba->clk_scaling_lock);
- /* Initialize device management tag acquire wait queue */
- init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
ufshcd_init_clk_gating(hba);
ufshcd_init_clk_scaling(hba);
@@ -8297,13 +8269,22 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+ if (IS_ERR(hba->cmd_queue)) {
+ err = PTR_ERR(hba->cmd_queue);
+ goto out_remove_scsi_host;
+ }
+
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
/* Host controller enable */
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
- goto out_remove_scsi_host;
+ goto free_cmd_queue;
}
/*
@@ -8340,6 +8321,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
return 0;
+free_cmd_queue:
+ blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
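
Taken together, the ufshcd.c hunks above drop the driver-private lrb_in_use bitmap and dev_cmd.tag_wq waitqueue in favor of block-layer tag allocation. A condensed sketch of the resulting device-management command flow (names as in the patch; error handling and the actual command issue trimmed):

	struct request_queue *q = hba->cmd_queue;
	struct request *req;
	int tag;

	/* Reserve a tag by taking a request from the shared tag set. */
	req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	tag = req->tag;

	/* ... build and issue the command through hba->lrb[tag] ... */

	/* Releasing the request frees the tag; no private bitmap or
	 * wake_up() is needed any more. */
	blk_put_request(req);
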
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index ecfa898b9ccc..32a0ea443406 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -212,13 +212,11 @@ struct ufs_query {
* @type: device management command type - Query, NOP OUT
* @lock: lock to allow one command at a time
* @complete: internal commands completion
- * @tag_wq: wait queue until free command slot is available
*/
struct ufs_dev_cmd {
enum dev_cmd_type type;
struct mutex lock;
struct completion *complete;
- wait_queue_head_t tag_wq;
struct ufs_query query;
};
@@ -298,6 +296,7 @@ struct ufs_pwr_mode_info {
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
* @phy_initialization: used to initialize phys
+ * @device_reset: called to issue a reset pulse on the UFS device
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -321,11 +320,12 @@ struct ufs_hba_variant_ops {
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *);
+ int (*apply_dev_quirks)(struct ufs_hba *, struct ufs_dev_desc *);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*phy_initialization)(struct ufs_hba *);
+ void (*device_reset)(struct ufs_hba *hba);
};
/* clock gating state */
@@ -459,7 +459,7 @@ struct ufs_stats {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
@@ -489,6 +489,7 @@ struct ufs_stats {
* @uic_error: UFS interconnect layer error status
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
+ * @silence_err_logs: flag to silence error logs
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
@@ -517,6 +518,7 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
+ struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
@@ -537,7 +539,6 @@ struct ufs_hba {
u32 ahit;
struct ufshcd_lrb *lrb;
- unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
@@ -646,6 +647,7 @@ struct ufs_hba {
u32 saved_err;
u32 saved_uic_err;
struct ufs_stats ufs_stats;
+ bool silence_err_logs;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -886,6 +888,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
u8 *buf, u32 size, bool ascii);
@@ -1011,10 +1014,11 @@ static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
return hba->vops->hibern8_notify(hba, cmd, status);
}
-static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba,
+ struct ufs_dev_desc *card)
{
if (hba->vops && hba->vops->apply_dev_quirks)
- return hba->vops->apply_dev_quirks(hba);
+ return hba->vops->apply_dev_quirks(hba, card);
return 0;
}
@@ -1040,6 +1044,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba->vops->dbg_register_dump(hba);
}
+static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->device_reset)
+ hba->vops->device_reset(hba);
+}
+
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/*
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index ca8e3abeb2c7..a23a8e5794f5 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+ return dma_len > (1U << 16) ? (1U << 16) : dma_len;
+}
+
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+ u32 dma_len)
+{
+ /* The old driver used 0xfffc as limit, so do that here too */
+ return dma_len > 0xfffc ? 0xfffc : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
.irq_pending = fastlane_esp_irq_pending,
- .dma_length_limit = zorro_esp_dma_length_limit,
+ .dma_length_limit = fastlane_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
.dma_drain = zorro_esp_dma_drain,
.dma_invalidate = fastlane_esp_dma_invalidate,