author     Linus Torvalds <torvalds@linux-foundation.org>   2010-02-26 16:55:27 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-02-26 16:55:27 -0800
commit     654451748b779b28077d9058442d0f354251870d (patch)
tree       ff889a2f6226e16b1121789f809927666a9ccf13 /drivers/scsi/qla2xxx
parent     64d497f55379b1e320a08ec2426468d96f5642ec (diff)
parent     77c9cfc51b0d732b2524799810fb30018074fd60 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (158 commits)
  [SCSI] Fix printing of failed 32-byte commands
  [SCSI] Fix printing of variable length commands
  [SCSI] libsrp: fix bug in ADDITIONAL CDB LENGTH interpretation
  [SCSI] scsi_dh_alua: Add IBM Power Virtual SCSI ALUA device to dev list
  [SCSI] scsi_dh_alua: add netapp to dev list
  [SCSI] qla2xxx: Update version number to 8.03.02-k1.
  [SCSI] qla2xxx: EEH: Restore PCI saved state during pci slot reset.
  [SCSI] qla2xxx: Add firmware ETS burst support.
  [SCSI] qla2xxx: Correct loop-resync issues during SNS scans.
  [SCSI] qla2xxx: Correct use-after-free issue in terminate_rport_io callback.
  [SCSI] qla2xxx: Correct EH bus-reset handling.
  [SCSI] qla2xxx: Proper clean-up of BSG requests when request times out.
  [SCSI] qla2xxx: Initialize payload receive length in failure path of vendor commands
  [SCSI] fix duplicate removal on error path in scsi_sysfs_add_sdev
  [SCSI] fix refcounting bug in scsi_get_host_dev
  [SCSI] fix memory leak in scsi_report_lun_scan
  [SCSI] lpfc: correct PPC build failure
  [SCSI] raid_class: add raid1e
  [SCSI] mpt2sas: Do not call sas_is_tlr_enabled for RAID volumes.
  [SCSI] zfcp: Introduce header file for qdio structs and inline functions
  ...
Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c     732
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h      155
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h        33
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h        7
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c      32
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c     120
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c      110
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c      151
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       135
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h    6
10 files changed, 1396 insertions(+), 85 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 3a9f5b288ae..90d1e062ec4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -11,7 +11,9 @@
#include <linux/delay.h>
static int qla24xx_vport_disable(struct fc_vport *, bool);
-
+static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
+int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
+static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
/* SYSFS attributes --------------------------------------------------------- */
static ssize_t
@@ -1168,6 +1170,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
}
static ssize_t
+qla24xx_84xx_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rval = QLA_SUCCESS;
+ uint16_t status[2] = {0, 0};
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA84XX(ha) && ha->cs84xx) {
+ if (ha->cs84xx->op_fw_version == 0) {
+ rval = qla84xx_verify_chip(vha, status);
+ }
+
+ if ((rval == QLA_SUCCESS) && (status[0] == 0))
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ (uint32_t)ha->cs84xx->op_fw_version);
+ }
+
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
NULL);
+static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+ NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_optrom_efi_version,
&dev_attr_optrom_fcode_version,
&dev_attr_optrom_fw_version,
+ &dev_attr_84xx_fw_version,
&dev_attr_total_isp_aborts,
&dev_attr_mpi_version,
&dev_attr_phy_version,
@@ -1504,8 +1531,6 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
-
- qla2x00_abort_fcport_cmds(fcport);
}
static int
@@ -1795,6 +1820,581 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
+/* BSG support for ELS/CT pass through */
+inline srb_t *
+qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
+{
+ srb_t *sp;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_bsg_ctx *ctx;
+
+ sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ ctx = kzalloc(size, GFP_KERNEL);
+ if (!ctx) {
+ mempool_free(sp, ha->srb_mempool);
+ goto done;
+ }
+
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->ctx = ctx;
+done:
+ return sp;
+}
+
+static int
+qla2x00_process_els(struct fc_bsg_job *bsg_job)
+{
+ struct fc_rport *rport;
+ fc_port_t *fcport;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+ const char *type;
+ int req_sg_cnt, rsp_sg_cnt;
+ int rval = (DRIVER_ERROR << 16);
+ uint16_t nextlid = 0;
+ struct srb_bsg *els;
+
+ /* Multiple SG's are not supported for ELS requests */
+ if (bsg_job->request_payload.sg_cnt > 1 ||
+ bsg_job->reply_payload.sg_cnt > 1) {
+ DEBUG2(printk(KERN_INFO
+ "multiple SG's are not supported for ELS requests"
+ " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt));
+ rval = -EPERM;
+ goto done;
+ }
+
+ /* ELS request for rport */
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_RPT_ELS";
+
+ /* make sure the rport is logged in,
+ * if not perform fabric login
+ */
+ if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "failed to login port %06X for ELS passthru\n",
+ fcport->d_id.b24));
+ rval = -EIO;
+ goto done;
+ }
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ ha = vha->hw;
+ type = "FC_BSG_HST_ELS_NOLOGIN";
+
+ /* Allocate a dummy fcport structure, since functions
+ * preparing the IOCB and mailbox command retrieves port
+ * specific information from fcport structure. For Host based
+ * ELS commands there will be no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa =
+ bsg_job->request->rqst_data.h_els.port_id[0];
+ fcport->d_id.b.area =
+ bsg_job->request->rqst_data.h_els.port_id[1];
+ fcport->d_id.b.domain =
+ bsg_job->request->rqst_data.h_els.port_id[2];
+ fcport->loop_id =
+ (fcport->d_id.b.al_pa == 0xFD) ?
+ NPH_FABRIC_CONTROLLER : NPH_F_PORT;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
+ {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts \
+ [request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ els = sp->ctx;
+ els->ctx.type =
+ (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
+ SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+ els->bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ bsg_job->request->rqst_data.h_els.command_code,
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+ return rval;
+
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ goto done_free_fcport;
+
+done_free_fcport:
+ if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
+ kfree(fcport);
+done:
+ return rval;
+}
+
+static int
+qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+{
+ srb_t *sp;
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval = (DRIVER_ERROR << 16);
+ int req_sg_cnt, rsp_sg_cnt;
+ uint16_t loop_id;
+ struct fc_port *fcport;
+ char *type = "FC_BSG_HST_CT";
+ struct srb_bsg *ct;
+
+ /* pass through is supported only for ISP 4Gb or higher */
+ if (!IS_FWI2_CAPABLE(ha)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld):Firmware is not capable to support FC "
+ "CT pass thru\n", vha->host_no));
+ rval = -EPERM;
+ goto done;
+ }
+
+ req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
+ {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "dma mapping resulted in different sg counts \
+ [request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done_unmap_sg;
+ }
+
+ loop_id =
+ (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+ >> 24;
+ switch (loop_id) {
+ case 0xFC:
+ loop_id = cpu_to_le16(NPH_SNS);
+ break;
+ case 0xFA:
+ loop_id = vha->mgmt_svr_loop_id;
+ break;
+ default:
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Unknown loop id: %x\n", loop_id));
+ rval = -EINVAL;
+ goto done_unmap_sg;
+ }
+
+ /* Allocate a dummy fcport structure, since functions preparing the
+ * IOCB and mailbox command retrieves port specific information
+ * from fcport structure. For Host based ELS commands there will be
+ * no fcport structure allocated
+ */
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (!fcport)
+ {
+ rval = -ENOMEM;
+ goto done_unmap_sg;
+ }
+
+ /* Initialize all required fields of fcport */
+ fcport->vha = vha;
+ fcport->vp_idx = vha->vp_idx;
+ fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
+ fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
+ fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+ fcport->loop_id = loop_id;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
+ if (!sp) {
+ rval = -ENOMEM;
+ goto done_free_fcport;
+ }
+
+ ct = sp->ctx;
+ ct->ctx.type = SRB_CT_CMD;
+ ct->bsg_job = bsg_job;
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa));
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ rval = -EIO;
+ goto done_free_fcport;
+ }
+ return rval;
+
+done_free_fcport:
+ kfree(fcport);
+done_unmap_sg:
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done:
+ return rval;
+}
+
+static int
+qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ uint8_t command_sent;
+ uint32_t vendor_cmd;
+ char *type;
+ struct msg_echo_lb elreq;
+ uint16_t response[MAILBOX_REGISTER_COUNT];
+ uint8_t* fw_sts_ptr;
+ uint8_t *req_data;
+ dma_addr_t req_data_dma;
+ uint32_t req_data_len;
+ uint8_t *rsp_data;
+ dma_addr_t rsp_data_dma;
+ uint32_t rsp_data_len;
+
+ if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ rval = -EBUSY;
+ goto done;
+ }
+
+ if (!vha->flags.online) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "host not online\n"));
+ rval = -EIO;
+ goto done;
+ }
+
+ elreq.req_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!elreq.req_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+ elreq.rsp_sg_cnt =
+ dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if (!elreq.rsp_sg_cnt) {
+ rval = -ENOMEM;
+ goto done;
+ }
+
+ if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
+ (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
+ {
+ DEBUG2(printk(KERN_INFO
+ "dma mapping resulted in different sg counts \
+ [request_sg_cnt: %x dma_request_sg_cnt: %x\
+ reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ rval = -EAGAIN;
+ goto done_unmap_sg;
+ }
+ req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
+ req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
+ &req_data_dma, GFP_KERNEL);
+
+ rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
+ &rsp_data_dma, GFP_KERNEL);
+
+ /* Copy the request buffer in req_data now */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data,
+ req_data_len);
+
+ elreq.send_dma = req_data_dma;
+ elreq.rcv_dma = rsp_data_dma;
+ elreq.transfer_size = req_data_len;
+
+ /* Vendor cmd : loopback or ECHO diagnostic
+ * Options:
+ * Loopback : Either internal or external loopback
+ * ECHO: ECHO ELS or Vendor specific FC4 link data
+ */
+ vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
+ elreq.options =
+ *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
+ + 1);
+
+ switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QL_VND_LOOPBACK:
+ if (ha->current_topology != ISP_CFG_F) {
+ type = "FC_BSG_HST_VENDOR_LOOPBACK";
+
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
+ vha->host_no, type, vendor_cmd, elreq.options));
+
+ command_sent = INT_DEF_LB_LOOPBACK_CMD;
+ rval = qla2x00_loopback_test(vha, &elreq, response);
+ if (IS_QLA81XX(ha)) {
+ if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
+ DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
+ "ISP\n", __func__, vha->host_no));
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ } else {
+ type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
+ vha->host_no, type, vendor_cmd, elreq.options));
+
+ command_sent = INT_DEF_LB_ECHO_CMD;
+ rval = qla2x00_echo_test(vha, &elreq, response);
+ }
+ break;
+ case QLA84_RESET:
+ if (!IS_QLA84XX(vha->hw)) {
+ rval = -EINVAL;
+ DEBUG16(printk(
+ "%s(%ld): 8xxx exiting.\n",
+ __func__, vha->host_no));
+ return rval;
+ }
+ rval = qla84xx_reset(vha, &elreq, bsg_job);
+ break;
+ case QLA84_MGMT_CMD:
+ if (!IS_QLA84XX(vha->hw)) {
+ rval = -EINVAL;
+ DEBUG16(printk(
+ "%s(%ld): 8xxx exiting.\n",
+ __func__, vha->host_no));
+ return rval;
+ }
+ rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
+ break;
+ default:
+ rval = -ENOSYS;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
+ rval = 0;
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy( fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ } else {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
+ rval = bsg_job->reply->result = 0;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
+ bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, rsp_data,
+ rsp_data_len);
+ }
+ bsg_job->job_done(bsg_job);
+
+done_unmap_sg:
+
+ if(req_data)
+ dma_free_coherent(&ha->pdev->dev, req_data_len,
+ req_data, req_data_dma);
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+done:
+ return rval;
+}
+
+static int
+qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+{
+ int ret = -EINVAL;
+
+ switch (bsg_job->request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_ELS_NOLOGIN:
+ ret = qla2x00_process_els(bsg_job);
+ break;
+ case FC_BSG_HST_CT:
+ ret = qla2x00_process_ct(bsg_job);
+ break;
+ case FC_BSG_HST_VENDOR:
+ ret = qla2x00_process_vendor_specific(bsg_job);
+ break;
+ case FC_BSG_HST_ADD_RPORT:
+ case FC_BSG_HST_DEL_RPORT:
+ case FC_BSG_RPT_CT:
+ default:
+ DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ break;
+ }
+ return ret;
+}
+
+static int
+qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ int cnt, que;
+ unsigned long flags;
+ struct req_que *req;
+ struct srb_bsg *sp_bsg;
+
+ /* find the bsg job from the active list of commands */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
+ sp = req->outstanding_cmds[cnt];
+
+ if (sp) {
+ sp_bsg = (struct srb_bsg*)sp->ctx;
+
+ if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
+ (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
+ || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
+ (sp_bsg->bsg_job == bsg_job)) {
+ if (ha->isp_ops->abort_command(sp)) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort_command failed\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -EIO;
+ } else {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort_command success\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = 0;
+ }
+ goto done;
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+ return 0;
+
+done:
+ if (bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ return 0;
+}
+
struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1,
@@ -1838,6 +2438,8 @@ struct fc_function_template qla2xxx_transport_functions = {
.vport_create = qla24xx_vport_create,
.vport_disable = qla24xx_vport_disable,
.vport_delete = qla24xx_vport_delete,
+ .bsg_request = qla24xx_bsg_request,
+ .bsg_timeout = qla24xx_bsg_timeout,
};
struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1878,6 +2480,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .bsg_request = qla24xx_bsg_request,
+ .bsg_timeout = qla24xx_bsg_timeout,
};
void
@@ -1906,3 +2510,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
speed = FC_PORTSPEED_1GBIT;
fc_host_supported_speeds(vha->host) = speed;
}
+static int
+qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
+{
+ int ret = 0;
+ int cmd;
+ uint16_t cmd_status;
+
+ DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
+
+ cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
+ == A84_RESET_FLAG_ENABLE_DIAG_FW ?
+ A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
+ ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
+ &cmd_status);
+ return ret;
+}
+
+static int
+qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
+{
+ struct access_chip_84xx *mn;
+ dma_addr_t mn_dma, mgmt_dma;
+ void *mgmt_b = NULL;
+ int ret = 0;
+ int rsp_hdr_len, len = 0;
+ struct qla84_msg_mgmt *ql84_mgmt;
+
+ ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
+ ql84_mgmt->cmd =
+ *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
+ ql84_mgmt->mgmtp.u.mem.start_addr =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
+ ql84_mgmt->len =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
+ ql84_mgmt->mgmtp.u.config.id =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
+ ql84_mgmt->mgmtp.u.config.param0 =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
+ ql84_mgmt->mgmtp.u.config.param1 =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
+ ql84_mgmt->mgmtp.u.info.type =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
+ ql84_mgmt->mgmtp.u.info.context =
+ *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
+
+ rsp_hdr_len = bsg_job->request_payload.payload_len;
+
+ mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
+ if (mn == NULL) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
+ "failed%lu\n", __func__, ha->host_no));
+ return -ENOMEM;
+ }
+
+ memset(mn, 0, sizeof (struct access_chip_84xx));
+
+ mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
+ mn->entry_count = 1;
+
+ switch (ql84_mgmt->cmd) {
+ case QLA84_MGMT_READ_MEM:
+ mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
+ break;
+ case QLA84_MGMT_WRITE_MEM:
+ mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
+ break;
+ case QLA84_MGMT_CHNG_CONFIG:
+ mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
+ mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
+ mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
+ break;
+ case QLA84_MGMT_GET_INFO:
+ mn->options = cpu_to_le16(ACO_REQUEST_INFO);
+ mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
+ mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
+ break;
+ default:
+ ret = -EIO;
+ goto exit_mgmt0;
+ }
+
+ if ((len == ql84_mgmt->len) &&
+ ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
+ mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
+ &mgmt_dma, GFP_KERNEL);
+ if (mgmt_b == NULL) {
+ DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
+ "failed%lu\n", __func__, ha->host_no));
+ ret = -ENOMEM;
+ goto exit_mgmt0;
+ }
+ mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
+ mn->dseg_count = cpu_to_le16(1);
+ mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
+ mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
+ mn->dseg_length = cpu_to_le32(len);
+
+ if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
+ memcpy(mgmt_b, ql84_mgmt->payload, len);
+ }
+ }
+
+ ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
+ if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
+ || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
+ if (ret != QLA_SUCCESS)
+ DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
+ __func__, ha->host_no));
+ } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
+ (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
+ }
+
+ if (mgmt_b)
+ dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
+
+exit_mgmt0:
+ dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
+ return ret;
+}
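Editor's note on the CT pass-through routing added above: qla2x00_process_ct() derives the destination from the top byte of the CT preamble word, sending 0xFC to the directory (name) server handle and 0xFA to the fabric management server handle, and rejecting anything else. A minimal stand-alone sketch of that routing, with the two handles stubbed to placeholder values since the real NPH_SNS constant and vha->mgmt_svr_loop_id live in the driver headers:

    #include <stdio.h>
    #include <stdint.h>

    /* Placeholder handles; the driver uses NPH_SNS and vha->mgmt_svr_loop_id. */
    #define FAKE_NPH_SNS      0x07FC
    #define FAKE_MGMT_SVR_ID  0x000A

    /* Mirror of the loop_id selection in qla2x00_process_ct(). */
    static int ct_route(uint32_t preamble_word1, uint16_t *loop_id)
    {
            switch ((preamble_word1 & 0xFF000000) >> 24) {
            case 0xFC:                      /* directory (name) server */
                    *loop_id = FAKE_NPH_SNS;
                    return 0;
            case 0xFA:                      /* fabric management server */
                    *loop_id = FAKE_MGMT_SVR_ID;
                    return 0;
            default:                        /* driver returns -EINVAL here */
                    return -1;
            }
    }

    int main(void)
    {
            uint16_t id;

            printf("0xFC -> %d (id 0x%x)\n", ct_route(0xFC000000u, &id), id);
            printf("0xFA -> %d (id 0x%x)\n", ct_route(0xFA000000u, &id), id);
            printf("0x01 -> %d (rejected)\n", ct_route(0x01000000u, &id));
            return 0;
    }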
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1263d9796e8..afa95614aaf 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -31,6 +31,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
#define QLA2XXX_DRIVER_NAME "qla2xxx"
@@ -228,6 +229,27 @@ struct srb_logio {
uint16_t flags;
};
+struct srb_bsg_ctx {
+#define SRB_ELS_CMD_RPT 3
+#define SRB_ELS_CMD_HST 4
+#define SRB_CT_CMD 5
+ uint16_t type;
+};
+
+struct srb_bsg {
+ struct srb_bsg_ctx ctx;
+ struct fc_bsg_job *bsg_job;
+};
+
+struct msg_echo_lb {
+ dma_addr_t send_dma;
+ dma_addr_t rcv_dma;
+ uint16_t req_sg_cnt;
+ uint16_t rsp_sg_cnt;
+ uint16_t options;
+ uint32_t transfer_size;
+};
+
/*
* ISP I/O Register Set structure definitions.
*/
@@ -522,6 +544,8 @@ typedef struct {
#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
+/* ISP mailbox loopback echo diagnostic error code */
+#define MBS_LB_RESET 0x17
/*
* Firmware options 1, 2, 3.
*/
@@ -2230,6 +2254,13 @@ struct req_que {
int max_q_depth;
};
+/* Place holder for FW buffer parameters */
+struct qlfc_fw {
+ void *fw_buf;
+ dma_addr_t fw_dma;
+ uint32_t len;
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2594,6 +2625,7 @@ struct qla_hw_data {
struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
+ struct qlfc_fw fw_buf;
};
/*
@@ -2766,4 +2798,127 @@ typedef struct scsi_qla_host {
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+/*
+ * BSG Vendor specific commands
+ */
+
+#define QL_VND_LOOPBACK 0x01
+#define QLA84_RESET 0x02
+#define QLA84_UPDATE_FW 0x03
+#define QLA84_MGMT_CMD 0x04
+
+/* BSG definitions for interpreting CommandSent field */
+#define INT_DEF_LB_LOOPBACK_CMD 0
+#define INT_DEF_LB_ECHO_CMD 1
+
+/* BSG Vendor specific definitions */
+typedef struct _A84_RESET {
+ uint16_t Flags;
+ uint16_t Reserved;
+#define A84_RESET_FLAG_ENABLE_DIAG_FW 1
+} __attribute__((packed)) A84_RESET, *PA84_RESET;
+
+#define A84_ISSUE_WRITE_TYPE_CMD 0
+#define A84_ISSUE_READ_TYPE_CMD 1
+#define A84_CLEANUP_CMD 2
+#define A84_ISSUE_RESET_OP_FW 3
+#define A84_ISSUE_RESET_DIAG_FW 4
+#define A84_ISSUE_UPDATE_OPFW_CMD 5
+#define A84_ISSUE_UPDATE_DIAGFW_CMD 6
+
+struct qla84_mgmt_param {
+ union {
+ struct {
+ uint32_t start_addr;
+ } mem; /* for QLA84_MGMT_READ/WRITE_MEM */
+ struct {
+ uint32_t id;
+#define QLA84_MGMT_CONFIG_ID_UIF 1
+#define QLA84_MGMT_CONFIG_ID_FCOE_COS 2
+#define QLA84_MGMT_CONFIG_ID_PAUSE 3
+#define QLA84_MGMT_CONFIG_ID_TIMEOUTS 4
+
+ uint32_t param0;
+ uint32_t param1;
+ } config; /* for QLA84_MGMT_CHNG_CONFIG */
+
+ struct {
+ uint32_t type;
+#define QLA84_MGMT_INFO_CONFIG_LOG_DATA 1 /* Get Config Log Data */
+#define QLA84_MGMT_INFO_LOG_DATA 2 /* Get Log Data */
+#define QLA84_MGMT_INFO_PORT_STAT 3 /* Get Port Statistics */
+#define QLA84_MGMT_INFO_LIF_STAT 4 /* Get LIF Statistics */
+#define QLA84_MGMT_INFO_ASIC_STAT 5 /* Get ASIC Statistics */
+#define QLA84_MGMT_INFO_CONFIG_PARAMS 6 /* Get Config Parameters */
+#define QLA84_MGMT_INFO_PANIC_LOG 7 /* Get Panic Log */
+
+ uint32_t context;
+/*
+* context definitions for QLA84_MGMT_INFO_CONFIG_LOG_DATA
+*/
+#define IC_LOG_DATA_LOG_ID_DEBUG_LOG 0
+#define IC_LOG_DATA_LOG_ID_LEARN_LOG 1
+#define IC_LOG_DATA_LOG_ID_FC_ACL_INGRESS_LOG 2
+#define IC_LOG_DATA_LOG_ID_FC_ACL_EGRESS_LOG 3
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_INGRESS_LOG 4
+#define IC_LOG_DATA_LOG_ID_ETHERNET_ACL_EGRESS_LOG 5
+#define IC_LOG_DATA_LOG_ID_MESSAGE_TRANSMIT_LOG 6
+#define IC_LOG_DATA_LOG_ID_MESSAGE_RECEIVE_LOG 7
+#define IC_LOG_DATA_LOG_ID_LINK_EVENT_LOG 8
+#define IC_LOG_DATA_LOG_ID_DCX_LOG 9
+
+/*
+* context definitions for QLA84_MGMT_INFO_PORT_STAT
+*/
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT0 0
+#define IC_PORT_STATISTICS_PORT_NUMBER_ETHERNET_PORT1 1
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT0 2
+#define IC_PORT_STATISTICS_PORT_NUMBER_NSL_PORT1 3
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT0 4
+#define IC_PORT_STATISTICS_PORT_NUMBER_FC_PORT1 5
+
+
+/*
+* context definitions for QLA84_MGMT_INFO_LIF_STAT
+*/
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT0 0
+#define IC_LIF_STATISTICS_LIF_NUMBER_ETHERNET_PORT1 1
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT0 2
+#define IC_LIF_STATISTICS_LIF_NUMBER_FC_PORT1 3
+#define IC_LIF_STATISTICS_LIF_NUMBER_CPU 6
+
+ } info; /* for QLA84_MGMT_GET_INFO */
+ } u;
+};
+
+struct qla84_msg_mgmt {
+ uint16_t cmd;
+#define QLA84_MGMT_READ_MEM 0x00
+#define QLA84_MGMT_WRITE_MEM 0x01
+#define QLA84_MGMT_CHNG_CONFIG 0x02
+#define QLA84_MGMT_GET_INFO 0x03
+ uint16_t rsrvd;
+ struct qla84_mgmt_param mgmtp;/* parameters for cmd */
+ uint32_t len; /* bytes in payload following this struct */
+ uint8_t payload[0]; /* payload for cmd */
+};
+
+struct msg_update_fw {
+ /*
+ * diag_fw = 0 operational fw
+ * otherwise diagnostic fw
+ * offset, len, fw_len are present to overcome the current limitation
+ * of 128Kb xfer size. The fw is sent in smaller chunks. Each chunk
+ * specifies the byte "offset" where it fits in the fw buffer. The
+ * number of bytes in each chunk is specified in "len". "fw_len"
+ * is the total size of fw. The first chunk should start at offset = 0.
+ * When offset+len == fw_len, the fw is written to the HBA.
+ */
+ uint32_t diag_fw;
+ uint32_t offset;/* start offset */
+ uint32_t len; /* num bytes in cur xfer */
+ uint32_t fw_len; /* size of fw in bytes */
+ uint8_t fw_bytes[0];
+};
+
#endif
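Editor's note on the msg_update_fw structure added above: its comment describes how a firmware image larger than the 128 KB transfer limit is pushed down in chunks, each carrying its byte offset and length, with the write committed to the HBA once offset + len == fw_len. A small stand-alone sketch of that chunking loop; the 128 KB limit is taken from the comment and send_chunk() is a stand-in for issuing the actual vendor command:

    #include <stdio.h>
    #include <stdint.h>

    #define CHUNK_MAX (128 * 1024)  /* per-transfer limit noted in msg_update_fw */

    /* Stand-in for one QLA84_UPDATE_FW vendor command carrying a chunk. */
    static void send_chunk(uint32_t offset, uint32_t len, uint32_t fw_len)
    {
            printf("chunk: offset=%u len=%u%s\n", (unsigned)offset, (unsigned)len,
                   offset + len == fw_len ? " (final, fw written to HBA)" : "");
    }

    int main(void)
    {
            uint32_t fw_len = 300 * 1024;   /* example firmware image size */
            uint32_t offset;

            /* First chunk starts at offset 0; each later chunk continues where
             * the previous one ended, as the comment in msg_update_fw requires. */
            for (offset = 0; offset < fw_len; offset += CHUNK_MAX) {
                    uint32_t len = fw_len - offset;

                    if (len > CHUNK_MAX)
                            len = CHUNK_MAX;
                    send_chunk(offset, len, fw_len);
            }
            return 0;
    }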
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 66a8da5d7d0..cebf4f1bb7d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -627,6 +627,39 @@ struct els_entry_24xx {
uint32_t rx_len; /* Data segment 1 length. */
};
+struct els_sts_entry_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System Defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+
+ uint16_t comp_status;
+
+ uint16_t nport_handle; /* N_PORT handle. */
+
+ uint16_t reserved_1;
+
+ uint8_t vp_index;
+ uint8_t sof_type;
+
+ uint32_t rx_xchg_address; /* Receive exchange address. */
+ uint16_t reserved_2;
+
+ uint8_t opcode;
+ uint8_t reserved_3;
+
+ uint8_t port_id[3];
+ uint8_t reserved_4;
+
+ uint16_t reserved_5;
+
+ uint16_t control_flags; /* Control flags. */
+ uint32_t total_byte_count;
+ uint32_t error_subcode_1;
+ uint32_t error_subcode_2;
+};
/*
* ISP queue - Mailbox Command entry structure definition.
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8bc6f53691e..3a89bc514e2 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -60,6 +60,8 @@ extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
+extern fc_port_t *
+qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
/*
* Global Data in qla_os.c source file.
*/
@@ -76,6 +78,7 @@ extern int ql2xiidmaenable;
extern int ql2xmaxqueues;
extern int ql2xmultique_tag;
extern int ql2xfwloadbin;
+extern int ql2xetsenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -94,7 +97,6 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
-extern void qla2x00_abort_fcport_cmds(fc_port_t *);
extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
struct qla_hw_data *);
extern void qla2x00_free_host(struct scsi_qla_host *);
@@ -154,6 +156,7 @@ int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
+extern void qla2x00_ctx_sp_free(srb_t *);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -426,6 +429,8 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_init_host_attr(scsi_qla_host_t *);
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
+extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
+extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);
/*
* Global Function Prototypes in qla_dfs.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3f8e8495b74..a67b2bafb88 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -62,7 +62,7 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
ctx->free(sp);
}
-static void
+void
qla2x00_ctx_sp_free(srb_t *sp)
{
struct srb_ctx *ctx = sp->ctx;
@@ -338,6 +338,16 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
+ if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
+ /* Issue verify 84xx FW IOCB to complete 84xx initialization */
+ rval = qla84xx_init_chip(vha);
+ if (rval != QLA_SUCCESS) {
+ qla_printk(KERN_ERR, ha,
+ "Unable to initialize ISP84XX.\n");
+ qla84xx_put_chip(vha);
+ }
+ }
+
return (rval);
}
@@ -2216,7 +2226,7 @@ qla2x00_rport_del(void *data)
*
* Returns a pointer to the allocated fcport, or NULL, if none available.
*/
-static fc_port_t *
+fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
{
fc_port_t *fcport;
@@ -2900,8 +2910,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
if (qla2x00_is_reserved_id(vha, loop_id))
continue;
- if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
+ if (atomic_read(&vha->loop_down_timer) ||
+ LOOP_TRANSITION(vha)) {
+ atomic_set(&vha->loop_down_timer, 0);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
break;
+ }
if (swl != NULL) {
if (last_dev) {
@@ -4877,6 +4892,15 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
void
-qla81xx_update_fw_options(scsi_qla_host_t *ha)
+qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ql2xetsenable)
+ return;
+
+ /* Enable ETS Burst. */
+ memset(ha->fw_options, 0, sizeof(ha->fw_options));
+ ha->fw_options[2] |= BIT_9;
+ qla2x00_set_fw_options(vha, ha->fw_options);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5ccac0bef7..8299a9891bf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1025,6 +1025,119 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
/* Implicit: mbx->mbx10 = 0. */
}
+static void
+qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
+{
+ struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+
+ els_iocb->entry_type = ELS_IOCB_TYPE;
+ els_iocb->entry_count = 1;
+ els_iocb->sys_define = 0;
+ els_iocb->entry_status = 0;
+ els_iocb->handle = sp->handle;
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ els_iocb->vp_index = sp->fcport->vp_idx;
+ els_iocb->sof_type = EST_SOFI3;
+ els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+
+ els_iocb->opcode =(((struct srb_bsg*)sp->ctx)->ctx.type == SRB_ELS_CMD_RPT) ?
+ bsg_job->request->rqst_data.r_els.els_code : bsg_job->request->rqst_data.h_els.command_code;
+ els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ els_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->control_flags = 0;
+ els_iocb->rx_byte_count =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+ els_iocb->tx_byte_count =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+
+ els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ els_iocb->tx_len = cpu_to_le32(sg_dma_len
+ (bsg_job->request_payload.sg_list));
+
+ els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->reply_payload.sg_list)));
+ els_iocb->rx_len = cpu_to_le32(sg_dma_len
+ (bsg_job->reply_payload.sg_list));
+}
+
+static void
+qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
+{
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ struct scatterlist *sg;
+ int index;
+ uint16_t tot_dsds;
+ scsi_qla_host_t *vha = sp->fcport->vha;
+ struct fc_bsg_job *bsg_job = ((struct srb_bsg*)sp->ctx)->bsg_job;
+ int loop_iterartion = 0;
+ int cont_iocb_prsnt = 0;
+ int entry_count = 1;
+
+ ct_iocb->entry_type = CT_IOCB_TYPE;
+ ct_iocb->entry_status = 0;
+ ct_iocb->sys_define = 0;
+ ct_iocb->handle = sp->handle;
+
+ ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ ct_iocb->vp_index = sp->fcport->vp_idx;
+ ct_iocb->comp_status = __constant_cpu_to_le16(0);
+
+ ct_iocb->cmd_dsd_count =
+ __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+ ct_iocb->timeout = 0;
+ ct_iocb->rsp_dsd_count =
+ __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+ ct_iocb->rsp_byte_count =
+ cpu_to_le32(bsg_job->reply_payload.payload_len);
+ ct_iocb->cmd_byte_count =
+ cpu_to_le32(bsg_job->request_payload.payload_len);
+ ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
+ (bsg_job->request_payload.sg_list)));
+ ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
+ (bsg_job->request_payload.sg_list));
+
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
+ index = 0;
+ tot_dsds = bsg_job->reply_payload.sg_cnt;
+
+ for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Cont.
+ * Type 1 IOCB.
+ */
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ cont_iocb_prsnt = 1;
+ entry_count++;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ loop_iterartion++;
+ avail_dsds--;
+ }
+ ct_iocb->entry_count = entry_count;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -1052,6 +1165,13 @@ qla2x00_start_sp(srb_t *sp)
qla24xx_logout_iocb(sp, pkt):
qla2x00_logout_iocb(sp, pkt);
break;
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ qla24xx_els_iocb(sp, pkt);
+ break;
+ case SRB_CT_CMD:
+ qla24xx_ct_iocb(sp, pkt);
+ break;
default:
break;
}
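Editor's note on qla24xx_ct_iocb() above: the first reply data segment descriptor (DSD) is placed in the CT IOCB itself, and the remaining scatter-gather entries spill into Continuation Type 1 IOCBs, five DSDs per continuation, with entry_count incremented for each one. A quick stand-alone check of that arithmetic for a few scatter-gather counts:

    #include <stdio.h>

    /* entry_count as computed by the for_each_sg loop in qla24xx_ct_iocb():
     * one DSD fits in the CT IOCB, every further group of up to five DSDs
     * needs one Continuation Type 1 IOCB. */
    static int ct_entry_count(int rsp_sg_cnt)
    {
            int avail_dsds = 1, entry_count = 1, i;

            for (i = 0; i < rsp_sg_cnt; i++) {
                    if (avail_dsds == 0) {
                            avail_dsds = 5;
                            entry_count++;
                    }
                    avail_dsds--;
            }
            return entry_count;
    }

    int main(void)
    {
            int n;

            for (n = 1; n <= 12; n++)
                    printf("sg_cnt=%2d -> entry_count=%d\n", n, ct_entry_count(n));
            return 0;
    }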
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6fc63b98818..ab90329ff2e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_bsg_fc.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
@@ -881,7 +882,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
index);
return NULL;
}
+
req->outstanding_cmds[index] = NULL;
+
done:
return sp;
}
@@ -982,6 +985,100 @@ done_post_logio_done_work:
}
static void
+qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct sts_entry_24xx *pkt, int iocb_type)
+{
+ const char func[] = "ELS_CT_IOCB";
+ const char *type;
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ struct srb_bsg *sp_bsg;
+ struct fc_bsg_job *bsg_job;
+ uint16_t comp_status;
+ uint32_t fw_status[3];
+ uint8_t* fw_sts_ptr;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+ sp_bsg = (struct srb_bsg*)sp->ctx;
+ bsg_job = sp_bsg->bsg_job;
+
+ type = NULL;
+ switch (sp_bsg->ctx.type) {
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ type = "els";
+ break;
+ case SRB_CT_CMD:
+ type = "ct pass-through";
+ break;
+ default:
+ qla_printk(KERN_WARNING, ha,
+ "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
+ sp_bsg->ctx.type);
+ return;
+ }
+
+ comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
+ fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
+ fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+
+ /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+ * fc payload to the caller
+ */
+ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
+
+ if (comp_status != CS_COMPLETE) {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->reply->reply_payload_rcv_len =
+ le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
+
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
+ vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
+ le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
+ fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
+ }
+ else {
+ DEBUG2(qla_printk(KERN_WARNING, ha,
+ "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "error subcode 1=0x%x error subcode 2=0x%x.\n",
+ vha->host_no, sp->handle, type, comp_status,
+ le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
+ le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
+ bsg_job->reply->result = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
+ memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
+ }
+ DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+ }
+ else {
+ bsg_job->reply->result = DID_OK << 16;;
+ bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+ bsg_job->reply_len = 0;
+ }
+
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ dma_unmap_sg(&ha->pdev->dev,
+ bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+ if ((sp_bsg->ctx.type == SRB_ELS_CMD_HST) ||
+ (sp_bsg->ctx.type == SRB_CT_CMD))
+ kfree(sp->fcport);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ bsg_job->job_done(bsg_job);
+}
+
+static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
struct logio_entry_24xx *logio)
{
@@ -1749,6 +1846,13 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla24xx_logio_entry(vha, rsp->req,
(struct logio_entry_24xx *)pkt);
break;
+ case CT_IOCB_TYPE:
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
+ clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
+ break;
+ case ELS_IOCB_TYPE:
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+ break;
default:
/* Type Not Supported. */
DEBUG4(printk(KERN_WARNING
@@ -2049,7 +2153,6 @@ qla24xx_msix_default(int irq, void *dev_id)
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
-
return IRQ_HANDLED;
}
@@ -2255,10 +2358,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
- else if (ha->flags.inta_enabled) {
+ else if (ha->flags.msi_enabled) {
free_irq(ha->pdev->irq, rsp);
pci_disable_msi(ha->pdev);
- }
+ } else
+ free_irq(ha->pdev->irq, rsp);
}
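Editor's note on the status reporting in qla24xx_els_ct_entry() above (and in the vendor-command path in qla_attr.c): firmware status words are copied into the BSG sense buffer immediately after struct fc_bsg_reply, and reply_len is grown by the same amount so user space knows the extra bytes are present. A minimal sketch of that layout, using a placeholder reply struct since the real struct fc_bsg_reply comes from <scsi/scsi_bsg_fc.h>:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Placeholder for struct fc_bsg_reply; only its size matters here. */
    struct fake_fc_bsg_reply {
            uint32_t result;
            uint32_t reply_payload_rcv_len;
            /* ... the real struct has more fields ... */
    };

    int main(void)
    {
            uint8_t sense[96];
            uint32_t fw_status[3] = { 0x0015, 0x1a, 0x2b }; /* comp_status, subcodes */
            uint8_t *fw_sts_ptr;
            size_t reply_len;

            /* Driver convention: fw status words live right after the reply struct. */
            fw_sts_ptr = sense + sizeof(struct fake_fc_bsg_reply);
            memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
            reply_len = sizeof(struct fake_fc_bsg_reply) + sizeof(fw_status);

            printf("fw status at offset %zu, reply_len=%zu\n",
                   (size_t)(fw_sts_ptr - sense), reply_len);
            return 0;
    }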
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 056e4d4505f..6e53bdbb1da 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3636,6 +3636,157 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
}
int
+qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint32_t iter_cnt = 0x1;
+
+ DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
+ mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
+
+ /* transfer count */
+ mcp->mb[10] = LSW(mreq->transfer_size);
+ mcp->mb[11] = MSW(mreq->transfer_size);
+
+ /* send data address */
+ mcp->mb[14] = LSW(mreq->send_dma);
+ mcp->mb[15] = MSW(mreq->send_dma);
+ mcp->mb[20] = LSW(MSD(mreq->send_dma));
+ mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+ /* receive data address */
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+ mcp->mb[17] = MSW(mreq->rcv_dma);
+ mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+ mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+ /* Iteration count */
+ mcp->mb[18] = LSW(iter_cnt);
+ mcp->mb[19] = MSW(iter_cnt);
+
+ mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
+ MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+ if (IS_QLA81XX(vha->hw))
+ mcp->out_mb |= MBX_2;
+ mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
+
+ mcp->buf_size = mreq->transfer_size;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "(%ld): failed=%x mb[0]=0x%x "
+ "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x mb[19]=0x%x. \n", vha->host_no, rval,
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ } else {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): done.\n", vha->host_no));
+ }
+
+ /* Copy mailbox information */
+ memcpy( mresp, mcp->mb, 64);
+ mresp[3] = mcp->mb[18];
+ mresp[4] = mcp->mb[19];
+ return rval;
+}
+
+int
+qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, uint16_t *mresp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+
+ memset(mcp->mb, 0 , sizeof(mcp->mb));
+ mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
+ mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
+ if (IS_QLA81XX(ha))
+ mcp->mb[1] |= BIT_15;
+ mcp->mb[2] = IS_QLA81XX(ha) ? vha->fcoe_fcf_idx : 0;
+ mcp->mb[16] = LSW(mreq->rcv_dma);
+ mcp->mb[17] = MSW(mreq->rcv_dma);
+ mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
+ mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
+
+ mcp->mb[10] = LSW(mreq->transfer_size);
+
+ mcp->mb[14] = LSW(mreq->send_dma);
+ mcp->mb[15] = MSW(mreq->send_dma);
+ mcp->mb[20] = LSW(MSD(mreq->send_dma));
+ mcp->mb[21] = MSW(MSD(mreq->send_dma));
+
+ mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
+ MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
+ if (IS_QLA81XX(ha))
+ mcp->out_mb |= MBX_2;
+
+ mcp->in_mb = MBX_0;
+ if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
+ mcp->in_mb |= MBX_1;
+ if (IS_QLA81XX(ha))
+ mcp->in_mb |= MBX_3;
+
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->buf_size = mreq->transfer_size;
+
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(printk(KERN_WARNING
+ "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
+ vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ } else {
+ DEBUG2(printk(KERN_WARNING
+ "scsi(%ld): done.\n", vha->host_no));
+ }
+
+ /* Copy mailbox information */
+ memcpy( mresp, mcp->mb, 32);
+ return rval;
+}
+int
+qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic,
+ uint16_t *cmd_status)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
+ ha->host_no, enable_diagnostic));
+
+ mcp->mb[0] = MBC_ISP84XX_RESET;
+ mcp->mb[1] = enable_diagnostic;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ rval = qla2x00_mailbox_command(ha, mcp);
+
+ /* Return mailbox statuses. */
+ *cmd_status = mcp->mb[0];
+ if (rval != QLA_SUCCESS)
+ DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
+ rval));
+ else
+ DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+
+ return rval;
+}
+
+int
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
{
int rval;
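Editor's note on the mailbox commands added above: qla2x00_loopback_test() and qla2x00_echo_test() hand the controller 64-bit DMA addresses sixteen bits at a time, placing the low and high halves of each dword in separate mailbox registers via LSW/MSW and MSD. A stand-alone round-trip of that packing, with local equivalents of the LSW/MSW/LSD/MSD helpers the driver defines elsewhere:

    #include <stdio.h>
    #include <stdint.h>

    /* Local equivalents of the driver's LSW/MSW/LSD/MSD macros. */
    #define LSW(x)  ((uint16_t)(x))
    #define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))
    #define LSD(x)  ((uint32_t)(x))
    #define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))

    int main(void)
    {
            uint64_t send_dma = 0x000000123456789AULL;  /* example bus address */
            uint16_t mb14, mb15, mb20, mb21;
            uint64_t rebuilt;

            /* Packing as done for the send buffer in qla2x00_loopback_test(). */
            mb14 = LSW(send_dma);           /* bits 15..0  */
            mb15 = MSW(send_dma);           /* bits 31..16 */
            mb20 = LSW(MSD(send_dma));      /* bits 47..32 */
            mb21 = MSW(MSD(send_dma));      /* bits 63..48 */

            rebuilt = ((uint64_t)mb21 << 48) | ((uint64_t)mb20 << 32) |
                      ((uint64_t)mb15 << 16) | mb14;

            printf("original 0x%016llx rebuilt 0x%016llx %s\n",
                   (unsigned long long)send_dma, (unsigned long long)rebuilt,
                   rebuilt == send_dma ? "OK" : "MISMATCH");
            return 0;
    }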
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8529eb1f3cd..46720b23028 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -107,6 +107,12 @@ MODULE_PARM_DESC(ql2xfwloadbin,
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
+int ql2xetsenable;
+module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xetsenable,
+ "Enables firmware ETS burst."
+ "Default is 0 - skip ETS enablement.");
+
/*
* SCSI host template entry points
*/
@@ -682,44 +688,6 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
return (return_status);
}
-void
-qla2x00_abort_fcport_cmds(fc_port_t *fcport)
-{
- int cnt;
- unsigned long flags;
- srb_t *sp;
- scsi_qla_host_t *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->fcport != fcport)
- continue;
- if (sp->ctx)
- continue;
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed -- %lx\n",
- sp->cmd->serial_number));
- } else {
- if (qla2x00_eh_wait_on_command(sp->cmd) !=
- QLA_SUCCESS)
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Abort failed while waiting -- %lx\n",
- sp->cmd->serial_number));
- }
- spin_lock_irqsave(&ha->hardware_lock, flags);
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
/**************************************************************************
* qla2xxx_eh_abort
*
@@ -1095,6 +1063,20 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
+ if (ha->flags.enable_target_reset) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
+ if (ret != QLA_SUCCESS) {
+ DEBUG2_3(printk("%s(%ld): bus_reset failed: "
+ "target_reset=%d d_id=%x.\n", __func__,
+ vha->host_no, ret, fcport->d_id.b24));
+ }
+ }
+ }
+
if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
@@ -1117,19 +1099,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
qla2x00_wait_for_loop_ready(vha);
}
- if (ha->flags.enable_target_reset) {
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
- ret = ha->isp_ops->target_reset(fcport, 0, 0);
- if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): bus_reset failed: "
- "target_reset=%d d_id=%x.\n", __func__,
- vha->host_no, ret, fcport->d_id.b24));
- }
- }
- }
/* Issue marker command only when we are going to start the I/O */
vha->marker_needed = 1;
@@ -1160,8 +1129,19 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
- del_timer_sync(&ctx->timer);
- ctx->free(sp);
+ if (ctx->type == SRB_LOGIN_CMD || ctx->type == SRB_LOGOUT_CMD) {
+ del_timer_sync(&ctx->timer);
+ ctx->free(sp);
+ } else {
+ struct srb_bsg* sp_bsg = (struct srb_bsg*)sp->ctx;
+ if (sp_bsg->bsg_job->request->msgcode == FC_BSG_HST_CT)
+ kfree(sp->fcport);
+ sp_bsg->bsg_job->req->errors = 0;
+ sp_bsg->bsg_job->reply->result = res;
+ sp_bsg->bsg_job->job_done(sp_bsg->bsg_job);
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ }
}
}
}
@@ -1258,7 +1238,7 @@ qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
break;
default:
- return EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
return sdev->queue_depth;
@@ -1818,7 +1798,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set EEH reset type to fundamental if required by hba */
if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
pdev->needs_freset = 1;
- pci_save_state(pdev);
}
/* Configure PCI I/O space */
@@ -1970,11 +1949,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_channel = MAX_BUSES - 1;
host->max_lun = MAX_LUNS;
host->transportt = qla2xxx_transport_template;
+ sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
goto probe_init_failed;
+
+ pci_save_state(pdev);
+
/* Alloc arrays of request and response ring ptrs */
que_init:
if (!qla2x00_alloc_queues(ha)) {
@@ -2176,6 +2159,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
kfree(ha);
ha = NULL;
+ pci_disable_pcie_error_reporting(pdev);
+
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -3310,6 +3295,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
ha->flags.eeh_busy = 1;
+ qla2x00_free_irqs(vha);
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
@@ -3363,10 +3349,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = base_vha->hw;
- int rc;
+ struct rsp_que *rsp;
+ int rc, retries = 10;
DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+ /* Workaround: qla2xxx driver which access hardware earlier
+ * needs error state to be pci_channel_io_online.
+ * Otherwise mailbox command timesout.
+ */
+ pdev->error_state = pci_channel_io_normal;
+
+ pci_restore_state(pdev);
+
+ /* pci_restore_state() clears the saved_state flag of the device
+ * save restored state which resets saved_state flag
+ */
+ pci_save_state(pdev);
+
if (ha->mem_only)
rc = pci_enable_device_mem(pdev);
else
@@ -3378,27 +3378,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
return ret;
}
+ rsp = ha->rsp_q_map[0];
+ if (qla2x00_request_irqs(ha, rsp))
+ return ret;
+
if (ha->isp_ops->pci_config(base_vha))
return ret;
-#ifdef QL_DEBUG_LEVEL_17
- {
- uint8_t b;
- uint32_t i;
+ while (ha->flags.mbox_busy && retries--)
+ msleep(1000);
- printk("slot_reset_1: ");
- for (i = 0; i < 256; i++) {
- pci_read_config_byte(ha->pdev, i, &b);
- printk("%s%02x", (i%16) ? " " : "\n", b);
- }
- printk("\n");
- }
-#endif
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
DEBUG17(qla_printk(KERN_WARNING, ha,
"slot_reset-return:ret=%x\n", ret));
@@ -3422,8 +3418,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
}
ha->flags.eeh_busy = 0;
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
static struct pci_error_handlers qla2xxx_err_handler = {
@@ -3536,4 +3530,3 @@ MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
MODULE_FIRMWARE(FW_FILE_ISP25XX);
-MODULE_FIRMWARE(FW_FILE_ISP81XX);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ed36279a33c..8d2fc2fa7a6 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k10"
+#define QLA2XXX_VERSION "8.03.02-k1"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 1
-#define QLA_DRIVER_BETA_VER 0
+#define QLA_DRIVER_PATCH_VER 2
+#define QLA_DRIVER_BETA_VER 1