From b8ef3204f460912a46659cdc74d237adbe705053 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 6 Aug 2010 03:02:37 -0500 Subject: [SCSI] fc class: add fc host default default dev loss setting This patch adds a fc_host setting to store the default dev_loss_tmo. It is used if the driver has a callack to get the value from the LLD. If the callback is not set, then we use the fc class module default value. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index d7e470a06180..9f0f7d9c7422 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2525,7 +2525,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel, rport->maxframe_size = -1; rport->supported_classes = FC_COS_UNSPECIFIED; - rport->dev_loss_tmo = fc_dev_loss_tmo; + if (fci->f->get_host_def_dev_loss_tmo) { + fci->f->get_host_def_dev_loss_tmo(shost); + rport->dev_loss_tmo = fc_host_def_dev_loss_tmo(shost); + } else + rport->dev_loss_tmo = fc_dev_loss_tmo; memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; -- cgit v1.2.3 From a74bdf4661441d79a700f7ab3fc6d225ea2cf409 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 6 Aug 2010 03:02:38 -0500 Subject: [SCSI] qla2xxx: do not reset dev_loss_tmo in slave callout This fixes a bug where the driver was resetting the rport dev_loss_tmo when devices were added by adding support for the get_host_def_dev_loss_tmo callout. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 11 +++++++++++ drivers/scsi/qla2xxx/qla_os.c | 5 ----- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 420238cc794e..679a4326811c 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1529,6 +1529,15 @@ qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) rport->dev_loss_tmo = 1; } +static void +qla2x00_get_host_def_loss_tmo(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + + fc_host_def_dev_loss_tmo(shost) = ha->port_down_retry_count; +} + static void qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) { @@ -1903,6 +1912,7 @@ struct fc_function_template qla2xxx_transport_functions = { .show_host_fabric_name = 1, .get_host_port_state = qla2x00_get_host_port_state, .show_host_port_state = 1, + .get_host_def_dev_loss_tmo = qla2x00_get_host_def_loss_tmo, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, @@ -1949,6 +1959,7 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .show_host_fabric_name = 1, .get_host_port_state = qla2x00_get_host_port_state, .show_host_port_state = 1, + .get_host_def_dev_loss_tmo = qla2x00_get_host_def_loss_tmo, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8c80b49ac1c4..30b05229af26 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1295,17 +1295,12 @@ static int qla2xxx_slave_configure(struct scsi_device *sdev) { scsi_qla_host_t *vha = shost_priv(sdev->host); - struct qla_hw_data 
*ha = vha->hw; - struct fc_rport *rport = starget_to_rport(sdev->sdev_target); struct req_que *req = vha->req; if (sdev->tagged_supported) scsi_activate_tcq(sdev, req->max_q_depth); else scsi_deactivate_tcq(sdev, req->max_q_depth); - - rport->dev_loss_tmo = ha->port_down_retry_count; - return 0; } -- cgit v1.2.3 From 143beaa811aa3cfc4ae8b4a2092f232ad78b33da Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 6 Aug 2010 03:02:39 -0500 Subject: [SCSI] lpfc: do not reset dev_loss_tmo in slave callout This fixes a bug where the driver was resetting the rport dev_loss_tmo when devices were added by adding support for the get_host_def_dev_loss_tmo callout. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 10 ++++++++++ drivers/scsi/lpfc/lpfc_scsi.c | 10 ---------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 23ce45708335..87c2b6b858f7 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4370,6 +4370,14 @@ lpfc_get_starget_port_name(struct scsi_target *starget) ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; } +static void +lpfc_get_host_def_loss_tmo(struct Scsi_Host *shost) +{ + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + + fc_host_def_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; +} + /** * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo * @rport: fc rport address. @@ -4478,6 +4486,7 @@ struct fc_function_template lpfc_transport_functions = { .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, + .get_host_def_dev_loss_tmo = lpfc_get_host_def_loss_tmo, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. @@ -4545,6 +4554,7 @@ struct fc_function_template lpfc_vport_transport_functions = { .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, + .get_host_def_dev_loss_tmo = lpfc_get_host_def_loss_tmo, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 2e51aa6b45b3..6e331c73170e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -3656,7 +3656,6 @@ lpfc_slave_alloc(struct scsi_device *sdev) * * This routine configures following items * - Tag command queuing support for @sdev if supported. - * - Dev loss time out value of fc_rport. * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set. * * Return codes: @@ -3667,21 +3666,12 @@ lpfc_slave_configure(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; - struct fc_rport *rport = starget_to_rport(sdev->sdev_target); if (sdev->tagged_supported) scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth); else scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth); - /* - * Initialize the fc transport attributes for the target - * containing this scsi device. Also note that the driver's - * target pointer is stored in the starget_data for the - * driver's sysfs entry point functions. 
- */ - rport->dev_loss_tmo = vport->cfg_devloss_tmo; - if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); -- cgit v1.2.3 From 8196a934eea3810be6243b307b336136d63bbc48 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 6 Aug 2010 03:02:40 -0500 Subject: [SCSI] fnic: do not reset dev_loss_tmo in slave callout This fixes a bug where the driver was resetting the rport dev_loss_tmo when devices were added by adding support for the get_host_def_dev_loss_tmo callout. Patch has only been compile tested. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_main.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 9eb7a9ebccae..df91a61591b2 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -80,8 +80,6 @@ static struct libfc_function_template fnic_transport_template = { static int fnic_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - struct fc_lport *lp = shost_priv(sdev->host); - struct fnic *fnic = lport_priv(lp); sdev->tagged_supported = 1; @@ -89,8 +87,6 @@ static int fnic_slave_alloc(struct scsi_device *sdev) return -ENXIO; scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); - rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000; - return 0; } @@ -113,6 +109,15 @@ static struct scsi_host_template fnic_host_template = { .shost_attrs = fnic_attrs, }; +static void +fnic_get_host_def_loss_tmo(struct Scsi_Host *shost) +{ + struct fc_lport *lp = shost_priv(shost); + struct fnic *fnic = lport_priv(lp); + + fc_host_def_dev_loss_tmo(shost) = fnic->config.port_down_timeout / 1000; +} + static void fnic_get_host_speed(struct Scsi_Host *shost); static struct scsi_transport_template *fnic_fc_transport; static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); @@ -142,6 +147,7 @@ static struct fc_function_template fnic_fc_functions = { .show_rport_dev_loss_tmo = 1, .issue_fc_host_lip = fnic_reset, .get_fc_host_stats = fnic_get_stats, + .get_host_def_dev_loss_tmo = fnic_get_host_def_loss_tmo, .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), .terminate_rport_io = fnic_terminate_rport_io, .bsg_request = fc_lport_bsg_request, -- cgit v1.2.3 From da99e307aa508fba6fbedc3699e7df64e284d2c5 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 6 Aug 2010 03:02:41 -0500 Subject: [SCSI] ibmvfc: do not reset dev_loss_tmo in slave callout This fixes a bug where the driver was resetting the rport dev_loss_tmo when devices were added by adding support for the get_host_def_dev_loss_tmo callout. Patch has only been compile tested. 
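For reference, the callout pattern all of these driver patches follow is sketched below. This is illustrative only: "foo" is a placeholder LLD, and foo_hba/port_down_retry_count stand in for whatever per-host value the real driver reports (compare qla2x00_get_host_def_loss_tmo(), lpfc_get_host_def_loss_tmo() and fnic_get_host_def_loss_tmo() in the diffs above; the fc class side is the fc_rport_create() hunk in the first patch).

static void
foo_get_host_def_dev_loss_tmo(struct Scsi_Host *shost)
{
	struct foo_hba *hba = shost_priv(shost);	/* hypothetical LLD-private data */

	/*
	 * Report the driver's preferred default.  When this callout is set,
	 * fc_rport_create() calls it and copies fc_host_def_dev_loss_tmo(shost)
	 * into each new rport, so the LLD no longer has to overwrite
	 * rport->dev_loss_tmo from its slave_configure() path.
	 */
	fc_host_def_dev_loss_tmo(shost) = hba->port_down_retry_count;
}

static struct fc_function_template foo_transport_functions = {
	/* ... */
	.get_host_def_dev_loss_tmo = foo_get_host_def_dev_loss_tmo,
};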
Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 9f75a6d519a2..a5dd9b4a0ae8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1030,6 +1030,11 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost) spin_unlock_irqrestore(shost->host_lock, flags); } +static void ibmvfc_set_host_def_dev_loss_tmo(struct Scsi_Host *shost) +{ + fc_host_def_dev_loss_tmo(shost) = dev_loss_tmo; +} + /** * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout * @rport: rport struct @@ -2788,7 +2793,6 @@ static int ibmvfc_target_alloc(struct scsi_target *starget) static int ibmvfc_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; - struct fc_rport *rport = starget_to_rport(sdev->sdev_target); unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); @@ -2800,8 +2804,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) scsi_activate_tcq(sdev, sdev->queue_depth); } else scsi_deactivate_tcq(sdev, sdev->queue_depth); - - rport->dev_loss_tmo = dev_loss_tmo; spin_unlock_irqrestore(shost->host_lock, flags); return 0; } @@ -4889,6 +4891,8 @@ static struct fc_function_template ibmvfc_transport_functions = { .get_host_speed = ibmvfc_get_host_speed, .show_host_speed = 1, + .get_host_def_dev_loss_tmo = ibmvfc_set_host_def_dev_loss_tmo, + .issue_fc_host_lip = ibmvfc_issue_fc_host_lip, .terminate_rport_io = ibmvfc_terminate_rport_io, -- cgit v1.2.3 From 7968f1944c9d6c83683e87fd2ede977ccfd29285 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 5 Aug 2010 22:19:36 +0200 Subject: [SCSI] drivers/scsi: Adjust confusing if indentation Outdent the code following the if. The semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r disable braces4@ position p1,p2; statement S1,S2; @@ ( if (...) { ... } | if (...) 
S1@p1 S2@p2 ) @script:python@ p1 << r.p1; p2 << r.p2; @@ if (p1[0].column == p2[0].column): cocci.print_main("branch",p1) cocci.print_secs("after",p2) // Signed-off-by: Julia Lawall Acked-by: Nick Cheng Signed-off-by: James Bottomley --- drivers/scsi/arcmsr/arcmsr_hba.c | 4 ++-- drivers/scsi/mpt2sas/mpt2sas_base.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index c8dc392edd57..05a78e515a24 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -878,8 +878,8 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, if (!error) { if (acb->devstate[id][lun] == ARECA_RAID_GONE) acb->devstate[id][lun] = ARECA_RAID_GOOD; - ccb->pcmd->result = DID_OK << 16; - arcmsr_ccb_complete(ccb); + ccb->pcmd->result = DID_OK << 16; + arcmsr_ccb_complete(ccb); }else{ switch (ccb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 57bcd5c9dcff..12faf64f91b0 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -534,7 +534,7 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, if (event_data->DiscoveryStatus) printk("discovery_status(0x%08x)", le32_to_cpu(event_data->DiscoveryStatus)); - printk("\n"); + printk("\n"); return; } case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: -- cgit v1.2.3 From ec21b3b0dbbaf5965f6b508cb6b48d9fe5bb6ab5 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sun, 8 Aug 2010 00:15:17 +0200 Subject: [SCSI] lpfc: change spin_lock_irq() to spin_lock() In lpfc_cleanup_pending_mbox() we already have IRQs disabled so we don't need to disable them again. Also in lpfc_sli_intr_handler() there is a typo where it has spin_unlock_irq() instead of just spin_unlock(). Signed-off-by: Dan Carpenter Acked-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_sli.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fb8905f893f5..23a47e536858 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -8476,7 +8476,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id) * If there is deferred error attention, do not check for any interrupt. 
*/ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock(&phba->hbalock); return IRQ_NONE; } @@ -12827,9 +12827,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) } ndlp = (struct lpfc_nodelist *) mb->context2; if (ndlp) { - spin_lock_irq(shost->host_lock); + spin_lock(shost->host_lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(shost->host_lock); + spin_unlock(shost->host_lock); lpfc_nlp_put(ndlp); mb->context2 = NULL; } @@ -12845,9 +12845,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { ndlp = (struct lpfc_nodelist *) mb->context2; if (ndlp) { - spin_lock_irq(shost->host_lock); + spin_lock(shost->host_lock); ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(shost->host_lock); + spin_unlock(shost->host_lock); lpfc_nlp_put(ndlp); mb->context2 = NULL; } -- cgit v1.2.3 From 9ba682f01e2ffe47e6ea47fcc6cdfe39d7a71571 Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Mon, 16 Aug 2010 20:55:53 -0700 Subject: [SCSI] libcxgbi: common library for cxgb3i and cxgb4i [PATCH v5 1/3] libcxgbi: common library for cxgb3i and cxgb4i From: Karen Xie Extracts common functions to libcxgbi. Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/libcxgbi.c | 2610 +++++++++++++++++++++++++++++++++++++++++ drivers/scsi/cxgbi/libcxgbi.h | 753 ++++++++++++ 2 files changed, 3363 insertions(+) create mode 100644 drivers/scsi/cxgbi/libcxgbi.c create mode 100644 drivers/scsi/cxgbi/libcxgbi.h (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c new file mode 100644 index 000000000000..db9d08a831d0 --- /dev/null +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -0,0 +1,2610 @@ +/* + * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. + * + * Copyright (c) 2010 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + * Written by: Rakesh Ranjan (rranjan@chelsio.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* ip_dev_find */ +#include + +static unsigned int dbg_level; + +#include "libcxgbi.h" + +#define DRV_MODULE_NAME "libcxgbi" +#define DRV_MODULE_DESC "Chelsio iSCSI driver library" +#define DRV_MODULE_VERSION "0.9.0" +#define DRV_MODULE_RELDATE "Jun. 
2010" + +MODULE_AUTHOR("Chelsio Communications, Inc."); +MODULE_DESCRIPTION(DRV_MODULE_DESC); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_LICENSE("GPL"); + +module_param(dbg_level, uint, 0644); +MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); + + +/* + * cxgbi device management + * maintains a list of the cxgbi devices + */ +static LIST_HEAD(cdev_list); +static DEFINE_MUTEX(cdev_mutex); + +int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, + unsigned int max_conn) +{ + struct cxgbi_ports_map *pmap = &cdev->pmap; + + pmap->port_csk = cxgbi_alloc_big_mem(max_conn * + sizeof(struct cxgbi_sock *), + GFP_KERNEL); + if (!pmap->port_csk) { + pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); + return -ENOMEM; + } + + pmap->max_connect = max_conn; + pmap->sport_base = base; + spin_lock_init(&pmap->lock); + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); + +void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) +{ + struct cxgbi_ports_map *pmap = &cdev->pmap; + struct cxgbi_sock *csk; + int i; + + for (i = 0; i < pmap->max_connect; i++) { + if (pmap->port_csk[i]) { + csk = pmap->port_csk[i]; + pmap->port_csk[i] = NULL; + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p, cdev 0x%p, offload down.\n", + csk, cdev); + spin_lock_bh(&csk->lock); + cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); + cxgbi_sock_closed(csk); + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + } + } +} +EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); + +static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) +{ + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u.\n", cdev, cdev->nports); + cxgbi_hbas_remove(cdev); + cxgbi_device_portmap_cleanup(cdev); + if (cdev->dev_ddp_cleanup) + cdev->dev_ddp_cleanup(cdev); + else + cxgbi_ddp_cleanup(cdev); + if (cdev->ddp) + cxgbi_ddp_cleanup(cdev); + if (cdev->pmap.max_connect) + cxgbi_free_big_mem(cdev->pmap.port_csk); + kfree(cdev); +} + +struct cxgbi_device *cxgbi_device_register(unsigned int extra, + unsigned int nports) +{ + struct cxgbi_device *cdev; + + cdev = kzalloc(sizeof(*cdev) + extra + nports * + (sizeof(struct cxgbi_hba *) + + sizeof(struct net_device *)), + GFP_KERNEL); + if (!cdev) { + pr_warn("nport %d, OOM.\n", nports); + return NULL; + } + cdev->ports = (struct net_device **)(cdev + 1); + cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * + sizeof(struct net_device *)); + if (extra) + cdev->dd_data = ((char *)cdev->hbas) + + nports * sizeof(struct cxgbi_hba *); + spin_lock_init(&cdev->pmap.lock); + + mutex_lock(&cdev_mutex); + list_add_tail(&cdev->list_head, &cdev_list); + mutex_unlock(&cdev_mutex); + + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u.\n", cdev, nports); + return cdev; +} +EXPORT_SYMBOL_GPL(cxgbi_device_register); + +void cxgbi_device_unregister(struct cxgbi_device *cdev) +{ + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u,%s.\n", + cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); + mutex_lock(&cdev_mutex); + list_del(&cdev->list_head); + mutex_unlock(&cdev_mutex); + cxgbi_device_destroy(cdev); +} +EXPORT_SYMBOL_GPL(cxgbi_device_unregister); + +void cxgbi_device_unregister_all(unsigned int flag) +{ + struct cxgbi_device *cdev, *tmp; + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + if ((cdev->flags & flag) == flag) { + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p# %u,%s.\n", + cdev, cdev->nports, cdev->nports ? 
+ cdev->ports[0]->name : ""); + list_del(&cdev->list_head); + cxgbi_device_destroy(cdev); + } + } + mutex_unlock(&cdev_mutex); +} +EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); + +struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) +{ + struct cxgbi_device *cdev, *tmp; + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + if (cdev->lldev == lldev) { + mutex_unlock(&cdev_mutex); + return cdev; + } + } + mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, + "lldev 0x%p, NO match found.\n", lldev); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); + +static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, + int *port) +{ + struct cxgbi_device *cdev, *tmp; + int i; + + if (ndev->priv_flags & IFF_802_1Q_VLAN) + ndev = vlan_dev_real_dev(ndev); + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + for (i = 0; i < cdev->nports; i++) { + if (ndev == cdev->ports[i]) { + mutex_unlock(&cdev_mutex); + if (port) + *port = i; + return cdev; + } + } + } + mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); + return NULL; +} + +struct cxgbi_hba *cxgbi_hba_find_by_netdev(struct net_device *dev, + struct cxgbi_device *cdev) +{ + int i; + + if (dev->priv_flags & IFF_802_1Q_VLAN) + dev = vlan_dev_real_dev(dev); + + for (i = 0; i < cdev->nports; i++) { + if (cdev->hbas[i]->ndev == dev) + return cdev->hbas[i]; + } + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, cdev 0x%p, NO match found.\n", + dev, dev->name, cdev); + return NULL; +} + +void cxgbi_hbas_remove(struct cxgbi_device *cdev) +{ + int i; + struct cxgbi_hba *chba; + + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p#%u.\n", cdev, cdev->nports); + + for (i = 0; i < cdev->nports; i++) { + chba = cdev->hbas[i]; + if (chba) { + cdev->hbas[i] = NULL; + iscsi_host_remove(chba->shost); + pci_dev_put(cdev->pdev); + iscsi_host_free(chba->shost); + } + } +} +EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); + +int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun, + unsigned int max_id, struct scsi_host_template *sht, + struct scsi_transport_template *stt) +{ + struct cxgbi_hba *chba; + struct Scsi_Host *shost; + int i, err; + + log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); + + for (i = 0; i < cdev->nports; i++) { + shost = iscsi_host_alloc(sht, sizeof(*chba), 1); + if (!shost) { + pr_info("0x%p, p%d, %s, host alloc failed.\n", + cdev, i, cdev->ports[i]->name); + err = -ENOMEM; + goto err_out; + } + + shost->transportt = stt; + shost->max_lun = max_lun; + shost->max_id = max_id; + shost->max_channel = 0; + shost->max_cmd_len = 16; + + chba = iscsi_host_priv(shost); + chba->cdev = cdev; + chba->ndev = cdev->ports[i]; + chba->shost = shost; + + log_debug(1 << CXGBI_DBG_DEV, + "cdev 0x%p, p#%d %s: chba 0x%p.\n", + cdev, i, cdev->ports[i]->name, chba); + + pci_dev_get(cdev->pdev); + err = iscsi_host_add(shost, &cdev->pdev->dev); + if (err) { + pr_info("cdev 0x%p, p#%d %s, host add failed.\n", + cdev, i, cdev->ports[i]->name); + pci_dev_put(cdev->pdev); + scsi_host_put(shost); + goto err_out; + } + + cdev->hbas[i] = chba; + } + + return 0; + +err_out: + cxgbi_hbas_remove(cdev); + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_hbas_add); + +/* + * iSCSI offload + * + * - source port management + * To find a free source port in the port allocation map we use a very simple + * rotor scheme to look for the next free port. 
+ * + * If a source port has been specified make sure that it doesn't collide with + * our normal source port allocation map. If it's outside the range of our + * allocation/deallocation scheme just let them use it. + * + * If the source port is outside our allocation range, the caller is + * responsible for keeping track of their port usage. + */ +static int sock_get_port(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ports_map *pmap = &cdev->pmap; + unsigned int start; + int idx; + + if (!pmap->max_connect) { + pr_err("cdev 0x%p, p#%u %s, NO port map.\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name); + return -EADDRNOTAVAIL; + } + + if (csk->saddr.sin_port) { + pr_err("source port NON-ZERO %u.\n", + ntohs(csk->saddr.sin_port)); + return -EADDRINUSE; + } + + spin_lock_bh(&pmap->lock); + if (pmap->used >= pmap->max_connect) { + spin_unlock_bh(&pmap->lock); + pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name); + return -EADDRNOTAVAIL; + } + + start = idx = pmap->next; + do { + if (++idx >= pmap->max_connect) + idx = 0; + if (!pmap->port_csk[idx]) { + pmap->used++; + csk->saddr.sin_port = + htons(pmap->sport_base + idx); + pmap->next = idx; + pmap->port_csk[idx] = csk; + spin_unlock_bh(&pmap->lock); + cxgbi_sock_get(csk); + log_debug(1 << CXGBI_DBG_SOCK, + "cdev 0x%p, p#%u %s, p %u, %u.\n", + cdev, csk->port_id, + cdev->ports[csk->port_id]->name, + pmap->sport_base + idx, pmap->next); + return 0; + } + } while (idx != start); + spin_unlock_bh(&pmap->lock); + + /* should not happen */ + pr_warn("cdev 0x%p, p#%u %s, next %u?\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name, + pmap->next); + return -EADDRNOTAVAIL; +} + +static void sock_put_port(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ports_map *pmap = &cdev->pmap; + + if (csk->saddr.sin_port) { + int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; + + csk->saddr.sin_port = 0; + if (idx < 0 || idx >= pmap->max_connect) { + pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", + cdev, csk->port_id, + cdev->ports[csk->port_id]->name, + ntohs(csk->saddr.sin_port)); + return; + } + + spin_lock_bh(&pmap->lock); + pmap->port_csk[idx] = NULL; + pmap->used--; + spin_unlock_bh(&pmap->lock); + + log_debug(1 << CXGBI_DBG_SOCK, + "cdev 0x%p, p#%u %s, release %u.\n", + cdev, csk->port_id, cdev->ports[csk->port_id]->name, + pmap->sport_base + idx); + + cxgbi_sock_put(csk); + } +} + +/* + * iscsi tcp connection + */ +void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) +{ + if (csk->cpl_close) { + kfree_skb(csk->cpl_close); + csk->cpl_close = NULL; + } + if (csk->cpl_abort_req) { + kfree_skb(csk->cpl_abort_req); + csk->cpl_abort_req = NULL; + } + if (csk->cpl_abort_rpl) { + kfree_skb(csk->cpl_abort_rpl); + csk->cpl_abort_rpl = NULL; + } +} +EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); + +static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) +{ + struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); + + if (!csk) { + pr_info("alloc csk %zu failed.\n", sizeof(*csk)); + return NULL; + } + + if (cdev->csk_alloc_cpls(csk) < 0) { + pr_info("csk 0x%p, alloc cpls failed.\n", csk); + kfree(csk); + return NULL; + } + + spin_lock_init(&csk->lock); + kref_init(&csk->refcnt); + skb_queue_head_init(&csk->receive_queue); + skb_queue_head_init(&csk->write_queue); + setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); + rwlock_init(&csk->callback_lock); + csk->cdev = cdev; + csk->flags = 0; + 
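	/*
	 * Editorial note: a freshly created csk starts out in CTP_CLOSED.
	 * cxgbi_sock_established() moves it to CTP_ESTABLISHED once the
	 * offloaded connection comes up, and the close/abort handlers below
	 * (need_active_close(), cxgbi_sock_rcv_peer_close(),
	 * cxgbi_sock_rcv_close_conn_rpl(), ...) walk it through the
	 * CTP_ACTIVE_CLOSE/CTP_PASSIVE_CLOSE/CTP_CLOSE_WAIT_* states and
	 * finally back to CTP_CLOSED in cxgbi_sock_closed().
	 */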
cxgbi_sock_set_state(csk, CTP_CLOSED); + + log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); + + return csk; +} + +static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport, u8 tos) +{ + struct rtable *rt; + struct flowi fl = { + .oif = 0, + .nl_u = { + .ip4_u = { + .daddr = daddr, + .saddr = saddr, + .tos = tos } + }, + .proto = IPPROTO_TCP, + .uli_u = { + .ports = { + .sport = sport, + .dport = dport } + } + }; + + if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) + return NULL; + + return rt; +} + +static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) +{ + struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; + struct dst_entry *dst; + struct net_device *ndev; + struct cxgbi_device *cdev; + struct rtable *rt = NULL; + struct cxgbi_sock *csk = NULL; + unsigned int mtu = 0; + int port = 0xFFFF; + int err = 0; + + if (daddr->sin_family != AF_INET) { + pr_info("address family 0x%x NOT supported.\n", + daddr->sin_family); + err = -EAFNOSUPPORT; + goto err_out; + } + + rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); + if (!rt) { + pr_info("no route to ipv4 0x%x, port %u.\n", + daddr->sin_addr.s_addr, daddr->sin_port); + err = -ENETUNREACH; + goto err_out; + } + dst = &rt->dst; + ndev = dst->neighbour->dev; + + if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { + pr_info("multi-cast route %pI4, port %u, dev %s.\n", + &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), + ndev->name); + err = -ENETUNREACH; + goto rel_rt; + } + + if (ndev->flags & IFF_LOOPBACK) { + ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); + mtu = ndev->mtu; + pr_info("rt dev %s, loopback -> %s, mtu %u.\n", + dst->neighbour->dev->name, ndev->name, mtu); + } + + if (ndev->priv_flags & IFF_802_1Q_VLAN) { + ndev = vlan_dev_real_dev(ndev); + pr_info("rt dev %s, vlan -> %s.\n", + dst->neighbour->dev->name, ndev->name); + } + + cdev = cxgbi_device_find_by_netdev(ndev, &port); + if (!cdev) { + pr_info("dst %pI4, %s, NOT cxgbi device.\n", + &daddr->sin_addr.s_addr, ndev->name); + err = -ENETUNREACH; + goto rel_rt; + } + log_debug(1 << CXGBI_DBG_SOCK, + "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", + &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), + port, ndev->name, cdev); + + csk = cxgbi_sock_create(cdev); + if (!csk) { + err = -ENOMEM; + goto rel_rt; + } + csk->cdev = cdev; + csk->port_id = port; + csk->mtu = mtu; + csk->dst = dst; + csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; + csk->daddr.sin_port = daddr->sin_port; + if (cdev->hbas[port]->ipv4addr) + csk->saddr.sin_addr.s_addr = cdev->hbas[port]->ipv4addr; + else + csk->saddr.sin_addr.s_addr = rt->rt_src; + + return csk; + +rel_rt: + ip_rt_put(rt); + if (csk) + cxgbi_sock_closed(csk); +err_out: + return ERR_PTR(err); +} + +void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, + unsigned int opt) +{ + csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; + dst_confirm(csk->dst); + smp_mb(); + cxgbi_sock_set_state(csk, CTP_ESTABLISHED); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_established); + +static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", + csk, csk->state, csk->flags, csk->user_data); + + if (csk->state != CTP_ESTABLISHED) { + read_lock(&csk->callback_lock); + if (csk->user_data) + iscsi_conn_failure(csk->user_data, + ISCSI_ERR_CONN_FAILED); + read_unlock(&csk->callback_lock); + } +} + +void cxgbi_sock_closed(struct 
cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); + if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) + return; + if (csk->saddr.sin_port) + sock_put_port(csk); + if (csk->dst) + dst_release(csk->dst); + csk->cdev->csk_release_offload_resources(csk); + cxgbi_sock_set_state(csk, CTP_CLOSED); + cxgbi_inform_iscsi_conn_closing(csk); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_closed); + +static void need_active_close(struct cxgbi_sock *csk) +{ + int data_lost; + int close_req = 0; + + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + spin_lock_bh(&csk->lock); + dst_confirm(csk->dst); + data_lost = skb_queue_len(&csk->receive_queue); + __skb_queue_purge(&csk->receive_queue); + + if (csk->state == CTP_ACTIVE_OPEN) + cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); + else if (csk->state == CTP_ESTABLISHED) { + close_req = 1; + cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); + } else if (csk->state == CTP_PASSIVE_CLOSE) { + close_req = 1; + cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); + } + + if (close_req) { + if (data_lost) + csk->cdev->csk_send_abort_req(csk); + else + csk->cdev->csk_send_close_req(csk); + } + + spin_unlock_bh(&csk->lock); +} + +void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) +{ + pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", + csk, csk->state, csk->flags, + &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, + &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, + errno); + + cxgbi_sock_set_state(csk, CTP_CONNECTING); + csk->err = errno; + cxgbi_sock_closed(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); + +void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) +{ + struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; + + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + if (csk->state == CTP_ACTIVE_OPEN) + cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + __kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); + +void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) +{ + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { + if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); + else { + cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); + cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); + if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) + pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_closed(csk); + } + } + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); + +void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) + goto done; + + switch (csk->state) { + case CTP_ESTABLISHED: + cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); + break; + case CTP_ACTIVE_CLOSE: + cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); + break; + case CTP_CLOSE_WAIT_1: + cxgbi_sock_closed(csk); + break; + case CTP_ABORTING: + break; + default: + pr_err("csk 
0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + } + cxgbi_inform_iscsi_conn_closing(csk); +done: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); + +void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) +{ + log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", + csk, (csk)->state, (csk)->flags, (csk)->tid); + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + csk->snd_una = snd_nxt - 1; + if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) + goto done; + + switch (csk->state) { + case CTP_ACTIVE_CLOSE: + cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); + break; + case CTP_CLOSE_WAIT_1: + case CTP_CLOSE_WAIT_2: + cxgbi_sock_closed(csk); + break; + case CTP_ABORTING: + break; + default: + pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + } +done: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); + +void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, + unsigned int snd_una, int seq_chk) +{ + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", + csk, csk->state, csk->flags, csk->tid, credits, + csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); + + spin_lock_bh(&csk->lock); + + csk->wr_cred += credits; + if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) + csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; + + while (credits) { + struct sk_buff *p = cxgbi_sock_peek_wr(csk); + + if (unlikely(!p)) { + pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", + csk, csk->state, csk->flags, csk->tid, credits, + csk->wr_cred, csk->wr_una_cred); + break; + } + + if (unlikely(credits < p->csum)) { + pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", + csk, csk->state, csk->flags, csk->tid, + credits, csk->wr_cred, csk->wr_una_cred, + p->csum); + p->csum -= credits; + break; + } else { + cxgbi_sock_dequeue_wr(csk); + credits -= p->csum; + kfree_skb(p); + } + } + + cxgbi_sock_check_wr_invariants(csk); + + if (seq_chk) { + if (unlikely(before(snd_una, csk->snd_una))) { + pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", + csk, csk->state, csk->flags, csk->tid, snd_una, + csk->snd_una); + goto done; + } + + if (csk->snd_una != snd_una) { + csk->snd_una = snd_una; + dst_confirm(csk->dst); + } + } + + if (skb_queue_len(&csk->write_queue)) { + if (csk->cdev->csk_push_tx_frames(csk, 0)) + cxgbi_conn_tx_open(csk); + } else + cxgbi_conn_tx_open(csk); +done: + spin_unlock_bh(&csk->lock); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); + +static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, + unsigned short mtu) +{ + int i = 0; + + while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) + ++i; + + return i; +} + +unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) +{ + unsigned int idx; + struct dst_entry *dst = csk->dst; + + csk->advmss = dst_metric(dst, RTAX_ADVMSS); + + if (csk->advmss > pmtu - 40) + csk->advmss = pmtu - 40; + if (csk->advmss < csk->cdev->mtus[0] - 40) + csk->advmss = csk->cdev->mtus[0] - 40; + idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); + + return idx; +} +EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); + +void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) +{ + cxgbi_skcb_tcp_seq(skb) = csk->write_seq; + __skb_queue_tail(&csk->write_queue, skb); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); + +void cxgbi_sock_purge_wr_queue(struct cxgbi_sock 
*csk) +{ + struct sk_buff *skb; + + while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) + kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); + +void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) +{ + int pending = cxgbi_sock_count_pending_wrs(csk); + + if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) + pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", + csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); +} +EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); + +static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) +{ + struct cxgbi_device *cdev = csk->cdev; + struct sk_buff *next; + int err, copied = 0; + + spin_lock_bh(&csk->lock); + + if (csk->state != CTP_ESTABLISHED) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", + csk, csk->state, csk->flags, csk->tid); + err = -EAGAIN; + goto out_err; + } + + if (csk->err) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", + csk, csk->state, csk->flags, csk->tid, csk->err); + err = -EPIPE; + goto out_err; + } + + if (csk->write_seq - csk->snd_una >= cdev->snd_win) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", + csk, csk->state, csk->flags, csk->tid, csk->write_seq, + csk->snd_una, cdev->snd_win); + err = -ENOBUFS; + goto out_err; + } + + while (skb) { + int frags = skb_shinfo(skb)->nr_frags + + (skb->len != skb->data_len); + + if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) { + pr_err("csk 0x%p, skb head %u < %u.\n", + csk, skb_headroom(skb), cdev->skb_tx_rsvd); + err = -EINVAL; + goto out_err; + } + + if (frags >= SKB_WR_LIST_SIZE) { + pr_err("csk 0x%p, frags %d, %u,%u >%u.\n", + csk, skb_shinfo(skb)->nr_frags, skb->len, + skb->data_len, (uint)(SKB_WR_LIST_SIZE)); + err = -EINVAL; + goto out_err; + } + + next = skb->next; + skb->next = NULL; + cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); + cxgbi_sock_skb_entail(csk, skb); + copied += skb->len; + csk->write_seq += skb->len + + cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); + skb = next; + } +done: + if (likely(skb_queue_len(&csk->write_queue))) + cdev->csk_push_tx_frames(csk, 1); + spin_unlock_bh(&csk->lock); + return copied; + +out_err: + if (copied == 0 && err == -EPIPE) + copied = csk->err ? csk->err : -EPIPE; + else + copied = err; + goto done; +} + +/* + * Direct Data Placement - + * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted + * final destination host-memory buffers based on the Initiator Task Tag (ITT) + * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. + * The host memory address is programmed into h/w in the format of pagepod + * entries. + * The location of the pagepod entry is encoded into ddp tag which is used as + * the base for ITT/TTT. 
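 * (Illustrative aside, derived from ddp_tag_reserve() and ddp_tag_release()
 *  below: the encode/decode pair is roughly
 *
 *      reserve:  tag = cxgbi_ddp_tag_base(tformat, sw_tag) | (idx << PPOD_IDX_SHIFT);
 *      release:  idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
 *
 *  i.e. the pagepod index occupies the reserved bit-field at PPOD_IDX_SHIFT,
 *  while cxgbi_ddp_tag_base() packs the software tag -- session age plus
 *  task ITT, see task_reserve_itt() -- into the remaining bits.)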
+ */ + +static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; +static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; +static unsigned char page_idx = DDP_PGIDX_MAX; + +static unsigned char sw_tag_idx_bits; +static unsigned char sw_tag_age_bits; + +/* + * Direct-Data Placement page size adjustment + */ +static int ddp_adjust_page_table(void) +{ + int i; + unsigned int base_order, order; + + if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { + pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n", + PAGE_SIZE, 1UL << ddp_page_shift[0]); + return -EINVAL; + } + + base_order = get_order(1UL << ddp_page_shift[0]); + order = get_order(1UL << PAGE_SHIFT); + + for (i = 0; i < DDP_PGIDX_MAX; i++) { + /* first is the kernel page size, then just doubling */ + ddp_page_order[i] = order - base_order + i; + ddp_page_shift[i] = PAGE_SHIFT + i; + } + return 0; +} + +static int ddp_find_page_index(unsigned long pgsz) +{ + int i; + + for (i = 0; i < DDP_PGIDX_MAX; i++) { + if (pgsz == (1UL << ddp_page_shift[i])) + return i; + } + pr_info("ddp page size %lu not supported.\n", pgsz); + return DDP_PGIDX_MAX; +} + +static void ddp_setup_host_page_size(void) +{ + if (page_idx == DDP_PGIDX_MAX) { + page_idx = ddp_find_page_index(PAGE_SIZE); + + if (page_idx == DDP_PGIDX_MAX) { + pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); + if (ddp_adjust_page_table() < 0) { + pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); + return; + } + page_idx = ddp_find_page_index(PAGE_SIZE); + } + pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); + } +} + +void cxgbi_ddp_page_size_factor(int *pgsz_factor) +{ + int i; + + for (i = 0; i < DDP_PGIDX_MAX; i++) + pgsz_factor[i] = ddp_page_order[i]; +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); + +/* + * DDP setup & teardown + */ + +void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, + struct cxgbi_pagepod_hdr *hdr, + struct cxgbi_gather_list *gl, unsigned int gidx) +{ + int i; + + memcpy(ppod, hdr, sizeof(*hdr)); + for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { + ppod->addr[i] = gidx < gl->nelem ? 
+ cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; + } +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); + +void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod) +{ + memset(ppod, 0, sizeof(*ppod)); +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear); + +static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp, + unsigned int start, unsigned int max, + unsigned int count, + struct cxgbi_gather_list *gl) +{ + unsigned int i, j, k; + + /* not enough entries */ + if ((max - start) < count) { + log_debug(1 << CXGBI_DBG_DDP, + "NOT enough entries %u+%u < %u.\n", start, count, max); + return -EBUSY; + } + + max -= count; + spin_lock(&ddp->map_lock); + for (i = start; i < max;) { + for (j = 0, k = i; j < count; j++, k++) { + if (ddp->gl_map[k]) + break; + } + if (j == count) { + for (j = 0, k = i; j < count; j++, k++) + ddp->gl_map[k] = gl; + spin_unlock(&ddp->map_lock); + return i; + } + i += j + 1; + } + spin_unlock(&ddp->map_lock); + log_debug(1 << CXGBI_DBG_DDP, + "NO suitable entries %u available.\n", count); + return -EBUSY; +} + +static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, + int start, int count) +{ + spin_lock(&ddp->map_lock); + memset(&ddp->gl_map[start], 0, + count * sizeof(struct cxgbi_gather_list *)); + spin_unlock(&ddp->map_lock); +} + +static inline void ddp_gl_unmap(struct pci_dev *pdev, + struct cxgbi_gather_list *gl) +{ + int i; + + for (i = 0; i < gl->nelem; i++) + dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, + PCI_DMA_FROMDEVICE); +} + +static inline int ddp_gl_map(struct pci_dev *pdev, + struct cxgbi_gather_list *gl) +{ + int i; + + for (i = 0; i < gl->nelem; i++) { + gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) { + log_debug(1 << CXGBI_DBG_DDP, + "page %d 0x%p, 0x%p dma mapping err.\n", + i, gl->pages[i], pdev); + goto unmap; + } + } + return i; +unmap: + if (i) { + unsigned int nelem = gl->nelem; + + gl->nelem = i; + ddp_gl_unmap(pdev, gl); + gl->nelem = nelem; + } + return -EINVAL; +} + +static void ddp_release_gl(struct cxgbi_gather_list *gl, + struct pci_dev *pdev) +{ + ddp_gl_unmap(pdev, gl); + kfree(gl); +} + +static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen, + struct scatterlist *sgl, + unsigned int sgcnt, + struct pci_dev *pdev, + gfp_t gfp) +{ + struct cxgbi_gather_list *gl; + struct scatterlist *sg = sgl; + struct page *sgpage = sg_page(sg); + unsigned int sglen = sg->length; + unsigned int sgoffset = sg->offset; + unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> + PAGE_SHIFT; + int i = 1, j = 0; + + if (xferlen < DDP_THRESHOLD) { + log_debug(1 << CXGBI_DBG_DDP, + "xfer %u < threshold %u, no ddp.\n", + xferlen, DDP_THRESHOLD); + return NULL; + } + + gl = kzalloc(sizeof(struct cxgbi_gather_list) + + npages * (sizeof(dma_addr_t) + + sizeof(struct page *)), gfp); + if (!gl) { + log_debug(1 << CXGBI_DBG_DDP, + "xfer %u, %u pages, OOM.\n", xferlen, npages); + return NULL; + } + + log_debug(1 << CXGBI_DBG_DDP, + "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); + + gl->pages = (struct page **)&gl->phys_addr[npages]; + gl->nelem = npages; + gl->length = xferlen; + gl->offset = sgoffset; + gl->pages[0] = sgpage; + + for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt; + i++, sg = sg_next(sg)) { + struct page *page = sg_page(sg); + + if (sgpage == page && sg->offset == sgoffset + sglen) + sglen += sg->length; + else { + /* make sure the sgl is fit for ddp: + * each has the same page size, and + * all of 
the middle pages are used completely + */ + if ((j && sgoffset) || ((i != sgcnt - 1) && + ((sglen + sgoffset) & ~PAGE_MASK))) { + log_debug(1 << CXGBI_DBG_DDP, + "page %d/%u, %u + %u.\n", + i, sgcnt, sgoffset, sglen); + goto error_out; + } + + j++; + if (j == gl->nelem || sg->offset) { + log_debug(1 << CXGBI_DBG_DDP, + "page %d/%u, offset %u.\n", + j, gl->nelem, sg->offset); + goto error_out; + } + gl->pages[j] = page; + sglen = sg->length; + sgoffset = sg->offset; + sgpage = page; + } + } + gl->nelem = ++j; + + if (ddp_gl_map(pdev, gl) < 0) + goto error_out; + + return gl; + +error_out: + kfree(gl); + return NULL; +} + +static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag) +{ + struct cxgbi_device *cdev = chba->cdev; + struct cxgbi_ddp_info *ddp = cdev->ddp; + u32 idx; + + idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; + if (idx < ddp->nppods) { + struct cxgbi_gather_list *gl = ddp->gl_map[idx]; + unsigned int npods; + + if (!gl || !gl->nelem) { + pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n", + tag, idx, gl, gl ? gl->nelem : 0); + return; + } + npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; + log_debug(1 << CXGBI_DBG_DDP, + "tag 0x%x, release idx %u, npods %u.\n", + tag, idx, npods); + cdev->csk_ddp_clear(chba, tag, idx, npods); + ddp_unmark_entries(ddp, idx, npods); + ddp_release_gl(gl, ddp->pdev); + } else + pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods); +} + +static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, + u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl, + gfp_t gfp) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ddp_info *ddp = cdev->ddp; + struct cxgbi_tag_format *tformat = &cdev->tag_format; + struct cxgbi_pagepod_hdr hdr; + unsigned int npods; + int idx = -1; + int err = -ENOMEM; + u32 tag; + + npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; + if (ddp->idx_last == ddp->nppods) + idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, + npods, gl); + else { + idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, + ddp->nppods, npods, + gl); + if (idx < 0 && ddp->idx_last >= npods) { + idx = ddp_find_unused_entries(ddp, 0, + min(ddp->idx_last + npods, ddp->nppods), + npods, gl); + } + } + if (idx < 0) { + log_debug(1 << CXGBI_DBG_DDP, + "xferlen %u, gl %u, npods %u NO DDP.\n", + gl->length, gl->nelem, npods); + return idx; + } + + if (cdev->csk_ddp_alloc_gl_skb) { + err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp); + if (err < 0) + goto unmark_entries; + } + + tag = cxgbi_ddp_tag_base(tformat, sw_tag); + tag |= idx << PPOD_IDX_SHIFT; + + hdr.rsvd = 0; + hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); + hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); + hdr.max_offset = htonl(gl->length); + hdr.page_offset = htonl(gl->offset); + + err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); + if (err < 0) { + if (cdev->csk_ddp_free_gl_skb) + cdev->csk_ddp_free_gl_skb(ddp, idx, npods); + goto unmark_entries; + } + + ddp->idx_last = idx; + log_debug(1 << CXGBI_DBG_DDP, + "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n", + gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx, + npods); + *tagp = tag; + return 0; + +unmark_entries: + ddp_unmark_entries(ddp, idx, npods); + return err; +} + +int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, + unsigned int sw_tag, unsigned int xferlen, + struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_tag_format *tformat = &cdev->tag_format; + struct cxgbi_gather_list *gl; + 
int err; + + if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || + xferlen < DDP_THRESHOLD) { + log_debug(1 << CXGBI_DBG_DDP, + "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); + return -EINVAL; + } + + if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { + log_debug(1 << CXGBI_DBG_DDP, + "sw_tag 0x%x NOT usable.\n", sw_tag); + return -EINVAL; + } + + gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); + if (!gl) + return -ENOMEM; + + err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); + if (err < 0) + ddp_release_gl(gl, cdev->pdev); + + return err; +} + +static void ddp_destroy(struct kref *kref) +{ + struct cxgbi_ddp_info *ddp = container_of(kref, + struct cxgbi_ddp_info, + refcnt); + struct cxgbi_device *cdev = ddp->cdev; + int i = 0; + + pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev); + + while (i < ddp->nppods) { + struct cxgbi_gather_list *gl = ddp->gl_map[i]; + + if (gl) { + int npods = (gl->nelem + PPOD_PAGES_MAX - 1) + >> PPOD_PAGES_SHIFT; + pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); + kfree(gl); + if (cdev->csk_ddp_free_gl_skb) + cdev->csk_ddp_free_gl_skb(ddp, i, npods); + i += npods; + } else + i++; + } + cxgbi_free_big_mem(ddp); +} + +int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) +{ + struct cxgbi_ddp_info *ddp = cdev->ddp; + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); + cdev->ddp = NULL; + if (ddp) + return kref_put(&ddp->refcnt, ddp_destroy); + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); + +int cxgbi_ddp_init(struct cxgbi_device *cdev, + unsigned int llimit, unsigned int ulimit, + unsigned int max_txsz, unsigned int max_rxsz) +{ + struct cxgbi_ddp_info *ddp; + unsigned int ppmax, bits; + + ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT; + bits = __ilog2_u32(ppmax) + 1; + if (bits > PPOD_IDX_MAX_SIZE) + bits = PPOD_IDX_MAX_SIZE; + ppmax = (1 << (bits - 1)) - 1; + + ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) + + ppmax * (sizeof(struct cxgbi_gather_list *) + + sizeof(struct sk_buff *)), + GFP_KERNEL); + if (!ddp) { + pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax); + return -ENOMEM; + } + ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); + ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) + + ppmax * sizeof(struct cxgbi_gather_list *)); + cdev->ddp = ddp; + + spin_lock_init(&ddp->map_lock); + kref_init(&ddp->refcnt); + + ddp->cdev = cdev; + ddp->pdev = cdev->pdev; + ddp->llimit = llimit; + ddp->ulimit = ulimit; + ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE); + ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE); + ddp->nppods = ppmax; + ddp->idx_last = ppmax; + ddp->idx_bits = bits; + ddp->idx_mask = (1 << bits) - 1; + ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1; + + cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; + cdev->tag_format.rsvd_bits = ddp->idx_bits; + cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT; + cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1; + + pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n", + cdev->ports[0]->name, cdev->tag_format.sw_bits, + cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift, + cdev->tag_format.rsvd_mask); + + cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); + cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, + ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); + + log_debug(1 << CXGBI_DBG_DDP, + "%s max payload size: %u/%u, %u/%u.\n", + cdev->ports[0]->name, cdev->tx_max_size, 
ddp->max_txsz, + cdev->rx_max_size, ddp->max_rxsz); + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_ddp_init); + +/* + * APIs interacting with open-iscsi libraries + */ + +static unsigned char padding[4]; + +static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) +{ + struct scsi_cmnd *sc = task->sc; + struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_hba *chba = cconn->chba; + struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; + u32 tag = ntohl((__force u32)hdr_itt); + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); + if (sc && + (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && + cxgbi_is_ddp_tag(tformat, tag)) + ddp_tag_release(chba, tag); +} + +static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) +{ + struct scsi_cmnd *sc = task->sc; + struct iscsi_conn *conn = task->conn; + struct iscsi_session *sess = conn->session; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_hba *chba = cconn->chba; + struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; + u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; + u32 tag = 0; + int err = -EINVAL; + + if (sc && + (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { + err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, + scsi_in(sc)->length, + scsi_in(sc)->table.sgl, + scsi_in(sc)->table.nents, + GFP_ATOMIC); + if (err < 0) + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", + cconn->cep->csk, task, scsi_in(sc)->length, + scsi_in(sc)->table.nents); + } + + if (err < 0) + tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); + /* the itt need to sent in big-endian order */ + *hdr_itt = (__force itt_t)htonl(tag); + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", + chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); + return 0; +} + +void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + u32 tag = ntohl((__force u32) itt); + u32 sw_bits; + + sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); + if (idx) + *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); + if (age) + *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", + cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, + age ? 
*age : 0xFF); +} +EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); + +void cxgbi_conn_tx_open(struct cxgbi_sock *csk) +{ + struct iscsi_conn *conn = csk->user_data; + + if (conn) { + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p, cid %d.\n", csk, conn->id); + iscsi_conn_queue_work(conn); + } +} +EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); + +/* + * pdu receive, interact with libiscsi_tcp + */ +static inline int read_pdu_skb(struct iscsi_conn *conn, + struct sk_buff *skb, + unsigned int offset, + int offloaded) +{ + int status = 0; + int bytes_read; + + bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); + switch (status) { + case ISCSI_TCP_CONN_ERR: + pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", + skb, offset, offloaded); + return -EIO; + case ISCSI_TCP_SUSPENDED: + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", + skb, offset, offloaded, bytes_read); + /* no transfer - just have caller flush queue */ + return bytes_read; + case ISCSI_TCP_SKB_DONE: + pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", + skb, offset, offloaded); + /* + * pdus should always fit in the skb and we should get + * segment done notifcation. + */ + iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); + return -EFAULT; + case ISCSI_TCP_SEGMENT_DONE: + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", + skb, offset, offloaded, bytes_read); + return bytes_read; + default: + pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", + skb, offset, offloaded, status); + return -EINVAL; + } +} + +static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", + conn, skb, skb->len, cxgbi_skcb_flags(skb)); + + if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { + pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); + iscsi_conn_failure(conn, ISCSI_ERR_PROTO); + return -EIO; + } + + if (conn->hdrdgst_en && + cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { + pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); + iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); + return -EIO; + } + + return read_pdu_skb(conn, skb, 0, 0); +} + +static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, + struct sk_buff *skb, unsigned int offset) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + bool offloaded = 0; + int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", + conn, skb, skb->len, cxgbi_skcb_flags(skb)); + + if (conn->datadgst_en && + cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { + pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", + conn, lskb, cxgbi_skcb_flags(lskb)); + iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); + return -EIO; + } + + if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) + return 0; + + /* coalesced, add header digest length */ + if (lskb == skb && conn->hdrdgst_en) + offset += ISCSI_DIGEST_SIZE; + + if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) + offloaded = 1; + + if (opcode == ISCSI_OP_SCSI_DATA_IN) + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", + skb, opcode, ntohl(tcp_conn->in.hdr->itt), + tcp_conn->in.datalen, offloaded ? 
"is" : "not"); + + return read_pdu_skb(conn, skb, offset, offloaded); +} + +static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) +{ + struct cxgbi_device *cdev = csk->cdev; + int must_send; + u32 credits; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", + csk, csk->state, csk->flags, csk->tid, csk->copied_seq, + csk->rcv_wup, cdev->rx_credit_thres, + cdev->rcv_win); + + if (csk->state != CTP_ESTABLISHED) + return; + + credits = csk->copied_seq - csk->rcv_wup; + if (unlikely(!credits)) + return; + if (unlikely(cdev->rx_credit_thres == 0)) + return; + + must_send = credits + 16384 >= cdev->rcv_win; + if (must_send || credits >= cdev->rx_credit_thres) + csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); +} + +void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct iscsi_conn *conn = csk->user_data; + struct sk_buff *skb; + unsigned int read = 0; + int err = 0; + + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, conn 0x%p.\n", csk, conn); + + if (unlikely(!conn || conn->suspend_rx)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", + csk, conn, conn ? conn->id : 0xFF, + conn ? conn->suspend_rx : 0xFF); + read_unlock(&csk->callback_lock); + return; + } + + while (!err) { + read_lock(&csk->callback_lock); + skb = skb_peek(&csk->receive_queue); + if (!skb || + !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { + if (skb) + log_debug(1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, NOT ready 0x%lx.\n", + skb, cxgbi_skcb_flags(skb)); + read_unlock(&csk->callback_lock); + break; + } + __skb_unlink(skb, &csk->receive_queue); + read_unlock(&csk->callback_lock); + + read += cxgbi_skcb_rx_pdulen(skb); + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", + csk, skb, skb->len, cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { + err = skb_read_pdu_bhs(conn, skb); + if (err < 0) + break; + err = skb_read_pdu_data(conn, skb, skb, + err + cdev->skb_rx_extra); + } else { + err = skb_read_pdu_bhs(conn, skb); + if (err < 0) + break; + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { + struct sk_buff *dskb; + + read_lock(&csk->callback_lock); + dskb = skb_peek(&csk->receive_queue); + if (!dskb) { + read_unlock(&csk->callback_lock); + pr_err("csk 0x%p, NO data.\n", csk); + err = -EAGAIN; + break; + } + __skb_unlink(dskb, &csk->receive_queue); + read_unlock(&csk->callback_lock); + + err = skb_read_pdu_data(conn, skb, dskb, 0); + __kfree_skb(dskb); + } else + err = skb_read_pdu_data(conn, skb, skb, 0); + } + if (err < 0) + break; + + __kfree_skb(skb); + } + + log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); + if (read) { + csk->copied_seq += read; + csk_return_rx_credits(csk, read); + conn->rxdata_octets += read; + } + + if (err < 0) { + pr_info("csk 0x%p, 0x%p, rx failed %d.\n", csk, conn, err); + iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); + } +} +EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); + +static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, + unsigned int offset, unsigned int *off, + struct scatterlist **sgp) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, sgcnt, i) { + if (offset < sg->length) { + *off = offset; + *sgp = sg; + return 0; + } + offset -= sg->length; + } + return -EFAULT; +} + +static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, + unsigned int dlen, skb_frag_t *frags, + int frag_max) +{ + 
unsigned int datalen = dlen; + unsigned int sglen = sg->length - sgoffset; + struct page *page = sg_page(sg); + int i; + + i = 0; + do { + unsigned int copy; + + if (!sglen) { + sg = sg_next(sg); + if (!sg) { + pr_warn("sg %d NULL, len %u/%u.\n", + i, datalen, dlen); + return -EINVAL; + } + sgoffset = 0; + sglen = sg->length; + page = sg_page(sg); + + } + copy = min(datalen, sglen); + if (i && page == frags[i - 1].page && + sgoffset + sg->offset == + frags[i - 1].page_offset + frags[i - 1].size) { + frags[i - 1].size += copy; + } else { + if (i >= frag_max) { + pr_warn("too many pages %u, dlen %u.\n", + frag_max, dlen); + return -EINVAL; + } + + frags[i].page = page; + frags[i].page_offset = sg->offset + sgoffset; + frags[i].size = copy; + i++; + } + datalen -= copy; + sgoffset += copy; + sglen -= copy; + } while (datalen); + + return i; +} + +int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) +{ + struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + struct iscsi_conn *conn = task->conn; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = task->dd_data + sizeof(*tcp_task); + struct scsi_cmnd *sc = task->sc; + int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; + + tcp_task->dd_data = tdata; + task->hdr = NULL; + + if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && + (opcode == ISCSI_OP_SCSI_DATA_OUT || + (opcode == ISCSI_OP_SCSI_CMD && + (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) + /* data could goes into skb head */ + headroom += min_t(unsigned int, + SKB_MAX_HEAD(cdev->skb_tx_rsvd), + conn->max_xmit_dlength); + + tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); + if (!tdata->skb) { + pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", + cdev->skb_tx_rsvd, headroom, opcode); + return -ENOMEM; + } + + skb_reserve(tdata->skb, cdev->skb_tx_rsvd); + task->hdr = (struct iscsi_hdr *)tdata->skb->data; + task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ + + /* data_out uses scsi_cmd's itt */ + if (opcode != ISCSI_OP_SCSI_DATA_OUT) + task_reserve_itt(task, &task->hdr->itt); + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", + task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, + conn->max_xmit_dlength, ntohl(task->hdr->itt)); + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); + +static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) +{ + u8 submode = 0; + + if (hcrc) + submode |= 1; + if (dcrc) + submode |= 2; + cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; +} + +int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, + unsigned int count) +{ + struct iscsi_conn *conn = task->conn; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = tcp_task->dd_data; + struct sk_buff *skb = tdata->skb; + unsigned int datalen = count; + int i, padlen = iscsi_padding(count); + struct page *pg; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", + task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, + ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); + + skb_put(skb, task->hdr_len); + tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? 
conn->datadgst_en : 0); + if (!count) + return 0; + + if (task->sc) { + struct scsi_data_buffer *sdb = scsi_out(task->sc); + struct scatterlist *sg = NULL; + int err; + + tdata->offset = offset; + tdata->count = count; + err = sgl_seek_offset( + sdb->table.sgl, sdb->table.nents, + tdata->offset, &tdata->sgoffset, &sg); + if (err < 0) { + pr_warn("tpdu, sgl %u, bad offset %u/%u.\n", + sdb->table.nents, tdata->offset, sdb->length); + return err; + } + err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, + tdata->frags, MAX_PDU_FRAGS); + if (err < 0) { + pr_warn("tpdu, sgl %u, bad offset %u + %u.\n", + sdb->table.nents, tdata->offset, tdata->count); + return err; + } + tdata->nr_frags = err; + + if (tdata->nr_frags > MAX_SKB_FRAGS || + (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { + char *dst = skb->data + task->hdr_len; + skb_frag_t *frag = tdata->frags; + + /* data fits in the skb's headroom */ + for (i = 0; i < tdata->nr_frags; i++, frag++) { + char *src = kmap_atomic(frag->page, + KM_SOFTIRQ0); + + memcpy(dst, src+frag->page_offset, frag->size); + dst += frag->size; + kunmap_atomic(src, KM_SOFTIRQ0); + } + if (padlen) { + memset(dst, 0, padlen); + padlen = 0; + } + skb_put(skb, count + padlen); + } else { + /* data fit into frag_list */ + for (i = 0; i < tdata->nr_frags; i++) + get_page(tdata->frags[i].page); + + memcpy(skb_shinfo(skb)->frags, tdata->frags, + sizeof(skb_frag_t) * tdata->nr_frags); + skb_shinfo(skb)->nr_frags = tdata->nr_frags; + skb->len += count; + skb->data_len += count; + skb->truesize += count; + } + + } else { + pg = virt_to_page(task->data); + + get_page(pg); + skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), + count); + skb->len += count; + skb->data_len += count; + skb->truesize += count; + } + + if (padlen) { + i = skb_shinfo(skb)->nr_frags; + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + virt_to_page(padding), offset_in_page(padding), + padlen); + + skb->data_len += padlen; + skb->truesize += padlen; + skb->len += padlen; + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); + +int cxgbi_conn_xmit_pdu(struct iscsi_task *task) +{ + struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct iscsi_tcp_task *tcp_task = task->dd_data; + struct cxgbi_task_data *tdata = tcp_task->dd_data; + struct sk_buff *skb = tdata->skb; + unsigned int datalen; + int err; + + if (!skb) { + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "task 0x%p, skb NULL.\n", task); + return 0; + } + + datalen = skb->data_len; + tdata->skb = NULL; + err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); + if (err > 0) { + int pdulen = err; + + log_debug(1 << CXGBI_DBG_PDU_TX, + "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", + task, task->sc, skb, skb->len, skb->data_len, err); + + if (task->conn->hdrdgst_en) + pdulen += ISCSI_DIGEST_SIZE; + + if (datalen && task->conn->datadgst_en) + pdulen += ISCSI_DIGEST_SIZE; + + task->conn->txdata_octets += pdulen; + return 0; + } + + if (err == -EAGAIN || err == -ENOBUFS) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", + task, skb, skb->len, skb->data_len, err); + /* reset skb to send when we are called again */ + tdata->skb = skb; + return err; + } + + kfree_skb(skb); + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, + "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", + task->itt, skb, skb->len, skb->data_len, err); + iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); + iscsi_conn_failure(task->conn, 
ISCSI_ERR_XMIT_FAILED); + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); + +void cxgbi_cleanup_task(struct iscsi_task *task) +{ + struct cxgbi_task_data *tdata = task->dd_data + + sizeof(struct iscsi_tcp_task); + + log_debug(1 << CXGBI_DBG_ISCSI, + "task 0x%p, skb 0x%p, itt 0x%x.\n", + task, tdata->skb, task->hdr_itt); + + /* never reached the xmit task callout */ + if (tdata->skb) + __kfree_skb(tdata->skb); + memset(tdata, 0, sizeof(*tdata)); + + task_release_itt(task, task->hdr_itt); + iscsi_tcp_cleanup_task(task); +} +EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); + +void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + + stats->txdata_octets = conn->txdata_octets; + stats->rxdata_octets = conn->rxdata_octets; + stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; + stats->dataout_pdus = conn->dataout_pdus_cnt; + stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; + stats->datain_pdus = conn->datain_pdus_cnt; + stats->r2t_pdus = conn->r2t_pdus_cnt; + stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; + stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; + stats->digest_err = 0; + stats->timeout_err = 0; + stats->custom_length = 1; + strcpy(stats->custom[0].desc, "eh_abort_cnt"); + stats->custom[0].value = conn->eh_abort_cnt; +} +EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); + +static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_device *cdev = cconn->chba->cdev; + unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); + unsigned int max_def = 512 * MAX_SKB_FRAGS; + unsigned int max = max(max_def, headroom); + + max = min(cconn->chba->cdev->tx_max_size, max); + if (conn->max_xmit_dlength) + conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); + else + conn->max_xmit_dlength = max; + cxgbi_align_pdu_size(conn->max_xmit_dlength); + + return 0; +} + +static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) +{ + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + unsigned int max = cconn->chba->cdev->rx_max_size; + + cxgbi_align_pdu_size(max); + + if (conn->max_recv_dlength) { + if (conn->max_recv_dlength > max) { + pr_err("MaxRecvDataSegmentLength %u > %u.\n", + conn->max_recv_dlength, max); + return -EINVAL; + } + conn->max_recv_dlength = min(conn->max_recv_dlength, max); + cxgbi_align_pdu_size(conn->max_recv_dlength); + } else + conn->max_recv_dlength = max; + + return 0; +} + +int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct cxgbi_sock *csk = cconn->cep->csk; + int value, err = 0; + + log_debug(1 << CXGBI_DBG_ISCSI, + "cls_conn 0x%p, param %d, buf(%d) %s.\n", + cls_conn, param, buflen, buf); + + switch (param) { + case ISCSI_PARAM_HDRDGST_EN: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err && conn->hdrdgst_en) + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, + conn->hdrdgst_en, + conn->datadgst_en, 0); + break; + case ISCSI_PARAM_DATADGST_EN: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err && conn->datadgst_en) + err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, + conn->hdrdgst_en, + conn->datadgst_en, 0); + break; + case ISCSI_PARAM_MAX_R2T: + 
sscanf(buf, "%d", &value); + if (value <= 0 || !is_power_of_2(value)) + return -EINVAL; + if (session->max_r2t == value) + break; + iscsi_tcp_r2tpool_free(session); + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err && iscsi_tcp_r2tpool_alloc(session)) + return -ENOMEM; + case ISCSI_PARAM_MAX_RECV_DLENGTH: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err) + err = cxgbi_conn_max_recv_dlength(conn); + break; + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + err = iscsi_set_param(cls_conn, param, buf, buflen); + if (!err) + err = cxgbi_conn_max_xmit_dlength(conn); + break; + default: + return iscsi_set_param(cls_conn, param, buf, buflen); + } + return err; +} +EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); + +int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf) +{ + struct iscsi_conn *iconn = cls_conn->dd_data; + int len; + + log_debug(1 << CXGBI_DBG_ISCSI, + "cls_conn 0x%p, param %d.\n", cls_conn, param); + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + spin_lock_bh(&iconn->session->lock); + len = sprintf(buf, "%hu\n", iconn->portal_port); + spin_unlock_bh(&iconn->session->lock); + break; + case ISCSI_PARAM_CONN_ADDRESS: + spin_lock_bh(&iconn->session->lock); + len = sprintf(buf, "%s\n", iconn->portal_address); + spin_unlock_bh(&iconn->session->lock); + break; + default: + return iscsi_conn_get_param(cls_conn, param, buf); + } + return len; +} +EXPORT_SYMBOL_GPL(cxgbi_get_conn_param); + +struct iscsi_cls_conn * +cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) +{ + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct cxgbi_conn *cconn; + + cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); + if (!cls_conn) + return NULL; + + conn = cls_conn->dd_data; + tcp_conn = conn->dd_data; + cconn = tcp_conn->dd_data; + cconn->iconn = conn; + + log_debug(1 << CXGBI_DBG_ISCSI, + "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", + cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); + + return cls_conn; +} +EXPORT_SYMBOL_GPL(cxgbi_create_conn); + +int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + u64 transport_eph, int is_leading) +{ + struct iscsi_conn *conn = cls_conn->dd_data; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct cxgbi_conn *cconn = tcp_conn->dd_data; + struct iscsi_endpoint *ep; + struct cxgbi_endpoint *cep; + struct cxgbi_sock *csk; + int err; + + ep = iscsi_lookup_endpoint(transport_eph); + if (!ep) + return -EINVAL; + + /* setup ddp pagesize */ + cep = ep->dd_data; + csk = cep->csk; + err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); + if (err < 0) + return err; + + err = iscsi_conn_bind(cls_session, cls_conn, is_leading); + if (err) + return -EINVAL; + + /* calculate the tag idx bits needed for this conn based on cmds_max */ + cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; + + write_lock(&csk->callback_lock); + csk->user_data = conn; + cconn->chba = cep->chba; + cconn->cep = cep; + cep->cconn = cconn; + write_unlock(&csk->callback_lock); + + cxgbi_conn_max_xmit_dlength(conn); + cxgbi_conn_max_recv_dlength(conn); + + spin_lock_bh(&conn->session->lock); + sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr); + conn->portal_port = ntohs(csk->daddr.sin_port); + spin_unlock_bh(&conn->session->lock); + + log_debug(1 << CXGBI_DBG_ISCSI, + "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", + cls_session, cls_conn, ep, cconn, csk); + 
/* init recv engine */ + iscsi_tcp_hdr_recv_prep(tcp_conn); + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_bind_conn); + +struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, + u16 cmds_max, u16 qdepth, + u32 initial_cmdsn) +{ + struct cxgbi_endpoint *cep; + struct cxgbi_hba *chba; + struct Scsi_Host *shost; + struct iscsi_cls_session *cls_session; + struct iscsi_session *session; + + if (!ep) { + pr_err("missing endpoint.\n"); + return NULL; + } + + cep = ep->dd_data; + chba = cep->chba; + shost = chba->shost; + + BUG_ON(chba != iscsi_host_priv(shost)); + + cls_session = iscsi_session_setup(chba->cdev->itp, shost, + cmds_max, 0, + sizeof(struct iscsi_tcp_task) + + sizeof(struct cxgbi_task_data), + initial_cmdsn, ISCSI_MAX_TARGET); + if (!cls_session) + return NULL; + + session = cls_session->dd_data; + if (iscsi_tcp_r2tpool_alloc(session)) + goto remove_session; + + log_debug(1 << CXGBI_DBG_ISCSI, + "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); + return cls_session; + +remove_session: + iscsi_session_teardown(cls_session); + return NULL; +} +EXPORT_SYMBOL_GPL(cxgbi_create_session); + +void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) +{ + log_debug(1 << CXGBI_DBG_ISCSI, + "cls sess 0x%p.\n", cls_session); + + iscsi_tcp_r2tpool_free(cls_session->dd_data); + iscsi_session_teardown(cls_session); +} +EXPORT_SYMBOL_GPL(cxgbi_destroy_session); + +int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, + char *buf, int buflen) +{ + struct cxgbi_hba *chba = iscsi_host_priv(shost); + + if (!chba->ndev) { + shost_printk(KERN_ERR, shost, "Could not get host param. " + "netdev for host not set.\n"); + return -ENODEV; + } + + log_debug(1 << CXGBI_DBG_ISCSI, + "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", + shost, chba, chba->ndev->name, param, buflen, buf); + + switch (param) { + case ISCSI_HOST_PARAM_IPADDRESS: + { + __be32 addr = in_aton(buf); + log_debug(1 << CXGBI_DBG_ISCSI, + "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); + cxgbi_set_iscsi_ipv4(chba, addr); + return 0; + } + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_NETDEV_NAME: + return 0; + default: + return iscsi_host_set_param(shost, param, buf, buflen); + } +} +EXPORT_SYMBOL_GPL(cxgbi_set_host_param); + +int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, + char *buf) +{ + struct cxgbi_hba *chba = iscsi_host_priv(shost); + int len = 0; + + if (!chba->ndev) { + shost_printk(KERN_ERR, shost, "Could not get host param. 
" + "netdev for host not set.\n"); + return -ENODEV; + } + + log_debug(1 << CXGBI_DBG_ISCSI, + "shost 0x%p, hba 0x%p,%s, param %d.\n", + shost, chba, chba->ndev->name, param); + + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); + break; + case ISCSI_HOST_PARAM_NETDEV_NAME: + len = sprintf(buf, "%s\n", chba->ndev->name); + break; + case ISCSI_HOST_PARAM_IPADDRESS: + { + __be32 addr; + + addr = cxgbi_get_iscsi_ipv4(chba); + len = sprintf(buf, "%pI4", &addr); + log_debug(1 << CXGBI_DBG_ISCSI, + "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); + break; + } + default: + return iscsi_host_get_param(shost, param, buf); + } + + return len; +} +EXPORT_SYMBOL_GPL(cxgbi_get_host_param); + +struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking) +{ + struct iscsi_endpoint *ep; + struct cxgbi_endpoint *cep; + struct cxgbi_hba *hba = NULL; + struct cxgbi_sock *csk; + int err = -EINVAL; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, + "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", + shost, non_blocking, dst_addr); + + if (shost) { + hba = iscsi_host_priv(shost); + if (!hba) { + pr_info("shost 0x%p, priv NULL.\n", shost); + goto err_out; + } + } + + csk = cxgbi_check_route(dst_addr); + if (IS_ERR(csk)) + return (struct iscsi_endpoint *)csk; + cxgbi_sock_get(csk); + + if (!hba) + hba = csk->cdev->hbas[csk->port_id]; + else if (hba != csk->cdev->hbas[csk->port_id]) { + pr_info("Could not connect through requested host %u" + "hba 0x%p != 0x%p (%u).\n", + shost->host_no, hba, + csk->cdev->hbas[csk->port_id], csk->port_id); + err = -ENOSPC; + goto release_conn; + } + + err = sock_get_port(csk); + if (err) + goto release_conn; + + cxgbi_sock_set_state(csk, CTP_CONNECTING); + err = csk->cdev->csk_init_act_open(csk); + if (err) + goto release_conn; + + if (cxgbi_sock_is_closing(csk)) { + err = -ENOSPC; + pr_info("csk 0x%p is closing.\n", csk); + goto release_conn; + } + + ep = iscsi_create_endpoint(sizeof(*cep)); + if (!ep) { + err = -ENOMEM; + pr_info("iscsi alloc ep, OOM.\n"); + goto release_conn; + } + + cep = ep->dd_data; + cep->csk = csk; + cep->chba = hba; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, + "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", + ep, cep, csk, hba, hba->ndev->name); + return ep; + +release_conn: + cxgbi_sock_put(csk); + cxgbi_sock_closed(csk); +err_out: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(cxgbi_ep_connect); + +int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_sock *csk = cep->csk; + + if (!cxgbi_sock_is_established(csk)) + return 0; + return 1; +} +EXPORT_SYMBOL_GPL(cxgbi_ep_poll); + +void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) +{ + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_conn *cconn = cep->cconn; + struct cxgbi_sock *csk = cep->csk; + + log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, + "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", + ep, cep, cconn, csk, csk->state, csk->flags); + + if (cconn && cconn->iconn) { + iscsi_suspend_tx(cconn->iconn); + write_lock_bh(&csk->callback_lock); + cep->csk->user_data = NULL; + cconn->cep = NULL; + write_unlock_bh(&csk->callback_lock); + } + iscsi_destroy_endpoint(ep); + + if (likely(csk->state >= CTP_ESTABLISHED)) + need_active_close(csk); + else + cxgbi_sock_closed(csk); + + cxgbi_sock_put(csk); +} +EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); + +int cxgbi_iscsi_init(struct iscsi_transport 
*itp, + struct scsi_transport_template **stt) +{ + *stt = iscsi_register_transport(itp); + if (*stt == NULL) { + pr_err("unable to register %s transport 0x%p.\n", + itp->name, itp); + return -ENODEV; + } + log_debug(1 << CXGBI_DBG_ISCSI, + "%s, registered iscsi transport 0x%p.\n", + itp->name, stt); + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); + +void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, + struct scsi_transport_template **stt) +{ + if (*stt) { + log_debug(1 << CXGBI_DBG_ISCSI, + "de-register transport 0x%p, %s, stt 0x%p.\n", + itp, itp->name, *stt); + *stt = NULL; + iscsi_unregister_transport(itp); + } +} +EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); + +static int __init libcxgbi_init_module(void) +{ + sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; + sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; + + pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", + ISCSI_ITT_MASK, sw_tag_idx_bits, + ISCSI_AGE_MASK, sw_tag_age_bits); + + ddp_setup_host_page_size(); + return 0; +} + +static void __exit libcxgbi_exit_module(void) +{ + cxgbi_device_unregister_all(0xFF); + return; +} + +module_init(libcxgbi_init_module); +module_exit(libcxgbi_exit_module); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h new file mode 100644 index 000000000000..40551f3be5dc --- /dev/null +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -0,0 +1,753 @@ +/* + * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver. + * + * Copyright (c) 2010 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + * Written by: Rakesh Ranjan (rranjan@chelsio.com) + */ + +#ifndef __LIBCXGBI_H__ +#define __LIBCXGBI_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum cxgbi_dbg_flag { + CXGBI_DBG_ISCSI, + CXGBI_DBG_DDP, + CXGBI_DBG_TOE, + CXGBI_DBG_SOCK, + + CXGBI_DBG_PDU_TX, + CXGBI_DBG_PDU_RX, + CXGBI_DBG_DEV, +}; + +#define log_debug(level, fmt, ...) \ + do { \ + if (dbg_level & (level)) \ + pr_info(fmt, ##__VA_ARGS__); \ + } while (0) + +/* max. connections per adapter */ +#define CXGBI_MAX_CONN 16384 + +/* always allocate rooms for AHS */ +#define SKB_TX_ISCSI_PDU_HEADER_MAX \ + (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) + +#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/ + +/* + * align pdu size to multiple of 512 for better performance + */ +#define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0) + +#define ULP2_MODE_ISCSI 2 + +#define ULP2_MAX_PKT_SIZE 16224 +#define ULP2_MAX_PDU_PAYLOAD \ + (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) + +/* + * For iscsi connections HW may inserts digest bytes into the pdu. Those digest + * bytes are not sent by the host but are part of the TCP payload and therefore + * consume TCP sequence space. 
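 *
 * The low two bits of the ULP submode select which digests the hardware
 * inserts: bit 0 is the 4-byte header digest and bit 1 the 4-byte data
 * digest, so ulp2_extra_len[] below maps submode 0/1/2/3 to 0/4/4/8
 * extra bytes.  For example, a connection with both HeaderDigest and
 * DataDigest enabled runs with submode 3, and cxgbi_ulp_extra_len(3)
 * == 8, i.e. eight bytes of TCP sequence space per PDU beyond what the
 * host actually queued.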
+ */ +static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 }; +static inline unsigned int cxgbi_ulp_extra_len(int submode) +{ + return ulp2_extra_len[submode & 3]; +} + +/* + * struct pagepod_hdr, pagepod - pagepod format + */ + +#define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ +#define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ +#define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ +#define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ + +struct cxgbi_pagepod_hdr { + u32 vld_tid; + u32 pgsz_tag_clr; + u32 max_offset; + u32 page_offset; + u64 rsvd; +}; + +#define PPOD_PAGES_MAX 4 +struct cxgbi_pagepod { + struct cxgbi_pagepod_hdr hdr; + u64 addr[PPOD_PAGES_MAX + 1]; +}; + +struct cxgbi_tag_format { + unsigned char sw_bits; + unsigned char rsvd_bits; + unsigned char rsvd_shift; + unsigned char filler[1]; + u32 rsvd_mask; +}; + +struct cxgbi_gather_list { + unsigned int tag; + unsigned int length; + unsigned int offset; + unsigned int nelem; + struct page **pages; + dma_addr_t phys_addr[0]; +}; + +struct cxgbi_ddp_info { + struct kref refcnt; + struct cxgbi_device *cdev; + struct pci_dev *pdev; + unsigned int max_txsz; + unsigned int max_rxsz; + unsigned int llimit; + unsigned int ulimit; + unsigned int nppods; + unsigned int idx_last; + unsigned char idx_bits; + unsigned char filler[3]; + unsigned int idx_mask; + unsigned int rsvd_tag_mask; + spinlock_t map_lock; + struct cxgbi_gather_list **gl_map; + struct sk_buff **gl_skb; +}; + +#define DDP_PGIDX_MAX 4 +#define DDP_THRESHOLD 2048 + +#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ + +#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ +#define PPOD_SIZE_SHIFT 6 + +#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ +#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ +#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ + +#define PPOD_COLOR_SHIFT 0 +#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) + +#define PPOD_IDX_SHIFT 6 +#define PPOD_IDX_MAX_SIZE 24 + +#define PPOD_TID_SHIFT 0 +#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) + +#define PPOD_TAG_SHIFT 6 +#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) + +#define PPOD_VALID_SHIFT 24 +#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) +#define PPOD_VALID_FLAG PPOD_VALID(1U) + +#define W_TCB_ULP_TYPE 0 +#define TCB_ULP_TYPE_SHIFT 0 +#define TCB_ULP_TYPE_MASK 0xfULL +#define TCB_ULP_TYPE(x) ((x) << TCB_ULP_TYPE_SHIFT) + +#define W_TCB_ULP_RAW 0 +#define TCB_ULP_RAW_SHIFT 4 +#define TCB_ULP_RAW_MASK 0xffULL +#define TCB_ULP_RAW(x) ((x) << TCB_ULP_RAW_SHIFT) + +/* + * sge_opaque_hdr - + * Opaque version of structure the SGE stores at skb->head of TX_DATA packets + * and for which we must reserve space. 
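 * On a typical 64-bit configuration with 4 KiB pages (where
 * MAX_SKB_FRAGS is 18) this works out to roughly 8 + 19 * 8 = 160
 * bytes.  The low-level drivers size the per-device skb_tx_rsvd
 * headroom to cover this structure plus the offload TX work-request
 * header, which is why cxgbi_conn_alloc_pdu() reserves
 * cdev->skb_tx_rsvd bytes before placing the iSCSI BHS.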
+ */ +struct sge_opaque_hdr { + void *dev; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; +}; + +struct cxgbi_sock { + struct cxgbi_device *cdev; + + int tid; + int atid; + unsigned long flags; + unsigned int mtu; + unsigned short rss_qid; + unsigned short txq_idx; + unsigned short advmss; + unsigned int tx_chan; + unsigned int rx_chan; + unsigned int mss_idx; + unsigned int smac_idx; + unsigned char port_id; + int wr_max_cred; + int wr_cred; + int wr_una_cred; + unsigned char hcrc_len; + unsigned char dcrc_len; + + void *l2t; + struct sk_buff *wr_pending_head; + struct sk_buff *wr_pending_tail; + struct sk_buff *cpl_close; + struct sk_buff *cpl_abort_req; + struct sk_buff *cpl_abort_rpl; + struct sk_buff *skb_ulp_lhdr; + spinlock_t lock; + struct kref refcnt; + unsigned int state; + struct sockaddr_in saddr; + struct sockaddr_in daddr; + struct dst_entry *dst; + struct sk_buff_head receive_queue; + struct sk_buff_head write_queue; + struct timer_list retry_timer; + int err; + rwlock_t callback_lock; + void *user_data; + + u32 rcv_nxt; + u32 copied_seq; + u32 rcv_wup; + u32 snd_nxt; + u32 snd_una; + u32 write_seq; +}; + +/* + * connection states + */ +enum cxgbi_sock_states{ + CTP_CLOSED, + CTP_CONNECTING, + CTP_ACTIVE_OPEN, + CTP_ESTABLISHED, + CTP_ACTIVE_CLOSE, + CTP_PASSIVE_CLOSE, + CTP_CLOSE_WAIT_1, + CTP_CLOSE_WAIT_2, + CTP_ABORTING, +}; + +/* + * Connection flags -- many to track some close related events. + */ +enum cxgbi_sock_flags { + CTPF_ABORT_RPL_RCVD, /*received one ABORT_RPL_RSS message */ + CTPF_ABORT_REQ_RCVD, /*received one ABORT_REQ_RSS message */ + CTPF_ABORT_RPL_PENDING, /* expecting an abort reply */ + CTPF_TX_DATA_SENT, /* already sent a TX_DATA WR */ + CTPF_ACTIVE_CLOSE_NEEDED,/* need to be closed */ + CTPF_HAS_ATID, /* reserved atid */ + CTPF_HAS_TID, /* reserved hw tid */ + CTPF_OFFLOAD_DOWN, /* offload function off */ +}; + +struct cxgbi_skb_rx_cb { + __u32 ddigest; + __u32 pdulen; +}; + +struct cxgbi_skb_tx_cb { + void *l2t; + struct sk_buff *wr_next; +}; + +enum cxgbi_skcb_flags { + SKCBF_TX_NEED_HDR, /* packet needs a header */ + SKCBF_RX_COALESCED, /* received whole pdu */ + SKCBF_RX_HDR, /* recieved pdu header */ + SKCBF_RX_DATA, /* recieved pdu payload */ + SKCBF_RX_STATUS, /* recieved ddp status */ + SKCBF_RX_DATA_DDPD, /* pdu payload ddp'd */ + SKCBF_RX_HCRC_ERR, /* header digest error */ + SKCBF_RX_DCRC_ERR, /* data digest error */ + SKCBF_RX_PAD_ERR, /* padding byte error */ +}; + +struct cxgbi_skb_cb { + unsigned char ulp_mode; + unsigned long flags; + unsigned int seq; + union { + struct cxgbi_skb_rx_cb rx; + struct cxgbi_skb_tx_cb tx; + }; +}; + +#define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) +#define cxgbi_skcb_flags(skb) (CXGBI_SKB_CB(skb)->flags) +#define cxgbi_skcb_ulp_mode(skb) (CXGBI_SKB_CB(skb)->ulp_mode) +#define cxgbi_skcb_tcp_seq(skb) (CXGBI_SKB_CB(skb)->seq) +#define cxgbi_skcb_rx_ddigest(skb) (CXGBI_SKB_CB(skb)->rx.ddigest) +#define cxgbi_skcb_rx_pdulen(skb) (CXGBI_SKB_CB(skb)->rx.pdulen) +#define cxgbi_skcb_tx_wr_next(skb) (CXGBI_SKB_CB(skb)->tx.wr_next) + +static inline void cxgbi_skcb_set_flag(struct sk_buff *skb, + enum cxgbi_skcb_flags flag) +{ + __set_bit(flag, &(cxgbi_skcb_flags(skb))); +} + +static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb, + enum cxgbi_skcb_flags flag) +{ + __clear_bit(flag, &(cxgbi_skcb_flags(skb))); +} + +static inline int cxgbi_skcb_test_flag(struct sk_buff *skb, + enum cxgbi_skcb_flags flag) +{ + return test_bit(flag, &(cxgbi_skcb_flags(skb))); +} + +static inline void 
cxgbi_sock_set_flag(struct cxgbi_sock *csk, + enum cxgbi_sock_flags flag) +{ + __set_bit(flag, &csk->flags); + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, bit %d.\n", + csk, csk->state, csk->flags, flag); +} + +static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk, + enum cxgbi_sock_flags flag) +{ + __clear_bit(flag, &csk->flags); + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, bit %d.\n", + csk, csk->state, csk->flags, flag); +} + +static inline int cxgbi_sock_flag(struct cxgbi_sock *csk, + enum cxgbi_sock_flags flag) +{ + if (csk == NULL) + return 0; + return test_bit(flag, &csk->flags); +} + +static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, state -> %u.\n", + csk, csk->state, csk->flags, state); + csk->state = state; +} + +static inline void cxgbi_sock_free(struct kref *kref) +{ + struct cxgbi_sock *csk = container_of(kref, + struct cxgbi_sock, + refcnt); + if (csk) { + log_debug(1 << CXGBI_DBG_SOCK, + "free csk 0x%p, state %u, flags 0x%lx\n", + csk, csk->state, csk->flags); + kfree(csk); + } +} + +static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "%s, put csk 0x%p, ref %u-1.\n", + fn, csk, atomic_read(&csk->refcnt.refcount)); + kref_put(&csk->refcnt, cxgbi_sock_free); +} +#define cxgbi_sock_put(csk) __cxgbi_sock_put(__func__, csk) + +static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk) +{ + log_debug(1 << CXGBI_DBG_SOCK, + "%s, get csk 0x%p, ref %u+1.\n", + fn, csk, atomic_read(&csk->refcnt.refcount)); + kref_get(&csk->refcnt); +} +#define cxgbi_sock_get(csk) __cxgbi_sock_get(__func__, csk) + +static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk) +{ + return csk->state >= CTP_ACTIVE_CLOSE; +} + +static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk) +{ + return csk->state == CTP_ESTABLISHED; +} + +static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&csk->write_queue))) + __kfree_skb(skb); +} + +static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win) +{ + unsigned int wscale = 0; + + while (wscale < 14 && (65535 << wscale) < win) + wscale++; + return wscale; +} + +static inline struct sk_buff *alloc_cpl(int cpl_len, int dlen, gfp_t gfp) +{ + int wrlen = roundup(cpl_len, 16); + struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp); + + if (skb) { + __skb_put(skb, wrlen); + memset(skb->head, 0, wrlen + dlen); + } else + pr_info("alloc cpl skb %u+%u, OOM.\n", cpl_len, dlen); + return skb; +} + + +/* + * The number of WRs needed for an skb depends on the number of fragments + * in the skb and whether it has any payload in its main body. This maps the + * length of the gather list represented by an skb into the # of necessary WRs. + * The extra two fragments are for iscsi bhs and payload padding. + */ +#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) + +static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk) +{ + csk->wr_pending_head = csk->wr_pending_tail = NULL; +} + +static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk, + struct sk_buff *skb) +{ + cxgbi_skcb_tx_wr_next(skb) = NULL; + /* + * We want to take an extra reference since both us and the driver + * need to free the packet before it's really freed. We know there's + * just one user currently so we use atomic_set rather than skb_get + * to avoid the atomic op. 
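 * One reference is dropped by the normal skb free path once the
 * hardware has consumed the work request; the other is dropped when
 * the skb leaves the wr_pending list, either on a WR ACK
 * (cxgbi_sock_rcv_wr_ack()) or when the queue is purged on close
 * (cxgbi_sock_purge_wr_queue()).  While it sits on the list the skb's
 * csum field is reused to hold the number of WR credits the request
 * consumes, which is what cxgbi_sock_count_pending_wrs() below adds up.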
+ */ + atomic_set(&skb->users, 2); + + if (!csk->wr_pending_head) + csk->wr_pending_head = skb; + else + cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb; + csk->wr_pending_tail = skb; +} + +static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk) +{ + int n = 0; + const struct sk_buff *skb = csk->wr_pending_head; + + while (skb) { + n += skb->csum; + skb = cxgbi_skcb_tx_wr_next(skb); + } + return n; +} + +static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk) +{ + return csk->wr_pending_head; +} + +static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->wr_pending_head; + + if (likely(skb)) { + csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb); + cxgbi_skcb_tx_wr_next(skb) = NULL; + } + return skb; +} + +void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *); +void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *); +void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *); +void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int); +void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *); +void cxgbi_sock_closed(struct cxgbi_sock *); +void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int); +void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *); +void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *); +void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32); +void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int, + int); +unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int); +void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *); + +struct cxgbi_hba { + struct net_device *ndev; + struct Scsi_Host *shost; + struct cxgbi_device *cdev; + __be32 ipv4addr; + unsigned char port_id; +}; + +struct cxgbi_ports_map { + unsigned int max_connect; + unsigned int used; + unsigned short sport_base; + spinlock_t lock; + unsigned int next; + struct cxgbi_sock **port_csk; +}; + +#define CXGBI_FLAG_DEV_T3 0x1 +#define CXGBI_FLAG_DEV_T4 0x2 +#define CXGBI_FLAG_ADAPTER_RESET 0x4 +#define CXGBI_FLAG_IPV4_SET 0x10 +struct cxgbi_device { + struct list_head list_head; + unsigned int flags; + struct net_device **ports; + void *lldev; + struct cxgbi_hba **hbas; + const unsigned short *mtus; + unsigned char nmtus; + unsigned char nports; + struct pci_dev *pdev; + struct dentry *debugfs_root; + struct iscsi_transport *itp; + + unsigned int pfvf; + unsigned int snd_win; + unsigned int rcv_win; + unsigned int rx_credit_thres; + unsigned int skb_tx_rsvd; + unsigned int skb_rx_extra; /* for msg coalesced mode */ + unsigned int tx_max_size; + unsigned int rx_max_size; + struct cxgbi_ports_map pmap; + struct cxgbi_tag_format tag_format; + struct cxgbi_ddp_info *ddp; + + void (*dev_ddp_cleanup)(struct cxgbi_device *); + void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int); + int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t); + int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *, + unsigned int, unsigned int, + struct cxgbi_gather_list *); + void (*csk_ddp_clear)(struct cxgbi_hba *, + unsigned int, unsigned int, unsigned int); + int (*csk_ddp_setup_digest)(struct cxgbi_sock *, + unsigned int, int, int, int); + int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, + unsigned int, int, bool); + + void (*csk_release_offload_resources)(struct cxgbi_sock *); + int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); + u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32); + int (*csk_push_tx_frames)(struct 
cxgbi_sock *, int); + void (*csk_send_abort_req)(struct cxgbi_sock *); + void (*csk_send_close_req)(struct cxgbi_sock *); + int (*csk_alloc_cpls)(struct cxgbi_sock *); + int (*csk_init_act_open)(struct cxgbi_sock *); + + void *dd_data; +}; +#define cxgbi_cdev_priv(cdev) ((cdev)->dd_data) + +struct cxgbi_conn { + struct cxgbi_endpoint *cep; + struct iscsi_conn *iconn; + struct cxgbi_hba *chba; + u32 task_idx_bits; +}; + +struct cxgbi_endpoint { + struct cxgbi_conn *cconn; + struct cxgbi_hba *chba; + struct cxgbi_sock *csk; +}; + +#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512) +struct cxgbi_task_data { + unsigned short nr_frags; + skb_frag_t frags[MAX_PDU_FRAGS]; + struct sk_buff *skb; + unsigned int offset; + unsigned int count; + unsigned int sgoffset; +}; + +static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) +{ + return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); +} + +static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat, + u32 sw_tag) +{ + sw_tag >>= (32 - tformat->rsvd_bits); + return !sw_tag; +} + +static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat, + u32 sw_tag) +{ + unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; + u32 mask = (1 << shift) - 1; + + if (sw_tag && (sw_tag & ~mask)) { + u32 v1 = sw_tag & ((1 << shift) - 1); + u32 v2 = (sw_tag >> (shift - 1)) << shift; + + return v2 | v1 | 1 << shift; + } + + return sw_tag | 1 << shift; +} + +static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat, + u32 sw_tag) +{ + u32 mask = (1 << tformat->rsvd_shift) - 1; + + if (sw_tag && (sw_tag & ~mask)) { + u32 v1 = sw_tag & mask; + u32 v2 = sw_tag >> tformat->rsvd_shift; + + v2 <<= tformat->rsvd_bits + tformat->rsvd_shift; + + return v2 | v1; + } + + return sw_tag; +} + +static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat, + u32 tag) +{ + if (cxgbi_is_ddp_tag(tformat, tag)) + return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; + + return 0; +} + +static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat, + u32 tag) +{ + unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; + u32 v1, v2; + + if (cxgbi_is_ddp_tag(tformat, tag)) { + v1 = tag & ((1 << tformat->rsvd_shift) - 1); + v2 = (tag >> (shift + 1)) << tformat->rsvd_shift; + } else { + u32 mask = (1 << shift) - 1; + tag &= ~(1 << shift); + v1 = tag & mask; + v2 = (tag >> 1) & ~mask; + } + return v1 | v2; +} + +static inline void *cxgbi_alloc_big_mem(unsigned int size, + gfp_t gfp) +{ + void *p = kmalloc(size, gfp); + if (!p) + p = vmalloc(size); + if (p) + memset(p, 0, size); + return p; +} + +static inline void cxgbi_free_big_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr) +{ + if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET) + chba->ipv4addr = ipaddr; + else + pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n", + chba->ndev->name); +} + +static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba) +{ + return chba->ipv4addr; +} + +struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); +void cxgbi_device_unregister(struct cxgbi_device *); +void cxgbi_device_unregister_all(unsigned int flag); +struct cxgbi_device *cxgbi_device_find_by_lldev(void *); +int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int, + struct scsi_host_template *, + struct scsi_transport_template *); +void 
cxgbi_hbas_remove(struct cxgbi_device *); + +int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, + unsigned int max_conn); +void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev); + +void cxgbi_conn_tx_open(struct cxgbi_sock *); +void cxgbi_conn_pdu_ready(struct cxgbi_sock *); +int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8); +int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int); +int cxgbi_conn_xmit_pdu(struct iscsi_task *); + +void cxgbi_cleanup_task(struct iscsi_task *task); + +void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); +int cxgbi_set_conn_param(struct iscsi_cls_conn *, + enum iscsi_param, char *, int); +int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *); +struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32); +int cxgbi_bind_conn(struct iscsi_cls_session *, + struct iscsi_cls_conn *, u64, int); +void cxgbi_destroy_session(struct iscsi_cls_session *); +struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *, + u16, u16, u32); +int cxgbi_set_host_param(struct Scsi_Host *, + enum iscsi_host_param, char *, int); +int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *); +struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *, + struct sockaddr *, int); +int cxgbi_ep_poll(struct iscsi_endpoint *, int); +void cxgbi_ep_disconnect(struct iscsi_endpoint *); + +int cxgbi_iscsi_init(struct iscsi_transport *, + struct scsi_transport_template **); +void cxgbi_iscsi_cleanup(struct iscsi_transport *, + struct scsi_transport_template **); +void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *); +int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, + unsigned int, unsigned int); +int cxgbi_ddp_cleanup(struct cxgbi_device *); +void cxgbi_ddp_page_size_factor(int *); +void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *); +void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *, + struct cxgbi_gather_list *, unsigned int); +#endif /*__LIBCXGBI_H__*/ -- cgit v1.2.3 From 7b36b6e03b0d6cee0948593a6a11841a457695b9 Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Mon, 16 Aug 2010 20:55:53 -0700 Subject: [SCSI] cxgb4i v5: iscsi driver Added cxgb4i iSCSI driver. This patch implements the cxgb4i iscsi connection acceleration for the open-iscsi initiator. The cxgb4i driver offers the iscsi PDU based offload: - digest insertion and verification - payload direct-placement into host memory buffer. Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 1 + drivers/scsi/Makefile | 1 + drivers/scsi/cxgbi/Kconfig | 1 + drivers/scsi/cxgbi/Makefile | 1 + drivers/scsi/cxgbi/cxgb4i/Kbuild | 3 + drivers/scsi/cxgbi/cxgb4i/Kconfig | 7 + drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 1600 ++++++++++++++++++++++++++++++++++++ drivers/scsi/cxgbi/cxgb4i/cxgb4i.h | 38 + 8 files changed, 1652 insertions(+) create mode 100644 drivers/scsi/cxgbi/Kconfig create mode 100644 drivers/scsi/cxgbi/Makefile create mode 100644 drivers/scsi/cxgbi/cxgb4i/Kbuild create mode 100644 drivers/scsi/cxgbi/cxgb4i/Kconfig create mode 100644 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c create mode 100644 drivers/scsi/cxgbi/cxgb4i/cxgb4i.h (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index bbf91aec64f5..bee3aef230dc 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -379,6 +379,7 @@ config ISCSI_BOOT_SYSFS say Y. Otherwise, say N. 
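With the Kconfig and Makefile hooks below, the new driver builds like the
existing cxgb3i offload module.  Assuming the dependencies named in the
cxgb4i Kconfig entry later in this patch are available, a minimal
configuration fragment is simply:

    CONFIG_SCSI_CXGB4_ISCSI=m

which selects CHELSIO_T4 and SCSI_ISCSI_ATTRS; loading cxgb4i then pulls
in cxgb4, libcxgbi and the libiscsi/libiscsi_tcp core through normal
module symbol dependencies.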
source "drivers/scsi/cxgb3i/Kconfig" +source "drivers/scsi/cxgbi/Kconfig" source "drivers/scsi/bnx2i/Kconfig" source "drivers/scsi/be2iscsi/Kconfig" diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 2703c6ec5e36..cb31f8cf09d4 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -134,6 +134,7 @@ obj-$(CONFIG_SCSI_STEX) += stex.o obj-$(CONFIG_SCSI_MVSAS) += mvsas/ obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ +obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig new file mode 100644 index 000000000000..a470e389f6ba --- /dev/null +++ b/drivers/scsi/cxgbi/Kconfig @@ -0,0 +1 @@ +source "drivers/scsi/cxgbi/cxgb4i/Kconfig" diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile new file mode 100644 index 000000000000..9e8f604888dc --- /dev/null +++ b/drivers/scsi/cxgbi/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild new file mode 100644 index 000000000000..b9f4af7454b7 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild @@ -0,0 +1,3 @@ +EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb4 + +obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig new file mode 100644 index 000000000000..bb94b39b17b3 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -0,0 +1,7 @@ +config SCSI_CXGB4_ISCSI + tristate "Chelsio T4 iSCSI support" + depends on CHELSIO_T4_DEPENDS + select CHELSIO_T4 + select SCSI_ISCSI_ATTRS + ---help--- + This driver supports iSCSI offload for the Chelsio T4 devices. diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c new file mode 100644 index 000000000000..b375a683a6b0 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -0,0 +1,1600 @@ +/* + * cxgb4i.c: Chelsio T4 iSCSI driver. + * + * Copyright (c) 2010 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Karen Xie (kxie@chelsio.com) + * Rakesh Ranjan (rranjan@chelsio.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include + +#include "t4_msg.h" +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "t4fw_api.h" +#include "l2t.h" +#include "cxgb4i.h" + +static unsigned int dbg_level; + +#include "../libcxgbi.h" + +#define DRV_MODULE_NAME "cxgb4i" +#define DRV_MODULE_DESC "Chelsio T4 iSCSI Driver" +#define DRV_MODULE_VERSION "0.9.0" +#define DRV_MODULE_RELDATE "May 2010" + +static char version[] = + DRV_MODULE_DESC " " DRV_MODULE_NAME + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Chelsio Communications, Inc."); +MODULE_DESCRIPTION(DRV_MODULE_DESC); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_LICENSE("GPL"); + +module_param(dbg_level, uint, 0644); +MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)"); + +static int cxgb4i_rcv_win = 256 * 1024; +module_param(cxgb4i_rcv_win, int, 0644); +MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP reveive window in bytes"); + +static int cxgb4i_snd_win = 128 * 1024; +module_param(cxgb4i_snd_win, int, 0644); +MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes"); + +static int cxgb4i_rx_credit_thres = 10 * 1024; +module_param(cxgb4i_rx_credit_thres, int, 0644); +MODULE_PARM_DESC(cxgb4i_rx_credit_thres, + "RX credits return threshold in bytes (default=10KB)"); + +static unsigned int cxgb4i_max_connect = (8 * 1024); +module_param(cxgb4i_max_connect, uint, 0644); +MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections"); + +static unsigned short cxgb4i_sport_base = 20000; +module_param(cxgb4i_sport_base, ushort, 0644); +MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)"); + +typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *); + +static void *t4_uld_add(const struct cxgb4_lld_info *); +static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *); +static int t4_uld_state_change(void *, enum cxgb4_state state); + +static const struct cxgb4_uld_info cxgb4i_uld_info = { + .name = DRV_MODULE_NAME, + .add = t4_uld_add, + .rx_handler = t4_uld_rx_handler, + .state_change = t4_uld_state_change, +}; + +static struct scsi_host_template cxgb4i_host_template = { + .module = THIS_MODULE, + .name = DRV_MODULE_NAME, + .proc_name = DRV_MODULE_NAME, + .can_queue = CXGB4I_SCSI_HOST_QDEPTH, + .queuecommand = iscsi_queuecommand, + .change_queue_depth = iscsi_change_queue_depth, + .sg_tablesize = SG_ALL, + .max_sectors = 0xFFFF, + .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .use_clustering = DISABLE_CLUSTERING, + .this_id = -1, +}; + +static struct iscsi_transport cxgb4i_iscsi_transport = { + .owner = THIS_MODULE, + .name = DRV_MODULE_NAME, + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | + CAP_DATADGST | CAP_DIGEST_OFFLOAD | + CAP_PADDING_OFFLOAD, + .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | + ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | + ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | + ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | + ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | + ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | + ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | + ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | + ISCSI_PERSISTENT_ADDRESS | + ISCSI_TARGET_NAME | ISCSI_TPGT | + ISCSI_USERNAME | 
ISCSI_PASSWORD | + ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | + ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | + ISCSI_PING_TMO | ISCSI_RECV_TMO | + ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, + .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | + ISCSI_HOST_INITIATOR_NAME | + ISCSI_HOST_NETDEV_NAME, + .get_host_param = cxgbi_get_host_param, + .set_host_param = cxgbi_set_host_param, + /* session management */ + .create_session = cxgbi_create_session, + .destroy_session = cxgbi_destroy_session, + .get_session_param = iscsi_session_get_param, + /* connection management */ + .create_conn = cxgbi_create_conn, + .bind_conn = cxgbi_bind_conn, + .destroy_conn = iscsi_tcp_conn_teardown, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, + .get_conn_param = cxgbi_get_conn_param, + .set_param = cxgbi_set_conn_param, + .get_stats = cxgbi_get_conn_stats, + /* pdu xmit req from user space */ + .send_pdu = iscsi_conn_send_pdu, + /* task */ + .init_task = iscsi_tcp_task_init, + .xmit_task = iscsi_tcp_task_xmit, + .cleanup_task = cxgbi_cleanup_task, + /* pdu */ + .alloc_pdu = cxgbi_conn_alloc_pdu, + .init_pdu = cxgbi_conn_init_pdu, + .xmit_pdu = cxgbi_conn_xmit_pdu, + .parse_pdu_itt = cxgbi_parse_pdu_itt, + /* TCP connect/disconnect */ + .ep_connect = cxgbi_ep_connect, + .ep_poll = cxgbi_ep_poll, + .ep_disconnect = cxgbi_ep_disconnect, + /* Error recovery timeout call */ + .session_recovery_timedout = iscsi_session_recovery_timedout, +}; + +static struct scsi_transport_template *cxgb4i_stt; + +/* + * CPL (Chelsio Protocol Language) defines a message passing interface between + * the host driver and Chelsio asic. + * The section below implments CPLs that related to iscsi tcp connection + * open/close/abort and data send/receive. + */ +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define RCV_BUFSIZ_MASK 0x3FFU +#define MAX_IMM_TX_PKT_LEN 128 + +static inline void set_queue(struct sk_buff *skb, unsigned int queue, + const struct cxgbi_sock *csk) +{ + skb->queue_mapping = queue; +} + +static int push_tx_frames(struct cxgbi_sock *, int); + +/* + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. 
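 * In practice anything that fits in MAX_IMM_TX_PKT_LEN (128 bytes)
 * minus the fw_ofld_tx_data_wr header is copied straight into the work
 * request, so a PDU that is just a 48-byte BHS qualifies, while
 * full-sized data PDUs are handed to the SGE as a gather list by
 * make_tx_data_wr()/push_tx_frames().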
+ */ +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return skb->len <= (MAX_IMM_TX_PKT_LEN - + sizeof(struct fw_ofld_tx_data_wr)); +} + +static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct cpl_act_open_req *req; + int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); + unsigned long long opt0; + unsigned int opt2; + unsigned int qid_atid = ((unsigned int)csk->atid) | + (((unsigned int)csk->rss_qid) << 14); + + opt0 = KEEP_ALIVE(1) | + WND_SCALE(wscale) | + MSS_IDX(csk->mss_idx) | + L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | + TX_CHAN(csk->tx_chan) | + SMAC_SEL(csk->smac_idx) | + ULP_MODE(ULP_MODE_ISCSI) | + RCV_BUFSIZ(cxgb4i_rcv_win >> 10); + opt2 = RX_CHANNEL(0) | + RSS_QUEUE_VALID | + (1 << 20) | (1 << 22) | + RSS_QUEUE(csk->rss_qid); + + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); + req = (struct cpl_act_open_req *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = 0; + req->opt2 = cpu_to_be32(opt2); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); + + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); +} + +static void send_close_req(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->cpl_close; + struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; + unsigned int tid = csk->tid; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, tid %u.\n", + csk, csk->state, csk->flags, csk->tid); + csk->cpl_close = NULL; + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); + req->rsvd = 0; + + cxgbi_sock_skb_entail(csk, skb); + if (csk->state >= CTP_ESTABLISHED) + push_tx_frames(csk, 1); +} + +static void abort_arp_failure(void *handle, struct sk_buff *skb) +{ + struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; + struct cpl_abort_req *req; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, tid %u, abort.\n", + csk, csk->state, csk->flags, csk->tid); + req = (struct cpl_abort_req *)skb->data; + req->cmd = CPL_ABORT_NO_RST; + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); +} + +static void send_abort_req(struct cxgbi_sock *csk) +{ + struct cpl_abort_req *req; + struct sk_buff *skb = csk->cpl_abort_req; + + if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) + return; + cxgbi_sock_set_state(csk, CTP_ABORTING); + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); + cxgbi_sock_purge_write_queue(csk); + + csk->cpl_abort_req = NULL; + req = (struct cpl_abort_req *)skb->head; + set_queue(skb, CPL_PRIORITY_DATA, csk); + req->cmd = CPL_ABORT_SEND_RST; + t4_set_arp_err_handler(skb, csk, abort_arp_failure); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); + req->rsvd0 = htonl(csk->snd_nxt); + req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, + req->rsvd1); + + 
cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); +} + +static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) +{ + struct sk_buff *skb = csk->cpl_abort_rpl; + struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, status %d.\n", + csk, csk->state, csk->flags, csk->tid, rst_status); + + csk->cpl_abort_rpl = NULL; + set_queue(skb, CPL_PRIORITY_DATA, csk); + INIT_TP_WR(rpl, csk->tid); + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); + rpl->cmd = rst_status; + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); +} + +/* + * CPL connection rx data ack: host -> + * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of + * credits sent. + */ +static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) +{ + struct sk_buff *skb; + struct cpl_rx_data_ack *req; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, credit %u.\n", + csk, csk->state, csk->flags, csk->tid, credits); + + skb = alloc_cpl(sizeof(*req), 0, GFP_ATOMIC); + if (!skb) { + pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); + return 0; + } + req = (struct cpl_rx_data_ack *)skb->head; + + set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, + csk->tid)); + req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1)); + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + return credits; +} + +/* + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/* + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. 
+ */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + flits = skb_transport_offset(skb) / 8; + cnt = skb_shinfo(skb)->nr_frags; + if (skb->tail != skb->transport_header) + cnt++; + return flits + sgl_len(cnt); +} + +static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) +{ + struct sk_buff *skb; + struct fw_flowc_wr *flowc; + int flowclen, i; + + flowclen = 80; + skb = alloc_cpl(flowclen, 0, GFP_ATOMIC); + flowc = (struct fw_flowc_wr *)skb->head; + flowc->op_to_nparams = + htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); + flowc->flowid_len16 = + htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) | + FW_WR_FLOWID(csk->tid)); + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; + flowc->mnemval[0].val = htonl(0); + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; + flowc->mnemval[1].val = htonl(csk->tx_chan); + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; + flowc->mnemval[2].val = htonl(csk->tx_chan); + flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; + flowc->mnemval[3].val = htonl(csk->rss_qid); + flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; + flowc->mnemval[4].val = htonl(csk->snd_nxt); + flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; + flowc->mnemval[5].val = htonl(csk->rcv_nxt); + flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; + flowc->mnemval[6].val = htonl(cxgb4i_snd_win); + flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; + flowc->mnemval[7].val = htonl(csk->advmss); + flowc->mnemval[8].mnemonic = 0; + flowc->mnemval[8].val = 0; + for (i = 0; i < 9; i++) { + flowc->mnemval[i].r4[0] = 0; + flowc->mnemval[i].r4[1] = 0; + flowc->mnemval[i].r4[2] = 0; + } + set_queue(skb, CPL_PRIORITY_DATA, csk); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n", + csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, + csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win, + csk->advmss); + + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); +} + +static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, + int dlen, int len, u32 credits, int compl) +{ + struct fw_ofld_tx_data_wr *req; + unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3; + unsigned int wr_ulp_mode = 0; + + req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); + + if (is_ofld_imm(skb)) { + req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL(1) | + FW_WR_IMMDLEN(dlen)); + req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) | + FW_WR_LEN16(credits)); + } else { + req->op_to_immdlen = + cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL(1) | + FW_WR_IMMDLEN(0)); + req->flowid_len16 = + cpu_to_be32(FW_WR_FLOWID(csk->tid) | + FW_WR_LEN16(credits)); + } + if (submode) + wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) | + FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode); + req->tunnel_to_proxy = htonl(wr_ulp_mode) | + FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 
0 : 1); + req->plen = htonl(len); + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); +} + +static void arp_failure_skb_discard(void *handle, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) +{ + int total_size = 0; + struct sk_buff *skb; + + if (unlikely(csk->state < CTP_ESTABLISHED || + csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK | + 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, in closing state.\n", + csk, csk->state, csk->flags, csk->tid); + return 0; + } + + while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { + int dlen = skb->len; + int len = skb->len; + unsigned int credits_needed; + + skb_reset_transport_header(skb); + if (is_ofld_imm(skb)) + credits_needed = DIV_ROUND_UP(dlen + + sizeof(struct fw_ofld_tx_data_wr), 16); + else + credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb) + + sizeof(struct fw_ofld_tx_data_wr), + 16); + + if (csk->wr_cred < credits_needed) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, skb %u/%u, wr %d < %u.\n", + csk, skb->len, skb->data_len, + credits_needed, csk->wr_cred); + break; + } + __skb_unlink(skb, &csk->write_queue); + set_queue(skb, CPL_PRIORITY_DATA, csk); + skb->csum = credits_needed; + csk->wr_cred -= credits_needed; + csk->wr_una_cred += credits_needed; + cxgbi_sock_enqueue_wr(csk, skb); + + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n", + csk, skb->len, skb->data_len, credits_needed, + csk->wr_cred, csk->wr_una_cred); + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + send_tx_flowc_wr(csk); + skb->csum += 5; + csk->wr_cred -= 5; + csk->wr_una_cred += 5; + } + len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); + make_tx_data_wr(csk, skb, dlen, len, credits_needed, + req_completion); + csk->snd_nxt += len; + cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); + } + total_size += skb->truesize; + t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n", + csk, csk->state, csk->flags, csk->tid, skb, len); + + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); + } + return total_size; +} + +static inline void free_atid(struct cxgbi_sock *csk) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { + cxgb4_free_atid(lldi->tids, csk->atid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_put(csk); + } +} + +static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data; + unsigned short tcp_opt = ntohs(req->tcp_opt); + unsigned int tid = GET_TID(req); + unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + u32 rcv_isn = be32_to_cpu(req->rcv_isn); + + csk = lookup_atid(t, atid); + if (unlikely(!csk)) { + pr_err("NO conn. 
for atid %u, cdev 0x%p.\n", atid, cdev); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n", + csk, csk->state, csk->flags, tid, atid, rcv_isn); + + cxgbi_sock_get(csk); + csk->tid = tid; + cxgb4_insert_tid(lldi->tids, csk, tid); + cxgbi_sock_set_flag(csk, CTPF_HAS_TID); + + free_atid(csk); + + spin_lock_bh(&csk->lock); + if (unlikely(csk->state != CTP_ACTIVE_OPEN)) + pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", + csk, csk->state, csk->flags, csk->tid); + + if (csk->retry_timer.function) { + del_timer(&csk->retry_timer); + csk->retry_timer.function = NULL; + } + + csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; + /* + * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't + * pass through opt0. + */ + if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10)) + csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10); + + csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40; + if (GET_TCPOPT_TSTAMP(tcp_opt)) + csk->advmss -= 12; + if (csk->advmss < 128) + csk->advmss = 128; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, mss_idx %u, advmss %u.\n", + csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss); + + cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); + + if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) + send_abort_req(csk); + else { + if (skb_queue_len(&csk->write_queue)) + push_tx_frames(csk, 0); + cxgbi_conn_tx_open(csk); + } + spin_unlock_bh(&csk->lock); + +rel_skb: + __kfree_skb(skb); +} + +static int act_open_rpl_status_to_errno(int status) +{ + switch (status) { + case CPL_ERR_CONN_RESET: + return -ECONNREFUSED; + case CPL_ERR_ARP_MISS: + return -EHOSTUNREACH; + case CPL_ERR_CONN_TIMEDOUT: + return -ETIMEDOUT; + case CPL_ERR_TCAM_FULL: + return -ENOMEM; + case CPL_ERR_CONN_EXIST: + return -EADDRINUSE; + default: + return -EIO; + } +} + +static void csk_act_open_retry_timer(unsigned long data) +{ + struct sk_buff *skb; + struct cxgbi_sock *csk = (struct cxgbi_sock *)data; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); + if (!skb) + cxgbi_sock_fail_act_open(csk, -ENOMEM); + else { + skb->sk = (struct sock *)csk; + t4_set_arp_err_handler(skb, csk, + cxgbi_sock_act_open_req_arp_failure); + send_act_open_req(csk, skb, csk->l2t); + } + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} + +static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + unsigned int atid = + GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status))); + unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status)); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_atid(t, atid); + if (unlikely(!csk)) { + pr_err("NO matching conn. 
atid %u, tid %u.\n", atid, tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, status %u, atid %u, tid %u.\n", + csk, csk->state, csk->flags, status, atid, tid); + + if (status && status != CPL_ERR_TCAM_FULL && + status != CPL_ERR_CONN_EXIST && + status != CPL_ERR_ARP_MISS) + cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (status == CPL_ERR_CONN_EXIST && + csk->retry_timer.function != csk_act_open_retry_timer) { + csk->retry_timer.function = csk_act_open_retry_timer; + mod_timer(&csk->retry_timer, jiffies + HZ / 2); + } else + cxgbi_sock_fail_act_open(csk, + act_open_rpl_status_to_errno(status)); + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data; + unsigned int tid = GET_TID(req); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_rcv_peer_close(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); +rel_skb: + __kfree_skb(skb); +} + +static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, + int *need_rst) +{ + switch (abort_reason) { + case CPL_ERR_BAD_SYN: /* fall through */ + case CPL_ERR_CONN_RESET: + return csk->state > CTP_ESTABLISHED ? 
+ -EPIPE : -ECONNRESET; + case CPL_ERR_XMIT_TIMEDOUT: + case CPL_ERR_PERSIST_TIMEDOUT: + case CPL_ERR_FINWAIT2_TIMEDOUT: + case CPL_ERR_KEEPALIVE_TIMEDOUT: + return -ETIMEDOUT; + default: + return -EIO; + } +} + +static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data; + unsigned int tid = GET_TID(req); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + int rst_status = CPL_ABORT_NO_RST; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, req->status); + + if (req->status == CPL_ERR_RTX_NEG_ADVICE || + req->status == CPL_ERR_PERSIST_NEG_ADVICE) + goto rel_skb; + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { + cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); + cxgbi_sock_set_state(csk, CTP_ABORTING); + goto done; + } + + cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); + send_abort_rpl(csk, rst_status); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { + csk->err = abort_status_to_errno(csk, req->status, &rst_status); + cxgbi_sock_closed(csk); + } +done: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (!csk) + goto rel_skb; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", + rpl->status, csk, csk ? csk->state : 0, + csk ? csk->flags : 0UL); + + if (rpl->status == CPL_ERR_ABORT_FAILED) + goto rel_skb; + + cxgbi_sock_rcv_abort_rpl(csk); +rel_skb: + __kfree_skb(skb); +} + +static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data; + unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp); + unsigned int tid = GET_TID(cpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct sk_buff *lskb; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find conn. 
for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, skb, skb->len, + pdu_len_ddp); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(*cpl)); + __pskb_trim(skb, ntohs(cpl->len)); + + if (!csk->skb_ulp_lhdr) { + unsigned char *bhs; + unsigned int hlen, dlen; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", + csk, csk->state, csk->flags, csk->tid, skb); + csk->skb_ulp_lhdr = skb; + lskb = csk->skb_ulp_lhdr; + cxgbi_skcb_set_flag(lskb, SKCBF_RX_HDR); + + if (cxgbi_skcb_tcp_seq(lskb) != csk->rcv_nxt) { + pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", + csk->tid, cxgbi_skcb_tcp_seq(lskb), + csk->rcv_nxt); + goto abort_conn; + } + + bhs = lskb->data; + hlen = ntohs(cpl->len); + dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; + + if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) { + pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " + "mismatch %u != %u + %u, seq 0x%x.\n", + csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40, + hlen, dlen, cxgbi_skcb_tcp_seq(skb)); + goto abort_conn; + } + + cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3); + if (dlen) + cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; + csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n", + csk, skb, *bhs, hlen, dlen, + ntohl(*((unsigned int *)(bhs + 16))), + ntohl(*((unsigned int *)(bhs + 24)))); + + } else { + lskb = csk->skb_ulp_lhdr; + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", + csk, csk->state, csk->flags, skb, lskb); + } + + __skb_queue_tail(&csk->receive_queue, skb); + spin_unlock_bh(&csk->lock); + return; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void do_rx_data_ddp(struct cxgbi_device *cdev, + struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct sk_buff *lskb; + struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + unsigned int status = ntohl(rpl->ddpvld); + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) { + pr_err("can't find connection for tid %u.\n", tid); + goto rel_skb; + } + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n", + csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + if (!csk->skb_ulp_lhdr) { + pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); + goto abort_conn; + } + + lskb = csk->skb_ulp_lhdr; + csk->skb_ulp_lhdr = NULL; + + cxgbi_skcb_set_flag(lskb, 
SKCBF_RX_STATUS); + cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); + + if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) + pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n", + csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); + + if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad.\n", + csk, lskb, status); + cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR); + } + if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad.\n", + csk, lskb, status); + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR); + } + if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n", + csk, lskb, status); + cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR); + } + if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) && + !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) { + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n", + csk, lskb, status); + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD); + } + log_debug(1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, lskb 0x%p, f 0x%lx.\n", + csk, lskb, cxgbi_skcb_flags(lskb)); + + cxgbi_conn_pdu_ready(csk); + spin_unlock_bh(&csk->lock); + goto rel_skb; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); +rel_skb: + __kfree_skb(skb); +} + +static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cxgbi_sock *csk; + struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + + csk = lookup_tid(t, tid); + if (unlikely(!csk)) + pr_err("can't find connection for tid %u.\n", tid); + else { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), + rpl->seq_vld); + } + __kfree_skb(skb); +} + +static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) +{ + struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data; + unsigned int tid = GET_TID(rpl); + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct tid_info *t = lldi->tids; + struct cxgbi_sock *csk; + + csk = lookup_tid(t, tid); + if (!csk) + pr_err("can't find conn. 
for tid %u.\n", tid); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,%lx,%u, status 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, rpl->status); + + if (rpl->status != CPL_ERR_NONE) + pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", + csk, tid, rpl->status); + + __kfree_skb(skb); +} + +static int alloc_cpls(struct cxgbi_sock *csk) +{ + csk->cpl_close = alloc_cpl(sizeof(struct cpl_close_con_req), + 0, GFP_NOIO); + if (!csk->cpl_close) + return -ENOMEM; + + csk->cpl_abort_req = alloc_cpl(sizeof(struct cpl_abort_req), + 0, GFP_NOIO); + if (!csk->cpl_abort_req) + goto free_cpls; + + csk->cpl_abort_rpl = alloc_cpl(sizeof(struct cpl_abort_rpl), + 0, GFP_NOIO); + if (!csk->cpl_abort_rpl) + goto free_cpls; + return 0; + +free_cpls: + cxgbi_sock_free_cpl_skbs(csk); + return -ENOMEM; +} + +static inline void l2t_put(struct cxgbi_sock *csk) +{ + if (csk->l2t) { + cxgb4_l2t_release(csk->l2t); + csk->l2t = NULL; + cxgbi_sock_put(csk); + } +} + +static void release_offload_resources(struct cxgbi_sock *csk) +{ + struct cxgb4_lld_info *lldi; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_free_cpl_skbs(csk); + if (csk->wr_cred != csk->wr_max_cred) { + cxgbi_sock_purge_wr_queue(csk); + cxgbi_sock_reset_wr_list(csk); + } + + l2t_put(csk); + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) + free_atid(csk); + else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { + lldi = cxgbi_cdev_priv(csk->cdev); + cxgb4_remove_tid(lldi->tids, 0, csk->tid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); + cxgbi_sock_put(csk); + } + csk->dst = NULL; + csk->cdev = NULL; +} + +static int init_act_open(struct cxgbi_sock *csk) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct net_device *ndev = cdev->ports[csk->port_id]; + struct port_info *pi = netdev_priv(ndev); + struct sk_buff *skb = NULL; + unsigned int step; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->atid = cxgb4_alloc_atid(lldi->tids, csk); + if (csk->atid < 0) { + pr_err("%s, NO atid available.\n", ndev->name); + return -EINVAL; + } + cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_get(csk); + + csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0); + if (!csk->l2t) { + pr_err("%s, cannot alloc l2t.\n", ndev->name); + goto rel_resource; + } + cxgbi_sock_get(csk); + + skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_NOIO); + if (!skb) + goto rel_resource; + skb->sk = (struct sock *)csk; + t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); + + if (!csk->mtu) + csk->mtu = dst_mtu(csk->dst); + cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); + csk->tx_chan = cxgb4_port_chan(ndev); + /* SMT two entries per row */ + csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1; + step = lldi->ntxq / lldi->nchan; + csk->txq_idx = cxgb4_port_idx(ndev) * step; + step = lldi->nrxq / lldi->nchan; + csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; + csk->wr_max_cred = csk->wr_cred = lldi->wr_cred; + csk->wr_una_cred = 0; + cxgbi_sock_reset_wr_list(csk); + csk->err = 0; + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n", + csk, pi->port_id, ndev->name, csk->tx_chan, + csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, + csk->smac_idx); + + cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); + send_act_open_req(csk, skb, csk->l2t); + 
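As a side note on the queue selection in init_act_open() above: the offload tx and rx queues are split evenly across channels, and each port takes the slice starting at port_idx * step; the rx slot is then used to index lldi->rxq_ids[]. A small stand-alone sketch of that arithmetic with made-up queue counts (real ntxq, nrxq and nchan come from struct cxgb4_lld_info):

#include <stdio.h>

/* Made-up adapter geometry for illustration only. */
#define NTXQ	16
#define NRXQ	8
#define NCHAN	4

int main(void)
{
	unsigned int port_idx;
	unsigned int tx_step = NTXQ / NCHAN;	/* step = ntxq / nchan */
	unsigned int rx_step = NRXQ / NCHAN;	/* step = nrxq / nchan */

	for (port_idx = 0; port_idx < NCHAN; port_idx++)
		printf("port %u: txq_idx %u, rxq_ids slot %u\n",
		       port_idx, port_idx * tx_step, port_idx * rx_step);
	return 0;
}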
return 0; + +rel_resource: + if (skb) + __kfree_skb(skb); + return -EINVAL; +} + +cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = do_act_establish, + [CPL_ACT_OPEN_RPL] = do_act_open_rpl, + [CPL_PEER_CLOSE] = do_peer_close, + [CPL_ABORT_REQ_RSS] = do_abort_req_rss, + [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss, + [CPL_CLOSE_CON_RPL] = do_close_con_rpl, + [CPL_FW4_ACK] = do_fw4_ack, + [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, + [CPL_SET_TCB_RPL] = do_set_tcb_rpl, + [CPL_RX_DATA_DDP] = do_rx_data_ddp, +}; + +int cxgb4i_ofld_init(struct cxgbi_device *cdev) +{ + int rc; + + if (cxgb4i_max_connect > CXGB4I_MAX_CONN) + cxgb4i_max_connect = CXGB4I_MAX_CONN; + + rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base, + cxgb4i_max_connect); + if (rc < 0) + return rc; + + cdev->csk_release_offload_resources = release_offload_resources; + cdev->csk_push_tx_frames = push_tx_frames; + cdev->csk_send_abort_req = send_abort_req; + cdev->csk_send_close_req = send_close_req; + cdev->csk_send_rx_credits = send_rx_credits; + cdev->csk_alloc_cpls = alloc_cpls; + cdev->csk_init_act_open = init_act_open; + + pr_info("cdev 0x%p, offload up, added.\n", cdev); + return 0; +} + +/* + * functions to program the pagepod in h/w + */ +static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req, + unsigned int dlen, unsigned int pm_addr) +{ + struct ulptx_sgl *sgl; + unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + + sizeof(struct ulptx_sgl), 16); + + INIT_ULPTX_WR(req, wr_len, 0, 0); + req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE)); + req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); + req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); + req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); + sgl = (struct ulptx_sgl *)(req + 1); + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(1)); + sgl->len0 = htonl(dlen); +} + +static int ddp_ppod_write_sgl(struct cxgbi_device *cdev, unsigned int port_id, + struct cxgbi_pagepod_hdr *hdr, unsigned int idx, + unsigned int npods, + struct cxgbi_gather_list *gl, + unsigned int gl_pidx) +{ + struct cxgbi_ddp_info *ddp = cdev->ddp; + unsigned int dlen, pm_addr; + struct sk_buff *skb; + struct ulp_mem_io *req; + struct ulptx_sgl *sgl; + struct cxgbi_pagepod *ppod; + unsigned int i; + + dlen = PPOD_SIZE * npods; + pm_addr = idx * PPOD_SIZE + ddp->llimit; + + skb = alloc_cpl(sizeof(*req) + sizeof(*sgl), dlen, GFP_ATOMIC); + if (!skb) { + pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", + cdev, idx, npods); + return -ENOMEM; + } + req = (struct ulp_mem_io *)skb->head; + set_queue(skb, CPL_PRIORITY_CONTROL, NULL); + + ulp_mem_io_set_hdr(req, dlen, pm_addr); + sgl = (struct ulptx_sgl *)(req + 1); + ppod = (struct cxgbi_pagepod *)(sgl + 1); + sgl->addr0 = cpu_to_be64(virt_to_phys(ppod)); + + for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) { + if (!hdr && !gl) + cxgbi_ddp_ppod_clear(ppod); + else + cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx); + } + + cxgb4_ofld_send(cdev->ports[port_id], skb); + return 0; +} + +static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, + unsigned int idx, unsigned int npods, + struct cxgbi_gather_list *gl) +{ + unsigned int i, cnt; + int err = 0; + + for (i = 0; i < npods; i += cnt, idx += cnt) { + cnt = npods - i; + if (cnt > ULPMEM_DSGL_MAX_NPPODS) + cnt = ULPMEM_DSGL_MAX_NPPODS; + err = ddp_ppod_write_sgl(csk->cdev, csk->port_id, hdr, + idx, cnt, gl, 4 * i); + if (err < 0) + break; + } + return err; +} + +static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int 
tag, + unsigned int idx, unsigned int npods) +{ + unsigned int i, cnt; + int err; + + for (i = 0; i < npods; i += cnt, idx += cnt) { + cnt = npods - i; + if (cnt > ULPMEM_DSGL_MAX_NPPODS) + cnt = ULPMEM_DSGL_MAX_NPPODS; + err = ddp_ppod_write_sgl(chba->cdev, chba->port_id, NULL, + idx, cnt, NULL, 0); + if (err < 0) + break; + } +} + +static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, + int pg_idx, bool reply) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; + + if (!pg_idx) + return 0; + + skb = alloc_cpl(sizeof(*req), 0, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* set up ulp submode and page size */ + val = (val & 0x03) << 2; + val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI); + + req = (struct cpl_set_tcb_field *)skb->head; + INIT_TP_WR(req, csk->tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); + req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); + req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW)); + req->mask = cpu_to_be64(TCB_ULP_TYPE(TCB_ULP_TYPE_MASK)); + req->val = cpu_to_be64(val); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); + + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + return 0; +} + +static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, + int hcrc, int dcrc, int reply) +{ + struct sk_buff *skb; + struct cpl_set_tcb_field *req; + u64 val = (hcrc ? ULP_CRC_HEADER : 0) | (dcrc ? ULP_CRC_DATA : 0); + + val = TCB_ULP_RAW(val); + val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI); + + skb = alloc_cpl(sizeof(*req), 0, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + csk->hcrc_len = (hcrc ? 4 : 0); + csk->dcrc_len = (dcrc ? 
4 : 0); + /* set up ulp submode and page size */ + req = (struct cpl_set_tcb_field *)skb->head; + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); + req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW)); + req->mask = cpu_to_be64(TCB_ULP_RAW(TCB_ULP_RAW_MASK)); + req->val = cpu_to_be64(val); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); + + cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); + return 0; +} + +static int cxgb4i_ddp_init(struct cxgbi_device *cdev) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); + struct cxgbi_ddp_info *ddp = cdev->ddp; + unsigned int tagmask, pgsz_factor[4]; + int err; + + if (ddp) { + kref_get(&ddp->refcnt); + pr_warn("cdev 0x%p, ddp 0x%p already set up.\n", + cdev, cdev->ddp); + return -EALREADY; + } + + err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start, + lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1, + lldi->iscsi_iolen, lldi->iscsi_iolen); + if (err < 0) + return err; + + ddp = cdev->ddp; + + tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; + cxgbi_ddp_page_size_factor(pgsz_factor); + cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); + + cdev->csk_ddp_free_gl_skb = NULL; + cdev->csk_ddp_alloc_gl_skb = NULL; + cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; + cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; + cdev->csk_ddp_set = ddp_set_map; + cdev->csk_ddp_clear = ddp_clear_map; + + pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n", + cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits, + cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask); + pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " + " %u/%u.\n", + cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, + ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen, + ddp->max_rxsz, lldi->iscsi_iolen); + pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n", + cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size, + ddp->max_rxsz); + return 0; +} + +static void *t4_uld_add(const struct cxgb4_lld_info *lldi) +{ + struct cxgbi_device *cdev; + struct port_info *pi; + int i, rc; + + cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports); + if (!cdev) { + pr_info("t4 device 0x%p, register failed.\n", lldi); + return NULL; + } + pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n", + cdev, lldi->adapter_type, lldi->nports, + lldi->ports[0]->name, lldi->nchan, lldi->ntxq, + lldi->nrxq, lldi->wr_cred); + for (i = 0; i < lldi->nrxq; i++) + log_debug(1 << CXGBI_DBG_DEV, + "t4 0x%p, rxq id #%d: %u.\n", + cdev, i, lldi->rxq_ids[i]); + + memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi)); + cdev->flags = CXGBI_FLAG_DEV_T4; + cdev->pdev = lldi->pdev; + cdev->ports = lldi->ports; + cdev->nports = lldi->nports; + cdev->mtus = lldi->mtus; + cdev->nmtus = NMTUS; + cdev->snd_win = cxgb4i_snd_win; + cdev->rcv_win = cxgb4i_rcv_win; + cdev->rx_credit_thres = cxgb4i_rx_credit_thres; + cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN; + cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); + cdev->itp = &cxgb4i_iscsi_transport; + + rc = cxgb4i_ddp_init(cdev); + if (rc) { + pr_info("t4 0x%p ddp init failed.\n", cdev); + goto err_out; + } + rc = cxgb4i_ofld_init(cdev); + if (rc) { + pr_info("t4 0x%p ofld init failed.\n", cdev); + goto err_out; + } + + rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN, + &cxgb4i_host_template, 
cxgb4i_stt); + if (rc) + goto err_out; + + for (i = 0; i < cdev->nports; i++) { + pi = netdev_priv(lldi->ports[i]); + cdev->hbas[i]->port_id = pi->port_id; + } + return cdev; + +err_out: + cxgbi_device_unregister(cdev); + return ERR_PTR(-ENOMEM); +} + +#define RX_PULL_LEN 128 +static int t4_uld_rx_handler(void *handle, const __be64 *rsp, + const struct pkt_gl *pgl) +{ + const struct cpl_act_establish *rpl; + struct sk_buff *skb; + unsigned int opc; + struct cxgbi_device *cdev = handle; + + if (pgl == NULL) { + unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; + + skb = alloc_cpl(len, 0, GFP_ATOMIC); + if (!skb) + goto nomem; + skb_copy_to_linear_data(skb, &rsp[1], len); + } else { + if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) { + pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n", + pgl->va, be64_to_cpu(*rsp), + be64_to_cpu(*(u64 *)pgl->va), + pgl->tot_len); + return 0; + } + skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) + goto nomem; + } + + rpl = (struct cpl_act_establish *)skb->data; + opc = rpl->ot.opcode; + log_debug(1 << CXGBI_DBG_TOE, + "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n", + cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb); + if (cxgb4i_cplhandlers[opc]) + cxgb4i_cplhandlers[opc](cdev, skb); + else { + pr_err("No handler for opcode 0x%x.\n", opc); + __kfree_skb(skb); + } + return 0; +nomem: + log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n"); + return 1; +} + +static int t4_uld_state_change(void *handle, enum cxgb4_state state) +{ + struct cxgbi_device *cdev = handle; + + switch (state) { + case CXGB4_STATE_UP: + pr_info("cdev 0x%p, UP.\n", cdev); + /* re-initialize */ + break; + case CXGB4_STATE_START_RECOVERY: + pr_info("cdev 0x%p, RECOVERY.\n", cdev); + /* close all connections */ + break; + case CXGB4_STATE_DOWN: + pr_info("cdev 0x%p, DOWN.\n", cdev); + break; + case CXGB4_STATE_DETACH: + pr_info("cdev 0x%p, DETACH.\n", cdev); + break; + default: + pr_info("cdev 0x%p, unknown state %d.\n", cdev, state); + break; + } + return 0; +} + +static int __init cxgb4i_init_module(void) +{ + int rc; + + printk(KERN_INFO "%s", version); + + rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt); + if (rc < 0) + return rc; + cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); + return 0; +} + +static void __exit cxgb4i_exit_module(void) +{ + cxgb4_unregister_uld(CXGB4_ULD_ISCSI); + cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); + cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); +} + +module_init(cxgb4i_init_module); +module_exit(cxgb4i_exit_module); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h new file mode 100644 index 000000000000..342263b1f542 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h @@ -0,0 +1,38 @@ +/* + * cxgb4i.h: Chelsio T4 iSCSI driver. + * + * Copyright (c) 2010 Chelsio Communications, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Karen Xie (kxie@chelsio.com) + * Written by: Rakesh Ranjan (rranjan@chelsio.com) + */ + +#ifndef __CXGB4I_H__ +#define __CXGB4I_H__ + +#define CXGB4I_SCSI_HOST_QDEPTH 1024 +#define CXGB4I_MAX_CONN 16384 +#define CXGB4I_MAX_TARGET CXGB4I_MAX_CONN +#define CXGB4I_MAX_LUN 0x1000 + +/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ +#define CXGB4I_TX_HEADER_LEN \ + (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr)) + +struct cpl_rx_data_ddp { + union opcode_tid ot; + __be16 urg; + __be16 len; + __be32 seq; + union { + __be32 nxt_seq; + __be32 ddp_report; + }; + __be32 ulp_crc; + __be32 ddpvld; +}; +#endif /* __CXGB4I_H__ */ -- cgit v1.2.3 From 6f7efaabefebfbc523ea9776e3663a2d81b86399 Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Mon, 16 Aug 2010 20:55:53 -0700 Subject: [SCSI] cxgb3i: change cxgb3i to use libcxgbi Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 1 - drivers/scsi/Makefile | 2 +- drivers/scsi/cxgb3i/Kbuild | 4 - drivers/scsi/cxgb3i/Kconfig | 7 - drivers/scsi/cxgb3i/cxgb3i.h | 161 --- drivers/scsi/cxgb3i/cxgb3i_ddp.c | 773 -------------- drivers/scsi/cxgb3i/cxgb3i_ddp.h | 312 ------ drivers/scsi/cxgb3i/cxgb3i_init.c | 132 --- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 1018 ------------------ drivers/scsi/cxgb3i/cxgb3i_offload.c | 1944 ---------------------------------- drivers/scsi/cxgb3i/cxgb3i_offload.h | 243 ----- drivers/scsi/cxgb3i/cxgb3i_pdu.c | 495 --------- drivers/scsi/cxgb3i/cxgb3i_pdu.h | 59 -- drivers/scsi/cxgbi/Kconfig | 1 + drivers/scsi/cxgbi/Makefile | 1 + drivers/scsi/cxgbi/cxgb3i/Kbuild | 3 + drivers/scsi/cxgbi/cxgb3i/Kconfig | 7 + drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 1432 +++++++++++++++++++++++++ drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 51 + 19 files changed, 1496 insertions(+), 5150 deletions(-) delete mode 100644 drivers/scsi/cxgb3i/Kbuild delete mode 100644 drivers/scsi/cxgb3i/Kconfig delete mode 100644 drivers/scsi/cxgb3i/cxgb3i.h delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_ddp.c delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_ddp.h delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_init.c delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_iscsi.c delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_offload.c delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_offload.h delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_pdu.c delete mode 100644 drivers/scsi/cxgb3i/cxgb3i_pdu.h create mode 100644 drivers/scsi/cxgbi/cxgb3i/Kbuild create mode 100644 drivers/scsi/cxgbi/cxgb3i/Kconfig create mode 100644 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c create mode 100644 drivers/scsi/cxgbi/cxgb3i/cxgb3i.h (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index bee3aef230dc..a6fdcf4cf74a 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -378,7 +378,6 @@ config ISCSI_BOOT_SYSFS via sysfs to userspace. If you wish to export this information, say Y. Otherwise, say N. 
-source "drivers/scsi/cxgb3i/Kconfig" source "drivers/scsi/cxgbi/Kconfig" source "drivers/scsi/bnx2i/Kconfig" source "drivers/scsi/be2iscsi/Kconfig" diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index cb31f8cf09d4..2e9a87e8e7d8 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -133,7 +133,7 @@ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o obj-$(CONFIG_SCSI_MVSAS) += mvsas/ obj-$(CONFIG_PS3_ROM) += ps3rom.o -obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ +obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ diff --git a/drivers/scsi/cxgb3i/Kbuild b/drivers/scsi/cxgb3i/Kbuild deleted file mode 100644 index 70d060b7ff4f..000000000000 --- a/drivers/scsi/cxgb3i/Kbuild +++ /dev/null @@ -1,4 +0,0 @@ -EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 - -cxgb3i-y := cxgb3i_init.o cxgb3i_iscsi.o cxgb3i_pdu.o cxgb3i_offload.o cxgb3i_ddp.o -obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgb3i/Kconfig b/drivers/scsi/cxgb3i/Kconfig deleted file mode 100644 index bfdcaf5c9c57..000000000000 --- a/drivers/scsi/cxgb3i/Kconfig +++ /dev/null @@ -1,7 +0,0 @@ -config SCSI_CXGB3_ISCSI - tristate "Chelsio S3xx iSCSI support" - depends on CHELSIO_T3_DEPENDS - select CHELSIO_T3 - select SCSI_ISCSI_ATTRS - ---help--- - This driver supports iSCSI offload for the Chelsio S3 series devices. diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h deleted file mode 100644 index e3133b58e594..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * cxgb3i.h: Chelsio S3xx iSCSI driver. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#ifndef __CXGB3I_H__ -#define __CXGB3I_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* from cxgb3 LLD */ -#include "common.h" -#include "t3_cpl.h" -#include "t3cdev.h" -#include "cxgb3_ctl_defs.h" -#include "cxgb3_offload.h" -#include "firmware_exports.h" - -#include "cxgb3i_offload.h" -#include "cxgb3i_ddp.h" - -#define CXGB3I_SCSI_HOST_QDEPTH 1024 -#define CXGB3I_MAX_TARGET CXGB3I_MAX_CONN -#define CXGB3I_MAX_LUN 512 -#define ISCSI_PDU_NONPAYLOAD_MAX \ - (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE) - -struct cxgb3i_adapter; -struct cxgb3i_hba; -struct cxgb3i_endpoint; - -/** - * struct cxgb3i_hba - cxgb3i iscsi structure (per port) - * - * @snic: cxgb3i adapter containing this port - * @ndev: pointer to netdev structure - * @shost: pointer to scsi host structure - */ -struct cxgb3i_hba { - struct cxgb3i_adapter *snic; - struct net_device *ndev; - struct Scsi_Host *shost; -}; - -/** - * struct cxgb3i_adapter - cxgb3i adapter structure (per pci) - * - * @listhead: list head to link elements - * @lock: lock for this structure - * @tdev: pointer to t3cdev used by cxgb3 driver - * @pdev: pointer to pci dev - * @hba_cnt: # of hbas (the same as # of ports) - * @hba: all the hbas on this adapter - * @flags: bit flag for adapter event/status - * @tx_max_size: max. tx packet size supported - * @rx_max_size: max. 
rx packet size supported - * @tag_format: ddp tag format settings - */ -#define CXGB3I_ADAPTER_FLAG_RESET 0x1 -struct cxgb3i_adapter { - struct list_head list_head; - spinlock_t lock; - struct t3cdev *tdev; - struct pci_dev *pdev; - unsigned char hba_cnt; - struct cxgb3i_hba *hba[MAX_NPORTS]; - - unsigned int flags; - unsigned int tx_max_size; - unsigned int rx_max_size; - - struct cxgb3i_tag_format tag_format; -}; - -/** - * struct cxgb3i_conn - cxgb3i iscsi connection - * - * @listhead: list head to link elements - * @cep: pointer to iscsi_endpoint structure - * @conn: pointer to iscsi_conn structure - * @hba: pointer to the hba this conn. is going through - * @task_idx_bits: # of bits needed for session->cmds_max - */ -struct cxgb3i_conn { - struct list_head list_head; - struct cxgb3i_endpoint *cep; - struct iscsi_conn *conn; - struct cxgb3i_hba *hba; - unsigned int task_idx_bits; -}; - -/** - * struct cxgb3i_endpoint - iscsi tcp endpoint - * - * @c3cn: the h/w tcp connection representation - * @hba: pointer to the hba this conn. is going through - * @cconn: pointer to the associated cxgb3i iscsi connection - */ -struct cxgb3i_endpoint { - struct s3_conn *c3cn; - struct cxgb3i_hba *hba; - struct cxgb3i_conn *cconn; -}; - -/** - * struct cxgb3i_task_data - private iscsi task data - * - * @nr_frags: # of coalesced page frags (from scsi sgl) - * @frags: coalesced page frags (from scsi sgl) - * @skb: tx pdu skb - * @offset: data offset for the next pdu - * @count: max. possible pdu payload - * @sgoffset: offset to the first sg entry for a given offset - */ -#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512) -struct cxgb3i_task_data { - unsigned short nr_frags; - skb_frag_t frags[MAX_PDU_FRAGS]; - struct sk_buff *skb; - unsigned int offset; - unsigned int count; - unsigned int sgoffset; -}; - -int cxgb3i_iscsi_init(void); -void cxgb3i_iscsi_cleanup(void); - -struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *); -void cxgb3i_adapter_open(struct t3cdev *); -void cxgb3i_adapter_close(struct t3cdev *); - -struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, - struct net_device *); -void cxgb3i_hba_host_remove(struct cxgb3i_hba *); - -int cxgb3i_pdu_init(void); -void cxgb3i_pdu_cleanup(void); -void cxgb3i_conn_cleanup_task(struct iscsi_task *); -int cxgb3i_conn_alloc_pdu(struct iscsi_task *, u8); -int cxgb3i_conn_init_pdu(struct iscsi_task *, unsigned int, unsigned int); -int cxgb3i_conn_xmit_pdu(struct iscsi_task *); - -void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt); -int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt); - -#endif diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.c b/drivers/scsi/cxgb3i/cxgb3i_ddp.c deleted file mode 100644 index be0e23042c76..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.c +++ /dev/null @@ -1,773 +0,0 @@ -/* - * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#include -#include -#include - -/* from cxgb3 LLD */ -#include "common.h" -#include "t3_cpl.h" -#include "t3cdev.h" -#include "cxgb3_ctl_defs.h" -#include "cxgb3_offload.h" -#include "firmware_exports.h" - -#include "cxgb3i_ddp.h" - -#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt) -#define ddp_log_warn(fmt...) 
printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt) -#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt) - -#ifdef __DEBUG_CXGB3I_DDP__ -#define ddp_log_debug(fmt, args...) \ - printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args) -#else -#define ddp_log_debug(fmt...) -#endif - -/* - * iSCSI Direct Data Placement - * - * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into - * pre-posted final destination host-memory buffers based on the Initiator - * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs. - * - * The host memory address is programmed into h/w in the format of pagepod - * entries. - * The location of the pagepod entry is encoded into ddp tag which is used or - * is the base for ITT/TTT. - */ - -#define DDP_PGIDX_MAX 4 -#define DDP_THRESHOLD 2048 -static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; -static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; -static unsigned char page_idx = DDP_PGIDX_MAX; - -/* - * functions to program the pagepod in h/w - */ -static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) -{ - struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; - - req->wr.wr_lo = 0; - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); - req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | - V_ULPTX_CMD(ULP_MEM_WRITE)); - req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | - V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); -} - -static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr, - unsigned int idx, unsigned int npods, - struct cxgb3i_gather_list *gl) -{ - unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; - int i; - - for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { - struct sk_buff *skb = ddp->gl_skb[idx]; - struct pagepod *ppod; - int j, pidx; - - /* hold on to the skb until we clear the ddp mapping */ - skb_get(skb); - - ulp_mem_io_set_hdr(skb, pm_addr); - ppod = (struct pagepod *) - (skb->head + sizeof(struct ulp_mem_io)); - memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod)); - for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx) - ppod->addr[j] = pidx < gl->nelem ? 
- cpu_to_be64(gl->phys_addr[pidx]) : 0UL; - - skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(ddp->tdev, skb); - } - return 0; -} - -static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag, - unsigned int idx, unsigned int npods) -{ - unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; - int i; - - for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { - struct sk_buff *skb = ddp->gl_skb[idx]; - - if (!skb) { - ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n", - tag, idx, i, npods); - continue; - } - ddp->gl_skb[idx] = NULL; - memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE); - ulp_mem_io_set_hdr(skb, pm_addr); - skb->priority = CPL_PRIORITY_CONTROL; - cxgb3_ofld_send(ddp->tdev, skb); - } -} - -static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp, - unsigned int start, unsigned int max, - unsigned int count, - struct cxgb3i_gather_list *gl) -{ - unsigned int i, j, k; - - /* not enough entries */ - if ((max - start) < count) - return -EBUSY; - - max -= count; - spin_lock(&ddp->map_lock); - for (i = start; i < max;) { - for (j = 0, k = i; j < count; j++, k++) { - if (ddp->gl_map[k]) - break; - } - if (j == count) { - for (j = 0, k = i; j < count; j++, k++) - ddp->gl_map[k] = gl; - spin_unlock(&ddp->map_lock); - return i; - } - i += j + 1; - } - spin_unlock(&ddp->map_lock); - return -EBUSY; -} - -static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp, - int start, int count) -{ - spin_lock(&ddp->map_lock); - memset(&ddp->gl_map[start], 0, - count * sizeof(struct cxgb3i_gather_list *)); - spin_unlock(&ddp->map_lock); -} - -static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp, - int idx, int count) -{ - int i; - - for (i = 0; i < count; i++, idx++) - if (ddp->gl_skb[idx]) { - kfree_skb(ddp->gl_skb[idx]); - ddp->gl_skb[idx] = NULL; - } -} - -static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx, - int count, gfp_t gfp) -{ - int i; - - for (i = 0; i < count; i++) { - struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) + - PPOD_SIZE, gfp); - if (skb) { - ddp->gl_skb[idx + i] = skb; - skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE); - } else { - ddp_free_gl_skb(ddp, idx, i); - return -ENOMEM; - } - } - return 0; -} - -/** - * cxgb3i_ddp_find_page_index - return ddp page index for a given page size - * @pgsz: page size - * return the ddp page index, if no match is found return DDP_PGIDX_MAX. - */ -int cxgb3i_ddp_find_page_index(unsigned long pgsz) -{ - int i; - - for (i = 0; i < DDP_PGIDX_MAX; i++) { - if (pgsz == (1UL << ddp_page_shift[i])) - return i; - } - ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz); - return DDP_PGIDX_MAX; -} - -/** - * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE - * return the ddp page index, if no match is found return DDP_PGIDX_MAX. - */ -int cxgb3i_ddp_adjust_page_table(void) -{ - int i; - unsigned int base_order, order; - - if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { - ddp_log_info("PAGE_SIZE 0x%lx too small, min. 
0x%lx.\n", - PAGE_SIZE, 1UL << ddp_page_shift[0]); - return -EINVAL; - } - - base_order = get_order(1UL << ddp_page_shift[0]); - order = get_order(1 << PAGE_SHIFT); - for (i = 0; i < DDP_PGIDX_MAX; i++) { - /* first is the kernel page size, then just doubling the size */ - ddp_page_order[i] = order - base_order + i; - ddp_page_shift[i] = PAGE_SHIFT + i; - } - return 0; -} - -static inline void ddp_gl_unmap(struct pci_dev *pdev, - struct cxgb3i_gather_list *gl) -{ - int i; - - for (i = 0; i < gl->nelem; i++) - pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE, - PCI_DMA_FROMDEVICE); -} - -static inline int ddp_gl_map(struct pci_dev *pdev, - struct cxgb3i_gather_list *gl) -{ - int i; - - for (i = 0; i < gl->nelem; i++) { - gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0, - PAGE_SIZE, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i]))) - goto unmap; - } - - return i; - -unmap: - if (i) { - unsigned int nelem = gl->nelem; - - gl->nelem = i; - ddp_gl_unmap(pdev, gl); - gl->nelem = nelem; - } - return -ENOMEM; -} - -/** - * cxgb3i_ddp_make_gl - build ddp page buffer list - * @xferlen: total buffer length - * @sgl: page buffer scatter-gather list - * @sgcnt: # of page buffers - * @pdev: pci_dev, used for pci map - * @gfp: allocation mode - * - * construct a ddp page buffer list from the scsi scattergather list. - * coalesce buffers as much as possible, and obtain dma addresses for - * each page. - * - * Return the cxgb3i_gather_list constructed from the page buffers if the - * memory can be used for ddp. Return NULL otherwise. - */ -struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen, - struct scatterlist *sgl, - unsigned int sgcnt, - struct pci_dev *pdev, - gfp_t gfp) -{ - struct cxgb3i_gather_list *gl; - struct scatterlist *sg = sgl; - struct page *sgpage = sg_page(sg); - unsigned int sglen = sg->length; - unsigned int sgoffset = sg->offset; - unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> - PAGE_SHIFT; - int i = 1, j = 0; - - if (xferlen < DDP_THRESHOLD) { - ddp_log_debug("xfer %u < threshold %u, no ddp.\n", - xferlen, DDP_THRESHOLD); - return NULL; - } - - gl = kzalloc(sizeof(struct cxgb3i_gather_list) + - npages * (sizeof(dma_addr_t) + sizeof(struct page *)), - gfp); - if (!gl) - return NULL; - - gl->pages = (struct page **)&gl->phys_addr[npages]; - gl->length = xferlen; - gl->offset = sgoffset; - gl->pages[0] = sgpage; - - sg = sg_next(sg); - while (sg) { - struct page *page = sg_page(sg); - - if (sgpage == page && sg->offset == sgoffset + sglen) - sglen += sg->length; - else { - /* make sure the sgl is fit for ddp: - * each has the same page size, and - * all of the middle pages are used completely - */ - if ((j && sgoffset) || - ((i != sgcnt - 1) && - ((sglen + sgoffset) & ~PAGE_MASK))) - goto error_out; - - j++; - if (j == gl->nelem || sg->offset) - goto error_out; - gl->pages[j] = page; - sglen = sg->length; - sgoffset = sg->offset; - sgpage = page; - } - i++; - sg = sg_next(sg); - } - gl->nelem = ++j; - - if (ddp_gl_map(pdev, gl) < 0) - goto error_out; - - return gl; - -error_out: - kfree(gl); - return NULL; -} - -/** - * cxgb3i_ddp_release_gl - release a page buffer list - * @gl: a ddp page buffer list - * @pdev: pci_dev used for pci_unmap - * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl(). 
- */ -void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl, - struct pci_dev *pdev) -{ - ddp_gl_unmap(pdev, gl); - kfree(gl); -} - -/** - * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer - * @tdev: t3cdev adapter - * @tid: connection id - * @tformat: tag format - * @tagp: contains s/w tag initially, will be updated with ddp/hw tag - * @gl: the page momory list - * @gfp: allocation mode - * - * ddp setup for a given page buffer list and construct the ddp tag. - * return 0 if success, < 0 otherwise. - */ -int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid, - struct cxgb3i_tag_format *tformat, u32 *tagp, - struct cxgb3i_gather_list *gl, gfp_t gfp) -{ - struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; - struct pagepod_hdr hdr; - unsigned int npods; - int idx = -1; - int err = -ENOMEM; - u32 sw_tag = *tagp; - u32 tag; - - if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem || - gl->length < DDP_THRESHOLD) { - ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n", - page_idx, gl->length, DDP_THRESHOLD); - return -EINVAL; - } - - npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; - - if (ddp->idx_last == ddp->nppods) - idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl); - else { - idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, - ddp->nppods, npods, gl); - if (idx < 0 && ddp->idx_last >= npods) { - idx = ddp_find_unused_entries(ddp, 0, - min(ddp->idx_last + npods, ddp->nppods), - npods, gl); - } - } - if (idx < 0) { - ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n", - gl->length, gl->nelem, npods); - return idx; - } - - err = ddp_alloc_gl_skb(ddp, idx, npods, gfp); - if (err < 0) - goto unmark_entries; - - tag = cxgb3i_ddp_tag_base(tformat, sw_tag); - tag |= idx << PPOD_IDX_SHIFT; - - hdr.rsvd = 0; - hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid)); - hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); - hdr.maxoffset = htonl(gl->length); - hdr.pgoffset = htonl(gl->offset); - - err = set_ddp_map(ddp, &hdr, idx, npods, gl); - if (err < 0) - goto free_gl_skb; - - ddp->idx_last = idx; - ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n", - gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, - idx, npods); - *tagp = tag; - return 0; - -free_gl_skb: - ddp_free_gl_skb(ddp, idx, npods); -unmark_entries: - ddp_unmark_entries(ddp, idx, npods); - return err; -} - -/** - * cxgb3i_ddp_tag_release - release a ddp tag - * @tdev: t3cdev adapter - * @tag: ddp tag - * ddp cleanup for a given ddp tag and release all the resources held - */ -void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag) -{ - struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; - u32 idx; - - if (!ddp) { - ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag); - return; - } - - idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; - if (idx < ddp->nppods) { - struct cxgb3i_gather_list *gl = ddp->gl_map[idx]; - unsigned int npods; - - if (!gl || !gl->nelem) { - ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n", - tag, idx, gl, gl ? 
gl->nelem : 0); - return; - } - npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; - ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n", - tag, idx, npods); - clear_ddp_map(ddp, tag, idx, npods); - ddp_unmark_entries(ddp, idx, npods); - cxgb3i_ddp_release_gl(gl, ddp->pdev); - } else - ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n", - tag, idx, ddp->nppods); -} - -static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx, - int reply) -{ - struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field), - GFP_KERNEL); - struct cpl_set_tcb_field *req; - u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; - - if (!skb) - return -ENOMEM; - - /* set up ulp submode and page size */ - req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - req->wr.wr_lo = 0; - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 0 : 1); - req->cpu_idx = 0; - req->word = htons(31); - req->mask = cpu_to_be64(0xF0000000); - req->val = cpu_to_be64(val << 28); - skb->priority = CPL_PRIORITY_CONTROL; - - cxgb3_ofld_send(tdev, skb); - return 0; -} - -/** - * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size - * @tdev: t3cdev adapter - * @tid: connection id - * @reply: request reply from h/w - * set up the ddp page size based on the host PAGE_SIZE for a connection - * identified by tid - */ -int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid, - int reply) -{ - return setup_conn_pgidx(tdev, tid, page_idx, reply); -} - -/** - * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size - * @tdev: t3cdev adapter - * @tid: connection id - * @reply: request reply from h/w - * @pgsz: ddp page size - * set up the ddp page size for a connection identified by tid - */ -int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid, - int reply, unsigned long pgsz) -{ - int pgidx = cxgb3i_ddp_find_page_index(pgsz); - - return setup_conn_pgidx(tdev, tid, pgidx, reply); -} - -/** - * cxgb3i_setup_conn_digest - setup conn. digest setting - * @tdev: t3cdev adapter - * @tid: connection id - * @hcrc: header digest enabled - * @dcrc: data digest enabled - * @reply: request reply from h/w - * set up the iscsi digest settings for a connection identified by tid - */ -int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid, - int hcrc, int dcrc, int reply) -{ - struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field), - GFP_KERNEL); - struct cpl_set_tcb_field *req; - u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0); - - if (!skb) - return -ENOMEM; - - /* set up ulp submode and page size */ - req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - req->wr.wr_lo = 0; - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); - req->reply = V_NO_REPLY(reply ? 0 : 1); - req->cpu_idx = 0; - req->word = htons(31); - req->mask = cpu_to_be64(0x0F000000); - req->val = cpu_to_be64(val << 24); - skb->priority = CPL_PRIORITY_CONTROL; - - cxgb3_ofld_send(tdev, skb); - return 0; -} - - -/** - * cxgb3i_adapter_ddp_info - read the adapter's ddp information - * @tdev: t3cdev adapter - * @tformat: tag format - * @txsz: max tx pdu payload size, filled in by this func. - * @rxsz: max rx pdu payload size, filled in by this func. 
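
Both setup_conn_pgidx() and cxgb3i_setup_conn_digest() above program the same TCB word (31) through CPL_SET_TCB_FIELD, just with different mask/value pairs: the DDP page-size index lives in bits 28-31 (mask 0xF0000000) and the ULP submode, i.e. header/data digest enables, in bits 24-27 (mask 0x0F000000). A small sketch of that field packing, independent of the CPL plumbing; the macro names here are illustrative.

#include <stdio.h>
#include <stdint.h>

#define TCB_PGIDX_SHIFT        28
#define TCB_PGIDX_MASK         0xF0000000ull
#define TCB_ULP_SUBMODE_SHIFT  24
#define TCB_ULP_SUBMODE_MASK   0x0F000000ull

struct tcb_field { uint64_t mask, val; };

static struct tcb_field set_ddp_page_idx(unsigned int pgidx)
{
	struct tcb_field f = {
		.mask = TCB_PGIDX_MASK,
		.val  = (uint64_t)pgidx << TCB_PGIDX_SHIFT,
	};
	return f;
}

static struct tcb_field set_ulp_submode(int hcrc, int dcrc)
{
	/* bit 0: header digest, bit 1: data digest -- matches the
	 * (hcrc ? 1 : 0) | (dcrc ? 2 : 0) encoding above. */
	uint64_t submode = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);
	struct tcb_field f = {
		.mask = TCB_ULP_SUBMODE_MASK,
		.val  = submode << TCB_ULP_SUBMODE_SHIFT,
	};
	return f;
}

int main(void)
{
	/* page-size index 2: 16KB with a 4KB base page, per the doubling
	 * table set up earlier in this file. */
	struct tcb_field a = set_ddp_page_idx(2);
	struct tcb_field b = set_ulp_submode(1, 1);    /* both digests on */

	printf("pgidx:   mask %#llx val %#llx\n",
	       (unsigned long long)a.mask, (unsigned long long)a.val);
	printf("submode: mask %#llx val %#llx\n",
	       (unsigned long long)b.mask, (unsigned long long)b.val);
	return 0;
}
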
- * setup the tag format for a given iscsi entity - */ -int cxgb3i_adapter_ddp_info(struct t3cdev *tdev, - struct cxgb3i_tag_format *tformat, - unsigned int *txsz, unsigned int *rxsz) -{ - struct cxgb3i_ddp_info *ddp; - unsigned char idx_bits; - - if (!tformat) - return -EINVAL; - - if (!tdev->ulp_iscsi) - return -EINVAL; - - ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; - - idx_bits = 32 - tformat->sw_bits; - tformat->rsvd_bits = ddp->idx_bits; - tformat->rsvd_shift = PPOD_IDX_SHIFT; - tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1; - - ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n", - tformat->sw_bits, tformat->rsvd_bits, - tformat->rsvd_shift, tformat->rsvd_mask); - - *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, - ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); - *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, - ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); - ddp_log_info("max payload size: %u/%u, %u/%u.\n", - *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz); - return 0; -} - -/** - * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource - * @tdev: t3cdev adapter - * release all the resource held by the ddp pagepod manager for a given - * adapter if needed - */ - -static void ddp_cleanup(struct kref *kref) -{ - struct cxgb3i_ddp_info *ddp = container_of(kref, - struct cxgb3i_ddp_info, - refcnt); - int i = 0; - - ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev); - - ddp->tdev->ulp_iscsi = NULL; - while (i < ddp->nppods) { - struct cxgb3i_gather_list *gl = ddp->gl_map[i]; - if (gl) { - int npods = (gl->nelem + PPOD_PAGES_MAX - 1) - >> PPOD_PAGES_SHIFT; - ddp_log_info("t3dev 0x%p, ddp %d + %d.\n", - ddp->tdev, i, npods); - kfree(gl); - ddp_free_gl_skb(ddp, i, npods); - i += npods; - } else - i++; - } - cxgb3i_free_big_mem(ddp); -} - -void cxgb3i_ddp_cleanup(struct t3cdev *tdev) -{ - struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi; - - ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp); - if (ddp) - kref_put(&ddp->refcnt, ddp_cleanup); -} - -/** - * ddp_init - initialize the cxgb3 adapter's ddp resource - * @tdev: t3cdev adapter - * initialize the ddp pagepod manager for a given adapter - */ -static void ddp_init(struct t3cdev *tdev) -{ - struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi; - struct ulp_iscsi_info uinfo; - unsigned int ppmax, bits; - int i, err; - - if (ddp) { - kref_get(&ddp->refcnt); - ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n", - tdev, tdev->ulp_iscsi); - return; - } - - err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); - if (err < 0) { - ddp_log_error("%s, failed to get iscsi param err=%d.\n", - tdev->name, err); - return; - } - - ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT; - bits = __ilog2_u32(ppmax) + 1; - if (bits > PPOD_IDX_MAX_SIZE) - bits = PPOD_IDX_MAX_SIZE; - ppmax = (1 << (bits - 1)) - 1; - - ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) + - ppmax * - (sizeof(struct cxgb3i_gather_list *) + - sizeof(struct sk_buff *)), - GFP_KERNEL); - if (!ddp) { - ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n", - tdev->name, ppmax); - return; - } - ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1); - ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) + - ppmax * - sizeof(struct cxgb3i_gather_list *)); - spin_lock_init(&ddp->map_lock); - kref_init(&ddp->refcnt); - - ddp->tdev = tdev; - ddp->pdev = uinfo.pdev; - ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE); - ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, 
ULP2_MAX_PKT_SIZE); - ddp->llimit = uinfo.llimit; - ddp->ulimit = uinfo.ulimit; - ddp->nppods = ppmax; - ddp->idx_last = ppmax; - ddp->idx_bits = bits; - ddp->idx_mask = (1 << bits) - 1; - ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1; - - uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; - for (i = 0; i < DDP_PGIDX_MAX; i++) - uinfo.pgsz_factor[i] = ddp_page_order[i]; - uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT); - - err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); - if (err < 0) { - ddp_log_warn("%s unable to set iscsi param err=%d, " - "ddp disabled.\n", tdev->name, err); - goto free_ddp_map; - } - - tdev->ulp_iscsi = ddp; - - ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u," - " %u/%u.\n", - tdev, ppmax, ddp->idx_bits, ddp->idx_mask, - ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz, - ddp->max_rxsz, uinfo.max_rxsz); - return; - -free_ddp_map: - cxgb3i_free_big_mem(ddp); -} - -/** - * cxgb3i_ddp_init - initialize ddp functions - */ -void cxgb3i_ddp_init(struct t3cdev *tdev) -{ - if (page_idx == DDP_PGIDX_MAX) { - page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); - - if (page_idx == DDP_PGIDX_MAX) { - ddp_log_info("system PAGE_SIZE %lu, update hw.\n", - PAGE_SIZE); - if (cxgb3i_ddp_adjust_page_table() < 0) { - ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n", - PAGE_SIZE); - return; - } - page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE); - } - ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n", - PAGE_SIZE, page_idx); - } - ddp_init(tdev); -} diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h deleted file mode 100644 index 6761b329124d..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h +++ /dev/null @@ -1,312 +0,0 @@ -/* - * cxgb3i_ddp.h: Chelsio S3xx iSCSI DDP Manager. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#ifndef __CXGB3I_ULP2_DDP_H__ -#define __CXGB3I_ULP2_DDP_H__ - -#include -#include - -/** - * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity - * - * @sw_bits: # of bits used by iscsi software layer - * @rsvd_bits: # of bits used by h/w - * @rsvd_shift: h/w bits shift left - * @rsvd_mask: reserved bit mask - */ -struct cxgb3i_tag_format { - unsigned char sw_bits; - unsigned char rsvd_bits; - unsigned char rsvd_shift; - unsigned char filler[1]; - u32 rsvd_mask; -}; - -/** - * struct cxgb3i_gather_list - cxgb3i direct data placement memory - * - * @tag: ddp tag - * @length: total data buffer length - * @offset: initial offset to the 1st page - * @nelem: # of pages - * @pages: page pointers - * @phys_addr: physical address - */ -struct cxgb3i_gather_list { - u32 tag; - unsigned int length; - unsigned int offset; - unsigned int nelem; - struct page **pages; - dma_addr_t phys_addr[0]; -}; - -/** - * struct cxgb3i_ddp_info - cxgb3i direct data placement for pdu payload - * - * @list: list head to link elements - * @refcnt: ref. 
count - * @tdev: pointer to t3cdev used by cxgb3 driver - * @max_txsz: max tx packet size for ddp - * @max_rxsz: max rx packet size for ddp - * @llimit: lower bound of the page pod memory - * @ulimit: upper bound of the page pod memory - * @nppods: # of page pod entries - * @idx_last: page pod entry last used - * @idx_bits: # of bits the pagepod index would take - * @idx_mask: pagepod index mask - * @rsvd_tag_mask: tag mask - * @map_lock: lock to synchonize access to the page pod map - * @gl_map: ddp memory gather list - * @gl_skb: skb used to program the pagepod - */ -struct cxgb3i_ddp_info { - struct list_head list; - struct kref refcnt; - struct t3cdev *tdev; - struct pci_dev *pdev; - unsigned int max_txsz; - unsigned int max_rxsz; - unsigned int llimit; - unsigned int ulimit; - unsigned int nppods; - unsigned int idx_last; - unsigned char idx_bits; - unsigned char filler[3]; - u32 idx_mask; - u32 rsvd_tag_mask; - spinlock_t map_lock; - struct cxgb3i_gather_list **gl_map; - struct sk_buff **gl_skb; -}; - -#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8) */ -#define ULP2_MAX_PKT_SIZE 16224 -#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) -#define PPOD_PAGES_MAX 4 -#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ - -/* - * struct pagepod_hdr, pagepod - pagepod format - */ -struct pagepod_hdr { - u32 vld_tid; - u32 pgsz_tag_clr; - u32 maxoffset; - u32 pgoffset; - u64 rsvd; -}; - -struct pagepod { - struct pagepod_hdr hdr; - u64 addr[PPOD_PAGES_MAX + 1]; -}; - -#define PPOD_SIZE sizeof(struct pagepod) /* 64 */ -#define PPOD_SIZE_SHIFT 6 - -#define PPOD_COLOR_SHIFT 0 -#define PPOD_COLOR_SIZE 6 -#define PPOD_COLOR_MASK ((1 << PPOD_COLOR_SIZE) - 1) - -#define PPOD_IDX_SHIFT PPOD_COLOR_SIZE -#define PPOD_IDX_MAX_SIZE 24 - -#define S_PPOD_TID 0 -#define M_PPOD_TID 0xFFFFFF -#define V_PPOD_TID(x) ((x) << S_PPOD_TID) - -#define S_PPOD_VALID 24 -#define V_PPOD_VALID(x) ((x) << S_PPOD_VALID) -#define F_PPOD_VALID V_PPOD_VALID(1U) - -#define S_PPOD_COLOR 0 -#define M_PPOD_COLOR 0x3F -#define V_PPOD_COLOR(x) ((x) << S_PPOD_COLOR) - -#define S_PPOD_TAG 6 -#define M_PPOD_TAG 0xFFFFFF -#define V_PPOD_TAG(x) ((x) << S_PPOD_TAG) - -#define S_PPOD_PGSZ 30 -#define M_PPOD_PGSZ 0x3 -#define V_PPOD_PGSZ(x) ((x) << S_PPOD_PGSZ) - -/* - * large memory chunk allocation/release - * use vmalloc() if kmalloc() fails - */ -static inline void *cxgb3i_alloc_big_mem(unsigned int size, - gfp_t gfp) -{ - void *p = kmalloc(size, gfp); - if (!p) - p = vmalloc(size); - if (p) - memset(p, 0, size); - return p; -} - -static inline void cxgb3i_free_big_mem(void *addr) -{ - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - -/* - * cxgb3i ddp tag are 32 bits, it consists of reserved bits used by h/w and - * non-reserved bits that can be used by the iscsi s/w. - * The reserved bits are identified by the rsvd_bits and rsvd_shift fields - * in struct cxgb3i_tag_format. - * - * The upper most reserved bit can be used to check if a tag is ddp tag or not: - * if the bit is 0, the tag is a valid ddp tag - */ - -/** - * cxgb3i_is_ddp_tag - check if a given tag is a hw/ddp tag - * @tformat: tag format information - * @tag: tag to be checked - * - * return true if the tag is a ddp tag, false otherwise. 
- */ -static inline int cxgb3i_is_ddp_tag(struct cxgb3i_tag_format *tformat, u32 tag) -{ - return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); -} - -/** - * cxgb3i_sw_tag_usable - check if s/w tag has enough bits left for hw bits - * @tformat: tag format information - * @sw_tag: s/w tag to be checked - * - * return true if the tag can be used for hw ddp tag, false otherwise. - */ -static inline int cxgb3i_sw_tag_usable(struct cxgb3i_tag_format *tformat, - u32 sw_tag) -{ - sw_tag >>= (32 - tformat->rsvd_bits); - return !sw_tag; -} - -/** - * cxgb3i_set_non_ddp_tag - mark a given s/w tag as an invalid ddp tag - * @tformat: tag format information - * @sw_tag: s/w tag to be checked - * - * insert 1 at the upper most reserved bit to mark it as an invalid ddp tag. - */ -static inline u32 cxgb3i_set_non_ddp_tag(struct cxgb3i_tag_format *tformat, - u32 sw_tag) -{ - unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; - u32 mask = (1 << shift) - 1; - - if (sw_tag && (sw_tag & ~mask)) { - u32 v1 = sw_tag & ((1 << shift) - 1); - u32 v2 = (sw_tag >> (shift - 1)) << shift; - - return v2 | v1 | 1 << shift; - } - return sw_tag | 1 << shift; -} - -/** - * cxgb3i_ddp_tag_base - shift s/w tag bits so that reserved bits are not used - * @tformat: tag format information - * @sw_tag: s/w tag to be checked - */ -static inline u32 cxgb3i_ddp_tag_base(struct cxgb3i_tag_format *tformat, - u32 sw_tag) -{ - u32 mask = (1 << tformat->rsvd_shift) - 1; - - if (sw_tag && (sw_tag & ~mask)) { - u32 v1 = sw_tag & mask; - u32 v2 = sw_tag >> tformat->rsvd_shift; - - v2 <<= tformat->rsvd_shift + tformat->rsvd_bits; - return v2 | v1; - } - return sw_tag; -} - -/** - * cxgb3i_tag_rsvd_bits - get the reserved bits used by the h/w - * @tformat: tag format information - * @tag: tag to be checked - * - * return the reserved bits in the tag - */ -static inline u32 cxgb3i_tag_rsvd_bits(struct cxgb3i_tag_format *tformat, - u32 tag) -{ - if (cxgb3i_is_ddp_tag(tformat, tag)) - return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; - return 0; -} - -/** - * cxgb3i_tag_nonrsvd_bits - get the non-reserved bits used by the s/w - * @tformat: tag format information - * @tag: tag to be checked - * - * return the non-reserved bits in the tag. 
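
The helpers above splice a software tag around the hardware-reserved field: cxgb3i_ddp_tag_base() opens a hole of rsvd_bits starting at rsvd_shift, the page-pod index (and colour) is OR-ed into that hole, and cxgb3i_tag_nonrsvd_bits() below closes the hole again. A standalone round trip with made-up geometry (rsvd_shift = 6, rsvd_bits = 12), which recovers the original software tag whenever it passes the cxgb3i_sw_tag_usable() width check.

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Made-up tag geometry, for illustration only. */
#define RSVD_SHIFT 6u    /* low bits kept for the pod "colour"       */
#define RSVD_BITS  12u   /* width of the h/w field (pod index etc.)  */

/* Open a RSVD_BITS-wide hole at RSVD_SHIFT, as cxgb3i_ddp_tag_base() does. */
static uint32_t tag_base(uint32_t sw_tag)
{
	uint32_t low  = sw_tag & ((1u << RSVD_SHIFT) - 1);
	uint32_t high = sw_tag >> RSVD_SHIFT;

	return (high << (RSVD_SHIFT + RSVD_BITS)) | low;
}

/* Close the hole again, as cxgb3i_tag_nonrsvd_bits() does for a DDP tag. */
static uint32_t tag_nonrsvd(uint32_t tag)
{
	uint32_t low  = tag & ((1u << RSVD_SHIFT) - 1);
	uint32_t high = tag >> (RSVD_SHIFT + RSVD_BITS);

	return (high << RSVD_SHIFT) | low;
}

int main(void)
{
	uint32_t sw_tag  = 0xABC;        /* itt + age bits from iscsi     */
	uint32_t idx     = 5;            /* page-pod index from the map   */
	uint32_t ddp_tag = tag_base(sw_tag) | (idx << RSVD_SHIFT);

	printf("sw 0x%x -> ddp 0x%x -> sw 0x%x\n",
	       sw_tag, ddp_tag, tag_nonrsvd(ddp_tag));
	assert(tag_nonrsvd(ddp_tag) == sw_tag);
	return 0;
}
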
- */ -static inline u32 cxgb3i_tag_nonrsvd_bits(struct cxgb3i_tag_format *tformat, - u32 tag) -{ - unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; - u32 v1, v2; - - if (cxgb3i_is_ddp_tag(tformat, tag)) { - v1 = tag & ((1 << tformat->rsvd_shift) - 1); - v2 = (tag >> (shift + 1)) << tformat->rsvd_shift; - } else { - u32 mask = (1 << shift) - 1; - - tag &= ~(1 << shift); - v1 = tag & mask; - v2 = (tag >> 1) & ~mask; - } - return v1 | v2; -} - -int cxgb3i_ddp_tag_reserve(struct t3cdev *, unsigned int tid, - struct cxgb3i_tag_format *, u32 *tag, - struct cxgb3i_gather_list *, gfp_t gfp); -void cxgb3i_ddp_tag_release(struct t3cdev *, u32 tag); - -struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen, - struct scatterlist *sgl, - unsigned int sgcnt, - struct pci_dev *pdev, - gfp_t gfp); -void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl, - struct pci_dev *pdev); - -int cxgb3i_setup_conn_host_pagesize(struct t3cdev *, unsigned int tid, - int reply); -int cxgb3i_setup_conn_pagesize(struct t3cdev *, unsigned int tid, int reply, - unsigned long pgsz); -int cxgb3i_setup_conn_digest(struct t3cdev *, unsigned int tid, - int hcrc, int dcrc, int reply); -int cxgb3i_ddp_find_page_index(unsigned long pgsz); -int cxgb3i_adapter_ddp_info(struct t3cdev *, struct cxgb3i_tag_format *, - unsigned int *txsz, unsigned int *rxsz); - -void cxgb3i_ddp_init(struct t3cdev *); -void cxgb3i_ddp_cleanup(struct t3cdev *); -#endif diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c deleted file mode 100644 index 685af3698518..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_init.c +++ /dev/null @@ -1,132 +0,0 @@ -/* cxgb3i_init.c: Chelsio S3xx iSCSI driver. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#include "cxgb3i.h" - -#define DRV_MODULE_NAME "cxgb3i" -#define DRV_MODULE_VERSION "1.0.2" -#define DRV_MODULE_RELDATE "Mar. 
2009" - -static char version[] = - "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME - " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; - -MODULE_AUTHOR("Karen Xie "); -MODULE_DESCRIPTION("Chelsio S3xx iSCSI Driver"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); - -static void open_s3_dev(struct t3cdev *); -static void close_s3_dev(struct t3cdev *); -static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port); - -static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; -static struct cxgb3_client t3c_client = { - .name = "iscsi_cxgb3", - .handlers = cxgb3i_cpl_handlers, - .add = open_s3_dev, - .remove = close_s3_dev, - .event_handler = s3_event_handler, -}; - -/** - * open_s3_dev - register with cxgb3 LLD - * @t3dev: cxgb3 adapter instance - */ -static void open_s3_dev(struct t3cdev *t3dev) -{ - static int vers_printed; - - if (!vers_printed) { - printk(KERN_INFO "%s", version); - vers_printed = 1; - } - - cxgb3i_ddp_init(t3dev); - cxgb3i_sdev_add(t3dev, &t3c_client); - cxgb3i_adapter_open(t3dev); -} - -/** - * close_s3_dev - de-register with cxgb3 LLD - * @t3dev: cxgb3 adapter instance - */ -static void close_s3_dev(struct t3cdev *t3dev) -{ - cxgb3i_adapter_close(t3dev); - cxgb3i_sdev_remove(t3dev); - cxgb3i_ddp_cleanup(t3dev); -} - -static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port) -{ - struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev); - - cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n", - snic, tdev, event, port); - if (!snic) - return; - - switch (event) { - case OFFLOAD_STATUS_DOWN: - snic->flags |= CXGB3I_ADAPTER_FLAG_RESET; - break; - case OFFLOAD_STATUS_UP: - snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET; - break; - } -} - -/** - * cxgb3i_init_module - module init entry point - * - * initialize any driver wide global data structures and register itself - * with the cxgb3 module - */ -static int __init cxgb3i_init_module(void) -{ - int err; - - err = cxgb3i_sdev_init(cxgb3i_cpl_handlers); - if (err < 0) - return err; - - err = cxgb3i_iscsi_init(); - if (err < 0) - return err; - - err = cxgb3i_pdu_init(); - if (err < 0) { - cxgb3i_iscsi_cleanup(); - return err; - } - - cxgb3_register_client(&t3c_client); - - return 0; -} - -/** - * cxgb3i_exit_module - module cleanup/exit entry point - * - * go through the driver hba list and for each hba, release any resource held. - * and unregisters iscsi transport and the cxgb3 module - */ -static void __exit cxgb3i_exit_module(void) -{ - cxgb3_unregister_client(&t3c_client); - cxgb3i_pdu_cleanup(); - cxgb3i_iscsi_cleanup(); - cxgb3i_sdev_cleanup(); -} - -module_init(cxgb3i_init_module); -module_exit(cxgb3i_exit_module); diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c deleted file mode 100644 index 7b686abaae64..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ /dev/null @@ -1,1018 +0,0 @@ -/* cxgb3i_iscsi.c: Chelsio S3xx iSCSI driver. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * Copyright (c) 2008 Mike Christie - * Copyright (c) 2008 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "cxgb3i.h" -#include "cxgb3i_pdu.h" - -#ifdef __DEBUG_CXGB3I_TAG__ -#define cxgb3i_tag_debug cxgb3i_log_debug -#else -#define cxgb3i_tag_debug(fmt...) -#endif - -#ifdef __DEBUG_CXGB3I_API__ -#define cxgb3i_api_debug cxgb3i_log_debug -#else -#define cxgb3i_api_debug(fmt...) -#endif - -/* - * align pdu size to multiple of 512 for better performance - */ -#define align_pdu_size(n) do { n = (n) & (~511); } while (0) - -static struct scsi_transport_template *cxgb3i_scsi_transport; -static struct scsi_host_template cxgb3i_host_template; -static struct iscsi_transport cxgb3i_iscsi_transport; -static unsigned char sw_tag_idx_bits; -static unsigned char sw_tag_age_bits; - -static LIST_HEAD(cxgb3i_snic_list); -static DEFINE_RWLOCK(cxgb3i_snic_rwlock); - -/** - * cxgb3i_adpater_find_by_tdev - find the cxgb3i_adapter structure via t3cdev - * @tdev: t3cdev pointer - */ -struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *tdev) -{ - struct cxgb3i_adapter *snic; - - read_lock(&cxgb3i_snic_rwlock); - list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { - if (snic->tdev == tdev) { - read_unlock(&cxgb3i_snic_rwlock); - return snic; - } - } - read_unlock(&cxgb3i_snic_rwlock); - return NULL; -} - -static inline int adapter_update(struct cxgb3i_adapter *snic) -{ - cxgb3i_log_info("snic 0x%p, t3dev 0x%p, updating.\n", - snic, snic->tdev); - return cxgb3i_adapter_ddp_info(snic->tdev, &snic->tag_format, - &snic->tx_max_size, - &snic->rx_max_size); -} - -static int adapter_add(struct cxgb3i_adapter *snic) -{ - struct t3cdev *t3dev = snic->tdev; - struct adapter *adapter = tdev2adap(t3dev); - int i, err; - - snic->pdev = adapter->pdev; - snic->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; - - err = cxgb3i_adapter_ddp_info(t3dev, &snic->tag_format, - &snic->tx_max_size, - &snic->rx_max_size); - if (err < 0) - return err; - - for_each_port(adapter, i) { - snic->hba[i] = cxgb3i_hba_host_add(snic, adapter->port[i]); - if (!snic->hba[i]) - return -EINVAL; - } - snic->hba_cnt = adapter->params.nports; - - /* add to the list */ - write_lock(&cxgb3i_snic_rwlock); - list_add_tail(&snic->list_head, &cxgb3i_snic_list); - write_unlock(&cxgb3i_snic_rwlock); - - cxgb3i_log_info("t3dev 0x%p open, snic 0x%p, %u scsi hosts added.\n", - t3dev, snic, snic->hba_cnt); - return 0; -} - -/** - * cxgb3i_adapter_open - init a s3 adapter structure and any h/w settings - * @t3dev: t3cdev adapter - */ -void cxgb3i_adapter_open(struct t3cdev *t3dev) -{ - struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev); - int err; - - if (snic) - err = adapter_update(snic); - else { - snic = kzalloc(sizeof(*snic), GFP_KERNEL); - if (snic) { - spin_lock_init(&snic->lock); - snic->tdev = t3dev; - err = adapter_add(snic); - } else - err = -ENOMEM; - } - - if (err < 0) { - cxgb3i_log_info("snic 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", - snic, snic ? 
snic->flags : 0, t3dev, err); - if (snic) { - snic->flags &= ~CXGB3I_ADAPTER_FLAG_RESET; - cxgb3i_adapter_close(t3dev); - } - } -} - -/** - * cxgb3i_adapter_close - release the resources held and cleanup h/w settings - * @t3dev: t3cdev adapter - */ -void cxgb3i_adapter_close(struct t3cdev *t3dev) -{ - struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(t3dev); - int i; - - if (!snic || snic->flags & CXGB3I_ADAPTER_FLAG_RESET) { - cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, f 0x%x.\n", - t3dev, snic, snic ? snic->flags : 0); - return; - } - - /* remove from the list */ - write_lock(&cxgb3i_snic_rwlock); - list_del(&snic->list_head); - write_unlock(&cxgb3i_snic_rwlock); - - for (i = 0; i < snic->hba_cnt; i++) { - if (snic->hba[i]) { - cxgb3i_hba_host_remove(snic->hba[i]); - snic->hba[i] = NULL; - } - } - cxgb3i_log_info("t3dev 0x%p close, snic 0x%p, %u scsi hosts removed.\n", - t3dev, snic, snic->hba_cnt); - kfree(snic); -} - -/** - * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device - * @t3dev: t3cdev adapter - */ -static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) -{ - struct cxgb3i_adapter *snic; - int i; - - if (ndev->priv_flags & IFF_802_1Q_VLAN) - ndev = vlan_dev_real_dev(ndev); - - read_lock(&cxgb3i_snic_rwlock); - list_for_each_entry(snic, &cxgb3i_snic_list, list_head) { - for (i = 0; i < snic->hba_cnt; i++) { - if (snic->hba[i]->ndev == ndev) { - read_unlock(&cxgb3i_snic_rwlock); - return snic->hba[i]; - } - } - } - read_unlock(&cxgb3i_snic_rwlock); - return NULL; -} - -/** - * cxgb3i_hba_host_add - register a new host with scsi/iscsi - * @snic: the cxgb3i adapter - * @ndev: associated net_device - */ -struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *snic, - struct net_device *ndev) -{ - struct cxgb3i_hba *hba; - struct Scsi_Host *shost; - int err; - - shost = iscsi_host_alloc(&cxgb3i_host_template, - sizeof(struct cxgb3i_hba), 1); - if (!shost) { - cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_alloc failed.\n", - snic, ndev); - return NULL; - } - - shost->transportt = cxgb3i_scsi_transport; - shost->max_lun = CXGB3I_MAX_LUN; - shost->max_id = CXGB3I_MAX_TARGET; - shost->max_channel = 0; - shost->max_cmd_len = 16; - - hba = iscsi_host_priv(shost); - hba->snic = snic; - hba->ndev = ndev; - hba->shost = shost; - - pci_dev_get(snic->pdev); - err = iscsi_host_add(shost, &snic->pdev->dev); - if (err) { - cxgb3i_log_info("snic 0x%p, ndev 0x%p, host_add failed.\n", - snic, ndev); - goto pci_dev_put; - } - - cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n", - shost, hba, shost->host_no); - - return hba; - -pci_dev_put: - pci_dev_put(snic->pdev); - scsi_host_put(shost); - return NULL; -} - -/** - * cxgb3i_hba_host_remove - de-register the host with scsi/iscsi - * @hba: the cxgb3i hba - */ -void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba) -{ - cxgb3i_api_debug("shost 0x%p, hba 0x%p, no %u.\n", - hba->shost, hba, hba->shost->host_no); - iscsi_host_remove(hba->shost); - pci_dev_put(hba->snic->pdev); - iscsi_host_free(hba->shost); -} - -/** - * cxgb3i_ep_connect - establish TCP connection to target portal - * @shost: scsi host to use - * @dst_addr: target IP address - * @non_blocking: blocking or non-blocking call - * - * Initiates a TCP/IP connection to the dst_addr - */ -static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost, - struct sockaddr *dst_addr, - int non_blocking) -{ - struct iscsi_endpoint *ep; - struct cxgb3i_endpoint *cep; - struct cxgb3i_hba *hba = NULL; - struct s3_conn *c3cn = NULL; - int 
err = 0; - - if (shost) - hba = iscsi_host_priv(shost); - - cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba); - - c3cn = cxgb3i_c3cn_create(); - if (!c3cn) { - cxgb3i_log_info("ep connect OOM.\n"); - err = -ENOMEM; - goto release_conn; - } - - err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn, - (struct sockaddr_in *)dst_addr); - if (err < 0) { - cxgb3i_log_info("ep connect failed.\n"); - goto release_conn; - } - - hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); - if (!hba) { - err = -ENOSPC; - cxgb3i_log_info("NOT going through cxgbi device.\n"); - goto release_conn; - } - - if (shost && hba != iscsi_host_priv(shost)) { - err = -ENOSPC; - cxgb3i_log_info("Could not connect through request host%u\n", - shost->host_no); - goto release_conn; - } - - if (c3cn_is_closing(c3cn)) { - err = -ENOSPC; - cxgb3i_log_info("ep connect unable to connect.\n"); - goto release_conn; - } - - ep = iscsi_create_endpoint(sizeof(*cep)); - if (!ep) { - err = -ENOMEM; - cxgb3i_log_info("iscsi alloc ep, OOM.\n"); - goto release_conn; - } - cep = ep->dd_data; - cep->c3cn = c3cn; - cep->hba = hba; - - cxgb3i_api_debug("ep 0x%p, 0x%p, c3cn 0x%p, hba 0x%p.\n", - ep, cep, c3cn, hba); - return ep; - -release_conn: - cxgb3i_api_debug("conn 0x%p failed, release.\n", c3cn); - if (c3cn) - cxgb3i_c3cn_release(c3cn); - return ERR_PTR(err); -} - -/** - * cxgb3i_ep_poll - polls for TCP connection establishement - * @ep: TCP connection (endpoint) handle - * @timeout_ms: timeout value in milli secs - * - * polls for TCP connect request to complete - */ -static int cxgb3i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) -{ - struct cxgb3i_endpoint *cep = ep->dd_data; - struct s3_conn *c3cn = cep->c3cn; - - if (!c3cn_is_established(c3cn)) - return 0; - cxgb3i_api_debug("ep 0x%p, c3cn 0x%p established.\n", ep, c3cn); - return 1; -} - -/** - * cxgb3i_ep_disconnect - teardown TCP connection - * @ep: TCP connection (endpoint) handle - * - * teardown TCP connection - */ -static void cxgb3i_ep_disconnect(struct iscsi_endpoint *ep) -{ - struct cxgb3i_endpoint *cep = ep->dd_data; - struct cxgb3i_conn *cconn = cep->cconn; - - cxgb3i_api_debug("ep 0x%p, cep 0x%p.\n", ep, cep); - - if (cconn && cconn->conn) { - /* - * stop the xmit path so the xmit_pdu function is - * not being called - */ - iscsi_suspend_tx(cconn->conn); - - write_lock_bh(&cep->c3cn->callback_lock); - cep->c3cn->user_data = NULL; - cconn->cep = NULL; - write_unlock_bh(&cep->c3cn->callback_lock); - } - - cxgb3i_api_debug("ep 0x%p, cep 0x%p, release c3cn 0x%p.\n", - ep, cep, cep->c3cn); - cxgb3i_c3cn_release(cep->c3cn); - iscsi_destroy_endpoint(ep); -} - -/** - * cxgb3i_session_create - create a new iscsi session - * @cmds_max: max # of commands - * @qdepth: scsi queue depth - * @initial_cmdsn: initial iscsi CMDSN for this session - * - * Creates a new iSCSI session - */ -static struct iscsi_cls_session * -cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth, - u32 initial_cmdsn) -{ - struct cxgb3i_endpoint *cep; - struct cxgb3i_hba *hba; - struct Scsi_Host *shost; - struct iscsi_cls_session *cls_session; - struct iscsi_session *session; - - if (!ep) { - cxgb3i_log_error("%s, missing endpoint.\n", __func__); - return NULL; - } - - cep = ep->dd_data; - hba = cep->hba; - shost = hba->shost; - cxgb3i_api_debug("ep 0x%p, cep 0x%p, hba 0x%p.\n", ep, cep, hba); - BUG_ON(hba != iscsi_host_priv(shost)); - - cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost, - cmds_max, 0, - sizeof(struct iscsi_tcp_task) + - sizeof(struct 
cxgb3i_task_data), - initial_cmdsn, ISCSI_MAX_TARGET); - if (!cls_session) - return NULL; - session = cls_session->dd_data; - if (iscsi_tcp_r2tpool_alloc(session)) - goto remove_session; - - return cls_session; - -remove_session: - iscsi_session_teardown(cls_session); - return NULL; -} - -/** - * cxgb3i_session_destroy - destroys iscsi session - * @cls_session: pointer to iscsi cls session - * - * Destroys an iSCSI session instance and releases its all resources held - */ -static void cxgb3i_session_destroy(struct iscsi_cls_session *cls_session) -{ - cxgb3i_api_debug("sess 0x%p.\n", cls_session); - iscsi_tcp_r2tpool_free(cls_session->dd_data); - iscsi_session_teardown(cls_session); -} - -/** - * cxgb3i_conn_max_xmit_dlength -- calc the max. xmit pdu segment size - * @conn: iscsi connection - * check the max. xmit pdu payload, reduce it if needed - */ -static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn) - -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM); - - max = min(cconn->hba->snic->tx_max_size, max); - if (conn->max_xmit_dlength) - conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); - else - conn->max_xmit_dlength = max; - align_pdu_size(conn->max_xmit_dlength); - cxgb3i_api_debug("conn 0x%p, max xmit %u.\n", - conn, conn->max_xmit_dlength); - return 0; -} - -/** - * cxgb3i_conn_max_recv_dlength -- check the max. recv pdu segment size - * @conn: iscsi connection - * return 0 if the value is valid, < 0 otherwise. - */ -static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - unsigned int max = cconn->hba->snic->rx_max_size; - - align_pdu_size(max); - if (conn->max_recv_dlength) { - if (conn->max_recv_dlength > max) { - cxgb3i_log_error("MaxRecvDataSegmentLength %u too big." - " Need to be <= %u.\n", - conn->max_recv_dlength, max); - return -EINVAL; - } - conn->max_recv_dlength = min(conn->max_recv_dlength, max); - align_pdu_size(conn->max_recv_dlength); - } else - conn->max_recv_dlength = max; - cxgb3i_api_debug("conn 0x%p, max recv %u.\n", - conn, conn->max_recv_dlength); - return 0; -} - -/** - * cxgb3i_conn_create - create iscsi connection instance - * @cls_session: pointer to iscsi cls session - * @cid: iscsi cid - * - * Creates a new iSCSI connection instance for a given session - */ -static struct iscsi_cls_conn *cxgb3i_conn_create(struct iscsi_cls_session - *cls_session, u32 cid) -{ - struct iscsi_cls_conn *cls_conn; - struct iscsi_conn *conn; - struct iscsi_tcp_conn *tcp_conn; - struct cxgb3i_conn *cconn; - - cxgb3i_api_debug("sess 0x%p, cid %u.\n", cls_session, cid); - - cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); - if (!cls_conn) - return NULL; - conn = cls_conn->dd_data; - tcp_conn = conn->dd_data; - cconn = tcp_conn->dd_data; - - cconn->conn = conn; - return cls_conn; -} - -/** - * cxgb3i_conn_bind - binds iscsi sess, conn and endpoint together - * @cls_session: pointer to iscsi cls session - * @cls_conn: pointer to iscsi cls conn - * @transport_eph: 64-bit EP handle - * @is_leading: leading connection on this session? - * - * Binds together an iSCSI session, an iSCSI connection and a - * TCP connection. 
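
cxgb3i_conn_max_xmit_dlength() and cxgb3i_conn_max_recv_dlength() above clamp the negotiated PDU payload sizes against what the adapter can move and then round down to a multiple of 512 (the align_pdu_size() macro). A compact sketch of that clamp-then-align step; adapter_max stands in for snic->tx_max_size / rx_max_size, and the xmit-side MAX_SKB_FRAGS lower bound is omitted.

#include <stdio.h>

/* Round a PDU payload size down to a multiple of 512, like align_pdu_size(). */
static unsigned int align_pdu(unsigned int n)
{
	return n & ~511u;
}

/* Clamp a (possibly zero, i.e. unset) negotiated value against the adapter
 * limit, then align. */
static unsigned int clamp_dlength(unsigned int negotiated, unsigned int adapter_max)
{
	unsigned int max = align_pdu(adapter_max);

	if (negotiated)
		max = negotiated < max ? negotiated : max;
	return align_pdu(max);
}

int main(void)
{
	printf("%u\n", clamp_dlength(0, 12288));       /* unset  -> 12288 */
	printf("%u\n", clamp_dlength(8192, 12288));    /* fits   -> 8192  */
	printf("%u\n", clamp_dlength(65536, 12288));   /* capped -> 12288 */
	return 0;
}
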
This routine returns error code if the TCP - * connection does not belong on the device iSCSI sess/conn is bound - */ - -static int cxgb3i_conn_bind(struct iscsi_cls_session *cls_session, - struct iscsi_cls_conn *cls_conn, - u64 transport_eph, int is_leading) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - struct cxgb3i_adapter *snic; - struct iscsi_endpoint *ep; - struct cxgb3i_endpoint *cep; - struct s3_conn *c3cn; - int err; - - ep = iscsi_lookup_endpoint(transport_eph); - if (!ep) - return -EINVAL; - - /* setup ddp pagesize */ - cep = ep->dd_data; - c3cn = cep->c3cn; - snic = cep->hba->snic; - err = cxgb3i_setup_conn_host_pagesize(snic->tdev, c3cn->tid, 0); - if (err < 0) - return err; - - cxgb3i_api_debug("ep 0x%p, cls sess 0x%p, cls conn 0x%p.\n", - ep, cls_session, cls_conn); - - err = iscsi_conn_bind(cls_session, cls_conn, is_leading); - if (err) - return -EINVAL; - - /* calculate the tag idx bits needed for this conn based on cmds_max */ - cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; - cxgb3i_api_debug("session cmds_max 0x%x, bits %u.\n", - conn->session->cmds_max, cconn->task_idx_bits); - - read_lock(&c3cn->callback_lock); - c3cn->user_data = conn; - cconn->hba = cep->hba; - cconn->cep = cep; - cep->cconn = cconn; - read_unlock(&c3cn->callback_lock); - - cxgb3i_conn_max_xmit_dlength(conn); - cxgb3i_conn_max_recv_dlength(conn); - - spin_lock_bh(&conn->session->lock); - sprintf(conn->portal_address, "%pI4", &c3cn->daddr.sin_addr.s_addr); - conn->portal_port = ntohs(c3cn->daddr.sin_port); - spin_unlock_bh(&conn->session->lock); - - /* init recv engine */ - iscsi_tcp_hdr_recv_prep(tcp_conn); - - return 0; -} - -/** - * cxgb3i_conn_get_param - return iscsi connection parameter to caller - * @cls_conn: pointer to iscsi cls conn - * @param: parameter type identifier - * @buf: buffer pointer - * - * returns iSCSI connection parameters - */ -static int cxgb3i_conn_get_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - int len; - - cxgb3i_api_debug("cls_conn 0x%p, param %d.\n", cls_conn, param); - - switch (param) { - case ISCSI_PARAM_CONN_PORT: - spin_lock_bh(&conn->session->lock); - len = sprintf(buf, "%hu\n", conn->portal_port); - spin_unlock_bh(&conn->session->lock); - break; - case ISCSI_PARAM_CONN_ADDRESS: - spin_lock_bh(&conn->session->lock); - len = sprintf(buf, "%s\n", conn->portal_address); - spin_unlock_bh(&conn->session->lock); - break; - default: - return iscsi_conn_get_param(cls_conn, param, buf); - } - - return len; -} - -/** - * cxgb3i_conn_set_param - set iscsi connection parameter - * @cls_conn: pointer to iscsi cls conn - * @param: parameter type identifier - * @buf: buffer pointer - * @buflen: buffer length - * - * set iSCSI connection parameters - */ -static int cxgb3i_conn_set_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf, int buflen) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct iscsi_session *session = conn->session; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - struct cxgb3i_adapter *snic = cconn->hba->snic; - struct s3_conn *c3cn = cconn->cep->c3cn; - int value, err = 0; - - switch (param) { - case ISCSI_PARAM_HDRDGST_EN: - err = iscsi_set_param(cls_conn, param, buf, buflen); - if (!err && conn->hdrdgst_en) - err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid, - 
conn->hdrdgst_en, - conn->datadgst_en, 0); - break; - case ISCSI_PARAM_DATADGST_EN: - err = iscsi_set_param(cls_conn, param, buf, buflen); - if (!err && conn->datadgst_en) - err = cxgb3i_setup_conn_digest(snic->tdev, c3cn->tid, - conn->hdrdgst_en, - conn->datadgst_en, 0); - break; - case ISCSI_PARAM_MAX_R2T: - sscanf(buf, "%d", &value); - if (value <= 0 || !is_power_of_2(value)) - return -EINVAL; - if (session->max_r2t == value) - break; - iscsi_tcp_r2tpool_free(session); - err = iscsi_set_param(cls_conn, param, buf, buflen); - if (!err && iscsi_tcp_r2tpool_alloc(session)) - return -ENOMEM; - case ISCSI_PARAM_MAX_RECV_DLENGTH: - err = iscsi_set_param(cls_conn, param, buf, buflen); - if (!err) - err = cxgb3i_conn_max_recv_dlength(conn); - break; - case ISCSI_PARAM_MAX_XMIT_DLENGTH: - err = iscsi_set_param(cls_conn, param, buf, buflen); - if (!err) - err = cxgb3i_conn_max_xmit_dlength(conn); - break; - default: - return iscsi_set_param(cls_conn, param, buf, buflen); - } - return err; -} - -/** - * cxgb3i_host_set_param - configure host (adapter) related parameters - * @shost: scsi host pointer - * @param: parameter type identifier - * @buf: buffer pointer - */ -static int cxgb3i_host_set_param(struct Scsi_Host *shost, - enum iscsi_host_param param, - char *buf, int buflen) -{ - struct cxgb3i_hba *hba = iscsi_host_priv(shost); - - if (!hba->ndev) { - shost_printk(KERN_ERR, shost, "Could not set host param. " - "Netdev for host not set.\n"); - return -ENODEV; - } - - cxgb3i_api_debug("param %d, buf %s.\n", param, buf); - - switch (param) { - case ISCSI_HOST_PARAM_IPADDRESS: - { - __be32 addr = in_aton(buf); - cxgb3i_set_private_ipv4addr(hba->ndev, addr); - return 0; - } - case ISCSI_HOST_PARAM_HWADDRESS: - case ISCSI_HOST_PARAM_NETDEV_NAME: - /* ignore */ - return 0; - default: - return iscsi_host_set_param(shost, param, buf, buflen); - } -} - -/** - * cxgb3i_host_get_param - returns host (adapter) related parameters - * @shost: scsi host pointer - * @param: parameter type identifier - * @buf: buffer pointer - */ -static int cxgb3i_host_get_param(struct Scsi_Host *shost, - enum iscsi_host_param param, char *buf) -{ - struct cxgb3i_hba *hba = iscsi_host_priv(shost); - int len = 0; - - if (!hba->ndev) { - shost_printk(KERN_ERR, shost, "Could not set host param. 
" - "Netdev for host not set.\n"); - return -ENODEV; - } - - cxgb3i_api_debug("hba %s, param %d.\n", hba->ndev->name, param); - - switch (param) { - case ISCSI_HOST_PARAM_HWADDRESS: - len = sysfs_format_mac(buf, hba->ndev->dev_addr, 6); - break; - case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sprintf(buf, "%s\n", hba->ndev->name); - break; - case ISCSI_HOST_PARAM_IPADDRESS: - { - __be32 addr; - - addr = cxgb3i_get_private_ipv4addr(hba->ndev); - len = sprintf(buf, "%pI4", &addr); - break; - } - default: - return iscsi_host_get_param(shost, param, buf); - } - return len; -} - -/** - * cxgb3i_conn_get_stats - returns iSCSI stats - * @cls_conn: pointer to iscsi cls conn - * @stats: pointer to iscsi statistic struct - */ -static void cxgb3i_conn_get_stats(struct iscsi_cls_conn *cls_conn, - struct iscsi_stats *stats) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - - stats->txdata_octets = conn->txdata_octets; - stats->rxdata_octets = conn->rxdata_octets; - stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; - stats->dataout_pdus = conn->dataout_pdus_cnt; - stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; - stats->datain_pdus = conn->datain_pdus_cnt; - stats->r2t_pdus = conn->r2t_pdus_cnt; - stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; - stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; - stats->digest_err = 0; - stats->timeout_err = 0; - stats->custom_length = 1; - strcpy(stats->custom[0].desc, "eh_abort_cnt"); - stats->custom[0].value = conn->eh_abort_cnt; -} - -/** - * cxgb3i_parse_itt - get the idx and age bits from a given tag - * @conn: iscsi connection - * @itt: itt tag - * @idx: task index, filled in by this function - * @age: session age, filled in by this function - */ -static void cxgb3i_parse_itt(struct iscsi_conn *conn, itt_t itt, - int *idx, int *age) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - struct cxgb3i_adapter *snic = cconn->hba->snic; - u32 tag = ntohl((__force u32) itt); - u32 sw_bits; - - sw_bits = cxgb3i_tag_nonrsvd_bits(&snic->tag_format, tag); - if (idx) - *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); - if (age) - *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; - - cxgb3i_tag_debug("parse tag 0x%x/0x%x, sw 0x%x, itt 0x%x, age 0x%x.\n", - tag, itt, sw_bits, idx ? *idx : 0xFFFFF, - age ? *age : 0xFF); -} - -/** - * cxgb3i_reserve_itt - generate tag for a give task - * @task: iscsi task - * @hdr_itt: tag, filled in by this function - * Set up ddp for scsi read tasks if possible. 
- */ -int cxgb3i_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) -{ - struct scsi_cmnd *sc = task->sc; - struct iscsi_conn *conn = task->conn; - struct iscsi_session *sess = conn->session; - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - struct cxgb3i_adapter *snic = cconn->hba->snic; - struct cxgb3i_tag_format *tformat = &snic->tag_format; - u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; - u32 tag; - int err = -EINVAL; - - if (sc && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && - cxgb3i_sw_tag_usable(tformat, sw_tag)) { - struct s3_conn *c3cn = cconn->cep->c3cn; - struct cxgb3i_gather_list *gl; - - gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length, - scsi_in(sc)->table.sgl, - scsi_in(sc)->table.nents, - snic->pdev, - GFP_ATOMIC); - if (gl) { - tag = sw_tag; - err = cxgb3i_ddp_tag_reserve(snic->tdev, c3cn->tid, - tformat, &tag, - gl, GFP_ATOMIC); - if (err < 0) - cxgb3i_ddp_release_gl(gl, snic->pdev); - } - } - - if (err < 0) - tag = cxgb3i_set_non_ddp_tag(tformat, sw_tag); - /* the itt need to sent in big-endian order */ - *hdr_itt = (__force itt_t)htonl(tag); - - cxgb3i_tag_debug("new tag 0x%x/0x%x (itt 0x%x, age 0x%x).\n", - tag, *hdr_itt, task->itt, sess->age); - return 0; -} - -/** - * cxgb3i_release_itt - release the tag for a given task - * @task: iscsi task - * @hdr_itt: tag - * If the tag is a ddp tag, release the ddp setup - */ -void cxgb3i_release_itt(struct iscsi_task *task, itt_t hdr_itt) -{ - struct scsi_cmnd *sc = task->sc; - struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - struct cxgb3i_adapter *snic = cconn->hba->snic; - struct cxgb3i_tag_format *tformat = &snic->tag_format; - u32 tag = ntohl((__force u32)hdr_itt); - - cxgb3i_tag_debug("release tag 0x%x.\n", tag); - - if (sc && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && - cxgb3i_is_ddp_tag(tformat, tag)) - cxgb3i_ddp_tag_release(snic->tdev, tag); -} - -/** - * cxgb3i_host_template -- Scsi_Host_Template structure - * used when registering with the scsi mid layer - */ -static struct scsi_host_template cxgb3i_host_template = { - .module = THIS_MODULE, - .name = "Chelsio S3xx iSCSI Initiator", - .proc_name = "cxgb3i", - .queuecommand = iscsi_queuecommand, - .change_queue_depth = iscsi_change_queue_depth, - .can_queue = CXGB3I_SCSI_HOST_QDEPTH, - .sg_tablesize = SG_ALL, - .max_sectors = 0xFFFF, - .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, - .eh_abort_handler = iscsi_eh_abort, - .eh_device_reset_handler = iscsi_eh_device_reset, - .eh_target_reset_handler = iscsi_eh_recover_target, - .target_alloc = iscsi_target_alloc, - .use_clustering = DISABLE_CLUSTERING, - .this_id = -1, -}; - -static struct iscsi_transport cxgb3i_iscsi_transport = { - .owner = THIS_MODULE, - .name = "cxgb3i", - .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST - | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD, - .param_mask = ISCSI_MAX_RECV_DLENGTH | - ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | - ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | - ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | - ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | - ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | - ISCSI_ERL | - ISCSI_CONN_PORT | - ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | - ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - 
ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | - ISCSI_HOST_INITIATOR_NAME | ISCSI_HOST_NETDEV_NAME, - .get_host_param = cxgb3i_host_get_param, - .set_host_param = cxgb3i_host_set_param, - /* session management */ - .create_session = cxgb3i_session_create, - .destroy_session = cxgb3i_session_destroy, - .get_session_param = iscsi_session_get_param, - /* connection management */ - .create_conn = cxgb3i_conn_create, - .bind_conn = cxgb3i_conn_bind, - .destroy_conn = iscsi_tcp_conn_teardown, - .start_conn = iscsi_conn_start, - .stop_conn = iscsi_conn_stop, - .get_conn_param = cxgb3i_conn_get_param, - .set_param = cxgb3i_conn_set_param, - .get_stats = cxgb3i_conn_get_stats, - /* pdu xmit req. from user space */ - .send_pdu = iscsi_conn_send_pdu, - /* task */ - .init_task = iscsi_tcp_task_init, - .xmit_task = iscsi_tcp_task_xmit, - .cleanup_task = cxgb3i_conn_cleanup_task, - - /* pdu */ - .alloc_pdu = cxgb3i_conn_alloc_pdu, - .init_pdu = cxgb3i_conn_init_pdu, - .xmit_pdu = cxgb3i_conn_xmit_pdu, - .parse_pdu_itt = cxgb3i_parse_itt, - - /* TCP connect/disconnect */ - .ep_connect = cxgb3i_ep_connect, - .ep_poll = cxgb3i_ep_poll, - .ep_disconnect = cxgb3i_ep_disconnect, - /* Error recovery timeout call */ - .session_recovery_timedout = iscsi_session_recovery_timedout, -}; - -int cxgb3i_iscsi_init(void) -{ - sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; - sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; - cxgb3i_log_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", - ISCSI_ITT_MASK, sw_tag_idx_bits, - ISCSI_AGE_MASK, sw_tag_age_bits); - - cxgb3i_scsi_transport = - iscsi_register_transport(&cxgb3i_iscsi_transport); - if (!cxgb3i_scsi_transport) { - cxgb3i_log_error("Could not register cxgb3i transport.\n"); - return -ENODEV; - } - cxgb3i_api_debug("cxgb3i transport 0x%p.\n", cxgb3i_scsi_transport); - return 0; -} - -void cxgb3i_iscsi_cleanup(void) -{ - if (cxgb3i_scsi_transport) { - cxgb3i_api_debug("cxgb3i transport 0x%p.\n", - cxgb3i_scsi_transport); - iscsi_unregister_transport(&cxgb3i_iscsi_transport); - } -} diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c deleted file mode 100644 index 3ee13cf9556b..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ /dev/null @@ -1,1944 +0,0 @@ -/* - * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management - * - * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this - * release for licensing terms and conditions. - * - * Written by: Dimitris Michailidis (dm@chelsio.com) - * Karen Xie (kxie@chelsio.com) - */ - -#include -#include -#include - -#include "cxgb3_defs.h" -#include "cxgb3_ctl_defs.h" -#include "firmware_exports.h" -#include "cxgb3i_offload.h" -#include "cxgb3i_pdu.h" -#include "cxgb3i_ddp.h" - -#ifdef __DEBUG_C3CN_CONN__ -#define c3cn_conn_debug cxgb3i_log_debug -#else -#define c3cn_conn_debug(fmt...) -#endif - -#ifdef __DEBUG_C3CN_TX__ -#define c3cn_tx_debug cxgb3i_log_debug -#else -#define c3cn_tx_debug(fmt...) -#endif - -#ifdef __DEBUG_C3CN_RX__ -#define c3cn_rx_debug cxgb3i_log_debug -#else -#define c3cn_rx_debug(fmt...) 
-#endif - -/* - * module parameters releated to offloaded iscsi connection - */ -static int cxgb3_rcv_win = 256 * 1024; -module_param(cxgb3_rcv_win, int, 0644); -MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)"); - -static int cxgb3_snd_win = 128 * 1024; -module_param(cxgb3_snd_win, int, 0644); -MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)"); - -static int cxgb3_rx_credit_thres = 10 * 1024; -module_param(cxgb3_rx_credit_thres, int, 0644); -MODULE_PARM_DESC(rx_credit_thres, - "RX credits return threshold in bytes (default=10KB)"); - -static unsigned int cxgb3_max_connect = 8 * 1024; -module_param(cxgb3_max_connect, uint, 0644); -MODULE_PARM_DESC(cxgb3_max_connect, "Max. # of connections (default=8092)"); - -static unsigned int cxgb3_sport_base = 20000; -module_param(cxgb3_sport_base, uint, 0644); -MODULE_PARM_DESC(cxgb3_sport_base, "starting port number (default=20000)"); - -/* - * cxgb3i tcp connection data(per adapter) list - */ -static LIST_HEAD(cdata_list); -static DEFINE_RWLOCK(cdata_rwlock); - -static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion); -static void c3cn_release_offload_resources(struct s3_conn *c3cn); - -/* - * iscsi source port management - * - * Find a free source port in the port allocation map. We use a very simple - * rotor scheme to look for the next free port. - * - * If a source port has been specified make sure that it doesn't collide with - * our normal source port allocation map. If it's outside the range of our - * allocation/deallocation scheme just let them use it. - * - * If the source port is outside our allocation range, the caller is - * responsible for keeping track of their port usage. - */ -static int c3cn_get_port(struct s3_conn *c3cn, struct cxgb3i_sdev_data *cdata) -{ - unsigned int start; - int idx; - - if (!cdata) - goto error_out; - - if (c3cn->saddr.sin_port) { - cxgb3i_log_error("connect, sin_port NON-ZERO %u.\n", - c3cn->saddr.sin_port); - return -EADDRINUSE; - } - - spin_lock_bh(&cdata->lock); - start = idx = cdata->sport_next; - do { - if (++idx >= cxgb3_max_connect) - idx = 0; - if (!cdata->sport_conn[idx]) { - c3cn->saddr.sin_port = htons(cxgb3_sport_base + idx); - cdata->sport_next = idx; - cdata->sport_conn[idx] = c3cn; - spin_unlock_bh(&cdata->lock); - - c3cn_conn_debug("%s reserve port %u.\n", - cdata->cdev->name, - cxgb3_sport_base + idx); - return 0; - } - } while (idx != start); - spin_unlock_bh(&cdata->lock); - -error_out: - return -EADDRNOTAVAIL; -} - -static void c3cn_put_port(struct s3_conn *c3cn) -{ - if (!c3cn->cdev) - return; - - if (c3cn->saddr.sin_port) { - struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(c3cn->cdev); - int idx = ntohs(c3cn->saddr.sin_port) - cxgb3_sport_base; - - c3cn->saddr.sin_port = 0; - if (idx < 0 || idx >= cxgb3_max_connect) - return; - spin_lock_bh(&cdata->lock); - cdata->sport_conn[idx] = NULL; - spin_unlock_bh(&cdata->lock); - c3cn_conn_debug("%s, release port %u.\n", - cdata->cdev->name, cxgb3_sport_base + idx); - } -} - -static inline void c3cn_set_flag(struct s3_conn *c3cn, enum c3cn_flags flag) -{ - __set_bit(flag, &c3cn->flags); - c3cn_conn_debug("c3cn 0x%p, set %d, s %u, f 0x%lx.\n", - c3cn, flag, c3cn->state, c3cn->flags); -} - -static inline void c3cn_clear_flag(struct s3_conn *c3cn, enum c3cn_flags flag) -{ - __clear_bit(flag, &c3cn->flags); - c3cn_conn_debug("c3cn 0x%p, clear %d, s %u, f 0x%lx.\n", - c3cn, flag, c3cn->state, c3cn->flags); -} - -static inline int c3cn_flag(struct s3_conn *c3cn, enum c3cn_flags 
flag) -{ - if (c3cn == NULL) - return 0; - return test_bit(flag, &c3cn->flags); -} - -static void c3cn_set_state(struct s3_conn *c3cn, int state) -{ - c3cn_conn_debug("c3cn 0x%p state -> %u.\n", c3cn, state); - c3cn->state = state; -} - -static inline void c3cn_hold(struct s3_conn *c3cn) -{ - atomic_inc(&c3cn->refcnt); -} - -static inline void c3cn_put(struct s3_conn *c3cn) -{ - if (atomic_dec_and_test(&c3cn->refcnt)) { - c3cn_conn_debug("free c3cn 0x%p, s %u, f 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - kfree(c3cn); - } -} - -static void c3cn_closed(struct s3_conn *c3cn) -{ - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - c3cn_put_port(c3cn); - c3cn_release_offload_resources(c3cn); - c3cn_set_state(c3cn, C3CN_STATE_CLOSED); - cxgb3i_conn_closing(c3cn); -} - -/* - * CPL (Chelsio Protocol Language) defines a message passing interface between - * the host driver and T3 asic. - * The section below implments CPLs that related to iscsi tcp connection - * open/close/abort and data send/receive. - */ - -/* - * CPL connection active open request: host -> - */ -static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu) -{ - int i = 0; - - while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu) - ++i; - return i; -} - -static unsigned int select_mss(struct s3_conn *c3cn, unsigned int pmtu) -{ - unsigned int idx; - struct dst_entry *dst = c3cn->dst_cache; - struct t3cdev *cdev = c3cn->cdev; - const struct t3c_data *td = T3C_DATA(cdev); - u16 advmss = dst_metric(dst, RTAX_ADVMSS); - - if (advmss > pmtu - 40) - advmss = pmtu - 40; - if (advmss < td->mtus[0] - 40) - advmss = td->mtus[0] - 40; - idx = find_best_mtu(td, advmss + 40); - return idx; -} - -static inline int compute_wscale(int win) -{ - int wscale = 0; - while (wscale < 14 && (65535<mss_idx); -} - -static inline unsigned int calc_opt0l(struct s3_conn *c3cn) -{ - return V_ULP_MODE(ULP_MODE_ISCSI) | - V_RCV_BUFSIZ(cxgb3_rcv_win>>10); -} - -static void make_act_open_req(struct s3_conn *c3cn, struct sk_buff *skb, - unsigned int atid, const struct l2t_entry *e) -{ - struct cpl_act_open_req *req; - - c3cn_conn_debug("c3cn 0x%p, atid 0x%x.\n", c3cn, atid); - - skb->priority = CPL_PRIORITY_SETUP; - req = (struct cpl_act_open_req *)__skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - req->wr.wr_lo = 0; - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, atid)); - req->local_port = c3cn->saddr.sin_port; - req->peer_port = c3cn->daddr.sin_port; - req->local_ip = c3cn->saddr.sin_addr.s_addr; - req->peer_ip = c3cn->daddr.sin_addr.s_addr; - req->opt0h = htonl(calc_opt0h(c3cn) | V_L2T_IDX(e->idx) | - V_TX_CHANNEL(e->smt_idx)); - req->opt0l = htonl(calc_opt0l(c3cn)); - req->params = 0; - req->opt2 = 0; -} - -static void fail_act_open(struct s3_conn *c3cn, int errno) -{ - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - c3cn->err = errno; - c3cn_closed(c3cn); -} - -static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb) -{ - struct s3_conn *c3cn = (struct s3_conn *)skb->sk; - - c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state); - - c3cn_hold(c3cn); - spin_lock_bh(&c3cn->lock); - if (c3cn->state == C3CN_STATE_CONNECTING) - fail_act_open(c3cn, -EHOSTUNREACH); - spin_unlock_bh(&c3cn->lock); - c3cn_put(c3cn); - __kfree_skb(skb); -} - -/* - * CPL connection close request: host -> - * - * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to - * the write queue (i.e., 
after any unsent txt data). - */ -static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb, - int flags) -{ - skb_tcp_seq(skb) = c3cn->write_seq; - skb_flags(skb) = flags; - __skb_queue_tail(&c3cn->write_queue, skb); -} - -static void send_close_req(struct s3_conn *c3cn) -{ - struct sk_buff *skb = c3cn->cpl_close; - struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; - unsigned int tid = c3cn->tid; - - c3cn_conn_debug("c3cn 0x%p, state 0x%x, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - c3cn->cpl_close = NULL; - - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); - req->wr.wr_lo = htonl(V_WR_TID(tid)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); - req->rsvd = htonl(c3cn->write_seq); - - skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND); - if (c3cn->state != C3CN_STATE_CONNECTING) - c3cn_push_tx_frames(c3cn, 1); -} - -/* - * CPL connection abort request: host -> - * - * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs - * for the same connection and also that we do not try to send a message - * after the connection has closed. - */ -static void abort_arp_failure(struct t3cdev *cdev, struct sk_buff *skb) -{ - struct cpl_abort_req *req = cplhdr(skb); - - c3cn_conn_debug("tdev 0x%p.\n", cdev); - - req->cmd = CPL_ABORT_NO_RST; - cxgb3_ofld_send(cdev, skb); -} - -static inline void c3cn_purge_write_queue(struct s3_conn *c3cn) -{ - struct sk_buff *skb; - - while ((skb = __skb_dequeue(&c3cn->write_queue))) - __kfree_skb(skb); -} - -static void send_abort_req(struct s3_conn *c3cn) -{ - struct sk_buff *skb = c3cn->cpl_abort_req; - struct cpl_abort_req *req; - unsigned int tid = c3cn->tid; - - if (unlikely(c3cn->state == C3CN_STATE_ABORTING) || !skb || - !c3cn->cdev) - return; - - c3cn_set_state(c3cn, C3CN_STATE_ABORTING); - - c3cn_conn_debug("c3cn 0x%p, flag ABORT_RPL + ABORT_SHUT.\n", c3cn); - - c3cn_set_flag(c3cn, C3CN_ABORT_RPL_PENDING); - - /* Purge the send queue so we don't send anything after an abort. */ - c3cn_purge_write_queue(c3cn); - - c3cn->cpl_abort_req = NULL; - req = (struct cpl_abort_req *)skb->head; - memset(req, 0, sizeof(*req)); - - skb->priority = CPL_PRIORITY_DATA; - set_arp_failure_handler(skb, abort_arp_failure); - - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); - req->wr.wr_lo = htonl(V_WR_TID(tid)); - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); - req->rsvd0 = htonl(c3cn->snd_nxt); - req->rsvd1 = !c3cn_flag(c3cn, C3CN_TX_DATA_SENT); - req->cmd = CPL_ABORT_SEND_RST; - - l2t_send(c3cn->cdev, skb, c3cn->l2t); -} - -/* - * CPL connection abort reply: host -> - * - * Send an ABORT_RPL message in response of the ABORT_REQ received. - */ -static void send_abort_rpl(struct s3_conn *c3cn, int rst_status) -{ - struct sk_buff *skb = c3cn->cpl_abort_rpl; - struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; - - c3cn->cpl_abort_rpl = NULL; - - skb->priority = CPL_PRIORITY_DATA; - memset(rpl, 0, sizeof(*rpl)); - rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); - rpl->wr.wr_lo = htonl(V_WR_TID(c3cn->tid)); - OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, c3cn->tid)); - rpl->cmd = rst_status; - - cxgb3_ofld_send(c3cn->cdev, skb); -} - -/* - * CPL connection rx data ack: host -> - * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of - * credits sent. 
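
[Illustrative sketch, not part of this patch or the cxgb3i driver: the credit-return policy that ends up calling send_rx_credits() below is applied later in this file by cxgb3i_c3cn_rx_credits(). A minimal user-space model of that decision, with hypothetical window and threshold values:]

#include <stdio.h>

/* Model of the RX credit-return decision: "credits" is the number of
 * bytes consumed by the initiator since the last window update; they
 * are handed back to the HW either when they cross a threshold or when
 * the advertised receive window would otherwise drop below 16KB. */
static unsigned int maybe_return_credits(unsigned int copied_seq,
					 unsigned int rcv_wup,
					 unsigned int rcv_win,
					 unsigned int thres)
{
	unsigned int credits = copied_seq - rcv_wup;
	int must_send = credits + 16384 >= rcv_win;

	if (credits && (must_send || credits >= thres))
		return credits;	/* would go out in a CPL_RX_DATA_ACK */
	return 0;
}

int main(void)
{
	/* hypothetical numbers: 256KB receive window, 4KB threshold */
	printf("return %u credits\n",
	       maybe_return_credits(120000, 100000, 262144, 4096));
	return 0;
}
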
- */ -static u32 send_rx_credits(struct s3_conn *c3cn, u32 credits, u32 dack) -{ - struct sk_buff *skb; - struct cpl_rx_data_ack *req; - - skb = alloc_skb(sizeof(*req), GFP_ATOMIC); - if (!skb) - return 0; - - req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req)); - req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); - req->wr.wr_lo = 0; - OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, c3cn->tid)); - req->credit_dack = htonl(dack | V_RX_CREDITS(credits)); - skb->priority = CPL_PRIORITY_ACK; - cxgb3_ofld_send(c3cn->cdev, skb); - return credits; -} - -/* - * CPL connection tx data: host -> - * - * Send iscsi PDU via TX_DATA CPL message. Returns the number of - * credits sent. - * Each TX_DATA consumes work request credit (wrs), so we need to keep track of - * how many we've used so far and how many are pending (i.e., yet ack'ed by T3). - */ - -/* - * For ULP connections HW may inserts digest bytes into the pdu. Those digest - * bytes are not sent by the host but are part of the TCP payload and therefore - * consume TCP sequence space. - */ -static const unsigned int cxgb3_ulp_extra_len[] = { 0, 4, 4, 8 }; -static inline unsigned int ulp_extra_len(const struct sk_buff *skb) -{ - return cxgb3_ulp_extra_len[skb_ulp_mode(skb) & 3]; -} - -static unsigned int wrlen __read_mostly; - -/* - * The number of WRs needed for an skb depends on the number of fragments - * in the skb and whether it has any payload in its main body. This maps the - * length of the gather list represented by an skb into the # of necessary WRs. - * The extra two fragments are for iscsi bhs and payload padding. - */ -#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) -static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; - -static void s3_init_wr_tab(unsigned int wr_len) -{ - int i; - - if (skb_wrs[1]) /* already initialized */ - return; - - for (i = 1; i < SKB_WR_LIST_SIZE; i++) { - int sgl_len = (3 * i) / 2 + (i & 1); - - sgl_len += 3; - skb_wrs[i] = (sgl_len <= wr_len - ? 1 : 1 + (sgl_len - 2) / (wr_len - 1)); - } - - wrlen = wr_len * 8; -} - -static inline void reset_wr_list(struct s3_conn *c3cn) -{ - c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL; -} - -/* - * Add a WR to a connections's list of pending WRs. This is a singly-linked - * list of sk_buffs operating as a FIFO. The head is kept in wr_pending_head - * and the tail in wr_pending_tail. - */ -static inline void enqueue_wr(struct s3_conn *c3cn, - struct sk_buff *skb) -{ - skb_tx_wr_next(skb) = NULL; - - /* - * We want to take an extra reference since both us and the driver - * need to free the packet before it's really freed. We know there's - * just one user currently so we use atomic_set rather than skb_get - * to avoid the atomic op. 
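
[Illustrative sketch, not part of this patch: the skb_wrs[] table built by s3_init_wr_tab() above maps the length of an skb's gather list (its page fragments plus two extra entries for the iSCSI BHS and payload padding) to the number of work requests needed. The stand-alone model below reproduces that computation for a hypothetical wr_len and an assumed MAX_SKB_FRAGS of 16.]

#include <stdio.h>

#define SKB_WR_LIST_SIZE (16 + 2)	/* MAX_SKB_FRAGS + 2 in the driver */

static unsigned int skb_wrs[SKB_WR_LIST_SIZE];

/* Same arithmetic as s3_init_wr_tab(): a gather list of i entries takes
 * (3 * i) / 2 + (i & 1) units of SGL space plus 3 units of header; if
 * that does not fit into a single WR of wr_len units, each additional
 * WR carries wr_len - 1 units. */
static void init_wr_tab(unsigned int wr_len)
{
	int i;

	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1) + 3;

		skb_wrs[i] = sgl_len <= (int)wr_len ?
			1 : 1 + (sgl_len - 2) / (wr_len - 1);
	}
}

int main(void)
{
	int i;

	init_wr_tab(9);		/* hypothetical WR size of 9 units */
	for (i = 1; i < SKB_WR_LIST_SIZE; i++)
		printf("frags=%2d -> wrs=%u\n", i, skb_wrs[i]);
	return 0;
}
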
- */ - atomic_set(&skb->users, 2); - - if (!c3cn->wr_pending_head) - c3cn->wr_pending_head = skb; - else - skb_tx_wr_next(c3cn->wr_pending_tail) = skb; - c3cn->wr_pending_tail = skb; -} - -static int count_pending_wrs(struct s3_conn *c3cn) -{ - int n = 0; - const struct sk_buff *skb = c3cn->wr_pending_head; - - while (skb) { - n += skb->csum; - skb = skb_tx_wr_next(skb); - } - return n; -} - -static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn) -{ - return c3cn->wr_pending_head; -} - -static inline void free_wr_skb(struct sk_buff *skb) -{ - kfree_skb(skb); -} - -static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn) -{ - struct sk_buff *skb = c3cn->wr_pending_head; - - if (likely(skb)) { - /* Don't bother clearing the tail */ - c3cn->wr_pending_head = skb_tx_wr_next(skb); - skb_tx_wr_next(skb) = NULL; - } - return skb; -} - -static void purge_wr_queue(struct s3_conn *c3cn) -{ - struct sk_buff *skb; - while ((skb = dequeue_wr(c3cn)) != NULL) - free_wr_skb(skb); -} - -static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb, - int len, int req_completion) -{ - struct tx_data_wr *req; - - skb_reset_transport_header(skb); - req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); - req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | - (req_completion ? F_WR_COMPL : 0)); - req->wr_lo = htonl(V_WR_TID(c3cn->tid)); - req->sndseq = htonl(c3cn->snd_nxt); - /* len includes the length of any HW ULP additions */ - req->len = htonl(len); - req->param = htonl(V_TX_PORT(c3cn->l2t->smt_idx)); - /* V_TX_ULP_SUBMODE sets both the mode and submode */ - req->flags = htonl(V_TX_ULP_SUBMODE(skb_ulp_mode(skb)) | - V_TX_SHOVE((skb_peek(&c3cn->write_queue) ? 0 : 1))); - - if (!c3cn_flag(c3cn, C3CN_TX_DATA_SENT)) { - req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | - V_TX_CPU_IDX(c3cn->qset)); - /* Sendbuffer is in units of 32KB. */ - req->param |= htonl(V_TX_SNDBUF(cxgb3_snd_win >> 15)); - c3cn_set_flag(c3cn, C3CN_TX_DATA_SENT); - } -} - -/** - * c3cn_push_tx_frames -- start transmit - * @c3cn: the offloaded connection - * @req_completion: request wr_ack or not - * - * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a - * connection's send queue and sends them on to T3. Must be called with the - * connection's lock held. Returns the amount of send buffer space that was - * freed as a result of sending queued data to T3. 
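
[Illustrative sketch, not part of this patch: the pending-WR helpers above (enqueue_wr, dequeue_wr, count_pending_wrs) keep a plain singly-linked FIFO threaded through the skb control block, with the per-skb WR count stashed in skb->csum; check_wr_invariants() later verifies that wr_avail plus the credits still sitting in this queue always equals wr_max. A minimal user-space model with hypothetical types:]

#include <assert.h>
#include <stddef.h>

/* Minimal stand-in for an sk_buff carrying its WR count (skb->csum in
 * the driver) and the FIFO link (skb_tx_wr_next()). */
struct pkt {
	unsigned int wrs;		/* work requests this packet uses */
	struct pkt *next;
};

struct conn {
	struct pkt *head, *tail;	/* wr_pending_head/tail */
	unsigned int wr_max, wr_avail;
};

static void enqueue_wr(struct conn *c, struct pkt *p)
{
	p->next = NULL;
	if (!c->head)
		c->head = p;
	else
		c->tail->next = p;
	c->tail = p;
	c->wr_avail -= p->wrs;
}

static struct pkt *dequeue_wr(struct conn *c)
{
	struct pkt *p = c->head;

	if (p) {
		/* like the driver, the stale tail pointer is left alone */
		c->head = p->next;
		p->next = NULL;
		/* in the driver the credits only come back via WR_ACK;
		 * returning them here keeps the model self-contained */
		c->wr_avail += p->wrs;
	}
	return p;
}

static unsigned int count_pending(const struct conn *c)
{
	unsigned int n = 0;
	const struct pkt *p;

	for (p = c->head; p; p = p->next)
		n += p->wrs;
	return n;
}

int main(void)
{
	struct pkt a = { .wrs = 2 }, b = { .wrs = 1 };
	struct conn c = { .wr_max = 16, .wr_avail = 16 };

	enqueue_wr(&c, &a);
	enqueue_wr(&c, &b);
	assert(c.wr_avail + count_pending(&c) == c.wr_max);
	dequeue_wr(&c);
	assert(c.wr_avail + count_pending(&c) == c.wr_max);
	return 0;
}
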
- */ -static void arp_failure_discard(struct t3cdev *cdev, struct sk_buff *skb) -{ - kfree_skb(skb); -} - -static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion) -{ - int total_size = 0; - struct sk_buff *skb; - struct t3cdev *cdev; - struct cxgb3i_sdev_data *cdata; - - if (unlikely(c3cn->state == C3CN_STATE_CONNECTING || - c3cn->state == C3CN_STATE_CLOSE_WAIT_1 || - c3cn->state >= C3CN_STATE_ABORTING)) { - c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n", - c3cn, c3cn->state); - return 0; - } - - cdev = c3cn->cdev; - cdata = CXGB3_SDEV_DATA(cdev); - - while (c3cn->wr_avail - && (skb = skb_peek(&c3cn->write_queue)) != NULL) { - int len = skb->len; /* length before skb_push */ - int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); - int wrs_needed = skb_wrs[frags]; - - if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen) - wrs_needed = 1; - - WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1); - - if (c3cn->wr_avail < wrs_needed) { - c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, " - "wr %d < %u.\n", - c3cn, skb->len, skb->data_len, frags, - wrs_needed, c3cn->wr_avail); - break; - } - - __skb_unlink(skb, &c3cn->write_queue); - skb->priority = CPL_PRIORITY_DATA; - skb->csum = wrs_needed; /* remember this until the WR_ACK */ - c3cn->wr_avail -= wrs_needed; - c3cn->wr_unacked += wrs_needed; - enqueue_wr(c3cn, skb); - - c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, " - "wr %d, left %u, unack %u.\n", - c3cn, skb->len, skb->data_len, frags, - wrs_needed, c3cn->wr_avail, c3cn->wr_unacked); - - - if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) { - if ((req_completion && - c3cn->wr_unacked == wrs_needed) || - (skb_flags(skb) & C3CB_FLAG_COMPL) || - c3cn->wr_unacked >= c3cn->wr_max / 2) { - req_completion = 1; - c3cn->wr_unacked = 0; - } - len += ulp_extra_len(skb); - make_tx_data_wr(c3cn, skb, len, req_completion); - c3cn->snd_nxt += len; - skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR; - } - - total_size += skb->truesize; - set_arp_failure_handler(skb, arp_failure_discard); - l2t_send(cdev, skb, c3cn->l2t); - } - return total_size; -} - -/* - * process_cpl_msg: -> host - * Top-level CPL message processing used by most CPL messages that - * pertain to connections. - */ -static inline void process_cpl_msg(void (*fn)(struct s3_conn *, - struct sk_buff *), - struct s3_conn *c3cn, - struct sk_buff *skb) -{ - spin_lock_bh(&c3cn->lock); - fn(c3cn, skb); - spin_unlock_bh(&c3cn->lock); -} - -/* - * process_cpl_msg_ref: -> host - * Similar to process_cpl_msg() but takes an extra connection reference around - * the call to the handler. Should be used if the handler may drop a - * connection reference. - */ -static inline void process_cpl_msg_ref(void (*fn) (struct s3_conn *, - struct sk_buff *), - struct s3_conn *c3cn, - struct sk_buff *skb) -{ - c3cn_hold(c3cn); - process_cpl_msg(fn, c3cn, skb); - c3cn_put(c3cn); -} - -/* - * Process a CPL_ACT_ESTABLISH message: -> host - * Updates connection state from an active establish CPL message. Runs with - * the connection lock held. - */ - -static inline void s3_free_atid(struct t3cdev *cdev, unsigned int tid) -{ - struct s3_conn *c3cn = cxgb3_free_atid(cdev, tid); - if (c3cn) - c3cn_put(c3cn); -} - -static void c3cn_established(struct s3_conn *c3cn, u32 snd_isn, - unsigned int opt) -{ - c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state); - - c3cn->write_seq = c3cn->snd_nxt = c3cn->snd_una = snd_isn; - - /* - * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't - * pass through opt0. 
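
[Illustrative sketch, not part of this patch: the completion-request test inside c3cn_push_tx_frames() above amounts to "ask the HW to acknowledge at least every wr_max/2 outstanding work requests". A compact model of that condition, with hypothetical values:]

#include <stdio.h>

/* Mirrors the decision in c3cn_push_tx_frames(): a TX_DATA_WR requests
 * a completion (F_WR_COMPL) when the caller asked for one and this skb
 * is the only unacked WR, when the skb itself carries the COMPL flag,
 * or when half of the WR credits are outstanding without an ack. */
static int want_completion(int caller_req, int skb_compl_flag,
			   unsigned int wr_unacked, unsigned int wrs_needed,
			   unsigned int wr_max)
{
	return (caller_req && wr_unacked == wrs_needed) ||
	       skb_compl_flag ||
	       wr_unacked >= wr_max / 2;
}

int main(void)
{
	/* hypothetical: 64 WR credits, 33 of them already unacked */
	printf("%d\n", want_completion(0, 0, 33, 2, 64));	/* 1 */
	printf("%d\n", want_completion(0, 0, 8, 2, 64));	/* 0 */
	return 0;
}
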
- */ - if (cxgb3_rcv_win > (M_RCV_BUFSIZ << 10)) - c3cn->rcv_wup -= cxgb3_rcv_win - (M_RCV_BUFSIZ << 10); - - dst_confirm(c3cn->dst_cache); - - smp_mb(); - - c3cn_set_state(c3cn, C3CN_STATE_ESTABLISHED); -} - -static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb) -{ - struct cpl_act_establish *req = cplhdr(skb); - u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */ - - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - if (unlikely(c3cn->state != C3CN_STATE_CONNECTING)) - cxgb3i_log_error("TID %u expected SYN_SENT, got EST., s %u\n", - c3cn->tid, c3cn->state); - - c3cn->copied_seq = c3cn->rcv_wup = c3cn->rcv_nxt = rcv_isn; - c3cn_established(c3cn, ntohl(req->snd_isn), ntohs(req->tcp_opt)); - - __kfree_skb(skb); - - if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED))) - /* upper layer has requested closing */ - send_abort_req(c3cn); - else { - if (skb_queue_len(&c3cn->write_queue)) - c3cn_push_tx_frames(c3cn, 1); - cxgb3i_conn_tx_open(c3cn); - } -} - -static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb, - void *ctx) -{ - struct cpl_act_establish *req = cplhdr(skb); - unsigned int tid = GET_TID(req); - unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); - struct s3_conn *c3cn = ctx; - struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev); - - c3cn_conn_debug("rcv, tid 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n", - tid, c3cn, c3cn->state, c3cn->flags); - - c3cn->tid = tid; - c3cn_hold(c3cn); - cxgb3_insert_tid(cdata->cdev, cdata->client, c3cn, tid); - s3_free_atid(cdev, atid); - - c3cn->qset = G_QNUM(ntohl(skb->csum)); - - process_cpl_msg(process_act_establish, c3cn, skb); - return 0; -} - -/* - * Process a CPL_ACT_OPEN_RPL message: -> host - * Handle active open failures. 
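
[Illustrative sketch, not part of this patch: do_act_establish() above swaps the temporary atid used during the open for the real hardware tid, and process_act_establish()/c3cn_established() seed the TCP sequence state from the ISNs reported in the CPL. A tiny user-space model of that seeding, with hypothetical ISN values:]

#include <stdint.h>
#include <stdio.h>

/* Model of the sequence-number seeding done in process_act_establish()
 * and c3cn_established(): both directions start from the ISNs carried
 * in the CPL_ACT_ESTABLISH message. */
struct seq_state {
	uint32_t write_seq, snd_nxt, snd_una;	/* transmit side */
	uint32_t copied_seq, rcv_wup, rcv_nxt;	/* receive side */
};

static void established(struct seq_state *s, uint32_t snd_isn,
			uint32_t rcv_isn)
{
	s->write_seq = s->snd_nxt = s->snd_una = snd_isn;
	s->copied_seq = s->rcv_wup = s->rcv_nxt = rcv_isn;
}

int main(void)
{
	struct seq_state s;

	established(&s, 0x1000, 0x2000);	/* hypothetical ISNs */
	printf("snd %u rcv %u\n", (unsigned)s.snd_nxt, (unsigned)s.rcv_nxt);
	return 0;
}
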
- */ -static int act_open_rpl_status_to_errno(int status) -{ - switch (status) { - case CPL_ERR_CONN_RESET: - return -ECONNREFUSED; - case CPL_ERR_ARP_MISS: - return -EHOSTUNREACH; - case CPL_ERR_CONN_TIMEDOUT: - return -ETIMEDOUT; - case CPL_ERR_TCAM_FULL: - return -ENOMEM; - case CPL_ERR_CONN_EXIST: - cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n"); - return -EADDRINUSE; - default: - return -EIO; - } -} - -static void act_open_retry_timer(unsigned long data) -{ - struct sk_buff *skb; - struct s3_conn *c3cn = (struct s3_conn *)data; - - c3cn_conn_debug("c3cn 0x%p, state %u.\n", c3cn, c3cn->state); - - spin_lock_bh(&c3cn->lock); - skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC); - if (!skb) - fail_act_open(c3cn, -ENOMEM); - else { - skb->sk = (struct sock *)c3cn; - set_arp_failure_handler(skb, act_open_req_arp_failure); - make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t); - l2t_send(c3cn->cdev, skb, c3cn->l2t); - } - spin_unlock_bh(&c3cn->lock); - c3cn_put(c3cn); -} - -static void process_act_open_rpl(struct s3_conn *c3cn, struct sk_buff *skb) -{ - struct cpl_act_open_rpl *rpl = cplhdr(skb); - - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - if (rpl->status == CPL_ERR_CONN_EXIST && - c3cn->retry_timer.function != act_open_retry_timer) { - c3cn->retry_timer.function = act_open_retry_timer; - if (!mod_timer(&c3cn->retry_timer, jiffies + HZ / 2)) - c3cn_hold(c3cn); - } else - fail_act_open(c3cn, act_open_rpl_status_to_errno(rpl->status)); - __kfree_skb(skb); -} - -static int do_act_open_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) -{ - struct s3_conn *c3cn = ctx; - struct cpl_act_open_rpl *rpl = cplhdr(skb); - - c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, f 0x%lx.\n", - rpl->status, c3cn, c3cn->state, c3cn->flags); - - if (rpl->status != CPL_ERR_TCAM_FULL && - rpl->status != CPL_ERR_CONN_EXIST && - rpl->status != CPL_ERR_ARP_MISS) - cxgb3_queue_tid_release(cdev, GET_TID(rpl)); - - process_cpl_msg_ref(process_act_open_rpl, c3cn, skb); - return 0; -} - -/* - * Process PEER_CLOSE CPL messages: -> host - * Handle peer FIN. - */ -static void process_peer_close(struct s3_conn *c3cn, struct sk_buff *skb) -{ - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) - goto out; - - switch (c3cn->state) { - case C3CN_STATE_ESTABLISHED: - c3cn_set_state(c3cn, C3CN_STATE_PASSIVE_CLOSE); - break; - case C3CN_STATE_ACTIVE_CLOSE: - c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2); - break; - case C3CN_STATE_CLOSE_WAIT_1: - c3cn_closed(c3cn); - break; - case C3CN_STATE_ABORTING: - break; - default: - cxgb3i_log_error("%s: peer close, TID %u in bad state %u\n", - c3cn->cdev->name, c3cn->tid, c3cn->state); - } - - cxgb3i_conn_closing(c3cn); -out: - __kfree_skb(skb); -} - -static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) -{ - struct s3_conn *c3cn = ctx; - - c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - process_cpl_msg_ref(process_peer_close, c3cn, skb); - return 0; -} - -/* - * Process CLOSE_CONN_RPL CPL message: -> host - * Process a peer ACK to our FIN. 
- */ -static void process_close_con_rpl(struct s3_conn *c3cn, struct sk_buff *skb) -{ - struct cpl_close_con_rpl *rpl = cplhdr(skb); - - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - c3cn->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */ - - if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) - goto out; - - switch (c3cn->state) { - case C3CN_STATE_ACTIVE_CLOSE: - c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_1); - break; - case C3CN_STATE_CLOSE_WAIT_1: - case C3CN_STATE_CLOSE_WAIT_2: - c3cn_closed(c3cn); - break; - case C3CN_STATE_ABORTING: - break; - default: - cxgb3i_log_error("%s: close_rpl, TID %u in bad state %u\n", - c3cn->cdev->name, c3cn->tid, c3cn->state); - } - -out: - kfree_skb(skb); -} - -static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, - void *ctx) -{ - struct s3_conn *c3cn = ctx; - - c3cn_conn_debug("rcv, c3cn 0x%p, s %u, f 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - process_cpl_msg_ref(process_close_con_rpl, c3cn, skb); - return 0; -} - -/* - * Process ABORT_REQ_RSS CPL message: -> host - * Process abort requests. If we are waiting for an ABORT_RPL we ignore this - * request except that we need to reply to it. - */ - -static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason, - int *need_rst) -{ - switch (abort_reason) { - case CPL_ERR_BAD_SYN: /* fall through */ - case CPL_ERR_CONN_RESET: - return c3cn->state > C3CN_STATE_ESTABLISHED ? - -EPIPE : -ECONNRESET; - case CPL_ERR_XMIT_TIMEDOUT: - case CPL_ERR_PERSIST_TIMEDOUT: - case CPL_ERR_FINWAIT2_TIMEDOUT: - case CPL_ERR_KEEPALIVE_TIMEDOUT: - return -ETIMEDOUT; - default: - return -EIO; - } -} - -static void process_abort_req(struct s3_conn *c3cn, struct sk_buff *skb) -{ - int rst_status = CPL_ABORT_NO_RST; - const struct cpl_abort_req_rss *req = cplhdr(skb); - - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - if (!c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) { - c3cn_set_flag(c3cn, C3CN_ABORT_REQ_RCVD); - c3cn_set_state(c3cn, C3CN_STATE_ABORTING); - __kfree_skb(skb); - return; - } - - c3cn_clear_flag(c3cn, C3CN_ABORT_REQ_RCVD); - send_abort_rpl(c3cn, rst_status); - - if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) { - c3cn->err = - abort_status_to_errno(c3cn, req->status, &rst_status); - c3cn_closed(c3cn); - } -} - -static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) -{ - const struct cpl_abort_req_rss *req = cplhdr(skb); - struct s3_conn *c3cn = ctx; - - c3cn_conn_debug("rcv, c3cn 0x%p, s 0x%x, f 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - if (req->status == CPL_ERR_RTX_NEG_ADVICE || - req->status == CPL_ERR_PERSIST_NEG_ADVICE) { - __kfree_skb(skb); - return 0; - } - - process_cpl_msg_ref(process_abort_req, c3cn, skb); - return 0; -} - -/* - * Process ABORT_RPL_RSS CPL message: -> host - * Process abort replies. We only process these messages if we anticipate - * them as the coordination between SW and HW in this area is somewhat lacking - * and sometimes we get ABORT_RPLs after we are done with the connection that - * originated the ABORT_REQ. 
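
[Illustrative sketch, not part of this patch: taken together, process_peer_close() and process_close_con_rpl() above implement the close-side state machine modelled below (abort paths omitted for brevity).]

#include <stdio.h>

enum state {
	ESTABLISHED, ACTIVE_CLOSE, PASSIVE_CLOSE,
	CLOSE_WAIT_1, CLOSE_WAIT_2, CLOSED
};
enum event { PEER_CLOSE, CLOSE_CON_RPL };

/* Transition table distilled from process_peer_close() (peer sent its
 * FIN) and process_close_con_rpl() (peer acked our FIN). */
static enum state next_state(enum state s, enum event e)
{
	if (e == PEER_CLOSE) {
		if (s == ESTABLISHED)	return PASSIVE_CLOSE;
		if (s == ACTIVE_CLOSE)	return CLOSE_WAIT_2;
		if (s == CLOSE_WAIT_1)	return CLOSED;
	} else {
		if (s == ACTIVE_CLOSE)	return CLOSE_WAIT_1;
		if (s == CLOSE_WAIT_1 || s == CLOSE_WAIT_2)
			return CLOSED;
	}
	return s;			/* unexpected: state unchanged */
}

int main(void)
{
	enum state s = ACTIVE_CLOSE;		/* we sent the FIN first */

	s = next_state(s, CLOSE_CON_RPL);	/* -> CLOSE_WAIT_1 */
	s = next_state(s, PEER_CLOSE);		/* -> CLOSED */
	printf("final state %d\n", s);
	return 0;
}
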
- */ -static void process_abort_rpl(struct s3_conn *c3cn, struct sk_buff *skb) -{ - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - if (c3cn_flag(c3cn, C3CN_ABORT_RPL_PENDING)) { - if (!c3cn_flag(c3cn, C3CN_ABORT_RPL_RCVD)) - c3cn_set_flag(c3cn, C3CN_ABORT_RPL_RCVD); - else { - c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_RCVD); - c3cn_clear_flag(c3cn, C3CN_ABORT_RPL_PENDING); - if (c3cn_flag(c3cn, C3CN_ABORT_REQ_RCVD)) - cxgb3i_log_error("%s tid %u, ABORT_RPL_RSS\n", - c3cn->cdev->name, c3cn->tid); - c3cn_closed(c3cn); - } - } - __kfree_skb(skb); -} - -static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) -{ - struct cpl_abort_rpl_rss *rpl = cplhdr(skb); - struct s3_conn *c3cn = ctx; - - c3cn_conn_debug("rcv, status 0x%x, c3cn 0x%p, s %u, 0x%lx.\n", - rpl->status, c3cn, c3cn ? c3cn->state : 0, - c3cn ? c3cn->flags : 0UL); - - /* - * Ignore replies to post-close aborts indicating that the abort was - * requested too late. These connections are terminated when we get - * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss - * arrives the TID is either no longer used or it has been recycled. - */ - if (rpl->status == CPL_ERR_ABORT_FAILED) - goto discard; - - /* - * Sometimes we've already closed the connection, e.g., a post-close - * abort races with ABORT_REQ_RSS, the latter frees the connection - * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED, - * but FW turns the ABORT_REQ into a regular one and so we get - * ABORT_RPL_RSS with status 0 and no connection. - */ - if (!c3cn) - goto discard; - - process_cpl_msg_ref(process_abort_rpl, c3cn, skb); - return 0; - -discard: - __kfree_skb(skb); - return 0; -} - -/* - * Process RX_ISCSI_HDR CPL message: -> host - * Handle received PDUs, the payload could be DDP'ed. If not, the payload - * follow after the bhs. 
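
[Illustrative sketch, not part of this patch: the RX_ISCSI_HDR handler that follows reads a DDP status word from the cpl_rx_data_ddp_norss trailer at the end of the skb and turns it into per-skb ULP flags. The bit positions below are symbolic stand-ins for the driver's RX_DDP_STATUS_*_SHIFT values, which live in the CPL/firmware headers.]

#include <stdio.h>
#include <stdint.h>

/* Symbolic stand-ins; the real shift values come from the hardware. */
enum { HCRC_SHIFT = 0, DCRC_SHIFT = 1, PAD_SHIFT = 2, DDP_SHIFT = 3 };

#define FLAG_HCRC_ERROR	0x1
#define FLAG_DCRC_ERROR	0x2
#define FLAG_PAD_ERROR	0x4
#define FLAG_DATA_DDPED	0x8

/* Mirrors the status decoding in process_rx_iscsi_hdr(); the driver
 * additionally trusts the DDP bit only when no inline payload followed
 * the BHS in the skb. */
static unsigned int decode_ddp_status(uint32_t status)
{
	unsigned int flags = 0;

	if (status & (1 << HCRC_SHIFT))
		flags |= FLAG_HCRC_ERROR;
	if (status & (1 << DCRC_SHIFT))
		flags |= FLAG_DCRC_ERROR;
	if (status & (1 << PAD_SHIFT))
		flags |= FLAG_PAD_ERROR;
	if (status & (1 << DDP_SHIFT))
		flags |= FLAG_DATA_DDPED;
	return flags;
}

int main(void)
{
	printf("flags 0x%x\n", decode_ddp_status(1 << DDP_SHIFT));
	return 0;
}
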
- */ -static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb) -{ - struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb); - struct cpl_iscsi_hdr_norss data_cpl; - struct cpl_rx_data_ddp_norss ddp_cpl; - unsigned int hdr_len, data_len, status; - unsigned int len; - int err; - - if (unlikely(c3cn->state >= C3CN_STATE_PASSIVE_CLOSE)) { - if (c3cn->state != C3CN_STATE_ABORTING) - send_abort_req(c3cn); - __kfree_skb(skb); - return; - } - - skb_tcp_seq(skb) = ntohl(hdr_cpl->seq); - skb_flags(skb) = 0; - - skb_reset_transport_header(skb); - __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); - - len = hdr_len = ntohs(hdr_cpl->len); - /* msg coalesce is off or not enough data received */ - if (skb->len <= hdr_len) { - cxgb3i_log_error("%s: TID %u, ISCSI_HDR, skb len %u < %u.\n", - c3cn->cdev->name, c3cn->tid, - skb->len, hdr_len); - goto abort_conn; - } - - err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl, - sizeof(ddp_cpl)); - if (err < 0) - goto abort_conn; - - skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY; - skb_rx_pdulen(skb) = ntohs(ddp_cpl.len); - skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); - status = ntohl(ddp_cpl.ddp_status); - - c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n", - skb, skb->len, skb_rx_pdulen(skb), status); - - if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT)) - skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR; - if (status & (1 << RX_DDP_STATUS_DCRC_SHIFT)) - skb_ulp_mode(skb) |= ULP2_FLAG_DCRC_ERROR; - if (status & (1 << RX_DDP_STATUS_PAD_SHIFT)) - skb_ulp_mode(skb) |= ULP2_FLAG_PAD_ERROR; - - if (skb->len > (hdr_len + sizeof(ddp_cpl))) { - err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl)); - if (err < 0) - goto abort_conn; - data_len = ntohs(data_cpl.len); - len += sizeof(data_cpl) + data_len; - } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT)) - skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED; - - c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb); - __pskb_trim(skb, len); - __skb_queue_tail(&c3cn->receive_queue, skb); - cxgb3i_conn_pdu_ready(c3cn); - - return; - -abort_conn: - send_abort_req(c3cn); - __kfree_skb(skb); -} - -static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) -{ - struct s3_conn *c3cn = ctx; - - process_cpl_msg(process_rx_iscsi_hdr, c3cn, skb); - return 0; -} - -/* - * Process TX_DATA_ACK CPL messages: -> host - * Process an acknowledgment of WR completion. Advance snd_una and send the - * next batch of work requests from the write queue. 
- */ -static void check_wr_invariants(struct s3_conn *c3cn) -{ - int pending = count_pending_wrs(c3cn); - - if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max)) - cxgb3i_log_error("TID %u: credit imbalance: avail %u, " - "pending %u, total should be %u\n", - c3cn->tid, c3cn->wr_avail, pending, - c3cn->wr_max); -} - -static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb) -{ - struct cpl_wr_ack *hdr = cplhdr(skb); - unsigned int credits = ntohs(hdr->credits); - u32 snd_una = ntohl(hdr->snd_una); - - c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n", - credits, c3cn->wr_avail, c3cn->wr_unacked, - c3cn->tid, c3cn->state); - - c3cn->wr_avail += credits; - if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail) - c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail; - - while (credits) { - struct sk_buff *p = peek_wr(c3cn); - - if (unlikely(!p)) { - cxgb3i_log_error("%u WR_ACK credits for TID %u with " - "nothing pending, state %u\n", - credits, c3cn->tid, c3cn->state); - break; - } - if (unlikely(credits < p->csum)) { - struct tx_data_wr *w = cplhdr(p); - cxgb3i_log_error("TID %u got %u WR credits need %u, " - "len %u, main body %u, frags %u, " - "seq # %u, ACK una %u, ACK nxt %u, " - "WR_AVAIL %u, WRs pending %u\n", - c3cn->tid, credits, p->csum, p->len, - p->len - p->data_len, - skb_shinfo(p)->nr_frags, - ntohl(w->sndseq), snd_una, - ntohl(hdr->snd_nxt), c3cn->wr_avail, - count_pending_wrs(c3cn) - credits); - p->csum -= credits; - break; - } else { - dequeue_wr(c3cn); - credits -= p->csum; - free_wr_skb(p); - } - } - - check_wr_invariants(c3cn); - - if (unlikely(before(snd_una, c3cn->snd_una))) { - cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK " - "snd_una %u\n", - c3cn->tid, snd_una, c3cn->snd_una); - goto out_free; - } - - if (c3cn->snd_una != snd_una) { - c3cn->snd_una = snd_una; - dst_confirm(c3cn->dst_cache); - } - - if (skb_queue_len(&c3cn->write_queue)) { - if (c3cn_push_tx_frames(c3cn, 0)) - cxgb3i_conn_tx_open(c3cn); - } else - cxgb3i_conn_tx_open(c3cn); -out_free: - __kfree_skb(skb); -} - -static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) -{ - struct s3_conn *c3cn = ctx; - - process_cpl_msg(process_wr_ack, c3cn, skb); - return 0; -} - -/* - * for each connection, pre-allocate skbs needed for close/abort requests. So - * that we can service the request right away. - */ -static void c3cn_free_cpl_skbs(struct s3_conn *c3cn) -{ - if (c3cn->cpl_close) - kfree_skb(c3cn->cpl_close); - if (c3cn->cpl_abort_req) - kfree_skb(c3cn->cpl_abort_req); - if (c3cn->cpl_abort_rpl) - kfree_skb(c3cn->cpl_abort_rpl); -} - -static int c3cn_alloc_cpl_skbs(struct s3_conn *c3cn) -{ - c3cn->cpl_close = alloc_skb(sizeof(struct cpl_close_con_req), - GFP_KERNEL); - if (!c3cn->cpl_close) - return -ENOMEM; - skb_put(c3cn->cpl_close, sizeof(struct cpl_close_con_req)); - - c3cn->cpl_abort_req = alloc_skb(sizeof(struct cpl_abort_req), - GFP_KERNEL); - if (!c3cn->cpl_abort_req) - goto free_cpl_skbs; - skb_put(c3cn->cpl_abort_req, sizeof(struct cpl_abort_req)); - - c3cn->cpl_abort_rpl = alloc_skb(sizeof(struct cpl_abort_rpl), - GFP_KERNEL); - if (!c3cn->cpl_abort_rpl) - goto free_cpl_skbs; - skb_put(c3cn->cpl_abort_rpl, sizeof(struct cpl_abort_rpl)); - - return 0; - -free_cpl_skbs: - c3cn_free_cpl_skbs(c3cn); - return -ENOMEM; -} - -/** - * c3cn_release_offload_resources - release offload resource - * @c3cn: the offloaded iscsi tcp connection. - * Release resources held by an offload connection (TID, L2T entry, etc.) 
- */ -static void c3cn_release_offload_resources(struct s3_conn *c3cn) -{ - struct t3cdev *cdev = c3cn->cdev; - unsigned int tid = c3cn->tid; - - c3cn->qset = 0; - c3cn_free_cpl_skbs(c3cn); - - if (c3cn->wr_avail != c3cn->wr_max) { - purge_wr_queue(c3cn); - reset_wr_list(c3cn); - } - - if (cdev) { - if (c3cn->l2t) { - l2t_release(L2DATA(cdev), c3cn->l2t); - c3cn->l2t = NULL; - } - if (c3cn->state == C3CN_STATE_CONNECTING) - /* we have ATID */ - s3_free_atid(cdev, tid); - else { - /* we have TID */ - cxgb3_remove_tid(cdev, (void *)c3cn, tid); - c3cn_put(c3cn); - } - } - - c3cn->dst_cache = NULL; - c3cn->cdev = NULL; -} - -/** - * cxgb3i_c3cn_create - allocate and initialize an s3_conn structure - * returns the s3_conn structure allocated. - */ -struct s3_conn *cxgb3i_c3cn_create(void) -{ - struct s3_conn *c3cn; - - c3cn = kzalloc(sizeof(*c3cn), GFP_KERNEL); - if (!c3cn) - return NULL; - - /* pre-allocate close/abort cpl, so we don't need to wait for memory - when close/abort is requested. */ - if (c3cn_alloc_cpl_skbs(c3cn) < 0) - goto free_c3cn; - - c3cn_conn_debug("alloc c3cn 0x%p.\n", c3cn); - - c3cn->flags = 0; - spin_lock_init(&c3cn->lock); - atomic_set(&c3cn->refcnt, 1); - skb_queue_head_init(&c3cn->receive_queue); - skb_queue_head_init(&c3cn->write_queue); - setup_timer(&c3cn->retry_timer, NULL, (unsigned long)c3cn); - rwlock_init(&c3cn->callback_lock); - - return c3cn; - -free_c3cn: - kfree(c3cn); - return NULL; -} - -static void c3cn_active_close(struct s3_conn *c3cn) -{ - int data_lost; - int close_req = 0; - - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - - dst_confirm(c3cn->dst_cache); - - c3cn_hold(c3cn); - spin_lock_bh(&c3cn->lock); - - data_lost = skb_queue_len(&c3cn->receive_queue); - __skb_queue_purge(&c3cn->receive_queue); - - switch (c3cn->state) { - case C3CN_STATE_CLOSED: - case C3CN_STATE_ACTIVE_CLOSE: - case C3CN_STATE_CLOSE_WAIT_1: - case C3CN_STATE_CLOSE_WAIT_2: - case C3CN_STATE_ABORTING: - /* nothing need to be done */ - break; - case C3CN_STATE_CONNECTING: - /* defer until cpl_act_open_rpl or cpl_act_establish */ - c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED); - break; - case C3CN_STATE_ESTABLISHED: - close_req = 1; - c3cn_set_state(c3cn, C3CN_STATE_ACTIVE_CLOSE); - break; - case C3CN_STATE_PASSIVE_CLOSE: - close_req = 1; - c3cn_set_state(c3cn, C3CN_STATE_CLOSE_WAIT_2); - break; - } - - if (close_req) { - if (data_lost) - /* Unread data was tossed, zap the connection. 
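
[Illustrative sketch, not part of this patch: the rule applied just below in c3cn_active_close() is the usual TCP-like one: if the upper layer closes while received data is still unread, the connection is reset with an ABORT_REQ rather than closed gracefully with a CLOSE_CON_REQ. Distilled into a stand-alone model:]

#include <stdio.h>

enum state { CONNECTING, ESTABLISHED, PASSIVE_CLOSE, CLOSING_OR_CLOSED };
enum action { SEND_CLOSE_REQ, SEND_ABORT_REQ, DEFER_CLOSE, DO_NOTHING };

/* Distilled from c3cn_active_close(): what an upper-layer close turns
 * into, depending on connection state and whether unread RX data had
 * to be thrown away. */
static enum action active_close(enum state s, int data_lost)
{
	switch (s) {
	case CONNECTING:
		return DEFER_CLOSE;	/* wait for open reply/establish */
	case ESTABLISHED:
	case PASSIVE_CLOSE:
		return data_lost ? SEND_ABORT_REQ : SEND_CLOSE_REQ;
	default:
		return DO_NOTHING;	/* already closing or closed */
	}
}

int main(void)
{
	printf("%d\n", active_close(ESTABLISHED, 1));	/* -> abort */
	printf("%d\n", active_close(ESTABLISHED, 0));	/* -> close req */
	return 0;
}
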
*/ - send_abort_req(c3cn); - else - send_close_req(c3cn); - } - - spin_unlock_bh(&c3cn->lock); - c3cn_put(c3cn); -} - -/** - * cxgb3i_c3cn_release - close and release an iscsi tcp connection and any - * resource held - * @c3cn: the iscsi tcp connection - */ -void cxgb3i_c3cn_release(struct s3_conn *c3cn) -{ - c3cn_conn_debug("c3cn 0x%p, s %u, f 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - if (unlikely(c3cn->state == C3CN_STATE_CONNECTING)) - c3cn_set_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED); - else if (likely(c3cn->state != C3CN_STATE_CLOSED)) - c3cn_active_close(c3cn); - c3cn_put(c3cn); -} - -static int is_cxgb3_dev(struct net_device *dev) -{ - struct cxgb3i_sdev_data *cdata; - struct net_device *ndev = dev; - - if (dev->priv_flags & IFF_802_1Q_VLAN) - ndev = vlan_dev_real_dev(dev); - - write_lock(&cdata_rwlock); - list_for_each_entry(cdata, &cdata_list, list) { - struct adap_ports *ports = &cdata->ports; - int i; - - for (i = 0; i < ports->nports; i++) - if (ndev == ports->lldevs[i]) { - write_unlock(&cdata_rwlock); - return 1; - } - } - write_unlock(&cdata_rwlock); - return 0; -} - -/** - * cxgb3_egress_dev - return the cxgb3 egress device - * @root_dev: the root device anchoring the search - * @c3cn: the connection used to determine egress port in bonding mode - * @context: in bonding mode, indicates a connection set up or failover - * - * Return egress device or NULL if the egress device isn't one of our ports. - */ -static struct net_device *cxgb3_egress_dev(struct net_device *root_dev, - struct s3_conn *c3cn, - int context) -{ - while (root_dev) { - if (root_dev->priv_flags & IFF_802_1Q_VLAN) - root_dev = vlan_dev_real_dev(root_dev); - else if (is_cxgb3_dev(root_dev)) - return root_dev; - else - return NULL; - } - return NULL; -} - -static struct rtable *find_route(struct net_device *dev, - __be32 saddr, __be32 daddr, - __be16 sport, __be16 dport) -{ - struct rtable *rt; - struct flowi fl = { - .oif = dev ? dev->ifindex : 0, - .nl_u = { - .ip4_u = { - .daddr = daddr, - .saddr = saddr, - .tos = 0 } }, - .proto = IPPROTO_TCP, - .uli_u = { - .ports = { - .sport = sport, - .dport = dport } } }; - - if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) - return NULL; - return rt; -} - -/* - * Assign offload parameters to some connection fields. - */ -static void init_offload_conn(struct s3_conn *c3cn, - struct t3cdev *cdev, - struct dst_entry *dst) -{ - BUG_ON(c3cn->cdev != cdev); - c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1; - c3cn->wr_unacked = 0; - c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst)); - - reset_wr_list(c3cn); -} - -static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev) -{ - struct cxgb3i_sdev_data *cdata = NDEV2CDATA(dev); - struct t3cdev *cdev = cdata->cdev; - struct dst_entry *dst = c3cn->dst_cache; - struct sk_buff *skb; - - c3cn_conn_debug("c3cn 0x%p, state %u, flag 0x%lx.\n", - c3cn, c3cn->state, c3cn->flags); - /* - * Initialize connection data. Note that the flags and ULP mode are - * initialized higher up ... 
- */ - c3cn->dev = dev; - c3cn->cdev = cdev; - c3cn->tid = cxgb3_alloc_atid(cdev, cdata->client, c3cn); - if (c3cn->tid < 0) - goto out_err; - - c3cn->qset = 0; - c3cn->l2t = t3_l2t_get(cdev, dst->neighbour, dev); - if (!c3cn->l2t) - goto free_tid; - - skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_KERNEL); - if (!skb) - goto free_l2t; - - skb->sk = (struct sock *)c3cn; - set_arp_failure_handler(skb, act_open_req_arp_failure); - - c3cn_hold(c3cn); - - init_offload_conn(c3cn, cdev, dst); - c3cn->err = 0; - - make_act_open_req(c3cn, skb, c3cn->tid, c3cn->l2t); - l2t_send(cdev, skb, c3cn->l2t); - return 0; - -free_l2t: - l2t_release(L2DATA(cdev), c3cn->l2t); -free_tid: - s3_free_atid(cdev, c3cn->tid); - c3cn->tid = 0; -out_err: - return -EINVAL; -} - -/** - * cxgb3i_find_dev - find the interface associated with the given address - * @ipaddr: ip address - */ -static struct net_device * -cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) -{ - struct flowi fl; - int err; - struct rtable *rt; - - memset(&fl, 0, sizeof(fl)); - fl.nl_u.ip4_u.daddr = ipaddr; - - err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl); - if (!err) - return (&rt->dst)->dev; - - return NULL; -} - -/** - * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address - * @c3cn: the iscsi tcp connection - * @usin: destination address - * - * return 0 if active open request is sent, < 0 otherwise. - */ -int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, - struct sockaddr_in *usin) -{ - struct rtable *rt; - struct cxgb3i_sdev_data *cdata; - struct t3cdev *cdev; - __be32 sipv4; - struct net_device *dstdev; - int err; - - c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); - - if (usin->sin_family != AF_INET) - return -EAFNOSUPPORT; - - c3cn->daddr.sin_port = usin->sin_port; - c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; - - dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr); - if (!dstdev || !is_cxgb3_dev(dstdev)) - return -ENETUNREACH; - - if (dstdev->priv_flags & IFF_802_1Q_VLAN) - dev = dstdev; - - rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, - c3cn->daddr.sin_addr.s_addr, - c3cn->saddr.sin_port, - c3cn->daddr.sin_port); - if (rt == NULL) { - c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n", - c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port), - dev ? dev->name : "any"); - return -ENETUNREACH; - } - - if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { - c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n", - c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port), - dev ? 
dev->name : "any"); - ip_rt_put(rt); - return -ENETUNREACH; - } - - if (!c3cn->saddr.sin_addr.s_addr) - c3cn->saddr.sin_addr.s_addr = rt->rt_src; - - /* now commit destination to connection */ - c3cn->dst_cache = &rt->dst; - - /* try to establish an offloaded connection */ - dev = cxgb3_egress_dev(c3cn->dst_cache->dev, c3cn, 0); - if (dev == NULL) { - c3cn_conn_debug("c3cn 0x%p, egress dev NULL.\n", c3cn); - return -ENETUNREACH; - } - cdata = NDEV2CDATA(dev); - cdev = cdata->cdev; - - /* get a source port if one hasn't been provided */ - err = c3cn_get_port(c3cn, cdata); - if (err) - return err; - - c3cn_conn_debug("c3cn 0x%p get port %u.\n", - c3cn, ntohs(c3cn->saddr.sin_port)); - - sipv4 = cxgb3i_get_private_ipv4addr(dev); - if (!sipv4) { - c3cn_conn_debug("c3cn 0x%p, iscsi ip not configured.\n", c3cn); - sipv4 = c3cn->saddr.sin_addr.s_addr; - cxgb3i_set_private_ipv4addr(dev, sipv4); - } else - c3cn->saddr.sin_addr.s_addr = sipv4; - - c3cn_conn_debug("c3cn 0x%p, %pI4,%u-%pI4,%u SYN_SENT.\n", - c3cn, - &c3cn->saddr.sin_addr.s_addr, - ntohs(c3cn->saddr.sin_port), - &c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port)); - - c3cn_set_state(c3cn, C3CN_STATE_CONNECTING); - if (!initiate_act_open(c3cn, dev)) - return 0; - - /* - * If we get here, we don't have an offload connection so simply - * return a failure. - */ - err = -ENOTSUPP; - - /* - * This trashes the connection and releases the local port, - * if necessary. - */ - c3cn_conn_debug("c3cn 0x%p -> CLOSED.\n", c3cn); - c3cn_set_state(c3cn, C3CN_STATE_CLOSED); - ip_rt_put(rt); - c3cn_put_port(c3cn); - return err; -} - -/** - * cxgb3i_c3cn_rx_credits - ack received tcp data. - * @c3cn: iscsi tcp connection - * @copied: # of bytes processed - * - * Called after some received data has been read. It returns RX credits - * to the HW for the amount of data processed. - */ -void cxgb3i_c3cn_rx_credits(struct s3_conn *c3cn, int copied) -{ - struct t3cdev *cdev; - int must_send; - u32 credits, dack = 0; - - if (c3cn->state != C3CN_STATE_ESTABLISHED) - return; - - credits = c3cn->copied_seq - c3cn->rcv_wup; - if (unlikely(!credits)) - return; - - cdev = c3cn->cdev; - - if (unlikely(cxgb3_rx_credit_thres == 0)) - return; - - dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); - - /* - * For coalescing to work effectively ensure the receive window has - * at least 16KB left. - */ - must_send = credits + 16384 >= cxgb3_rcv_win; - - if (must_send || credits >= cxgb3_rx_credit_thres) - c3cn->rcv_wup += send_rx_credits(c3cn, credits, dack); -} - -/** - * cxgb3i_c3cn_send_pdus - send the skbs containing iscsi pdus - * @c3cn: iscsi tcp connection - * @skb: skb contains the iscsi pdu - * - * Add a list of skbs to a connection send queue. The skbs must comply with - * the max size limit of the device and have a headroom of at least - * TX_HEADER_LEN bytes. - * Return # of bytes queued. - */ -int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb) -{ - struct sk_buff *next; - int err, copied = 0; - - spin_lock_bh(&c3cn->lock); - - if (c3cn->state != C3CN_STATE_ESTABLISHED) { - c3cn_tx_debug("c3cn 0x%p, not in est. 
state %u.\n", - c3cn, c3cn->state); - err = -EAGAIN; - goto out_err; - } - - if (c3cn->err) { - c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err); - err = -EPIPE; - goto out_err; - } - - if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) { - c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n", - c3cn, c3cn->write_seq, c3cn->snd_una, - cxgb3_snd_win); - err = -ENOBUFS; - goto out_err; - } - - while (skb) { - int frags = skb_shinfo(skb)->nr_frags + - (skb->len != skb->data_len); - - if (unlikely(skb_headroom(skb) < TX_HEADER_LEN)) { - c3cn_tx_debug("c3cn 0x%p, skb head.\n", c3cn); - err = -EINVAL; - goto out_err; - } - - if (frags >= SKB_WR_LIST_SIZE) { - cxgb3i_log_error("c3cn 0x%p, tx frags %d, len %u,%u.\n", - c3cn, skb_shinfo(skb)->nr_frags, - skb->len, skb->data_len); - err = -EINVAL; - goto out_err; - } - - next = skb->next; - skb->next = NULL; - skb_entail(c3cn, skb, C3CB_FLAG_NO_APPEND | C3CB_FLAG_NEED_HDR); - copied += skb->len; - c3cn->write_seq += skb->len + ulp_extra_len(skb); - skb = next; - } -done: - if (likely(skb_queue_len(&c3cn->write_queue))) - c3cn_push_tx_frames(c3cn, 1); - spin_unlock_bh(&c3cn->lock); - return copied; - -out_err: - if (copied == 0 && err == -EPIPE) - copied = c3cn->err ? c3cn->err : -EPIPE; - else - copied = err; - goto done; -} - -static void sdev_data_cleanup(struct cxgb3i_sdev_data *cdata) -{ - struct adap_ports *ports = &cdata->ports; - struct s3_conn *c3cn; - int i; - - for (i = 0; i < cxgb3_max_connect; i++) { - if (cdata->sport_conn[i]) { - c3cn = cdata->sport_conn[i]; - cdata->sport_conn[i] = NULL; - - spin_lock_bh(&c3cn->lock); - c3cn->cdev = NULL; - c3cn_set_flag(c3cn, C3CN_OFFLOAD_DOWN); - c3cn_closed(c3cn); - spin_unlock_bh(&c3cn->lock); - } - } - - for (i = 0; i < ports->nports; i++) - NDEV2CDATA(ports->lldevs[i]) = NULL; - - cxgb3i_free_big_mem(cdata); -} - -void cxgb3i_sdev_cleanup(void) -{ - struct cxgb3i_sdev_data *cdata; - - write_lock(&cdata_rwlock); - list_for_each_entry(cdata, &cdata_list, list) { - list_del(&cdata->list); - sdev_data_cleanup(cdata); - } - write_unlock(&cdata_rwlock); -} - -int cxgb3i_sdev_init(cxgb3_cpl_handler_func *cpl_handlers) -{ - cpl_handlers[CPL_ACT_ESTABLISH] = do_act_establish; - cpl_handlers[CPL_ACT_OPEN_RPL] = do_act_open_rpl; - cpl_handlers[CPL_PEER_CLOSE] = do_peer_close; - cpl_handlers[CPL_ABORT_REQ_RSS] = do_abort_req; - cpl_handlers[CPL_ABORT_RPL_RSS] = do_abort_rpl; - cpl_handlers[CPL_CLOSE_CON_RPL] = do_close_con_rpl; - cpl_handlers[CPL_TX_DMA_ACK] = do_wr_ack; - cpl_handlers[CPL_ISCSI_HDR] = do_iscsi_hdr; - - if (cxgb3_max_connect > CXGB3I_MAX_CONN) - cxgb3_max_connect = CXGB3I_MAX_CONN; - return 0; -} - -/** - * cxgb3i_sdev_add - allocate and initialize resources for each adapter found - * @cdev: t3cdev adapter - * @client: cxgb3 driver client - */ -void cxgb3i_sdev_add(struct t3cdev *cdev, struct cxgb3_client *client) -{ - struct cxgb3i_sdev_data *cdata; - struct ofld_page_info rx_page_info; - unsigned int wr_len; - int mapsize = cxgb3_max_connect * sizeof(struct s3_conn *); - int i; - - cdata = cxgb3i_alloc_big_mem(sizeof(*cdata) + mapsize, GFP_KERNEL); - if (!cdata) { - cxgb3i_log_warn("t3dev 0x%p, offload up, OOM %d.\n", - cdev, mapsize); - return; - } - - if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0 || - cdev->ctl(cdev, GET_PORTS, &cdata->ports) < 0 || - cdev->ctl(cdev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { - cxgb3i_log_warn("t3dev 0x%p, offload up, ioctl failed.\n", - cdev); - goto free_cdata; - } - - s3_init_wr_tab(wr_len); - - spin_lock_init(&cdata->lock); - 
INIT_LIST_HEAD(&cdata->list); - cdata->cdev = cdev; - cdata->client = client; - - for (i = 0; i < cdata->ports.nports; i++) - NDEV2CDATA(cdata->ports.lldevs[i]) = cdata; - - write_lock(&cdata_rwlock); - list_add_tail(&cdata->list, &cdata_list); - write_unlock(&cdata_rwlock); - - cxgb3i_log_info("t3dev 0x%p, offload up, added.\n", cdev); - return; - -free_cdata: - cxgb3i_free_big_mem(cdata); -} - -/** - * cxgb3i_sdev_remove - free the allocated resources for the adapter - * @cdev: t3cdev adapter - */ -void cxgb3i_sdev_remove(struct t3cdev *cdev) -{ - struct cxgb3i_sdev_data *cdata = CXGB3_SDEV_DATA(cdev); - - cxgb3i_log_info("t3dev 0x%p, offload down, remove.\n", cdev); - - write_lock(&cdata_rwlock); - list_del(&cdata->list); - write_unlock(&cdata_rwlock); - - sdev_data_cleanup(cdata); -} diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h deleted file mode 100644 index 6a1d86b1fafe..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.h +++ /dev/null @@ -1,243 +0,0 @@ -/* - * cxgb3i_offload.h: Chelsio S3xx iscsi offloaded tcp connection management - * - * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this - * release for licensing terms and conditions. - * - * Written by: Dimitris Michailidis (dm@chelsio.com) - * Karen Xie (kxie@chelsio.com) - */ - -#ifndef _CXGB3I_OFFLOAD_H -#define _CXGB3I_OFFLOAD_H - -#include -#include - -#include "common.h" -#include "adapter.h" -#include "t3cdev.h" -#include "cxgb3_offload.h" - -#define cxgb3i_log_error(fmt...) printk(KERN_ERR "cxgb3i: ERR! " fmt) -#define cxgb3i_log_warn(fmt...) printk(KERN_WARNING "cxgb3i: WARN! " fmt) -#define cxgb3i_log_info(fmt...) printk(KERN_INFO "cxgb3i: " fmt) -#define cxgb3i_log_debug(fmt, args...) \ - printk(KERN_INFO "cxgb3i: %s - " fmt, __func__ , ## args) - -/** - * struct s3_conn - an iscsi tcp connection structure - * - * @dev: net device of with connection - * @cdev: adapter t3cdev for net device - * @flags: see c3cn_flags below - * @tid: connection id assigned by the h/w - * @qset: queue set used by connection - * @mss_idx: Maximum Segment Size table index - * @l2t: ARP resolution entry for offload packets - * @wr_max: maximum in-flight writes - * @wr_avail: number of writes available - * @wr_unacked: writes since last request for completion notification - * @wr_pending_head: head of pending write queue - * @wr_pending_tail: tail of pending write queue - * @cpl_close: skb for cpl_close_req - * @cpl_abort_req: skb for cpl_abort_req - * @cpl_abort_rpl: skb for cpl_abort_rpl - * @lock: connection status lock - * @refcnt: reference count on connection - * @state: connection state - * @saddr: source ip/port address - * @daddr: destination ip/port address - * @dst_cache: reference to destination route - * @receive_queue: received PDUs - * @write_queue: un-pushed pending writes - * @retry_timer: retry timer for various operations - * @err: connection error status - * @callback_lock: lock for opaque user context - * @user_data: opaque user context - * @rcv_nxt: next receive seq. 
# - * @copied_seq: head of yet unread data - * @rcv_wup: rcv_nxt on last window update sent - * @snd_nxt: next sequence we send - * @snd_una: first byte we want an ack for - * @write_seq: tail+1 of data held in send buffer - */ -struct s3_conn { - struct net_device *dev; - struct t3cdev *cdev; - unsigned long flags; - int tid; - int qset; - int mss_idx; - struct l2t_entry *l2t; - int wr_max; - int wr_avail; - int wr_unacked; - struct sk_buff *wr_pending_head; - struct sk_buff *wr_pending_tail; - struct sk_buff *cpl_close; - struct sk_buff *cpl_abort_req; - struct sk_buff *cpl_abort_rpl; - spinlock_t lock; - atomic_t refcnt; - volatile unsigned int state; - struct sockaddr_in saddr; - struct sockaddr_in daddr; - struct dst_entry *dst_cache; - struct sk_buff_head receive_queue; - struct sk_buff_head write_queue; - struct timer_list retry_timer; - int err; - rwlock_t callback_lock; - void *user_data; - - u32 rcv_nxt; - u32 copied_seq; - u32 rcv_wup; - u32 snd_nxt; - u32 snd_una; - u32 write_seq; -}; - -/* - * connection state - */ -enum conn_states { - C3CN_STATE_CONNECTING = 1, - C3CN_STATE_ESTABLISHED, - C3CN_STATE_ACTIVE_CLOSE, - C3CN_STATE_PASSIVE_CLOSE, - C3CN_STATE_CLOSE_WAIT_1, - C3CN_STATE_CLOSE_WAIT_2, - C3CN_STATE_ABORTING, - C3CN_STATE_CLOSED, -}; - -static inline unsigned int c3cn_is_closing(const struct s3_conn *c3cn) -{ - return c3cn->state >= C3CN_STATE_ACTIVE_CLOSE; -} -static inline unsigned int c3cn_is_established(const struct s3_conn *c3cn) -{ - return c3cn->state == C3CN_STATE_ESTABLISHED; -} - -/* - * Connection flags -- many to track some close related events. - */ -enum c3cn_flags { - C3CN_ABORT_RPL_RCVD, /* received one ABORT_RPL_RSS message */ - C3CN_ABORT_REQ_RCVD, /* received one ABORT_REQ_RSS message */ - C3CN_ABORT_RPL_PENDING, /* expecting an abort reply */ - C3CN_TX_DATA_SENT, /* already sent a TX_DATA WR */ - C3CN_ACTIVE_CLOSE_NEEDED, /* need to be closed */ - C3CN_OFFLOAD_DOWN /* offload function off */ -}; - -/** - * cxgb3i_sdev_data - Per adapter data. - * Linked off of each Ethernet device port on the adapter. - * Also available via the t3cdev structure since we have pointers to our port - * net_device's there ... - * - * @list: list head to link elements - * @cdev: t3cdev adapter - * @client: CPL client pointer - * @ports: array of adapter ports - * @sport_next: next port - * @sport_conn: source port connection - */ -struct cxgb3i_sdev_data { - struct list_head list; - struct t3cdev *cdev; - struct cxgb3_client *client; - struct adap_ports ports; - spinlock_t lock; - unsigned int sport_next; - struct s3_conn *sport_conn[0]; -}; -#define NDEV2CDATA(ndev) (*(struct cxgb3i_sdev_data **)&(ndev)->ec_ptr) -#define CXGB3_SDEV_DATA(cdev) NDEV2CDATA((cdev)->lldev) - -void cxgb3i_sdev_cleanup(void); -int cxgb3i_sdev_init(cxgb3_cpl_handler_func *); -void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *); -void cxgb3i_sdev_remove(struct t3cdev *); - -struct s3_conn *cxgb3i_c3cn_create(void); -int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *, - struct sockaddr_in *); -void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); -int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); -void cxgb3i_c3cn_release(struct s3_conn *); - -/** - * cxgb3_skb_cb - control block for received pdu state and ULP mode management. 
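
[Illustrative sketch, not part of this patch: the control-block pattern declared below stores per-packet driver state inside the sk_buff's cb[] scratch area, with an rx/tx union since any given packet is only ever one of the two, and accessor macros that overlay the struct on skb->cb exactly like CXGB3_SKB_CB(). A stand-alone model; the 48-byte cb size is assumed here.]

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the sk_buff::cb scratch area (size assumed). */
struct fake_skb {
	char cb[48];
};

struct rx_cb { uint32_t ddigest; uint32_t pdulen; };
struct tx_cb { struct fake_skb *wr_next; };

/* Same shape as cxgb3_skb_cb: common fields plus an rx/tx union. */
struct my_skb_cb {
	uint8_t flags;
	uint8_t ulp_mode;
	uint32_t seq;
	union { struct rx_cb rx; struct tx_cb tx; };
};

#define MY_SKB_CB(skb)		((struct my_skb_cb *)&((skb)->cb[0]))
#define skb_tcp_seq(skb)	(MY_SKB_CB(skb)->seq)

int main(void)
{
	struct fake_skb skb;

	/* the overlay must fit into cb[] */
	_Static_assert(sizeof(struct my_skb_cb) <= sizeof(skb.cb),
		       "cb overlay too large");
	skb_tcp_seq(&skb) = 12345;
	printf("seq %u\n", (unsigned)skb_tcp_seq(&skb));
	return 0;
}
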
- * - * @flag: see C3CB_FLAG_* below - * @ulp_mode: ULP mode/submode of sk_buff - * @seq: tcp sequence number - */ -struct cxgb3_skb_rx_cb { - __u32 ddigest; /* data digest */ - __u32 pdulen; /* recovered pdu length */ -}; - -struct cxgb3_skb_tx_cb { - struct sk_buff *wr_next; /* next wr */ -}; - -struct cxgb3_skb_cb { - __u8 flags; - __u8 ulp_mode; - __u32 seq; - union { - struct cxgb3_skb_rx_cb rx; - struct cxgb3_skb_tx_cb tx; - }; -}; - -#define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0])) -#define skb_flags(skb) (CXGB3_SKB_CB(skb)->flags) -#define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode) -#define skb_tcp_seq(skb) (CXGB3_SKB_CB(skb)->seq) -#define skb_rx_ddigest(skb) (CXGB3_SKB_CB(skb)->rx.ddigest) -#define skb_rx_pdulen(skb) (CXGB3_SKB_CB(skb)->rx.pdulen) -#define skb_tx_wr_next(skb) (CXGB3_SKB_CB(skb)->tx.wr_next) - -enum c3cb_flags { - C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */ - C3CB_FLAG_NO_APPEND = 1 << 1, /* don't grow this skb */ - C3CB_FLAG_COMPL = 1 << 2, /* request WR completion */ -}; - -/** - * sge_opaque_hdr - - * Opaque version of structure the SGE stores at skb->head of TX_DATA packets - * and for which we must reserve space. - */ -struct sge_opaque_hdr { - void *dev; - dma_addr_t addr[MAX_SKB_FRAGS + 1]; -}; - -/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ -#define TX_HEADER_LEN \ - (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) -#define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN) - -/* - * get and set private ip for iscsi traffic - */ -#define cxgb3i_get_private_ipv4addr(ndev) \ - (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) -#define cxgb3i_set_private_ipv4addr(ndev, addr) \ - (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr - -/* max. connections per adapter */ -#define CXGB3I_MAX_CONN 16384 -#endif /* _CXGB3_OFFLOAD_H */ diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.c b/drivers/scsi/cxgb3i/cxgb3i_pdu.c deleted file mode 100644 index dc5e3e77a351..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_pdu.c +++ /dev/null @@ -1,495 +0,0 @@ -/* - * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * Copyright (c) 2008 Mike Christie - * Copyright (c) 2008 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#include -#include -#include -#include -#include - -#include "cxgb3i.h" -#include "cxgb3i_pdu.h" - -#ifdef __DEBUG_CXGB3I_RX__ -#define cxgb3i_rx_debug cxgb3i_log_debug -#else -#define cxgb3i_rx_debug(fmt...) -#endif - -#ifdef __DEBUG_CXGB3I_TX__ -#define cxgb3i_tx_debug cxgb3i_log_debug -#else -#define cxgb3i_tx_debug(fmt...) 
-#endif - -/* always allocate rooms for AHS */ -#define SKB_TX_PDU_HEADER_LEN \ - (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) -static unsigned int skb_extra_headroom; -static struct page *pad_page; - -/* - * pdu receive, interact with libiscsi_tcp - */ -static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb, - unsigned int offset, int offloaded) -{ - int status = 0; - int bytes_read; - - bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); - switch (status) { - case ISCSI_TCP_CONN_ERR: - return -EIO; - case ISCSI_TCP_SUSPENDED: - /* no transfer - just have caller flush queue */ - return bytes_read; - case ISCSI_TCP_SKB_DONE: - /* - * pdus should always fit in the skb and we should get - * segment done notifcation. - */ - iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); - return -EFAULT; - case ISCSI_TCP_SEGMENT_DONE: - return bytes_read; - default: - iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb " - "status %d\n", status); - return -EINVAL; - } -} - -static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn, - struct sk_buff *skb) -{ - struct iscsi_tcp_conn *tcp_conn = conn->dd_data; - bool offloaded = 0; - unsigned int offset; - int rc; - - cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n", - conn, skb, skb->len, skb_ulp_mode(skb)); - - if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { - iscsi_conn_failure(conn, ISCSI_ERR_PROTO); - return -EIO; - } - - if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) { - iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); - return -EIO; - } - - if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) { - iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); - return -EIO; - } - - /* iscsi hdr */ - rc = read_pdu_skb(conn, skb, 0, 0); - if (rc <= 0) - return rc; - - if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) - return 0; - - offset = rc; - if (conn->hdrdgst_en) - offset += ISCSI_DIGEST_SIZE; - - /* iscsi data */ - if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) { - cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, " - "itt 0x%x.\n", - skb, - tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK, - tcp_conn->in.datalen, - ntohl(tcp_conn->in.hdr->itt)); - offloaded = 1; - } else { - cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, " - "itt 0x%x.\n", - skb, - tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK, - tcp_conn->in.datalen, - ntohl(tcp_conn->in.hdr->itt)); - offset += sizeof(struct cpl_iscsi_hdr_norss); - } - - rc = read_pdu_skb(conn, skb, offset, offloaded); - if (rc < 0) - return rc; - else - return 0; -} - -/* - * pdu transmit, interact with libiscsi_tcp - */ -static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) -{ - u8 submode = 0; - - if (hcrc) - submode |= 1; - if (dcrc) - submode |= 2; - skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode; -} - -void cxgb3i_conn_cleanup_task(struct iscsi_task *task) -{ - struct cxgb3i_task_data *tdata = task->dd_data + - sizeof(struct iscsi_tcp_task); - - /* never reached the xmit task callout */ - if (tdata->skb) - __kfree_skb(tdata->skb); - memset(tdata, 0, sizeof(struct cxgb3i_task_data)); - - /* MNC - Do we need a check in case this is called but - * cxgb3i_conn_alloc_pdu has never been called on the task */ - cxgb3i_release_itt(task, task->hdr_itt); - iscsi_tcp_cleanup_task(task); -} - -static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, - unsigned int offset, unsigned int *off, - struct scatterlist **sgp) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sgl, 
sg, sgcnt, i) { - if (offset < sg->length) { - *off = offset; - *sgp = sg; - return 0; - } - offset -= sg->length; - } - return -EFAULT; -} - -static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, - unsigned int dlen, skb_frag_t *frags, - int frag_max) -{ - unsigned int datalen = dlen; - unsigned int sglen = sg->length - sgoffset; - struct page *page = sg_page(sg); - int i; - - i = 0; - do { - unsigned int copy; - - if (!sglen) { - sg = sg_next(sg); - if (!sg) { - cxgb3i_log_error("%s, sg NULL, len %u/%u.\n", - __func__, datalen, dlen); - return -EINVAL; - } - sgoffset = 0; - sglen = sg->length; - page = sg_page(sg); - - } - copy = min(datalen, sglen); - if (i && page == frags[i - 1].page && - sgoffset + sg->offset == - frags[i - 1].page_offset + frags[i - 1].size) { - frags[i - 1].size += copy; - } else { - if (i >= frag_max) { - cxgb3i_log_error("%s, too many pages %u, " - "dlen %u.\n", __func__, - frag_max, dlen); - return -EINVAL; - } - - frags[i].page = page; - frags[i].page_offset = sg->offset + sgoffset; - frags[i].size = copy; - i++; - } - datalen -= copy; - sgoffset += copy; - sglen -= copy; - } while (datalen); - - return i; -} - -int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) -{ - struct iscsi_conn *conn = task->conn; - struct iscsi_tcp_task *tcp_task = task->dd_data; - struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task); - struct scsi_cmnd *sc = task->sc; - int headroom = SKB_TX_PDU_HEADER_LEN; - - tcp_task->dd_data = tdata; - task->hdr = NULL; - - /* write command, need to send data pdus */ - if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT || - (opcode == ISCSI_OP_SCSI_CMD && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) - headroom += min(skb_extra_headroom, conn->max_xmit_dlength); - - tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC); - if (!tdata->skb) - return -ENOMEM; - skb_reserve(tdata->skb, TX_HEADER_LEN); - - cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n", - task, opcode, tdata->skb); - - task->hdr = (struct iscsi_hdr *)tdata->skb->data; - task->hdr_max = SKB_TX_PDU_HEADER_LEN; - - /* data_out uses scsi_cmd's itt */ - if (opcode != ISCSI_OP_SCSI_DATA_OUT) - cxgb3i_reserve_itt(task, &task->hdr->itt); - - return 0; -} - -int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset, - unsigned int count) -{ - struct iscsi_conn *conn = task->conn; - struct iscsi_tcp_task *tcp_task = task->dd_data; - struct cxgb3i_task_data *tdata = tcp_task->dd_data; - struct sk_buff *skb = tdata->skb; - unsigned int datalen = count; - int i, padlen = iscsi_padding(count); - struct page *pg; - - cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n", - task, task->sc, offset, count, skb); - - skb_put(skb, task->hdr_len); - tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? 
conn->datadgst_en : 0); - if (!count) - return 0; - - if (task->sc) { - struct scsi_data_buffer *sdb = scsi_out(task->sc); - struct scatterlist *sg = NULL; - int err; - - tdata->offset = offset; - tdata->count = count; - err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents, - tdata->offset, &tdata->sgoffset, &sg); - if (err < 0) { - cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n", - sdb->table.nents, tdata->offset, - sdb->length); - return err; - } - err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, - tdata->frags, MAX_PDU_FRAGS); - if (err < 0) { - cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n", - sdb->table.nents, tdata->offset, - tdata->count); - return err; - } - tdata->nr_frags = err; - - if (tdata->nr_frags > MAX_SKB_FRAGS || - (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { - char *dst = skb->data + task->hdr_len; - skb_frag_t *frag = tdata->frags; - - /* data fits in the skb's headroom */ - for (i = 0; i < tdata->nr_frags; i++, frag++) { - char *src = kmap_atomic(frag->page, - KM_SOFTIRQ0); - - memcpy(dst, src+frag->page_offset, frag->size); - dst += frag->size; - kunmap_atomic(src, KM_SOFTIRQ0); - } - if (padlen) { - memset(dst, 0, padlen); - padlen = 0; - } - skb_put(skb, count + padlen); - } else { - /* data fit into frag_list */ - for (i = 0; i < tdata->nr_frags; i++) - get_page(tdata->frags[i].page); - - memcpy(skb_shinfo(skb)->frags, tdata->frags, - sizeof(skb_frag_t) * tdata->nr_frags); - skb_shinfo(skb)->nr_frags = tdata->nr_frags; - skb->len += count; - skb->data_len += count; - skb->truesize += count; - } - - } else { - pg = virt_to_page(task->data); - - get_page(pg); - skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), - count); - skb->len += count; - skb->data_len += count; - skb->truesize += count; - } - - if (padlen) { - i = skb_shinfo(skb)->nr_frags; - get_page(pad_page); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0, - padlen); - - skb->data_len += padlen; - skb->truesize += padlen; - skb->len += padlen; - } - - return 0; -} - -int cxgb3i_conn_xmit_pdu(struct iscsi_task *task) -{ - struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; - struct cxgb3i_conn *cconn = tcp_conn->dd_data; - struct iscsi_tcp_task *tcp_task = task->dd_data; - struct cxgb3i_task_data *tdata = tcp_task->dd_data; - struct sk_buff *skb = tdata->skb; - unsigned int datalen; - int err; - - if (!skb) - return 0; - - datalen = skb->data_len; - tdata->skb = NULL; - err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb); - if (err > 0) { - int pdulen = err; - - cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n", - task, skb, skb->len, skb->data_len, err); - - if (task->conn->hdrdgst_en) - pdulen += ISCSI_DIGEST_SIZE; - if (datalen && task->conn->datadgst_en) - pdulen += ISCSI_DIGEST_SIZE; - - task->conn->txdata_octets += pdulen; - return 0; - } - - if (err == -EAGAIN || err == -ENOBUFS) { - /* reset skb to send when we are called again */ - tdata->skb = skb; - return err; - } - - kfree_skb(skb); - cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", - task->itt, skb, skb->len, skb->data_len, err); - iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); - iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); - return err; -} - -int cxgb3i_pdu_init(void) -{ - if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS)) - skb_extra_headroom = SKB_TX_HEADROOM; - pad_page = alloc_page(GFP_KERNEL); - if (!pad_page) - return -ENOMEM; - memset(page_address(pad_page), 0, PAGE_SIZE); - return 0; -} - -void cxgb3i_pdu_cleanup(void) -{ - if (pad_page) 
{ - __free_page(pad_page); - pad_page = NULL; - } -} - -void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn) -{ - struct sk_buff *skb; - unsigned int read = 0; - struct iscsi_conn *conn = c3cn->user_data; - int err = 0; - - cxgb3i_rx_debug("cn 0x%p.\n", c3cn); - - read_lock(&c3cn->callback_lock); - if (unlikely(!conn || conn->suspend_rx)) { - cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n", - conn, conn ? conn->id : 0xFF, - conn ? conn->suspend_rx : 0xFF); - read_unlock(&c3cn->callback_lock); - return; - } - skb = skb_peek(&c3cn->receive_queue); - while (!err && skb) { - __skb_unlink(skb, &c3cn->receive_queue); - read += skb_rx_pdulen(skb); - cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n", - conn, c3cn, skb, skb_rx_pdulen(skb)); - err = cxgb3i_conn_read_pdu_skb(conn, skb); - __kfree_skb(skb); - skb = skb_peek(&c3cn->receive_queue); - } - read_unlock(&c3cn->callback_lock); - c3cn->copied_seq += read; - cxgb3i_c3cn_rx_credits(c3cn, read); - conn->rxdata_octets += read; - - if (err) { - cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err); - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); - } -} - -void cxgb3i_conn_tx_open(struct s3_conn *c3cn) -{ - struct iscsi_conn *conn = c3cn->user_data; - - cxgb3i_tx_debug("cn 0x%p.\n", c3cn); - if (conn) { - cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id); - iscsi_conn_queue_work(conn); - } -} - -void cxgb3i_conn_closing(struct s3_conn *c3cn) -{ - struct iscsi_conn *conn; - - read_lock(&c3cn->callback_lock); - conn = c3cn->user_data; - if (conn && c3cn->state != C3CN_STATE_ESTABLISHED) - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); - read_unlock(&c3cn->callback_lock); -} diff --git a/drivers/scsi/cxgb3i/cxgb3i_pdu.h b/drivers/scsi/cxgb3i/cxgb3i_pdu.h deleted file mode 100644 index 0770b23d90da..000000000000 --- a/drivers/scsi/cxgb3i/cxgb3i_pdu.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * cxgb3i_ulp2.h: Chelsio S3xx iSCSI driver. - * - * Copyright (c) 2008 Chelsio Communications, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Written by: Karen Xie (kxie@chelsio.com) - */ - -#ifndef __CXGB3I_ULP2_PDU_H__ -#define __CXGB3I_ULP2_PDU_H__ - -struct cpl_iscsi_hdr_norss { - union opcode_tid ot; - u16 pdu_len_ddp; - u16 len; - u32 seq; - u16 urg; - u8 rsvd; - u8 status; -}; - -struct cpl_rx_data_ddp_norss { - union opcode_tid ot; - u16 urg; - u16 len; - u32 seq; - u32 nxt_seq; - u32 ulp_crc; - u32 ddp_status; -}; - -#define RX_DDP_STATUS_IPP_SHIFT 27 /* invalid pagepod */ -#define RX_DDP_STATUS_TID_SHIFT 26 /* tid mismatch */ -#define RX_DDP_STATUS_COLOR_SHIFT 25 /* color mismatch */ -#define RX_DDP_STATUS_OFFSET_SHIFT 24 /* offset mismatch */ -#define RX_DDP_STATUS_ULIMIT_SHIFT 23 /* ulimit error */ -#define RX_DDP_STATUS_TAG_SHIFT 22 /* tag mismatch */ -#define RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ -#define RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ -#define RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ -#define RX_DDP_STATUS_PPP_SHIFT 18 /* pagepod parity error */ -#define RX_DDP_STATUS_LLIMIT_SHIFT 17 /* llimit error */ -#define RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ -#define RX_DDP_STATUS_PMM_SHIFT 15 /* pagepod mismatch */ - -#define ULP2_FLAG_DATA_READY 0x1 -#define ULP2_FLAG_DATA_DDPED 0x2 -#define ULP2_FLAG_HCRC_ERROR 0x10 -#define ULP2_FLAG_DCRC_ERROR 0x20 -#define ULP2_FLAG_PAD_ERROR 0x40 - -void cxgb3i_conn_closing(struct s3_conn *c3cn); -void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn); -void cxgb3i_conn_tx_open(struct s3_conn *c3cn); -#endif diff --git a/drivers/scsi/cxgbi/Kconfig b/drivers/scsi/cxgbi/Kconfig index a470e389f6ba..17eb5d522f42 100644 --- a/drivers/scsi/cxgbi/Kconfig +++ b/drivers/scsi/cxgbi/Kconfig @@ -1 +1,2 @@ +source "drivers/scsi/cxgbi/cxgb3i/Kconfig" source "drivers/scsi/cxgbi/cxgb4i/Kconfig" diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile index 9e8f604888dc..86007e344955 100644 --- a/drivers/scsi/cxgbi/Makefile +++ b/drivers/scsi/cxgbi/Makefile @@ -1 +1,2 @@ +obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild new file mode 100644 index 000000000000..09dbf9efc8ea --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild @@ -0,0 +1,3 @@ +EXTRA_CFLAGS += -I$(srctree)/drivers/net/cxgb3 + +obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig new file mode 100644 index 000000000000..5cf4e9831f1b --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig @@ -0,0 +1,7 @@ +config SCSI_CXGB3_ISCSI + tristate "Chelsio T3 iSCSI support" + depends on CHELSIO_T3_DEPENDS + select CHELSIO_T3 + select SCSI_ISCSI_ATTRS + ---help--- + This driver supports iSCSI offload for the Chelsio T3 devices. diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c new file mode 100644 index 000000000000..a01c1e238938 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -0,0 +1,1432 @@ +/* + * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management + * + * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this + * release for licensing terms and conditions. 
+ * + * Written by: Dimitris Michailidis (dm@chelsio.com) + * Karen Xie (kxie@chelsio.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include +#include +#include +#include + +#include "common.h" +#include "t3_cpl.h" +#include "t3cdev.h" +#include "cxgb3_defs.h" +#include "cxgb3_ctl_defs.h" +#include "cxgb3_offload.h" +#include "firmware_exports.h" +#include "cxgb3i.h" + +static unsigned int dbg_level; +#include "../libcxgbi.h" + +#define DRV_MODULE_NAME "cxgb3i" +#define DRV_MODULE_DESC "Chelsio T3 iSCSI Driver" +#define DRV_MODULE_VERSION "2.0.0" +#define DRV_MODULE_RELDATE "Jun. 2010" + +static char version[] = + DRV_MODULE_DESC " " DRV_MODULE_NAME + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Chelsio Communications, Inc."); +MODULE_DESCRIPTION(DRV_MODULE_DESC); +MODULE_VERSION(DRV_MODULE_VERSION); +MODULE_LICENSE("GPL"); + +module_param(dbg_level, uint, 0644); +MODULE_PARM_DESC(dbg_level, "debug flag (default=0)"); + +static int cxgb3i_rcv_win = 256 * 1024; +module_param(cxgb3i_rcv_win, int, 0644); +MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)"); + +static int cxgb3i_snd_win = 128 * 1024; +module_param(cxgb3i_snd_win, int, 0644); +MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)"); + +static int cxgb3i_rx_credit_thres = 10 * 1024; +module_param(cxgb3i_rx_credit_thres, int, 0644); +MODULE_PARM_DESC(rx_credit_thres, + "RX credits return threshold in bytes (default=10KB)"); + +static unsigned int cxgb3i_max_connect = 8 * 1024; +module_param(cxgb3i_max_connect, uint, 0644); +MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8092)"); + +static unsigned int cxgb3i_sport_base = 20000; +module_param(cxgb3i_sport_base, uint, 0644); +MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)"); + +static void cxgb3i_dev_open(struct t3cdev *); +static void cxgb3i_dev_close(struct t3cdev *); +static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32); + +static struct cxgb3_client t3_client = { + .name = DRV_MODULE_NAME, + .handlers = cxgb3i_cpl_handlers, + .add = cxgb3i_dev_open, + .remove = cxgb3i_dev_close, + .event_handler = cxgb3i_dev_event_handler, +}; + +static struct scsi_host_template cxgb3i_host_template = { + .module = THIS_MODULE, + .name = DRV_MODULE_NAME, + .proc_name = DRV_MODULE_NAME, + .can_queue = CXGB3I_SCSI_HOST_QDEPTH, + .queuecommand = iscsi_queuecommand, + .change_queue_depth = iscsi_change_queue_depth, + .sg_tablesize = SG_ALL, + .max_sectors = 0xFFFF, + .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, + .eh_abort_handler = iscsi_eh_abort, + .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, + .target_alloc = iscsi_target_alloc, + .use_clustering = DISABLE_CLUSTERING, + .this_id = -1, +}; + +static struct iscsi_transport cxgb3i_iscsi_transport = { + .owner = THIS_MODULE, + .name = DRV_MODULE_NAME, + /* owner and name should be set already */ + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST + | CAP_DATADGST | CAP_DIGEST_OFFLOAD | + CAP_PADDING_OFFLOAD, + .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | + ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | + ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | + ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | + ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | + ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | + ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | + ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | + ISCSI_PERSISTENT_ADDRESS | + ISCSI_TARGET_NAME | ISCSI_TPGT | + 
ISCSI_USERNAME | ISCSI_PASSWORD | + ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | + ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | + ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | + ISCSI_PING_TMO | ISCSI_RECV_TMO | + ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, + .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | + ISCSI_HOST_INITIATOR_NAME | + ISCSI_HOST_NETDEV_NAME, + .get_host_param = cxgbi_get_host_param, + .set_host_param = cxgbi_set_host_param, + /* session management */ + .create_session = cxgbi_create_session, + .destroy_session = cxgbi_destroy_session, + .get_session_param = iscsi_session_get_param, + /* connection management */ + .create_conn = cxgbi_create_conn, + .bind_conn = cxgbi_bind_conn, + .destroy_conn = iscsi_tcp_conn_teardown, + .start_conn = iscsi_conn_start, + .stop_conn = iscsi_conn_stop, + .get_conn_param = cxgbi_get_conn_param, + .set_param = cxgbi_set_conn_param, + .get_stats = cxgbi_get_conn_stats, + /* pdu xmit req from user space */ + .send_pdu = iscsi_conn_send_pdu, + /* task */ + .init_task = iscsi_tcp_task_init, + .xmit_task = iscsi_tcp_task_xmit, + .cleanup_task = cxgbi_cleanup_task, + /* pdu */ + .alloc_pdu = cxgbi_conn_alloc_pdu, + .init_pdu = cxgbi_conn_init_pdu, + .xmit_pdu = cxgbi_conn_xmit_pdu, + .parse_pdu_itt = cxgbi_parse_pdu_itt, + /* TCP connect/disconnect */ + .ep_connect = cxgbi_ep_connect, + .ep_poll = cxgbi_ep_poll, + .ep_disconnect = cxgbi_ep_disconnect, + /* Error recovery timeout call */ + .session_recovery_timedout = iscsi_session_recovery_timedout, +}; + +static struct scsi_transport_template *cxgb3i_stt; + +/* + * CPL (Chelsio Protocol Language) defines a message passing interface between + * the host driver and Chelsio asic. + * The section below implments CPLs that related to iscsi tcp connection + * open/close/abort and data send/receive. + */ + +static int push_tx_frames(struct cxgbi_sock *csk, int req_completion); + +static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, + const struct l2t_entry *e) +{ + unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win); + struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; + + skb->priority = CPL_PRIORITY_SETUP; + + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + + req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS | + V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | + V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx)); + req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) | + V_RCV_BUFSIZ(cxgb3i_rcv_win>>10)); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n", + csk, csk->state, csk->flags, csk->atid, + &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->mss_idx, e->idx, e->smt_idx); + + l2t_send(csk->cdev->lldev, skb, csk->l2t); +} + +static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb) +{ + cxgbi_sock_act_open_req_arp_failure(NULL, skb); +} + +/* + * CPL connection close request: host -> + * + * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to + * the write queue (i.e., after any unsent txt data). 
+ */ +static void send_close_req(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->cpl_close; + struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; + unsigned int tid = csk->tid; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->cpl_close = NULL; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); + req->wr.wr_lo = htonl(V_WR_TID(tid)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); + req->rsvd = htonl(csk->write_seq); + + cxgbi_sock_skb_entail(csk, skb); + if (csk->state >= CTP_ESTABLISHED) + push_tx_frames(csk, 1); +} + +/* + * CPL connection abort request: host -> + * + * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs + * for the same connection and also that we do not try to send a message + * after the connection has closed. + */ +static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb) +{ + struct cpl_abort_req *req = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "t3dev 0x%p, tid %u, skb 0x%p.\n", + tdev, GET_TID(req), skb); + req->cmd = CPL_ABORT_NO_RST; + cxgb3_ofld_send(tdev, skb); +} + +static void send_abort_req(struct cxgbi_sock *csk) +{ + struct sk_buff *skb = csk->cpl_abort_req; + struct cpl_abort_req *req; + + if (unlikely(csk->state == CTP_ABORTING || !skb)) + return; + cxgbi_sock_set_state(csk, CTP_ABORTING); + cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); + /* Purge the send queue so we don't send anything after an abort. */ + cxgbi_sock_purge_write_queue(csk); + + csk->cpl_abort_req = NULL; + req = (struct cpl_abort_req *)skb->head; + skb->priority = CPL_PRIORITY_DATA; + set_arp_failure_handler(skb, abort_arp_failure); + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); + req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); + req->rsvd0 = htonl(csk->snd_nxt); + req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); + req->cmd = CPL_ABORT_SEND_RST; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n", + csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, + req->rsvd1); + + l2t_send(csk->cdev->lldev, skb, csk->l2t); +} + +/* + * CPL connection abort reply: host -> + * + * Send an ABORT_RPL message in response of the ABORT_REQ received. + */ +static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) +{ + struct sk_buff *skb = csk->cpl_abort_rpl; + struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, status %d.\n", + csk, csk->state, csk->flags, csk->tid, rst_status); + + csk->cpl_abort_rpl = NULL; + skb->priority = CPL_PRIORITY_DATA; + rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); + rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); + OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); + rpl->cmd = rst_status; + cxgb3_ofld_send(csk->cdev->lldev, skb); +} + +/* + * CPL connection rx data ack: host -> + * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of + * credits sent. 
+ */ +static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) +{ + struct sk_buff *skb; + struct cpl_rx_data_ack *req; + u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n", + csk, csk->state, csk->flags, csk->tid, credits, dack); + + skb = alloc_cpl(sizeof(*req), 0, GFP_ATOMIC); + if (!skb) { + pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); + return 0; + } + req = (struct cpl_rx_data_ack *)skb->head; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); + req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) | + V_RX_CREDITS(credits)); + skb->priority = CPL_PRIORITY_ACK; + cxgb3_ofld_send(csk->cdev->lldev, skb); + return credits; +} + +/* + * CPL connection tx data: host -> + * + * Send iscsi PDU via TX_DATA CPL message. Returns the number of + * credits sent. + * Each TX_DATA consumes work request credit (wrs), so we need to keep track of + * how many we've used so far and how many are pending (i.e., yet ack'ed by T3). + */ + +static unsigned int wrlen __read_mostly; +static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly; + +static void init_wr_tab(unsigned int wr_len) +{ + int i; + + if (skb_wrs[1]) /* already initialized */ + return; + for (i = 1; i < SKB_WR_LIST_SIZE; i++) { + int sgl_len = (3 * i) / 2 + (i & 1); + + sgl_len += 3; + skb_wrs[i] = (sgl_len <= wr_len + ? 1 : 1 + (sgl_len - 2) / (wr_len - 1)); + } + wrlen = wr_len * 8; +} + +static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, + int len, int req_completion) +{ + struct tx_data_wr *req; + struct l2t_entry *l2t = csk->l2t; + + skb_reset_transport_header(skb); + req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req)); + req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) | + (req_completion ? F_WR_COMPL : 0)); + req->wr_lo = htonl(V_WR_TID(csk->tid)); + /* len includes the length of any HW ULP additions */ + req->len = htonl(len); + /* V_TX_ULP_SUBMODE sets both the mode and submode */ + req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) | + V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); + req->sndseq = htonl(csk->snd_nxt); + req->param = htonl(V_TX_PORT(l2t->smt_idx)); + + if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { + req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT | + V_TX_CPU_IDX(csk->rss_qid)); + /* sendbuffer is in units of 32KB. */ + req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15)); + cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); + } +} + +/** + * push_tx_frames -- start transmit + * @c3cn: the offloaded connection + * @req_completion: request wr_ack or not + * + * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a + * connection's send queue and sends them on to T3. Must be called with the + * connection's lock held. Returns the amount of send buffer space that was + * freed as a result of sending queued data to T3. 
+ */ + +static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) +{ + int total_size = 0; + struct sk_buff *skb; + + if (unlikely(csk->state < CTP_ESTABLISHED || + csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p,%u,0x%lx,%u, in closing state.\n", + csk, csk->state, csk->flags, csk->tid); + return 0; + } + + while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { + int len = skb->len; /* length before skb_push */ + int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len); + int wrs_needed = skb_wrs[frags]; + + if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen) + wrs_needed = 1; + + WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1); + + if (csk->wr_cred < wrs_needed) { + log_debug(1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n", + csk, skb->len, skb->data_len, frags, + wrs_needed, csk->wr_cred); + break; + } + + __skb_unlink(skb, &csk->write_queue); + skb->priority = CPL_PRIORITY_DATA; + skb->csum = wrs_needed; /* remember this until the WR_ACK */ + csk->wr_cred -= wrs_needed; + csk->wr_una_cred += wrs_needed; + cxgbi_sock_enqueue_wr(csk, skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, " + "left %u, unack %u.\n", + csk, skb->len, skb->data_len, frags, skb->csum, + csk->wr_cred, csk->wr_una_cred); + + if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) { + if ((req_completion && + csk->wr_una_cred == wrs_needed) || + csk->wr_una_cred >= csk->wr_max_cred / 2) { + req_completion = 1; + csk->wr_una_cred = 0; + } + len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); + make_tx_data_wr(csk, skb, len, req_completion); + csk->snd_nxt += len; + cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR); + } + total_size += skb->truesize; + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX, + "csk 0x%p, tid 0x%x, send skb 0x%p.\n", + csk, csk->tid, skb); + set_arp_failure_handler(skb, arp_failure_skb_discard); + l2t_send(csk->cdev->lldev, skb, csk->l2t); + } + return total_size; +} + +/* + * Process a CPL_ACT_ESTABLISH message: -> host + * Updates connection state from an active establish CPL message. Runs with + * the connection lock held. 
+ */ + +static inline void free_atid(struct cxgbi_sock *csk) +{ + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { + cxgb3_free_atid(csk->cdev->lldev, csk->atid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_put(csk); + } +} + +static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_act_establish *req = cplhdr(skb); + unsigned int tid = GET_TID(req); + unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + u32 rcv_isn = ntohl(req->rcv_isn); /* real RCV_ISN + 1 */ + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", + atid, atid, csk, csk->state, csk->flags, rcv_isn); + + cxgbi_sock_get(csk); + cxgbi_sock_set_flag(csk, CTPF_HAS_TID); + csk->tid = tid; + cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); + + free_atid(csk); + + csk->rss_qid = G_QNUM(ntohs(skb->csum)); + + spin_lock_bh(&csk->lock); + if (csk->retry_timer.function) { + del_timer(&csk->retry_timer); + csk->retry_timer.function = NULL; + } + + if (unlikely(csk->state != CTP_ACTIVE_OPEN)) + pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; + if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10)) + csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10); + + cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); + + if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) + /* upper layer has requested closing */ + send_abort_req(csk); + else { + if (skb_queue_len(&csk->write_queue)) + push_tx_frames(csk, 1); + cxgbi_conn_tx_open(csk); + } + + spin_unlock_bh(&csk->lock); + __kfree_skb(skb); + return 0; +} + +/* + * Process a CPL_ACT_OPEN_RPL message: -> host + * Handle active open failures. 
+ */ +static int act_open_rpl_status_to_errno(int status) +{ + switch (status) { + case CPL_ERR_CONN_RESET: + return -ECONNREFUSED; + case CPL_ERR_ARP_MISS: + return -EHOSTUNREACH; + case CPL_ERR_CONN_TIMEDOUT: + return -ETIMEDOUT; + case CPL_ERR_TCAM_FULL: + return -ENOMEM; + case CPL_ERR_CONN_EXIST: + return -EADDRINUSE; + default: + return -EIO; + } +} + +static void act_open_retry_timer(unsigned long data) +{ + struct sk_buff *skb; + struct cxgbi_sock *csk = (struct cxgbi_sock *)data; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); + if (!skb) + cxgbi_sock_fail_act_open(csk, -ENOMEM); + else { + skb->sk = (struct sock *)csk; + set_arp_failure_handler(skb, act_open_arp_failure); + send_act_open_req(csk, skb, csk->l2t); + } + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +} + +static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_act_open_rpl *rpl = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, status %u.\n", + csk, csk->state, csk->flags, csk->atid, rpl->status); + + if (rpl->status != CPL_ERR_TCAM_FULL && + rpl->status != CPL_ERR_CONN_EXIST && + rpl->status != CPL_ERR_ARP_MISS) + cxgb3_queue_tid_release(tdev, GET_TID(rpl)); + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + if (rpl->status == CPL_ERR_CONN_EXIST && + csk->retry_timer.function != act_open_retry_timer) { + csk->retry_timer.function = act_open_retry_timer; + mod_timer(&csk->retry_timer, jiffies + HZ / 2); + } else + cxgbi_sock_fail_act_open(csk, + act_open_rpl_status_to_errno(rpl->status)); + + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); + __kfree_skb(skb); + return 0; +} + +/* + * Process PEER_CLOSE CPL messages: -> host + * Handle peer FIN. + */ +static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + cxgbi_sock_rcv_peer_close(csk); + __kfree_skb(skb); + return 0; +} + +/* + * Process CLOSE_CONN_RPL CPL message: -> host + * Process a peer ACK to our FIN. + */ +static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb, + void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_close_con_rpl *rpl = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, snxt %u.\n", + csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); + + cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); + __kfree_skb(skb); + return 0; +} + +/* + * Process ABORT_REQ_RSS CPL message: -> host + * Process abort requests. If we are waiting for an ABORT_RPL we ignore this + * request except that we need to reply to it. + */ + +static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, + int *need_rst) +{ + switch (abort_reason) { + case CPL_ERR_BAD_SYN: /* fall through */ + case CPL_ERR_CONN_RESET: + return csk->state > CTP_ESTABLISHED ? 
+ -EPIPE : -ECONNRESET; + case CPL_ERR_XMIT_TIMEDOUT: + case CPL_ERR_PERSIST_TIMEDOUT: + case CPL_ERR_FINWAIT2_TIMEDOUT: + case CPL_ERR_KEEPALIVE_TIMEDOUT: + return -ETIMEDOUT; + default: + return -EIO; + } +} + +static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + const struct cpl_abort_req_rss *req = cplhdr(skb); + struct cxgbi_sock *csk = ctx; + int rst_status = CPL_ABORT_NO_RST; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + if (req->status == CPL_ERR_RTX_NEG_ADVICE || + req->status == CPL_ERR_PERSIST_NEG_ADVICE) { + goto done; + } + + cxgbi_sock_get(csk); + spin_lock_bh(&csk->lock); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { + cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); + cxgbi_sock_set_state(csk, CTP_ABORTING); + goto out; + } + + cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); + send_abort_rpl(csk, rst_status); + + if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { + csk->err = abort_status_to_errno(csk, req->status, &rst_status); + cxgbi_sock_closed(csk); + } + +out: + spin_unlock_bh(&csk->lock); + cxgbi_sock_put(csk); +done: + __kfree_skb(skb); + return 0; +} + +/* + * Process ABORT_RPL_RSS CPL message: -> host + * Process abort replies. We only process these messages if we anticipate + * them as the coordination between SW and HW in this area is somewhat lacking + * and sometimes we get ABORT_RPLs after we are done with the connection that + * originated the ABORT_REQ. + */ +static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + struct cpl_abort_rpl_rss *rpl = cplhdr(skb); + struct cxgbi_sock *csk = ctx; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", + rpl->status, csk, csk ? csk->state : 0, + csk ? csk->flags : 0UL); + /* + * Ignore replies to post-close aborts indicating that the abort was + * requested too late. These connections are terminated when we get + * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss + * arrives the TID is either no longer used or it has been recycled. + */ + if (rpl->status == CPL_ERR_ABORT_FAILED) + goto rel_skb; + /* + * Sometimes we've already closed the connection, e.g., a post-close + * abort races with ABORT_REQ_RSS, the latter frees the connection + * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED, + * but FW turns the ABORT_REQ into a regular one and so we get + * ABORT_RPL_RSS with status 0 and no connection. + */ + if (csk) + cxgbi_sock_rcv_abort_rpl(csk); +rel_skb: + __kfree_skb(skb); + return 0; +} + +/* + * Process RX_ISCSI_HDR CPL message: -> host + * Handle received PDUs, the payload could be DDP'ed. If not, the payload + * follow after the bhs. 
+ */ +static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb); + struct cpl_iscsi_hdr_norss data_cpl; + struct cpl_rx_data_ddp_norss ddp_cpl; + unsigned int hdr_len, data_len, status; + unsigned int len; + int err; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n", + csk, csk->state, csk->flags, csk->tid, skb, skb->len); + + spin_lock_bh(&csk->lock); + + if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u, bad state.\n", + csk, csk->state, csk->flags, csk->tid); + if (csk->state != CTP_ABORTING) + goto abort_conn; + else + goto discard; + } + + cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq); + cxgbi_skcb_flags(skb) = 0; + + skb_reset_transport_header(skb); + __skb_pull(skb, sizeof(struct cpl_iscsi_hdr)); + + len = hdr_len = ntohs(hdr_cpl->len); + /* msg coalesce is off or not enough data received */ + if (skb->len <= hdr_len) { + pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n", + csk->cdev->ports[csk->port_id]->name, csk->tid, + skb->len, hdr_len); + goto abort_conn; + } + cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED); + + err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl, + sizeof(ddp_cpl)); + if (err < 0) { + pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n", + csk->cdev->ports[csk->port_id]->name, csk->tid, + skb->len, sizeof(ddp_cpl), err); + goto abort_conn; + } + + cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS); + cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len); + cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc); + status = ntohl(ddp_cpl.ddp_status); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n", + csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); + + if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR); + if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR); + if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR); + + if (skb->len > (hdr_len + sizeof(ddp_cpl))) { + err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl)); + if (err < 0) { + pr_err("%s: tid %u, cp %zu/%u failed %d.\n", + csk->cdev->ports[csk->port_id]->name, + csk->tid, sizeof(data_cpl), skb->len, err); + goto abort_conn; + } + data_len = ntohs(data_cpl.len); + log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX, + "skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n", + skb, data_len, cxgbi_skcb_rx_pdulen(skb), status); + len += sizeof(data_cpl) + data_len; + } else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) + cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD); + + csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); + __pskb_trim(skb, len); + __skb_queue_tail(&csk->receive_queue, skb); + cxgbi_conn_pdu_ready(csk); + + spin_unlock_bh(&csk->lock); + return 0; + +abort_conn: + send_abort_req(csk); +discard: + spin_unlock_bh(&csk->lock); + __kfree_skb(skb); + return 0; +} + +/* + * Process TX_DATA_ACK CPL messages: -> host + * Process an acknowledgment of WR completion. Advance snd_una and send the + * next batch of work requests from the write queue. 
+ */ +static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) +{ + struct cxgbi_sock *csk = ctx; + struct cpl_wr_ack *hdr = cplhdr(skb); + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, + "csk 0x%p,%u,0x%lx,%u, cr %u.\n", + csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); + + cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); + __kfree_skb(skb); + return 0; +} + +/* + * for each connection, pre-allocate skbs needed for close/abort requests. So + * that we can service the request right away. + */ +static int alloc_cpls(struct cxgbi_sock *csk) +{ + csk->cpl_close = alloc_cpl(sizeof(struct cpl_close_con_req), 0, + GFP_KERNEL); + if (!csk->cpl_close) + return -ENOMEM; + csk->cpl_abort_req = alloc_cpl(sizeof(struct cpl_abort_req), 0, + GFP_KERNEL); + if (!csk->cpl_abort_req) + goto free_cpl_skbs; + + csk->cpl_abort_rpl = alloc_cpl(sizeof(struct cpl_abort_rpl), 0, + GFP_KERNEL); + if (!csk->cpl_abort_rpl) + goto free_cpl_skbs; + + return 0; + +free_cpl_skbs: + cxgbi_sock_free_cpl_skbs(csk); + return -ENOMEM; +} + +/** + * release_offload_resources - release offload resource + * @c3cn: the offloaded iscsi tcp connection. + * Release resources held by an offload connection (TID, L2T entry, etc.) + */ +static void l2t_put(struct cxgbi_sock *csk) +{ + struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; + + if (csk->l2t) { + l2t_release(L2DATA(t3dev), csk->l2t); + csk->l2t = NULL; + cxgbi_sock_put(csk); + } +} + +static void release_offload_resources(struct cxgbi_sock *csk) +{ + struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx,%u.\n", + csk, csk->state, csk->flags, csk->tid); + + csk->rss_qid = 0; + cxgbi_sock_free_cpl_skbs(csk); + + if (csk->wr_cred != csk->wr_max_cred) { + cxgbi_sock_purge_wr_queue(csk); + cxgbi_sock_reset_wr_list(csk); + } + l2t_put(csk); + if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) + free_atid(csk); + else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { + cxgb3_remove_tid(t3dev, (void *)csk, csk->tid); + cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); + cxgbi_sock_put(csk); + } + csk->dst = NULL; + csk->cdev = NULL; +} + +static int init_act_open(struct cxgbi_sock *csk) +{ + struct dst_entry *dst = csk->dst; + struct cxgbi_device *cdev = csk->cdev; + struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; + struct net_device *ndev = cdev->ports[csk->port_id]; + struct sk_buff *skb = NULL; + + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); + + csk->rss_qid = 0; + csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev); + if (!csk->l2t) { + pr_err("NO l2t available.\n"); + return -EINVAL; + } + cxgbi_sock_get(csk); + + csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); + if (csk->atid < 0) { + pr_err("NO atid available.\n"); + goto rel_resource; + } + cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); + cxgbi_sock_get(csk); + + skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); + if (!skb) + goto rel_resource; + skb->sk = (struct sock *)csk; + set_arp_failure_handler(skb, act_open_arp_failure); + + csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; + csk->wr_una_cred = 0; + csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst)); + cxgbi_sock_reset_wr_list(csk); + csk->err = 0; + + cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); + send_act_open_req(csk, skb, csk->l2t); + return 0; + +rel_resource: + if (skb) + __kfree_skb(skb); + return -EINVAL; +} + +cxgb3_cpl_handler_func 
cxgb3i_cpl_handlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = do_act_establish, + [CPL_ACT_OPEN_RPL] = do_act_open_rpl, + [CPL_PEER_CLOSE] = do_peer_close, + [CPL_ABORT_REQ_RSS] = do_abort_req, + [CPL_ABORT_RPL_RSS] = do_abort_rpl, + [CPL_CLOSE_CON_RPL] = do_close_con_rpl, + [CPL_TX_DMA_ACK] = do_wr_ack, + [CPL_ISCSI_HDR] = do_iscsi_hdr, +}; + +/** + * cxgb3i_ofld_init - allocate and initialize resources for each adapter found + * @cdev: cxgbi adapter + */ +int cxgb3i_ofld_init(struct cxgbi_device *cdev) +{ + struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; + struct adap_ports port; + struct ofld_page_info rx_page_info; + unsigned int wr_len; + int rc; + + if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 || + t3dev->ctl(t3dev, GET_PORTS, &port) < 0 || + t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) { + pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev); + return -EINVAL; + } + + if (cxgb3i_max_connect > CXGBI_MAX_CONN) + cxgb3i_max_connect = CXGBI_MAX_CONN; + + rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base, + cxgb3i_max_connect); + if (rc < 0) + return rc; + + init_wr_tab(wr_len); + cdev->csk_release_offload_resources = release_offload_resources; + cdev->csk_push_tx_frames = push_tx_frames; + cdev->csk_send_abort_req = send_abort_req; + cdev->csk_send_close_req = send_close_req; + cdev->csk_send_rx_credits = send_rx_credits; + cdev->csk_alloc_cpls = alloc_cpls; + cdev->csk_init_act_open = init_act_open; + + pr_info("cdev 0x%p, offload up, added.\n", cdev); + return 0; +} + +/* + * functions to program the pagepod in h/w + */ +static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) +{ + struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; + + memset(req, 0, sizeof(*req)); + + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); + req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | + V_ULPTX_CMD(ULP_MEM_WRITE)); + req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | + V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); +} + +static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, + unsigned int idx, unsigned int npods, + struct cxgbi_gather_list *gl) +{ + struct cxgbi_device *cdev = csk->cdev; + struct cxgbi_ddp_info *ddp = cdev->ddp; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; + int i; + + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, idx %u, npods %u, gl 0x%p.\n", + csk, idx, npods, gl); + + for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { + struct sk_buff *skb = ddp->gl_skb[idx]; + + /* hold on to the skb until we clear the ddp mapping */ + skb_get(skb); + + ulp_mem_io_set_hdr(skb, pm_addr); + cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + + sizeof(struct ulp_mem_io)), + hdr, gl, i * PPOD_PAGES_MAX); + skb->priority = CPL_PRIORITY_CONTROL; + cxgb3_ofld_send(cdev->lldev, skb); + } + return 0; +} + +static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, + unsigned int idx, unsigned int npods) +{ + struct cxgbi_device *cdev = chba->cdev; + struct cxgbi_ddp_info *ddp = cdev->ddp; + unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; + int i; + + log_debug(1 << CXGBI_DBG_DDP, + "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n", + cdev, idx, npods, tag); + + for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { + struct sk_buff *skb = ddp->gl_skb[idx]; + + if (!skb) { + pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n", + tag, idx, i, npods); + continue; + } + ddp->gl_skb[idx] = NULL; + memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE); + ulp_mem_io_set_hdr(skb, 
pm_addr); + skb->priority = CPL_PRIORITY_CONTROL; + cxgb3_ofld_send(cdev->lldev, skb); + } +} + +static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt) +{ + int i; + + log_debug(1 << CXGBI_DBG_DDP, + "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); + + for (i = 0; i < cnt; i++, idx++) + if (ddp->gl_skb[idx]) { + kfree_skb(ddp->gl_skb[idx]); + ddp->gl_skb[idx] = NULL; + } +} + +static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, + int cnt, gfp_t gfp) +{ + int i; + + log_debug(1 << CXGBI_DBG_DDP, + "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); + + for (i = 0; i < cnt; i++) { + struct sk_buff *skb = alloc_cpl(sizeof(struct ulp_mem_io) + + PPOD_SIZE, 0, gfp); + if (skb) { + ddp->gl_skb[idx + i] = skb; + } else { + ddp_free_gl_skb(ddp, idx, i); + return -ENOMEM; + } + } + return 0; +} + +static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, + unsigned int tid, int pg_idx, bool reply) +{ + struct sk_buff *skb = alloc_cpl(sizeof(struct cpl_set_tcb_field), 0, + GFP_KERNEL); + struct cpl_set_tcb_field *req; + u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; + + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx); + if (!skb) + return -ENOMEM; + + /* set up ulp submode and page size */ + req = (struct cpl_set_tcb_field *)skb->head; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = V_NO_REPLY(reply ? 0 : 1); + req->cpu_idx = 0; + req->word = htons(31); + req->mask = cpu_to_be64(0xF0000000); + req->val = cpu_to_be64(val << 28); + skb->priority = CPL_PRIORITY_CONTROL; + + cxgb3_ofld_send(csk->cdev->lldev, skb); + return 0; +} + +/** + * cxgb3i_setup_conn_digest - setup conn. digest setting + * @csk: cxgb tcp socket + * @tid: connection id + * @hcrc: header digest enabled + * @dcrc: data digest enabled + * @reply: request reply from h/w + * set up the iscsi digest settings for a connection identified by tid + */ +static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, + int hcrc, int dcrc, int reply) +{ + struct sk_buff *skb = alloc_cpl(sizeof(struct cpl_set_tcb_field), 0, + GFP_KERNEL); + struct cpl_set_tcb_field *req; + u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0); + + log_debug(1 << CXGBI_DBG_DDP, + "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc); + if (!skb) + return -ENOMEM; + + /* set up ulp submode and page size */ + req = (struct cpl_set_tcb_field *)skb->head; + req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); + req->reply = V_NO_REPLY(reply ? 
0 : 1); + req->cpu_idx = 0; + req->word = htons(31); + req->mask = cpu_to_be64(0x0F000000); + req->val = cpu_to_be64(val << 24); + skb->priority = CPL_PRIORITY_CONTROL; + + cxgb3_ofld_send(csk->cdev->lldev, skb); + return 0; +} + +/** + * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource + * @cdev: cxgb3i adapter + * release all the resource held by the ddp pagepod manager for a given + * adapter if needed + */ + +static void t3_ddp_cleanup(struct cxgbi_device *cdev) +{ + struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; + + if (cxgbi_ddp_cleanup(cdev)) { + pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev); + tdev->ulp_iscsi = NULL; + } +} + +/** + * ddp_init - initialize the cxgb3 adapter's ddp resource + * @cdev: cxgb3i adapter + * initialize the ddp pagepod manager for a given adapter + */ +static int cxgb3i_ddp_init(struct cxgbi_device *cdev) +{ + struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; + struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; + struct ulp_iscsi_info uinfo; + unsigned int pgsz_factor[4]; + int err; + + if (ddp) { + kref_get(&ddp->refcnt); + pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n", + tdev, tdev->ulp_iscsi); + cdev->ddp = ddp; + return -EALREADY; + } + + err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); + if (err < 0) { + pr_err("%s, failed to get iscsi param err=%d.\n", + tdev->name, err); + return err; + } + + err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit, + uinfo.max_txsz, uinfo.max_rxsz); + if (err < 0) + return err; + + ddp = cdev->ddp; + + uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; + cxgbi_ddp_page_size_factor(pgsz_factor); + uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); + + err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); + if (err < 0) { + pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n", + tdev->name, err); + cxgbi_ddp_cleanup(cdev); + return err; + } + tdev->ulp_iscsi = ddp; + + cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb; + cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb; + cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; + cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; + cdev->csk_ddp_set = ddp_set_map; + cdev->csk_ddp_clear = ddp_clear_map; + + pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " + "%u/%u.\n", + tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, + ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz, + ddp->max_rxsz, uinfo.max_rxsz); + return 0; +} + +static void cxgb3i_dev_close(struct t3cdev *t3dev) +{ + struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); + + if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) { + pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? 
cdev->flags : 0); + return; + } + + cxgbi_device_unregister(cdev); +} + +/** + * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings + * @t3dev: t3cdev adapter + */ +static void cxgb3i_dev_open(struct t3cdev *t3dev) +{ + struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); + struct adapter *adapter = tdev2adap(t3dev); + int i, err; + + if (cdev) { + pr_info("0x%p, updating.\n", cdev); + return; + } + + cdev = cxgbi_device_register(0, adapter->params.nports); + if (!cdev) { + pr_warn("device 0x%p register failed.\n", t3dev); + return; + } + + cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET; + cdev->lldev = t3dev; + cdev->pdev = adapter->pdev; + cdev->ports = adapter->port; + cdev->nports = adapter->params.nports; + cdev->mtus = adapter->params.mtus; + cdev->nmtus = NMTUS; + cdev->snd_win = cxgb3i_snd_win; + cdev->rcv_win = cxgb3i_rcv_win; + cdev->rx_credit_thres = cxgb3i_rx_credit_thres; + cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN; + cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss); + cdev->dev_ddp_cleanup = t3_ddp_cleanup; + cdev->itp = &cxgb3i_iscsi_transport; + + err = cxgb3i_ddp_init(cdev); + if (err) { + pr_info("0x%p ddp init failed\n", cdev); + goto err_out; + } + + err = cxgb3i_ofld_init(cdev); + if (err) { + pr_info("0x%p offload init failed\n", cdev); + goto err_out; + } + + err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN, + &cxgb3i_host_template, cxgb3i_stt); + if (err) + goto err_out; + + for (i = 0; i < cdev->nports; i++) + cdev->hbas[i]->ipv4addr = + cxgb3i_get_private_ipv4addr(cdev->ports[i]); + + pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n", + cdev, cdev ? cdev->flags : 0, t3dev, err); + return; + +err_out: + cxgbi_device_unregister(cdev); +} + +static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port) +{ + struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); + + log_debug(1 << CXGBI_DBG_TOE, + "0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n", + t3dev, cdev, event, port); + if (!cdev) + return; + + switch (event) { + case OFFLOAD_STATUS_DOWN: + cdev->flags |= CXGBI_FLAG_ADAPTER_RESET; + break; + case OFFLOAD_STATUS_UP: + cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET; + break; + } +} + +/** + * cxgb3i_init_module - module init entry point + * + * initialize any driver wide global data structures and register itself + * with the cxgb3 module + */ +static int __init cxgb3i_init_module(void) +{ + int rc; + + printk(KERN_INFO "%s", version); + + rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt); + if (rc < 0) + return rc; + + cxgb3_register_client(&t3_client); + return 0; +} + +/** + * cxgb3i_exit_module - module cleanup/exit entry point + * + * go through the driver hba list and for each hba, release any resource held. + * and unregisters iscsi transport and the cxgb3 module + */ +static void __exit cxgb3i_exit_module(void) +{ + cxgb3_unregister_client(&t3_client); + cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3); + cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt); +} + +module_init(cxgb3i_init_module); +module_exit(cxgb3i_exit_module); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h new file mode 100644 index 000000000000..5f5e3394b594 --- /dev/null +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h @@ -0,0 +1,51 @@ +/* + * cxgb3i.h: Chelsio S3xx iSCSI driver. + * + * Copyright (c) 2008 Chelsio Communications, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Karen Xie (kxie@chelsio.com) + */ + +#ifndef __CXGB3I_H__ +#define __CXGB3I_H__ + +#define CXGB3I_SCSI_HOST_QDEPTH 1024 +#define CXGB3I_MAX_LUN 512 +#define ISCSI_PDU_NONPAYLOAD_MAX \ + (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE + 2*ISCSI_DIGEST_SIZE) + +/*for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */ +#define CXGB3I_TX_HEADER_LEN \ + (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr)) + +extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; + +#define cxgb3i_get_private_ipv4addr(ndev) \ + (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) +#define cxgb3i_set_private_ipv4addr(ndev, addr) \ + (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr + +struct cpl_iscsi_hdr_norss { + union opcode_tid ot; + u16 pdu_len_ddp; + u16 len; + u32 seq; + u16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data_ddp_norss { + union opcode_tid ot; + u16 urg; + u16 len; + u32 seq; + u32 nxt_seq; + u32 ulp_crc; + u32 ddp_status; +}; +#endif -- cgit v1.2.3 From 46d7456324766cd291d7ccd5b4927cbd3c28fb1b Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Wed, 11 Aug 2010 07:15:17 -0700 Subject: [SCSI] ipr: fix resource address formatting and add attribute for device ID This patch fixes a resource address formatting problem where the first byte was being zeroed out. Also, the device ID is now made available as a sysfs attribute. Signed-off-by: Wayne Boyer Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 43 ++++++++++++++++++++++++++++++++++++++++++- drivers/scsi/ipr.h | 4 +++- 2 files changed, 45 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 52568588039f..67875d41345f 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1096,6 +1096,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, res->bus = cfgtew->u.cfgte->res_addr.bus; res->target = cfgtew->u.cfgte->res_addr.target; res->lun = cfgtew->u.cfgte->res_addr.lun; + res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); } ipr_update_ata_class(res, proto); @@ -1142,7 +1143,7 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len) int i; char *p = buffer; - res_path[0] = '\0'; + *p = '\0'; p += snprintf(p, buffer + len - p, "%02X", res_path[0]); for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++) p += snprintf(p, buffer + len - p, "-%02X", res_path[i]); @@ -4089,6 +4090,7 @@ static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type) /** * ipr_show_adapter_handle - Show the adapter's resource handle for this device * @dev: device struct + * @attr: device attribute structure * @buf: buffer * * Return value: @@ -4122,6 +4124,7 @@ static struct device_attribute ipr_adapter_handle_attr = { * ipr_show_resource_path - Show the resource path or the resource address for * this device. * @dev: device struct + * @attr: device attribute structure * @buf: buffer * * Return value: @@ -4158,9 +4161,46 @@ static struct device_attribute ipr_resource_path_attr = { .show = ipr_show_resource_path }; +/** + * ipr_show_device_id - Show the device_id for this device. 
+ * @dev: device struct + * @attr: device attribute structure + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res && ioa_cfg->sis64) + len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id); + else if (res) + len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_device_id_attr = { + .attr = { + .name = "device_id", + .mode = S_IRUGO, + }, + .show = ipr_show_device_id +}; + /** * ipr_show_resource_type - Show the resource type for this device. * @dev: device struct + * @attr: device attribute structure * @buf: buffer * * Return value: @@ -4195,6 +4235,7 @@ static struct device_attribute ipr_resource_type_attr = { static struct device_attribute *ipr_dev_attrs[] = { &ipr_adapter_handle_attr, &ipr_resource_path_attr, + &ipr_device_id_attr, &ipr_resource_type_attr, NULL, }; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4d31625ab9cf..16bc77a2be36 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -26,6 +26,7 @@ #ifndef _IPR_H #define _IPR_H +#include #include #include #include @@ -372,7 +373,7 @@ struct ipr_config_table_entry { struct ipr_res_addr res_addr; __be32 res_handle; - __be32 reserved4[2]; + __be32 lun_wwn[2]; struct ipr_std_inq_data std_inq_data; }__attribute__ ((packed, aligned (4))); @@ -1210,6 +1211,7 @@ struct ipr_resource_entry { __be32 res_handle; __be64 dev_id; + __be64 lun_wwn; struct scsi_lun dev_lun; u8 res_path[8]; -- cgit v1.2.3 From a87b04de66eec66a728fb65c70a8bf8bb4d1eb48 Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Tue, 10 Aug 2010 09:13:00 -0700 Subject: [SCSI] ipr: Driver version 2.5.1 Bump the driver version. Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 16bc77a2be36..67cae67378ad 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -38,8 +38,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.5.0" -#define IPR_DRIVER_DATE "(February 11, 2010)" +#define IPR_DRIVER_VERSION "2.5.1" +#define IPR_DRIVER_DATE "(August 10, 2010)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding -- cgit v1.2.3 From 39304072ac401015ee3c0fbfa724574dbedb46b5 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Thu, 12 Aug 2010 16:44:27 -0700 Subject: [SCSI] bnx2i: Fixed a protocol violation on nopout responses According to RFC3720, nopout packet sent in response to unsolicited nopin packet requesting a response must retain the TTT of the requester. 
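[Editorial aside, not part of the commit above or the diff that follows: a minimal sketch of the RFC 3720 rule this patch enforces, assuming the standard definitions from include/scsi/iscsi_proto.h (struct iscsi_nopin, ISCSI_RESERVED_TAG); the helper name is made up for illustration. A Nop-Out answering an unsolicited Nop-In must echo the target's TTT, while an initiator-originated Nop-Out carries the reserved tag; that is why the driver now reads the TTT from the PDU header built by libiscsi instead of taking a caller-supplied value.]

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <scsi/iscsi_proto.h>

    /* Sketch: choose the TTT a Nop-Out should carry. A reply to an
     * unsolicited Nop-In keeps the requester's (target's) TTT; a ping
     * the initiator started on its own uses the reserved tag.
     */
    static inline __be32 nopout_ttt_for_reply(const struct iscsi_nopin *nopin)
    {
            if (nopin && nopin->ttt != cpu_to_be32(ISCSI_RESERVED_TAG))
                    return nopin->ttt;              /* solicited: echo it */
            return cpu_to_be32(ISCSI_RESERVED_TAG); /* unsolicited ping */
    }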
Signed-off-by: Eddie Wai Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i.h | 2 +- drivers/scsi/bnx2i/bnx2i_hwi.c | 5 ++--- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2 -- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 00c033511cbf..b6345d91bb66 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -753,7 +753,7 @@ extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, struct bnx2i_cmd *cmnd); extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, - struct iscsi_task *mtask, u32 ttt, + struct iscsi_task *mtask, char *datap, int data_len, int unsol); extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, struct iscsi_task *mtask); diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index d23fc256d585..015efb579384 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -464,7 +464,6 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, * @conn: iscsi connection * @cmd: driver command structure which is requesting * a WQE to sent to chip for further processing - * @ttt: TTT to be used when building pdu header * @datap: payload buffer pointer * @data_len: payload data length * @unsol: indicated whether nopout pdu is unsolicited pdu or @@ -473,7 +472,7 @@ int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, * prepare and post a nopout request WQE to CNIC firmware */ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, - struct iscsi_task *task, u32 ttt, + struct iscsi_task *task, char *datap, int data_len, int unsol) { struct bnx2i_endpoint *ep = bnx2i_conn->ep; @@ -498,7 +497,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, nopout_wqe->itt = ((u16)task->itt | (ISCSI_TASK_TYPE_MPATH << ISCSI_TMF_REQUEST_TYPE_SHIFT)); - nopout_wqe->ttt = ttt; + nopout_wqe->ttt = nopout_hdr->ttt; nopout_wqe->flags = 0; if (!unsol) nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index a46ccc380ab1..36b245b0dd3c 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1078,11 +1078,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) buf = bnx2i_conn->gen_pdu.req_buf; if (data_len) rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, - RESERVED_ITT, buf, data_len, 1); else rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, - RESERVED_ITT, NULL, 0, 1); break; case ISCSI_OP_LOGOUT: -- cgit v1.2.3 From c47b401230895edc94abe2c342811a2171375dbe Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Fri, 13 Aug 2010 09:33:27 -0700 Subject: [SCSI] bxn2i: Added support for other TMFs besides ABORT_TASK Expanded the TMF request routine to support other TMFs such as LUN RESET, etc. 
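A rough sketch of the new dispatch in the TMF build path, condensed from the hunk below (encode_ref_itt() is only a stand-in here for the inline read/write-direction encoding done in the driver):

	switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) {
	case ISCSI_TM_FUNC_ABORT_TASK:
	case ISCSI_TM_FUNC_TASK_REASSIGN:
		/* only these functions reference another outstanding task */
		tmfabort_wqe->ref_itt = encode_ref_itt(conn, tmfabort_hdr->rtt);
		break;
	default:
		/* e.g. LUN RESET: no referenced task */
		tmfabort_wqe->ref_itt = RESERVED_ITT;
	}
	/* the LUN now always comes from the TMF PDU itself */
	memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));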
Signed-off-by: Eddie Wai Reviewed-by: Anil Veerabhadrappa Reviewed-by: Benjamin Li Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_hwi.c | 58 ++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 015efb579384..90cef716b796 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -385,7 +385,6 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, struct bnx2i_cmd *bnx2i_cmd; struct bnx2i_tmf_request *tmfabort_wqe; u32 dword; - u32 scsi_lun[2]; bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; @@ -393,38 +392,41 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_qe; tmfabort_wqe->op_code = tmfabort_hdr->opcode; - tmfabort_wqe->op_attr = 0; - tmfabort_wqe->op_attr = - ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; + tmfabort_wqe->op_attr = tmfabort_hdr->flags; tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); tmfabort_wqe->reserved2 = 0; tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); - ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); - if (!ctask || !ctask->sc) - /* - * the iscsi layer must have completed the cmd while this - * was starting up. - * - * Note: In the case of a SCSI cmd timeout, the task's sc - * is still active; hence ctask->sc != 0 - * In this case, the task must be aborted - */ - return 0; - - ref_sc = ctask->sc; - - /* Retrieve LUN directly from the ref_sc */ - int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun); - tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); - tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); - - if (ref_sc->sc_data_direction == DMA_TO_DEVICE) - dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); - else - dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); - tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK)); + switch (tmfabort_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) { + case ISCSI_TM_FUNC_ABORT_TASK: + case ISCSI_TM_FUNC_TASK_REASSIGN: + ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); + if (!ctask || !ctask->sc) + /* + * the iscsi layer must have completed the cmd while + * was starting up. + * + * Note: In the case of a SCSI cmd timeout, the task's + * sc is still active; hence ctask->sc != 0 + * In this case, the task must be aborted + */ + return 0; + + ref_sc = ctask->sc; + if (ref_sc->sc_data_direction == DMA_TO_DEVICE) + dword = (ISCSI_TASK_TYPE_WRITE << + ISCSI_CMD_REQUEST_TYPE_SHIFT); + else + dword = (ISCSI_TASK_TYPE_READ << + ISCSI_CMD_REQUEST_TYPE_SHIFT); + tmfabort_wqe->ref_itt = (dword | + (tmfabort_hdr->rtt & ISCSI_ITT_MASK)); + break; + default: + tmfabort_wqe->ref_itt = RESERVED_ITT; + } + memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun)); tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; -- cgit v1.2.3 From 2c2255e08dee510c68d641195f83d40899897c65 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Thu, 12 Aug 2010 16:44:29 -0700 Subject: [SCSI] bnx2i: Recouple the CFC delete cleanup with cm_abort/close completion Specific to the Broadcom 10g chipset, the CFC delete operation must be coupled with the cm_abort/close with does the SRC delete/terminate offload operation prior. 
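In other words, bnx2i_hw_ep_disconnect() no longer falls through to the CFC delete when the close/abort request itself was rejected; condensed from the hunk below:

	if (close_ret)
		/* cm_close/cm_abort was refused, so the SRC terminate offload
		 * never ran; log it and skip the CFC delete for this endpoint */
		printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n",
		       bnx2i_ep->hba->netdev->name, close, close_ret);
	else
		/* otherwise wait for the option-2 teardown to finish before
		 * the CFC delete is issued */
		wait_event_interruptible(bnx2i_ep->ofld_wait,
				bnx2i_ep->state != EP_STATE_DISCONN_START);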
Signed-off-by: Eddie Wai Reviewed-by: Michael Chan Acked-by: Benjamin Li Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_iscsi.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 36b245b0dd3c..0425540d4814 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1996,11 +1996,13 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) else close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); + /* No longer allow CFC delete if cm_close/abort fails the request */ if (close_ret) - bnx2i_ep->state = EP_STATE_DISCONN_COMPL; - - /* wait for option-2 conn teardown */ - wait_event_interruptible(bnx2i_ep->ofld_wait, + printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n", + bnx2i_ep->hba->netdev->name, close, close_ret); + else + /* wait for option-2 conn teardown */ + wait_event_interruptible(bnx2i_ep->ofld_wait, bnx2i_ep->state != EP_STATE_DISCONN_START); if (signal_pending(current)) -- cgit v1.2.3 From 250ae982e2864aa0e06a09137382f44e58b51683 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Thu, 12 Aug 2010 16:44:30 -0700 Subject: [SCSI] bnx2i: Added chip cleanup for the remove module path In the case when an ep_connect request is interrupted due to route request stall, if the iSCSI daemon is terminated by the user, the chip will be left in a state which will not get cleaned up upon module removal. Upon module reload, when the same context id is used for a new connection, chip panic would occur. This patch adds chip cleanup in the module removal path. Signed-off-by: Eddie Wai Acked-by: Benjamin Li Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 54 +++++++++++++++++++++++++--------------- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3 +++ 2 files changed, 37 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index a796f565f383..1294936f4fa1 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -166,6 +166,38 @@ void bnx2i_start(void *handle) } +/** + * bnx2i_chip_cleanup - local routine to handle chip cleanup + * @hba: Adapter instance to register + * + * Driver checks if adapter still has any active connections before + * executing the cleanup process + */ +static void bnx2i_chip_cleanup(struct bnx2i_hba *hba) +{ + struct bnx2i_endpoint *bnx2i_ep; + struct list_head *pos, *tmp; + + if (hba->ofld_conns_active) { + /* Stage to force the disconnection + * This is the case where the daemon is either slow or + * not present + */ + printk(KERN_ALERT "bnx2i: (%s) chip cleanup for %d active " + "connections\n", hba->netdev->name, + hba->ofld_conns_active); + mutex_lock(&hba->net_dev_lock); + list_for_each_safe(pos, tmp, &hba->ep_active_list) { + bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link); + /* Clean up the chip only */ + bnx2i_hw_ep_disconnect(bnx2i_ep); + bnx2i_ep->cm_sk = NULL; + } + mutex_unlock(&hba->net_dev_lock); + } +} + + /** * bnx2i_stop - cnic callback to shutdown adapter instance * @handle: transparent handle pointing to adapter structure @@ -176,8 +208,6 @@ void bnx2i_start(void *handle) void bnx2i_stop(void *handle) { struct bnx2i_hba *hba = handle; - struct list_head *pos, *tmp; - struct bnx2i_endpoint *bnx2i_ep; int conns_active; /* check if cleanup happened in GOING_DOWN context */ @@ -198,24 +228,7 @@ void bnx2i_stop(void *handle) 
if (hba->ofld_conns_active == conns_active) break; } - if (hba->ofld_conns_active) { - /* Stage to force the disconnection - * This is the case where the daemon is either slow or - * not present - */ - printk(KERN_ALERT "bnx2i: Wait timeout, force all eps " - "to disconnect (%d)\n", hba->ofld_conns_active); - mutex_lock(&hba->net_dev_lock); - list_for_each_safe(pos, tmp, &hba->ep_active_list) { - bnx2i_ep = list_entry(pos, struct bnx2i_endpoint, link); - /* Clean up the chip only */ - bnx2i_hw_ep_disconnect(bnx2i_ep); - } - mutex_unlock(&hba->net_dev_lock); - if (hba->ofld_conns_active) - printk(KERN_ERR "bnx2i: EP disconnect timeout (%d)!\n", - hba->ofld_conns_active); - } + bnx2i_chip_cleanup(hba); /* This flag should be cleared last so that ep_disconnect() gracefully * cleans up connection context @@ -457,6 +470,7 @@ static void __exit bnx2i_mod_exit(void) adapter_count--; if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + bnx2i_chip_cleanup(hba); hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 0425540d4814..fb50efbce087 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1953,6 +1953,9 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) if (!cnic) return 0; + if (bnx2i_ep->state == EP_STATE_IDLE) + return 0; + if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) goto destroy_conn; -- cgit v1.2.3 From fc91961ce520ed7faa32aa01d0f7a82601bc4796 Mon Sep 17 00:00:00 2001 From: Eddie Wai Date: Thu, 12 Aug 2010 16:44:31 -0700 Subject: [SCSI] bnx2i: Updated version to bnx2i-2.1.3 Also updated maintainer info. Signed-off-by: Eddie Wai Signed-off-by: James Bottomley --- drivers/scsi/bnx2i/bnx2i_init.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 1294936f4fa1..50c2aa3b8eb1 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -17,15 +17,17 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" -#define DRV_MODULE_VERSION "2.1.2" -#define DRV_MODULE_RELDATE "Jun 28, 2010" +#define DRV_MODULE_VERSION "2.1.3" +#define DRV_MODULE_RELDATE "Aug 10, 2010" static char version[] __devinitdata = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; -MODULE_AUTHOR("Anil Veerabhadrappa "); +MODULE_AUTHOR("Anil Veerabhadrappa and " + "Eddie Wai "); + MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711" " iSCSI Driver"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 1c1acab0367d88ad5da2b9db2efdf2699113ec88 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 11 Aug 2010 12:11:24 +0200 Subject: [SCSI] drivers/message/fusion: Return -ENOMEM on memory allocation failure In this code, 0 is returned on memory allocation failure, even though other failures return -ENOMEM or other similar values. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression ret; expression x,e1,e2,e3; @@ ret = 0 ... when != ret = e1 *x = \(kmalloc\|kcalloc\|kzalloc\)(...) ... when != ret = e2 if (x == NULL) { ... 
when != ret = e3 return ret; } // Signed-off-by: Julia Lawall Acked-by: "Desai, Kashyap" Signed-off-by: James Bottomley --- drivers/message/fusion/mptbase.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 6837a8ef9371..3e57b61ca446 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -5945,8 +5945,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc) goto out; mem = kmalloc(iocpage2sz, GFP_KERNEL); - if (!mem) + if (!mem) { + rc = -ENOMEM; goto out; + } memcpy(mem, (u8 *)pIoc2, iocpage2sz); ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem; -- cgit v1.2.3 From e3b3e6246726cd05950677ed843010b8e8c5884c Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 11 Aug 2010 11:06:25 -0500 Subject: [SCSI] scsi/block: increase flush/sync timeout We have been seeing the flush request timeout with a wide range of hardware from tgt+iser to FC targets from a major vendor. After discussions about if the value should be configurable and what the best value should be, this patch just increases the flush/sync cache timeout to 1 minute. 2 minutes was determined to be too long, and making it configurable was troublesome for users. This patch was made over Linus's tree. It is not made over scsi-misc or scsi-rc-fixes, because Linus's had block layer changes that my patch was built over. Signed-off-by: Mike Christie Acked-by: Jens Axboe Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 4 ++-- drivers/scsi/sd.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 2714becc2eaf..8c9b275f71a7 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -477,7 +477,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { - rq->timeout = SD_TIMEOUT; + rq->timeout = SD_FLUSH_TIMEOUT; rq->retries = SD_MAX_RETRIES; rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd_len = 10; @@ -1072,7 +1072,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * flush everything. */ res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL); + SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); if (res == 0) break; } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index f81a9309e6de..315ce9d96b1f 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -19,6 +19,7 @@ */ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) +#define SD_FLUSH_TIMEOUT (60 * HZ) /* * Number of allowed retries -- cgit v1.2.3 From c9afb9a24db2e673971e790ee4c1bac55708f333 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 3 Sep 2010 15:20:48 -0700 Subject: [SCSI] qla2xxx: Don't issue set or get port param MBC if port is not online. 
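The guard added in both the BSG vendor-command path and qla2x00_iidma_fcport() is simply a port-state check before the mailbox command is built, roughly:

	/* skip the iIDMA/port-parameter mailbox command unless the remote
	 * port has actually reached the ONLINE state */
	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return -EINVAL;	/* or a plain return in the fcport helper */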
Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_bsg.c | 7 +++---- drivers/scsi/qla2xxx/qla_init.c | 3 +++ 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 9067629817ea..fdfbf83a6330 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1254,10 +1254,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) return -EINVAL; } - if (fcport->loop_id == FC_NO_LOOP_ID) { - DEBUG2(printk(KERN_ERR "%s(%ld): Invalid port loop id, " - "loop_id = 0x%x\n", - __func__, vha->host_no, fcport->loop_id)); + if (atomic_read(&fcport->state) != FCS_ONLINE) { + DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n", + __func__, vha->host_no)); return -EINVAL; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d863ed2619b5..9f71226e0a3e 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2785,6 +2785,9 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) if (!IS_IIDMA_CAPABLE(ha)) return; + if (atomic_read(&fcport->state) != FCS_ONLINE) + return; + if (fcport->fp_speed == PORT_SPEED_UNKNOWN || fcport->fp_speed > ha->link_data_rate) return; -- cgit v1.2.3 From 6dbdda4d596f201b8a82a276a0c0b50ef2b899e8 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 3 Sep 2010 15:20:49 -0700 Subject: [SCSI] qla2xxx: Pass first 64 bytes of MBX information when vendor commands fail. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_mbx.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6009b0c69488..d0413d56887c 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3825,8 +3825,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, /* Copy mailbox information */ memcpy( mresp, mcp->mb, 64); - mresp[3] = mcp->mb[18]; - mresp[4] = mcp->mb[19]; return rval; } @@ -3887,9 +3885,10 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, } /* Copy mailbox information */ - memcpy( mresp, mcp->mb, 32); + memcpy(mresp, mcp->mb, 64); return rval; } + int qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic) { -- cgit v1.2.3 From a5b36321918b3a1295748b77c62976c167233eec Mon Sep 17 00:00:00 2001 From: Lalit Chandivade Date: Fri, 3 Sep 2010 15:20:50 -0700 Subject: [SCSI] qla2xxx: Added AER support for ISP82xx. 
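In outline, as reflected in the diff below, the ISP82xx handling slots into the standard PCI error-recovery callbacks like this:

	/* error_detected (io_frozen): complete any mailbox command the hung
	 * firmware will never answer, then free IRQs and disable the device */
	if (IS_QLA82XX(ha) && ha->flags.mbox_busy) {
		ha->flags.fw_hung = 1;
		ha->flags.mbox_int = 1;
		complete(&ha->mbx_intr_comp);
	}

	/* slot_reset: the lowest enabled PCI function becomes the reset
	 * owner and reinitialises the firmware on behalf of all functions */
	if (IS_QLA82XX(ha) && qla82xx_error_recovery(base_vha) == QLA_SUCCESS)
		ret = PCI_ERS_RESULT_RECOVERED;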
Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 2 + drivers/scsi/qla2xxx/qla_nx.c | 7 ++- drivers/scsi/qla2xxx/qla_os.c | 134 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 138 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 1a1b281cea33..4edfc731ea96 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -511,6 +511,7 @@ extern void qla82xx_reset_chip(struct scsi_qla_host *); extern void qla82xx_config_rings(struct scsi_qla_host *); extern int qla82xx_pinit_from_rom(scsi_qla_host_t *); extern void qla82xx_watchdog(scsi_qla_host_t *); +extern int qla82xx_start_firmware(scsi_qla_host_t *); /* Firmware and flash related functions */ extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); @@ -538,6 +539,7 @@ extern void qla82xx_poll(int, void *); extern void qla82xx_init_flags(struct qla_hw_data *); /* ISP 8021 hardware related */ +extern void qla82xx_set_drv_active(scsi_qla_host_t *); extern int qla82xx_crb_win_lock(struct qla_hw_data *); extern void qla82xx_crb_win_unlock(struct qla_hw_data *); extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *); diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 915b77a6e193..2ad91100a7e4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2257,7 +2257,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha) ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } -static inline void +inline void qla82xx_set_drv_active(scsi_qla_host_t *vha) { uint32_t drv_active; @@ -2411,7 +2411,7 @@ fw_load_failed: return QLA_FUNCTION_FAILED; } -static int +int qla82xx_start_firmware(scsi_qla_host_t *vha) { int pcie_cap; @@ -3291,6 +3291,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + /* all 0xff, assume AER/EEH in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) + return; if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 30b05229af26..cfa55827a101 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3526,6 +3526,11 @@ qla2x00_timer(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req; + if (ha->flags.eeh_busy) { + qla2x00_restart_timer(vha, WATCH_INTERVAL); + return; + } + if (IS_QLA82XX(ha)) qla82xx_watchdog(vha); @@ -3755,6 +3760,17 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: ha->flags.eeh_busy = 1; + /* For ISP82XX complete any pending mailbox cmd */ + if (IS_QLA82XX(ha)) { + ha->flags.fw_hung = 1; + if (ha->flags.mbox_busy) { + ha->flags.mbox_int = 1; + DEBUG2(qla_printk(KERN_ERR, ha, + "Due to pci channel io frozen, doing premature " + "completion of mbx command\n")); + complete(&ha->mbx_intr_comp); + } + } qla2x00_free_irqs(vha); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; @@ -3803,6 +3819,109 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } +uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) +{ + uint32_t rval = QLA_FUNCTION_FAILED; + uint32_t drv_active = 0; + struct qla_hw_data *ha = base_vha->hw; + int fn; + struct 
pci_dev *other_pdev = NULL; + + DEBUG17(qla_printk(KERN_INFO, ha, + "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no)); + + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + + if (base_vha->flags.online) { + /* Abort all outstanding commands, + * so as to be requeued later */ + qla2x00_abort_isp_cleanup(base_vha); + } + + + fn = PCI_FUNC(ha->pdev->devfn); + while (fn > 0) { + fn--; + DEBUG17(qla_printk(KERN_INFO, ha, + "Finding pci device at function = 0x%x\n", fn)); + other_pdev = + pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), + ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), + fn)); + + if (!other_pdev) + continue; + if (atomic_read(&other_pdev->enable_cnt)) { + DEBUG17(qla_printk(KERN_INFO, ha, + "Found PCI func availabe and enabled at 0x%x\n", + fn)); + pci_dev_put(other_pdev); + break; + } + pci_dev_put(other_pdev); + } + + if (!fn) { + /* Reset owner */ + DEBUG17(qla_printk(KERN_INFO, ha, + "This devfn is reset owner = 0x%x\n", ha->pdev->devfn)); + qla82xx_idc_lock(ha); + + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_INITIALIZING); + + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, + QLA82XX_IDC_VERSION); + + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + DEBUG17(qla_printk(KERN_INFO, ha, + "drv_active = 0x%x\n", drv_active)); + + qla82xx_idc_unlock(ha); + /* Reset if device is not already reset + * drv_active would be 0 if a reset has already been done + */ + if (drv_active) + rval = qla82xx_start_firmware(base_vha); + else + rval = QLA_SUCCESS; + qla82xx_idc_lock(ha); + + if (rval != QLA_SUCCESS) { + qla_printk(KERN_INFO, ha, "HW State: FAILED\n"); + qla82xx_clear_drv_active(ha); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_FAILED); + } else { + qla_printk(KERN_INFO, ha, "HW State: READY\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_READY); + qla82xx_idc_unlock(ha); + ha->flags.fw_hung = 0; + rval = qla82xx_restart_isp(base_vha); + qla82xx_idc_lock(ha); + /* Clear driver state register */ + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); + qla82xx_set_drv_active(base_vha); + } + qla82xx_idc_unlock(ha); + } else { + DEBUG17(qla_printk(KERN_INFO, ha, + "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); + if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == + QLA82XX_DEV_READY)) { + ha->flags.fw_hung = 0; + rval = qla82xx_restart_isp(base_vha); + qla82xx_idc_lock(ha); + qla82xx_set_drv_active(base_vha); + qla82xx_idc_unlock(ha); + } + } + clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + + return rval; +} + static pci_ers_result_t qla2xxx_pci_slot_reset(struct pci_dev *pdev) { @@ -3835,15 +3954,23 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) if (rc) { qla_printk(KERN_WARNING, ha, "Can't re-enable PCI device after reset.\n"); - return ret; + goto exit_slot_reset; } rsp = ha->rsp_q_map[0]; if (qla2x00_request_irqs(ha, rsp)) - return ret; + goto exit_slot_reset; if (ha->isp_ops->pci_config(base_vha)) - return ret; + goto exit_slot_reset; + + if (IS_QLA82XX(ha)) { + if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) { + ret = PCI_ERS_RESULT_RECOVERED; + goto exit_slot_reset; + } else + goto exit_slot_reset; + } while (ha->flags.mbox_busy && retries--) msleep(1000); @@ -3854,6 +3981,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); +exit_slot_reset: DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset-return:ret=%x\n", ret)); -- cgit v1.2.3 From 35e0cbd4b282e541d05ca21694beb26c087bb49e Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 3 
Sep 2010 15:20:51 -0700 Subject: [SCSI] qla2xxx: Return proper fabric name based on device state. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 679a4326811c..560641aa3a5c 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1685,14 +1685,14 @@ static void qla2x00_get_host_fabric_name(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); - u64 node_name; + uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \ + 0xFF, 0xFF, 0xFF, 0xFF}; + u64 fabric_name = wwn_to_u64(node_name); if (vha->device_flags & SWITCH_FOUND) - node_name = wwn_to_u64(vha->fabric_node_name); - else - node_name = wwn_to_u64(vha->node_name); + fabric_name = wwn_to_u64(vha->fabric_node_name); - fc_host_fabric_name(shost) = node_name; + fc_host_fabric_name(shost) = fabric_name; } static void -- cgit v1.2.3 From 77e334d240aa0a771f861656d639ea13e9fc6569 Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 3 Sep 2010 15:20:52 -0700 Subject: [SCSI] qla2xxx: Cleanup some dead-code and make some functions static. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_gbl.h | 11 -- drivers/scsi/qla2xxx/qla_nx.c | 385 +++++++++++++++++++++-------------------- drivers/scsi/qla2xxx/qla_nx.h | 5 + 3 files changed, 202 insertions(+), 199 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 4edfc731ea96..967bc9278bf7 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -501,7 +501,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); /* PCI related functions */ extern int qla82xx_pci_config(struct scsi_qla_host *); extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); -extern int qla82xx_pci_mem_write_2M(struct qla_hw_data *, u64, void *, int); extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *); extern int qla82xx_pci_region_offset(struct pci_dev *, int); extern int qla82xx_iospace_config(struct qla_hw_data *); @@ -509,7 +508,6 @@ extern int qla82xx_iospace_config(struct qla_hw_data *); /* Initialization related functions */ extern void qla82xx_reset_chip(struct scsi_qla_host *); extern void qla82xx_config_rings(struct scsi_qla_host *); -extern int qla82xx_pinit_from_rom(scsi_qla_host_t *); extern void qla82xx_watchdog(scsi_qla_host_t *); extern int qla82xx_start_firmware(scsi_qla_host_t *); @@ -534,26 +532,17 @@ extern irqreturn_t qla82xx_msix_default(int, void *); extern irqreturn_t qla82xx_msix_rsp_q(int, void *); extern void qla82xx_enable_intrs(struct qla_hw_data *); extern void qla82xx_disable_intrs(struct qla_hw_data *); -extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); extern void qla82xx_poll(int, void *); extern void qla82xx_init_flags(struct qla_hw_data *); /* ISP 8021 hardware related */ extern void qla82xx_set_drv_active(scsi_qla_host_t *); -extern int qla82xx_crb_win_lock(struct qla_hw_data *); extern void qla82xx_crb_win_unlock(struct qla_hw_data *); -extern int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *, ulong *); extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); extern int qla82xx_rd_32(struct qla_hw_data *, ulong); extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); extern int qla82xx_wrmem(struct 
qla_hw_data *, u64, void *, int); -extern int qla82xx_check_for_bad_spd(struct qla_hw_data *); -extern int qla82xx_load_fw(scsi_qla_host_t *); -extern int qla82xx_rom_lock(struct qla_hw_data *); extern void qla82xx_rom_unlock(struct qla_hw_data *); -extern int qla82xx_rom_fast_read(struct qla_hw_data *, int , int *); -extern int qla82xx_do_rom_fast_read(struct qla_hw_data *, int, int *); -extern unsigned long qla82xx_decode_crb_addr(unsigned long); /* ISP 8021 IDC */ extern void qla82xx_clear_drv_active(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 2ad91100a7e4..614d745d7e11 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -403,6 +403,54 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) return off; } +static int +qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off) +{ + struct crb_128M_2M_sub_block_map *m; + + if (*off >= QLA82XX_CRB_MAX) + return -1; + + if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { + *off = (*off - QLA82XX_PCI_CAMQM) + + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; + return 0; + } + + if (*off < QLA82XX_PCI_CRBSPACE) + return -1; + + *off -= QLA82XX_PCI_CRBSPACE; + + /* Try direct map */ + m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; + + if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { + *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; + return 0; + } + /* Not in direct map, use crb window */ + return 1; +} + +#define CRB_WIN_LOCK_TIMEOUT 100000000 +static int qla82xx_crb_win_lock(struct qla_hw_data *ha) +{ + int done = 0, timeout = 0; + + while (!done) { + /* acquire semaphore3 from PCI HW block */ + done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); + if (done == 1) + break; + if (timeout >= CRB_WIN_LOCK_TIMEOUT) + return -1; + timeout++; + } + qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); + return 0; +} + int qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data) { @@ -453,24 +501,6 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off) return data; } -#define CRB_WIN_LOCK_TIMEOUT 100000000 -int qla82xx_crb_win_lock(struct qla_hw_data *ha) -{ - int done = 0, timeout = 0; - - while (!done) { - /* acquire semaphore3 from PCI HW block */ - done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); - if (done == 1) - break; - if (timeout >= CRB_WIN_LOCK_TIMEOUT) - return -1; - timeout++; - } - qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); - return 0; -} - #define IDC_LOCK_TIMEOUT 100000000 int qla82xx_idc_lock(struct qla_hw_data *ha) { @@ -504,36 +534,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha) qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } -int -qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off) -{ - struct crb_128M_2M_sub_block_map *m; - - if (*off >= QLA82XX_CRB_MAX) - return -1; - - if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { - *off = (*off - QLA82XX_PCI_CAMQM) + - QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; - return 0; - } - - if (*off < QLA82XX_PCI_CRBSPACE) - return -1; - - *off -= QLA82XX_PCI_CRBSPACE; - - /* Try direct map */ - m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; - - if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { - *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; - return 0; - } - /* Not in direct map, use crb window */ - return 1; -} - /* PCI Windowing for DDR regions. 
*/ #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ (((addr) <= (high)) && ((addr) >= (low))) @@ -557,7 +557,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, int qla82xx_pci_set_window_warning_count; -unsigned long +static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) { int window; @@ -798,7 +798,8 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, } #define MTU_FUDGE_FACTOR 100 -unsigned long qla82xx_decode_crb_addr(unsigned long addr) +static unsigned long +qla82xx_decode_crb_addr(unsigned long addr) { int i; unsigned long base_addr, offset, pci_base; @@ -824,7 +825,7 @@ unsigned long qla82xx_decode_crb_addr(unsigned long addr) static long rom_max_timeout = 100; static long qla82xx_rom_lock_timeout = 100; -int +static int qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; @@ -842,7 +843,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha) return 0; } -int +static int qla82xx_wait_rom_busy(struct qla_hw_data *ha) { long timeout = 0; @@ -862,7 +863,7 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha) return 0; } -int +static int qla82xx_wait_rom_done(struct qla_hw_data *ha) { long timeout = 0; @@ -882,7 +883,7 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha) return 0; } -int +static int qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); @@ -905,7 +906,7 @@ qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) return 0; } -int +static int qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { int ret, loops = 0; @@ -926,7 +927,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) return ret; } -int +static int qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) { qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR); @@ -940,7 +941,7 @@ qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val) return 0; } -int +static int qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) { long timeout = 0; @@ -964,7 +965,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha) return ret; } -int +static int qla82xx_flash_set_write_enable(struct qla_hw_data *ha) { uint32_t val; @@ -981,7 +982,7 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha) return 0; } -int +static int qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) { if (qla82xx_flash_set_write_enable(ha)) @@ -996,7 +997,7 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val) return qla82xx_flash_wait_write_finish(ha); } -int +static int qla82xx_write_disable_flash(struct qla_hw_data *ha) { qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI); @@ -1008,7 +1009,7 @@ qla82xx_write_disable_flash(struct qla_hw_data *ha) return 0; } -int +static int ql82xx_rom_lock_d(struct qla_hw_data *ha) { int loops = 0; @@ -1024,7 +1025,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha) return 0;; } -int +static int qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, uint32_t data) { @@ -1061,7 +1062,8 @@ done_write: /* This routine does CRB initialize sequence * to put the ISP into operational state */ -int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) +static int +qla82xx_pinit_from_rom(scsi_qla_host_t *vha) { int addr, val; int i ; @@ -1207,7 +1209,8 @@ int qla82xx_pinit_from_rom(scsi_qla_host_t *vha) return 0; } -int qla82xx_check_for_bad_spd(struct qla_hw_data *ha) +static int +qla82xx_check_for_bad_spd(struct qla_hw_data *ha) { u32 val = 0; val = qla82xx_rd_32(ha, 
BOOT_LOADER_DIMM_STATUS); @@ -1225,7 +1228,116 @@ int qla82xx_check_for_bad_spd(struct qla_hw_data *ha) return 0; } -int +static int +qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, + u64 off, void *data, int size) +{ + int i, j, ret = 0, loop, sz[2], off0; + int scale, shift_amount, startword; + uint32_t temp; + uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; + + /* + * If not MN, go check for MS or invalid. + */ + if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) + mem_crb = QLA82XX_CRB_QDR_NET; + else { + mem_crb = QLA82XX_CRB_DDR_NET; + if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) + return qla82xx_pci_mem_write_direct(ha, + off, data, size); + } + + off0 = off & 0x7; + sz[0] = (size < (8 - off0)) ? size : (8 - off0); + sz[1] = size - sz[0]; + + off8 = off & 0xfffffff0; + loop = (((off & 0xf) + size - 1) >> 4) + 1; + shift_amount = 4; + scale = 2; + startword = (off & 0xf)/8; + + for (i = 0; i < loop; i++) { + if (qla82xx_pci_mem_read_2M(ha, off8 + + (i << shift_amount), &word[i * scale], 8)) + return -1; + } + + switch (size) { + case 1: + tmpw = *((uint8_t *)data); + break; + case 2: + tmpw = *((uint16_t *)data); + break; + case 4: + tmpw = *((uint32_t *)data); + break; + case 8: + default: + tmpw = *((uint64_t *)data); + break; + } + + if (sz[0] == 8) { + word[startword] = tmpw; + } else { + word[startword] &= + ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); + word[startword] |= tmpw << (off0 * 8); + } + if (sz[1] != 0) { + word[startword+1] &= ~(~0ULL << (sz[1] * 8)); + word[startword+1] |= tmpw >> (sz[0] * 8); + } + + /* + * don't lock here - write_wx gets the lock if each time + * write_lock_irqsave(&adapter->adapter_lock, flags); + * netxen_nic_pci_change_crbwindow_128M(adapter, 0); + */ + for (i = 0; i < loop; i++) { + temp = off8 + (i << shift_amount); + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); + temp = 0; + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); + temp = word[i * scale] & 0xffffffff; + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); + temp = (word[i * scale] >> 32) & 0xffffffff; + qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); + temp = word[i*scale + 1] & 0xffffffff; + qla82xx_wr_32(ha, mem_crb + + MIU_TEST_AGT_WRDATA_UPPER_LO, temp); + temp = (word[i*scale + 1] >> 32) & 0xffffffff; + qla82xx_wr_32(ha, mem_crb + + MIU_TEST_AGT_WRDATA_UPPER_HI, temp); + + temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; + qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); + if ((temp & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + if (printk_ratelimit()) + dev_err(&ha->pdev->dev, + "failed to write through agent\n"); + ret = -1; + break; + } + } + + return ret; +} + +static int qla82xx_fw_load_from_flash(struct qla_hw_data *ha) { int i; @@ -1357,114 +1469,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, return 0; } -int -qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, - u64 off, void *data, int size) -{ - int i, j, ret = 0, loop, sz[2], off0; - int scale, shift_amount, startword; - uint32_t temp; - uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; - - /* - * If not MN, go check for MS or invalid. 
- */ - if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) - mem_crb = QLA82XX_CRB_QDR_NET; - else { - mem_crb = QLA82XX_CRB_DDR_NET; - if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) - return qla82xx_pci_mem_write_direct(ha, - off, data, size); - } - - off0 = off & 0x7; - sz[0] = (size < (8 - off0)) ? size : (8 - off0); - sz[1] = size - sz[0]; - - off8 = off & 0xfffffff0; - loop = (((off & 0xf) + size - 1) >> 4) + 1; - shift_amount = 4; - scale = 2; - startword = (off & 0xf)/8; - - for (i = 0; i < loop; i++) { - if (qla82xx_pci_mem_read_2M(ha, off8 + - (i << shift_amount), &word[i * scale], 8)) - return -1; - } - - switch (size) { - case 1: - tmpw = *((uint8_t *)data); - break; - case 2: - tmpw = *((uint16_t *)data); - break; - case 4: - tmpw = *((uint32_t *)data); - break; - case 8: - default: - tmpw = *((uint64_t *)data); - break; - } - - if (sz[0] == 8) { - word[startword] = tmpw; - } else { - word[startword] &= - ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); - word[startword] |= tmpw << (off0 * 8); - } - if (sz[1] != 0) { - word[startword+1] &= ~(~0ULL << (sz[1] * 8)); - word[startword+1] |= tmpw >> (sz[0] * 8); - } - - /* - * don't lock here - write_wx gets the lock if each time - * write_lock_irqsave(&adapter->adapter_lock, flags); - * netxen_nic_pci_change_crbwindow_128M(adapter, 0); - */ - for (i = 0; i < loop; i++) { - temp = off8 + (i << shift_amount); - qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); - temp = 0; - qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); - temp = word[i * scale] & 0xffffffff; - qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); - temp = (word[i * scale] >> 32) & 0xffffffff; - qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); - temp = word[i*scale + 1] & 0xffffffff; - qla82xx_wr_32(ha, mem_crb + - MIU_TEST_AGT_WRDATA_UPPER_LO, temp); - temp = (word[i*scale + 1] >> 32) & 0xffffffff; - qla82xx_wr_32(ha, mem_crb + - MIU_TEST_AGT_WRDATA_UPPER_HI, temp); - - temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; - qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); - temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; - qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); - - for (j = 0; j < MAX_CTL_CHECK; j++) { - temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); - if ((temp & MIU_TA_CTL_BUSY) == 0) - break; - } - - if (j >= MAX_CTL_CHECK) { - if (printk_ratelimit()) - dev_err(&ha->pdev->dev, - "failed to write through agent\n"); - ret = -1; - break; - } - } - - return ret; -} static struct qla82xx_uri_table_desc * qla82xx_get_table_desc(const u8 *unirom, int section) @@ -1725,7 +1729,8 @@ void qla82xx_reset_adapter(struct scsi_qla_host *vha) ha->isp_ops->disable_intrs(ha); } -int qla82xx_fw_load_from_blob(struct qla_hw_data *ha) +static int +qla82xx_fw_load_from_blob(struct qla_hw_data *ha) { u64 *ptr64; u32 i, flashaddr, size; @@ -1836,7 +1841,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) return 0; } -int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) +static int +qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; @@ -1874,7 +1880,8 @@ int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) return QLA_FUNCTION_FAILED; } -int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) +static int +qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; @@ -1933,7 +1940,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \ * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ -void +static void 
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; @@ -2267,10 +2274,11 @@ qla82xx_set_drv_active(scsi_qla_host_t *vha) /* If reset value is all FF's, initialize DRV_ACTIVE */ if (drv_active == 0xffffffff) { - qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, + QLA82XX_DRV_NOT_ACTIVE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); } - drv_active |= (1 << (ha->portnum * 4)); + drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } @@ -2280,7 +2288,7 @@ qla82xx_clear_drv_active(struct qla_hw_data *ha) uint32_t drv_active; drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); - drv_active &= ~(1 << (ha->portnum * 4)); + drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); } @@ -2291,7 +2299,7 @@ qla82xx_need_reset(struct qla_hw_data *ha) int rval; drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); - rval = drv_state & (1 << (ha->portnum * 4)); + rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); return rval; } @@ -2305,7 +2313,7 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha) /* If reset value is all FF's, initialize DRV_STATE */ if (drv_state == 0xffffffff) { - qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); + qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); @@ -2335,7 +2343,8 @@ qla82xx_set_qsnt_ready(struct qla_hw_data *ha) qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); } -int qla82xx_load_fw(scsi_qla_host_t *vha) +static int +qla82xx_load_fw(scsi_qla_host_t *vha) { int rst; struct fw_blob *blob; @@ -2419,7 +2428,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; /* scrub dma mask expansion register */ - qla82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); + qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE); /* Put both the PEG CMD and RCV PEG to default state * of 0 before resetting the hardware @@ -2869,7 +2878,7 @@ queuing_error: return QLA_FUNCTION_FAILED; } -uint32_t * +static uint32_t * qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t length) { @@ -2890,7 +2899,7 @@ done_read: return dwptr; } -int +static int qla82xx_unprotect_flash(struct qla_hw_data *ha) { int ret; @@ -2921,7 +2930,7 @@ done_unprotect: return ret; } -int +static int qla82xx_protect_flash(struct qla_hw_data *ha) { int ret; @@ -2950,7 +2959,7 @@ done_protect: return ret; } -int +static int qla82xx_erase_sector(struct qla_hw_data *ha, int addr) { int ret = 0; diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 15559cab39f8..51ec0c5380e8 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -26,6 +26,7 @@ #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) #define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) +#define QLA82XX_DMA_SHIFT_VALUE 0x55555555 #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 #define QLA82XX_HW_H1_CH_HUB_ADR 0x0E @@ -583,6 +584,10 @@ #define QLA82XX_DRVST_RST_RDY 1 #define QLA82XX_DRVST_QSNT_RDY 2 +/* Different drive active state */ +#define QLA82XX_DRV_NOT_ACTIVE 0 +#define QLA82XX_DRV_ACTIVE 1 + /* * The PCI VendorID and DeviceID for our board. 
*/ -- cgit v1.2.3 From bddd2d65a48c492d3e585e65df0be89c58b4acda Mon Sep 17 00:00:00 2001 From: Lalit Chandivade Date: Fri, 3 Sep 2010 15:20:53 -0700 Subject: [SCSI] qla2xxx: Update to AER support, do early abort commands. Currently the IOs are returned back in slot reset, this could be late and can cause error handler to invoke. If error handler invokes, eh_abort fails and escalate to device/bus/host resets causing issues. The commands need to be returned back to upper layer in io_error_detected. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_init.c | 20 +++++++++++--------- drivers/scsi/qla2xxx/qla_os.c | 2 ++ 2 files changed, 13 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9f71226e0a3e..f2da5c5dacdd 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3835,17 +3835,19 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) LOOP_DOWN_TIME); } - /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_QLA82XX(ha)) { - if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, - WAIT_HOST) == QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Done wait for pending commands\n")); + if (!ha->flags.eeh_busy) { + /* Make sure for ISP 82XX IO DMA is complete */ + if (IS_QLA82XX(ha)) { + if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, + WAIT_HOST) == QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_INFO, ha, + "Done wait for pending commands\n")); + } } - } - /* Requeue all commands in outstanding command list. */ - qla2x00_abort_all_cmds(vha, DID_RESET << 16); + /* Requeue all commands in outstanding command list. */ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); + } } /* diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index cfa55827a101..e1782f771400 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3773,6 +3773,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) } qla2x00_free_irqs(vha); pci_disable_device(pdev); + /* Return back all IOs */ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: ha->flags.pci_channel_io_perm_failure = 1; -- cgit v1.2.3 From b1d46989c12ec4d93f274ca8378bb1a6014d244a Mon Sep 17 00:00:00 2001 From: Madhuranath Iyengar Date: Fri, 3 Sep 2010 15:20:54 -0700 Subject: [SCSI] qla2xxx: Handle MPI timeout indicated by AE8002 In case the MPI times out, the FW issues an async event AE8002 to indicate this to every FCoE function. The FC/FCoE driver is required to handle this, by doing a soft reset and issuing a Write MPI register mailbox command to reset the MPI. 
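Pieced together from the hunks below, the sequence is: the ISR flags the MPI for reset when AE8002 reports it (mailbox 3 set, function on port 0), the ISP abort path then resets the MPI via the Write MPI Register mailbox command, and the board is taken offline if that keeps failing. In sketch form:

	/* qla2x00_async_event(): remember that the MPI needs a reset */
	if ((mbx & MBX_3) && ha->flags.port0)
		set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);

	/* qla24xx_reset_risc(): issue MBC_WRITE_MPI_REGISTER (0x01) with
	 * mb[] = { 0x1010, 0, 1, 0 } before resetting the RISC */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			/* retried up to 5 times via ISP_ABORT_NEEDED; after
			 * that the board is marked offline */
		}
	}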
Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 6 ++++ drivers/scsi/qla2xxx/qla_gbl.h | 2 ++ drivers/scsi/qla2xxx/qla_init.c | 31 +++++++++++++++++++++ drivers/scsi/qla2xxx/qla_isr.c | 8 +++++- drivers/scsi/qla2xxx/qla_mbx.c | 61 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 107 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 3a432ea0c7a3..ee15f4f10fec 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -706,6 +706,11 @@ typedef struct { #define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ #define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ +/* + * ISP81xx mailbox commands + */ +#define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. */ + /* Firmware return data sizes */ #define FCAL_MAP_SIZE 128 @@ -2858,6 +2863,7 @@ typedef struct scsi_qla_host { #define NPIV_CONFIG_NEEDED 16 #define ISP_UNRECOVERABLE 17 #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ +#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ uint32_t device_flags; #define SWITCH_FOUND BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 967bc9278bf7..c33dec827e1e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -352,6 +352,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); extern int qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla81xx_write_mpi_register(scsi_qla_host_t *, uint16_t *); extern int qla2x00_get_data_rate(scsi_qla_host_t *); extern int qla24xx_set_fcp_prio(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f2da5c5dacdd..c2d7bb8cd53f 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -943,6 +943,19 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->hardware_lock, flags); } +/** + * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC. + * + * Returns 0 on success. + */ +int +qla81xx_reset_mpi(scsi_qla_host_t *vha) +{ + uint16_t mb[4] = {0x1010, 0, 1, 0}; + + return qla81xx_write_mpi_register(vha, mb); +} + /** * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. * @ha: HA context @@ -957,6 +970,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; uint32_t cnt, d2; uint16_t wd; + static int abts_cnt; /* ISP abort retry counts */ spin_lock_irqsave(&ha->hardware_lock, flags); @@ -990,6 +1004,23 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) barrier(); } + /* If required, do an MPI FW reset now */ + if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { + if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) { + if (++abts_cnt < 5) { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + set_bit(MPI_RESET_NEEDED, &vha->dpc_flags); + } else { + /* + * We exhausted the ISP abort retries. We have to + * set the board offline. 
+ */ + abts_cnt = 0; + vha->flags.online = 0; + } + } + } + WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); RD_REG_DWORD(®->hccr); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6982ba70e12a..bb4d63a792aa 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -412,8 +412,14 @@ skip_rio: "Unrecoverable Hardware Error: adapter " "marked OFFLINE!\n"); vha->flags.online = 0; - } else + } else { + /* Check to see if MPI timeout occured */ + if ((mbx & MBX_3) && (ha->flags.port0)) + set_bit(MPI_RESET_NEEDED, + &vha->dpc_flags); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } } else if (mb[1] == 0) { qla_printk(KERN_INFO, ha, "Unrecoverable Hardware Error: adapter marked " diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index d0413d56887c..52024080c393 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3948,6 +3948,67 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) return rval; } +int +qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) +{ + int rval; + uint32_t stat, timer; + uint16_t mb0 = 0; + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + + rval = QLA_SUCCESS; + + DEBUG11(qla_printk(KERN_INFO, ha, + "%s(%ld): entered.\n", __func__, vha->host_no)); + + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + /* Write the MBC data to the registers */ + WRT_REG_WORD(®->mailbox0, MBC_WRITE_MPI_REGISTER); + WRT_REG_WORD(®->mailbox1, mb[0]); + WRT_REG_WORD(®->mailbox2, mb[1]); + WRT_REG_WORD(®->mailbox3, mb[2]); + WRT_REG_WORD(®->mailbox4, mb[3]); + + WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); + + /* Poll for MBC interrupt */ + for (timer = 6000000; timer; timer--) { + /* Check for pending interrupts. */ + stat = RD_REG_DWORD(®->host_status); + if (stat & HSRX_RISC_INT) { + stat &= 0xff; + + if (stat == 0x1 || stat == 0x2 || + stat == 0x10 || stat == 0x11) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + mb0 = RD_REG_WORD(®->mailbox0); + WRT_REG_DWORD(®->hccr, + HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); + break; + } + } + udelay(5); + } + + if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) + rval = mb0 & MBS_MASK; + else + rval = QLA_FUNCTION_FAILED; + + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n", + __func__, vha->host_no, rval, mb[0])); + } else { + DEBUG11(printk(KERN_INFO + "%s(%ld): done.\n", __func__, vha->host_no)); + } + + return rval; +} int qla2x00_get_data_rate(scsi_qla_host_t *vha) { -- cgit v1.2.3 From 3fadb80b0f39b193caafeed615a219aae57279fe Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 3 Sep 2010 15:20:55 -0700 Subject: [SCSI] qla2xxx: Clear local references of rport on device loss timeout notification from FC transport. The clearing of local references to rport was blocked when abort is active. This code clears the local references unconditionaly upon device loss timeout callback from FC transport. 
Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 560641aa3a5c..c9781050c9ed 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1547,22 +1547,22 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) if (!fcport) return; - if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) - return; - - if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { - qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); - return; - } - /* * Transport has effectively 'deleted' the rport, clear * all local references. */ spin_lock_irq(host->host_lock); - fcport->rport = NULL; + fcport->rport = fcport->drport = NULL; *((fc_port_t **)rport->dd_data) = NULL; spin_unlock_irq(host->host_lock); + + if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) + return; + + if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { + qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + return; + } } static void -- cgit v1.2.3 From 58548cb51a824276c34a3227a4917d2e2ded0daa Mon Sep 17 00:00:00 2001 From: Giridhar Malavali Date: Fri, 3 Sep 2010 15:20:56 -0700 Subject: [SCSI] qla2xxx: Increase SG table size to support large IO size per scsi command. The sg table size is increased from 128 to 1024 to support multiple sg lists and number of sg elements per scsi command. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_def.h | 2 ++ drivers/scsi/qla2xxx/qla_os.c | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ee15f4f10fec..df7d74f93e50 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2989,6 +2989,8 @@ typedef struct scsi_qla_host { #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) +#define QLA_SG_ALL 1024 + enum nexus_wait_type { WAIT_HOST = 0, WAIT_TARGET, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e1782f771400..2e10f2b2e017 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2136,8 +2136,16 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + base_vha->vp_idx; - if (IS_QLA2100(ha)) - host->sg_tablesize = 32; + + /* Set the SG table size based on ISP type */ + if (!IS_FWI2_CAPABLE(ha)) { + if (IS_QLA2100(ha)) + host->sg_tablesize = 32; + } else { + if (!IS_QLA82XX(ha)) + host->sg_tablesize = QLA_SG_ALL; + } + host->max_id = max_id; host->this_id = 255; host->cmd_per_lun = 3; -- cgit v1.2.3 From bcc5b6d3233688a5179f445ef06a28e055c5873a Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Fri, 3 Sep 2010 15:20:57 -0700 Subject: [SCSI] qla2xxx: AER Support-Return recovered from mmio_enable function for 82XX. Return recovered from pci_mmio_enabled function if it is 82XX. 
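The change itself is a short early return at the top of qla2xxx_pci_mmio_enabled(), presumably because the hccr/status register sampling that follows is written for the legacy register layouts and does not apply to ISP82xx; the real recovery work is left to the slot-reset stage:

	if (IS_QLA82XX(ha))
		return PCI_ERS_RESULT_RECOVERED;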
Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_os.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2e10f2b2e017..bdd53f0ebef0 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3803,6 +3803,9 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + if (IS_QLA82XX(ha)) + return PCI_ERS_RESULT_RECOVERED; + spin_lock_irqsave(&ha->hardware_lock, flags); if (IS_QLA2100(ha) || IS_QLA2200(ha)){ stat = RD_REG_DWORD(®->hccr); -- cgit v1.2.3 From e6d4ef450965af5a3753184d36ef9db7a3242950 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 14 Aug 2010 13:05:41 -0700 Subject: [SCSI] iscsi transport: fix kernel-doc notation Fix typo in scsi_transport_iscsi.c kernel-doc notation: Warning(drivers/scsi/scsi_transport_iscsi.c:548): No description found for parameter 'cmd' Signed-off-by: Randy Dunlap Acked-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_iscsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index e84026def1f4..332387a6bc25 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -537,7 +537,7 @@ static void iscsi_scan_session(struct work_struct *work) /** * iscsi_block_scsi_eh - block scsi eh until session state has transistioned - * cmd: scsi cmd passed to scsi eh handler + * @cmd: scsi cmd passed to scsi eh handler * * If the session is down this function will wait for the recovery * timer to fire or for the session to be logged back in. If the -- cgit v1.2.3 From 0caeb91c8d9ae6398bfe46ce70892e965353f613 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 17 Aug 2010 13:54:57 +0200 Subject: [SCSI] pm8001: handle allocation failures Return -ENOMEM if the allocations fail. 
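The fix below is the standard allocation-check pattern; a hedged sketch follows, with placeholder struct and function names rather than the pm8001 symbols.

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct fw_ctx_example {		/* placeholder for struct fw_control_ex */
	void *usr_addr;
	u32 len;
};

static int example_nvmd_req(void *payload, u32 payload_len)
{
	struct fw_ctx_example *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;		/* previously dereferenced without a check */

	ctx->usr_addr = payload;
	ctx->len = payload_len;

	/*
	 * ... build and post the inbound-queue command here; the real
	 * driver hands ctx to its completion path instead of freeing it
	 * immediately as this simplified sketch does.
	 */
	kfree(ctx);
	return 0;
}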
Signed-off-by: Dan Carpenter Acked-by: Jack Wang Signed-off-by: James Bottomley --- drivers/scsi/pm8001/pm8001_hwi.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 9793aa6afb10..d8db0137c0c7 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -4194,6 +4194,8 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, nvmd_type = ioctl_payload->minor_function; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -4272,6 +4274,8 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, nvmd_type = ioctl_payload->minor_function; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, ioctl_payload->func_specific, @@ -4381,6 +4385,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); + if (!fw_control_context) + return -ENOMEM; fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; if (fw_control->len != 0) { if (pm8001_mem_alloc(pm8001_ha->pdev, -- cgit v1.2.3 From 47259658ed9fad837541b738f9375d38022daa14 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 18 Aug 2010 09:28:43 -0700 Subject: [SCSI] fix kconfig dependency warnings related to SCSI_SAS_ATTRS Fix kconfig dependency warnings in scsi/Kconfig: warning: (SCSI_SAS_LIBSAS && SCSI || SCSI_MPT2SAS && SCSI_LOWLEVEL && PCI && SCSI || FUSION_SAS && FUSION && PCI && SCSI) selects SCSI_SAS_ATTRS which has unmet direct dependencies (SCSI && BLK_DEV_BSG) Signed-off-by: Randy Dunlap Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index a6fdcf4cf74a..2e9632e2c98b 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -316,7 +316,8 @@ config SCSI_ISCSI_ATTRS config SCSI_SAS_ATTRS tristate "SAS Transport Attributes" - depends on SCSI && BLK_DEV_BSG + depends on SCSI + select BLK_DEV_BSG help If you wish to export transport-specific information about each attached SAS device to sysfs, say Y. -- cgit v1.2.3 From 6014759c929b4ce8c068810b8d05f31969df5897 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Thu, 19 Aug 2010 11:49:00 -0400 Subject: [SCSI] scsi_debug: Update thin provisioning support The previous thin provisioning support was not very user friendly because it depended on all the relevant options being set on the command line. Implement support for the Thin Provisioning VPD page from SBC3 r24 and add module options for TPU (UNMAP) and TPWS (WRITE SAME (16) with UNMAP bit). This allows us to have sane default and to enable thin provisioning with a simple tpu=1 or tpws=1 on the command line depending on whether we want UNMAP or WRITE SAME behavior. Signed-off-by: Martin K. 
Petersen Acked-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 100 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index b02bdc6c2cd1..06329d47b38f 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -109,10 +109,12 @@ static const char * scsi_debug_version_date = "20100324"; #define DEF_PHYSBLK_EXP 0 #define DEF_LOWEST_ALIGNED 0 #define DEF_OPT_BLKS 64 -#define DEF_UNMAP_MAX_BLOCKS 0 -#define DEF_UNMAP_MAX_DESC 0 -#define DEF_UNMAP_GRANULARITY 0 +#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF +#define DEF_UNMAP_MAX_DESC 256 +#define DEF_UNMAP_GRANULARITY 1 #define DEF_UNMAP_ALIGNMENT 0 +#define DEF_TPWS 0 +#define DEF_TPU 0 /* bit mask values for scsi_debug_opts */ #define SCSI_DEBUG_OPT_NOISE 1 @@ -177,10 +179,12 @@ static int scsi_debug_ato = DEF_ATO; static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_opt_blks = DEF_OPT_BLKS; -static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; -static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; -static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; -static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; +static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; +static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; +static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; +static unsigned int scsi_debug_tpws = DEF_TPWS; +static unsigned int scsi_debug_tpu = DEF_TPU; static int scsi_debug_cmnd_count = 0; @@ -723,16 +727,9 @@ static int inquiry_evpd_b0(unsigned char * arr) /* Optimal Transfer Length */ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); - if (scsi_debug_unmap_max_desc) { - unsigned int blocks; - - if (scsi_debug_unmap_max_blocks) - blocks = scsi_debug_unmap_max_blocks; - else - blocks = 0xffffffff; - + if (scsi_debug_tpu) { /* Maximum Unmap LBA Count */ - put_unaligned_be32(blocks, &arr[16]); + put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); /* Maximum Unmap Block Descriptor Count */ put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); @@ -745,10 +742,9 @@ static int inquiry_evpd_b0(unsigned char * arr) } /* Optimal Unmap Granularity */ - if (scsi_debug_unmap_granularity) { - put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); - return 0x3c; /* Mandatory page length for thin provisioning */ - } + put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); + + return 0x3c; /* Mandatory page length for thin provisioning */ return sizeof(vpdb0_data); } @@ -765,6 +761,21 @@ static int inquiry_evpd_b1(unsigned char *arr) return 0x3c; } +/* Thin provisioning VPD page (SBC-3) */ +static int inquiry_evpd_b2(unsigned char *arr) +{ + memset(arr, 0, 0x8); + arr[0] = 0; /* threshold exponent */ + + if (scsi_debug_tpu) + arr[1] = 1 << 7; + + if (scsi_debug_tpws) + arr[1] |= 1 << 6; + + return 0x8; +} + #define SDEBUG_LONG_INQ_SZ 96 #define SDEBUG_MAX_INQ_ARR_SZ 584 @@ -820,6 +831,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, arr[n++] = 0x89; /* ATA information */ arr[n++] = 0xb0; /* Block limits (SBC) */ arr[n++] = 0xb1; /* Block characteristics (SBC) */ + arr[n++] = 0xb2; /* Thin provisioning (SBC) */ arr[3] = n - 4; /* number of supported VPD pages */ } else if (0x80 == cmd[2]) { /* unit 
serial number */ arr[1] = cmd[2]; /*sanity */ @@ -867,6 +879,9 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b1(&arr[4]); + } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */ + arr[1] = cmd[2]; /*sanity */ + arr[3] = inquiry_evpd_b2(&arr[4]); } else { /* Illegal request, invalid field in cdb */ mk_sense_buffer(devip, ILLEGAL_REQUEST, @@ -1038,7 +1053,7 @@ static int resp_readcap16(struct scsi_cmnd * scp, arr[13] = scsi_debug_physblk_exp & 0xf; arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; - if (scsi_debug_unmap_granularity) + if (scsi_debug_tpu || scsi_debug_tpws) arr[14] |= 0x80; /* TPE */ arr[15] = scsi_debug_lowest_aligned & 0xff; @@ -2708,6 +2723,8 @@ module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); +module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO); +module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); @@ -2739,10 +2756,12 @@ MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); -MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)"); -MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)"); -MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)"); +MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); +MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); +MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); +MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)"); +MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)"); static char sdebug_info[256]; @@ -3130,7 +3149,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) { ssize_t count; - if (scsi_debug_unmap_granularity == 0) + if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) return scnprintf(buf, PAGE_SIZE, "0-%u\n", sdebug_store_sectors); @@ -3322,10 +3341,21 @@ static int __init scsi_debug_init(void) memset(dif_storep, 0xff, dif_size); } - if (scsi_debug_unmap_granularity) { + /* Thin Provisioning */ + if (scsi_debug_tpu || scsi_debug_tpws) { unsigned int map_bytes; - if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + scsi_debug_unmap_max_blocks = + clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); + + scsi_debug_unmap_max_desc = + clamp(scsi_debug_unmap_max_desc, 0U, 256U); + + scsi_debug_unmap_granularity = + clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); + + if (scsi_debug_unmap_alignment && + scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { printk(KERN_ERR "%s: ERR: unmap_granularity < unmap_alignment\n", __func__); @@ -3642,7 +3672,7 @@ int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t 
done) errsts = resp_readcap16(SCpnt, devip); else if (cmd[1] == SAI_GET_LBA_STATUS) { - if (scsi_debug_unmap_max_desc == 0) { + if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; @@ -3753,8 +3783,16 @@ write: } break; case WRITE_SAME_16: - if (cmd[1] & 0x8) - unmap = 1; + if (cmd[1] & 0x8) { + if (scsi_debug_tpws == 0) { + mk_sense_buffer(devip, ILLEGAL_REQUEST, + INVALID_FIELD_IN_CDB, 0); + errsts = check_condition_result; + } else + unmap = 1; + } + if (errsts) + break; /* fall through */ case WRITE_SAME: errsts = check_readiness(SCpnt, 0, devip); @@ -3768,7 +3806,7 @@ write: if (errsts) break; - if (scsi_debug_unmap_max_desc == 0) { + if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; -- cgit v1.2.3 From 634651fab36dc8d7c3b1a1aa14f9ec1e29de9f0f Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 27 Aug 2010 14:45:07 -0700 Subject: [SCSI] ipr: reverse the isr optimization changes The isr optimization patch that was submitted a few months ago exposed problems with receiving and handling spurious HRRQ interrutps. commit 64ffdb762241c0a9c0c8fac7ea92aa0ba1529334 Author: Wayne Boyer Date: Wed May 19 11:56:13 2010 -0700 [SCSI] ipr: improve interrupt service routine performance That patch is reverted with this one. A new patch will be submitted once the issue is better understood and properly handled in the driver. Signed-off-by: Wayne Boyer Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 63 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 33 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 67875d41345f..3b28e8728131 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4939,39 +4939,15 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) /** * ipr_handle_other_interrupt - Handle "other" interrupts * @ioa_cfg: ioa config struct + * @int_reg: interrupt register * * Return value: * IRQ_NONE / IRQ_HANDLED **/ -static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) +static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, + volatile u32 int_reg) { irqreturn_t rc = IRQ_HANDLED; - volatile u32 int_reg, int_mask_reg; - - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; - - /* If an interrupt on the adapter did not occur, ignore it. - * Or in the case of SIS 64, check for a stage change interrupt. 
- */ - if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { - if (ioa_cfg->sis64) { - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { - - /* clear stage change */ - writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - list_del(&ioa_cfg->reset_cmd->queue); - del_timer(&ioa_cfg->reset_cmd->timer); - ipr_reset_ioa_job(ioa_cfg->reset_cmd); - return IRQ_HANDLED; - } - } - - return IRQ_NONE; - } if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { /* Mask the interrupt */ @@ -5032,7 +5008,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; unsigned long lock_flags = 0; - volatile u32 int_reg; + volatile u32 int_reg, int_mask_reg; u32 ioasc; u16 cmd_index; int num_hrrq = 0; @@ -5047,6 +5023,33 @@ static irqreturn_t ipr_isr(int irq, void *devp) return IRQ_NONE; } + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it. + * Or in the case of SIS 64, check for a stage change interrupt. + */ + if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { + if (ioa_cfg->sis64) { + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { + + /* clear stage change */ + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_HANDLED; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_NONE; + } + while (1) { ipr_cmd = NULL; @@ -5086,7 +5089,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) /* Clear the PCI interrupt */ do { writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; } while (int_reg & IPR_PCII_HRRQ_UPDATED && num_hrrq++ < IPR_MAX_HRRQ_RETRIES); @@ -5101,7 +5104,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) } if (unlikely(rc == IRQ_NONE)) - rc = ipr_handle_other_interrupt(ioa_cfg); + rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return rc; -- cgit v1.2.3 From 91978465b1e5f89025cd43cd2102943160ec6dee Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Mon, 30 Aug 2010 10:55:09 +0200 Subject: [SCSI] zfcp: Reorder registration of initial SCSI device Make sure that the rport registration did complete and then register SCSI device directly. Otherwise the unit_enqueue would race with the call to zfcp_scsi_queue_unit_register. 
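The race fix below hinges on flush_work(): wait for the already-queued rport registration to finish before the LUN is added, so the subsequent scan finds a registered fc_rport. A sketch of that ordering with illustrative names (not the zfcp symbols):

#include <linux/workqueue.h>

struct example_port {
	struct work_struct rport_work;	/* queued earlier to register the fc_rport */
};

static void example_configure_lun(struct example_port *port)
{
	/*
	 * Returns only after a pending or running rport_work instance has
	 * completed, so the rport registration is done at this point.
	 */
	flush_work(&port->rport_work);

	/* Safe to add the LUN and register the SCSI device directly now. */
}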
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 96fa1f536394..68df57157d48 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -73,13 +73,14 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) if (!port) goto out_port; + flush_work(&port->rport_work); unit = zfcp_unit_enqueue(port, lun); if (IS_ERR(unit)) goto out_unit; zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL); zfcp_erp_wait(adapter); - flush_work(&unit->scsi_work); + zfcp_scsi_scan(unit); out_unit: put_device(&port->dev); -- cgit v1.2.3 From 78d16341facf829a71b6f7c68ec5511b9c168060 Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 31 Aug 2010 22:27:31 -0400 Subject: [SCSI] scsi_transport_fc: fix blocked bsg request when fc object deleted When an rport is "blocked" and a bsg request is received, the bsg request gets placed on the queue but the queue stalls. If the fc object is then deleted - the bsg queue never restarts and keeps the reference on the object, and stops the overall teardown. This patch restarts the bsg queue on teardown and drains any pending requests, allowing the teardown to succeed. Signed-off-by: Carl Lajeunesse Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 43 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 9f0f7d9c7422..78486d540652 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -4048,11 +4048,54 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) /** * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports * @q: the request_queue that is to be torn down. + * + * Notes: + * Before unregistering the queue empty any requests that are blocked + * + * */ static void fc_bsg_remove(struct request_queue *q) { + struct request *req; /* block request */ + int counts; /* totals for request_list count and starved */ + if (q) { + /* Stop taking in new requests */ + spin_lock_irq(q->queue_lock); + blk_stop_queue(q); + + /* drain all requests in the queue */ + while (1) { + /* need the lock to fetch a request + * this may fetch the same reqeust as the previous pass + */ + req = blk_fetch_request(q); + /* save requests in use and starved */ + counts = q->rq.count[0] + q->rq.count[1] + + q->rq.starved[0] + q->rq.starved[1]; + spin_unlock_irq(q->queue_lock); + /* any requests still outstanding? */ + if (counts == 0) + break; + + /* This may be the same req as the previous iteration, + * always send the blk_end_request_all after a prefetch. + * It is not okay to not end the request because the + * prefetch started the request. 
+ */ + if (req) { + /* return -ENXIO to indicate that this queue is + * going away + */ + req->errors = -ENXIO; + blk_end_request_all(req, -ENXIO); + } + + msleep(200); /* allow bsg to possibly finish */ + spin_lock_irq(q->queue_lock); + } + bsg_unregister_queue(q); blk_cleanup_queue(q); } -- cgit v1.2.3 From 9b906779568f009b53254a15b283b53ae4570b93 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 6 Sep 2010 17:24:28 -0700 Subject: [SCSI] scsi_debug: Convert to use root_device_register() and root_device_unregister() This patch updates the scsi_debug virtual LLD to use root_device_register() and root_device_unregister() from include/linux/device.h instead of device_register() and device_unregister() respectively within scsi_debug_init() and scsi_debug_exit() This simply involved converting the static struct device pseudo_primary into a pointer that is setup by the call to root_device_register(). This patch also contains the correct IS_ERR() conditional check of root_device_register() from within scsi_debug_init(). Thanks to Richard Sharpe and Dmitry Torokhov for their help with this item. Signed-off-by: Nicholas A. Bellinger Acked-by: Douglas Gilbert Signed-off-by: James Bottomley --- drivers/scsi/scsi_debug.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 06329d47b38f..2c36bae3bd4b 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3226,16 +3226,7 @@ static void do_remove_driverfs_files(void) driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); } -static void pseudo_0_release(struct device *dev) -{ - if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) - printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n"); -} - -static struct device pseudo_primary = { - .init_name = "pseudo_0", - .release = pseudo_0_release, -}; +struct device *pseudo_primary; static int __init scsi_debug_init(void) { @@ -3382,10 +3373,10 @@ static int __init scsi_debug_init(void) map_region(0, 2); } - ret = device_register(&pseudo_primary); - if (ret < 0) { - printk(KERN_WARNING "scsi_debug: device_register error: %d\n", - ret); + pseudo_primary = root_device_register("pseudo_0"); + if (IS_ERR(pseudo_primary)) { + printk(KERN_WARNING "scsi_debug: root_device_register() error\n"); + ret = PTR_ERR(pseudo_primary); goto free_vm; } ret = bus_register(&pseudo_lld_bus); @@ -3432,7 +3423,7 @@ del_files: bus_unreg: bus_unregister(&pseudo_lld_bus); dev_unreg: - device_unregister(&pseudo_primary); + root_device_unregister(pseudo_primary); free_vm: if (map_storep) vfree(map_storep); @@ -3453,7 +3444,7 @@ static void __exit scsi_debug_exit(void) do_remove_driverfs_files(); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); - device_unregister(&pseudo_primary); + root_device_unregister(pseudo_primary); if (dif_storep) vfree(dif_storep); @@ -3504,7 +3495,7 @@ static int sdebug_add_adapter(void) spin_unlock(&sdebug_host_list_lock); sdbg_host->dev.bus = &pseudo_lld_bus; - sdbg_host->dev.parent = &pseudo_primary; + sdbg_host->dev.parent = pseudo_primary; sdbg_host->dev.release = &sdebug_release_adapter; dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); -- cgit v1.2.3 From 6de76cfc7db8844bc26ab9a60b20f50ad7851833 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 7 Sep 2010 14:32:47 +0000 Subject: [SCSI] aacraid: semaphore cleanup Get rid of init_MUTEX[_LOCKED]() and use sema_init() instead. 
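The conversion is mechanical; for reference, a sketch of the mapping (the field name mirrors the aacraid hunks below, everything else is illustrative):

#include <linux/semaphore.h>

struct example_fib {
	struct semaphore event_wait;
};

static void example_fib_init(struct example_fib *fib)
{
	/* init_MUTEX_LOCKED(&fib->event_wait) becomes a count-0 semaphore: */
	sema_init(&fib->event_wait, 0);

	/* and init_MUTEX(&sem) would map to sema_init(&sem, 1). */
}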
Signed-off-by: Thomas Gleixner Cc: aacraid@adaptec.com Signed-off-by: James Bottomley --- drivers/scsi/aacraid/commctrl.c | 2 +- drivers/scsi/aacraid/commsup.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 1a5bf5724750..645ddd9d9b9e 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -190,7 +190,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) /* * Initialize the mutex used to wait for the next AIF. */ - init_MUTEX_LOCKED(&fibctx->wait_sem); + sema_init(&fibctx->wait_sem, 0); fibctx->wait = 0; /* * Initialize the fibs and set the count of fibs on diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 70079146e203..afc9aeba5edb 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -124,7 +124,7 @@ int aac_fib_setup(struct aac_dev * dev) fibptr->hw_fib_va = hw_fib; fibptr->data = (void *) fibptr->hw_fib_va->data; fibptr->next = fibptr+1; /* Forward chain the fibs */ - init_MUTEX_LOCKED(&fibptr->event_wait); + sema_init(&fibptr->event_wait, 0); spin_lock_init(&fibptr->event_lock); hw_fib->header.XferState = cpu_to_le32(0xffffffff); hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size); -- cgit v1.2.3 From e6a4202aa920a25db76f0e02dabe133179293ec0 Mon Sep 17 00:00:00 2001 From: Shyam Sundar Date: Tue, 7 Sep 2010 20:55:32 -0700 Subject: [SCSI] qla2xxx: ROM lock recovery if fw hangs while holding the lock. Signed-off-by: Giridhar Malavali Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_nx.c | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 614d745d7e11..8d8e40b829e4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -3152,6 +3152,20 @@ qla82xx_start_iocbs(srb_t *sp) } } +void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) +{ + if (qla82xx_rom_lock(ha)) + /* Someone else is holding the lock. */ + qla_printk(KERN_INFO, ha, "Resetting rom_lock\n"); + + /* + * Either we got the lock, or someone + * else died while holding it. + * In either case, unlock. + */ + qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); +} + /* * qla82xx_device_bootstrap * Initialize device, set DEV_READY, start fw @@ -3166,12 +3180,13 @@ qla82xx_start_iocbs(srb_t *sp) static int qla82xx_device_bootstrap(scsi_qla_host_t *vha) { - int rval, i, timeout; + int rval = QLA_SUCCESS; + int i, timeout; uint32_t old_count, count; struct qla_hw_data *ha = vha->hw; + int need_reset = 0, peg_stuck = 1; - if (qla82xx_need_reset(ha)) - goto dev_initialize; + need_reset = qla82xx_need_reset(ha); old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); @@ -3185,9 +3200,27 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha) count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); if (count != old_count) + peg_stuck = 0; + } + + if (need_reset) { + /* We are trying to perform a recovery here. */ + if (peg_stuck) + qla82xx_rom_lock_recovery(ha); + goto dev_initialize; + } else { + /* Start of day for this ha context. */ + if (peg_stuck) { + /* Either we are the first or recovery in progress. */ + qla82xx_rom_lock_recovery(ha); + goto dev_initialize; + } else + /* Firmware already running. 
*/ goto dev_ready; } + return rval; + dev_initialize: /* set to DEV_INITIALIZING */ qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); -- cgit v1.2.3 From d99e5f488ae28d9dfccc84435f4fd29bd1fe1b9b Mon Sep 17 00:00:00 2001 From: Brian King Date: Thu, 9 Sep 2010 16:20:42 -0500 Subject: [SCSI] ibmvfc: Log link up/down events By default, ibmvfc does not log any async events in order to avoid flooding the log with them. Improve on this by logging by default events that are not likely to flood the log, such as link up/down. Having these events in the log will improve the ability to debug issues with ibmvfc. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 74 ++++++++++++++++++++++++++++-------------- drivers/scsi/ibmvscsi/ibmvfc.h | 6 ++++ 2 files changed, 56 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index a5dd9b4a0ae8..b1512e9d1b9d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2501,41 +2501,66 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) LEAVE; } -static const struct { - enum ibmvfc_async_event ae; - const char *desc; -} ae_desc [] = { - { IBMVFC_AE_ELS_PLOGI, "PLOGI" }, - { IBMVFC_AE_ELS_LOGO, "LOGO" }, - { IBMVFC_AE_ELS_PRLO, "PRLO" }, - { IBMVFC_AE_SCN_NPORT, "N-Port SCN" }, - { IBMVFC_AE_SCN_GROUP, "Group SCN" }, - { IBMVFC_AE_SCN_DOMAIN, "Domain SCN" }, - { IBMVFC_AE_SCN_FABRIC, "Fabric SCN" }, - { IBMVFC_AE_LINK_UP, "Link Up" }, - { IBMVFC_AE_LINK_DOWN, "Link Down" }, - { IBMVFC_AE_LINK_DEAD, "Link Dead" }, - { IBMVFC_AE_HALT, "Halt" }, - { IBMVFC_AE_RESUME, "Resume" }, - { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed" }, +static const struct ibmvfc_async_desc ae_desc [] = { + { IBMVFC_AE_ELS_PLOGI, "PLOGI", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { IBMVFC_AE_ELS_LOGO, "LOGO", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { IBMVFC_AE_ELS_PRLO, "PRLO", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { IBMVFC_AE_SCN_NPORT, "N-Port SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { IBMVFC_AE_SCN_GROUP, "Group SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, + { IBMVFC_AE_SCN_DOMAIN, "Domain SCN", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_SCN_FABRIC, "Fabric SCN", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_LINK_UP, "Link Up", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_LINK_DOWN, "Link Down", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_LINK_DEAD, "Link Dead", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_HALT, "Halt", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_RESUME, "Resume", IBMVFC_DEFAULT_LOG_LEVEL }, + { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed", IBMVFC_DEFAULT_LOG_LEVEL }, }; -static const char *unknown_ae = "Unknown async"; +static const struct ibmvfc_async_desc unknown_ae = { + 0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL +}; /** * ibmvfc_get_ae_desc - Get text description for async event * @ae: async event * **/ -static const char *ibmvfc_get_ae_desc(u64 ae) +static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae) { int i; for (i = 0; i < ARRAY_SIZE(ae_desc); i++) if (ae_desc[i].ae == ae) - return ae_desc[i].desc; + return &ae_desc[i]; + + return &unknown_ae; +} + +static const struct { + enum ibmvfc_ae_link_state state; + const char *desc; +} link_desc [] = { + { IBMVFC_AE_LS_LINK_UP, " link up" }, + { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" }, + { IBMVFC_AE_LS_LINK_DOWN, " link down" }, + { IBMVFC_AE_LS_LINK_DEAD, " link dead" }, +}; + +/** + * ibmvfc_get_link_state - Get text description for link state + * @state: link state + * + **/ 
+static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(link_desc); i++) + if (link_desc[i].state == state) + return link_desc[i].desc; - return unknown_ae; + return ""; } /** @@ -2547,11 +2572,12 @@ static const char *ibmvfc_get_ae_desc(u64 ae) static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { - const char *desc = ibmvfc_get_ae_desc(crq->event); + const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event); struct ibmvfc_target *tgt; - ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," - " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); + ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx," + " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name, + ibmvfc_get_link_state(crq->link_state)); switch (crq->event) { case IBMVFC_AE_RESUME: diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 608af394c8cf..ef663e7c9bbc 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -541,6 +541,12 @@ enum ibmvfc_async_event { IBMVFC_AE_ADAPTER_FAILED = 0x1000, }; +struct ibmvfc_async_desc { + enum ibmvfc_async_event ae; + const char *desc; + int log_level; +}; + struct ibmvfc_crq { volatile u8 valid; volatile u8 format; -- cgit v1.2.3 From 57c237731b92fadc7d44824276313ec330b1989b Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:51 +0200 Subject: [SCSI] zfcp: Add zfcp private struct as SCSI device driver data Add a new data structure zfcp_scsi_dev that holds zfcp private data for each SCSI device. Use scsi_transport_reserve_device to let the SCSI midlayer automatically allocate this with each SCSI device. 
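A minimal sketch of the transport-class facility the patch builds on: reserve per-scsi_device space when setting up the transport template, then reach it through scsi_transport_device_data(). Apart from those two midlayer calls, the names here are placeholders; the zfcp hunks below do the same with struct zfcp_scsi_dev.

#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>

struct example_lun_data {		/* placeholder for struct zfcp_scsi_dev */
	u32 lun_handle;
	unsigned int status;
};

/* Called once at module init, with the template from fc_attach_transport(). */
static void example_reserve(struct scsi_transport_template *tmpl)
{
	scsi_transport_reserve_device(tmpl, sizeof(struct example_lun_data));
}

/* The midlayer allocates the area per scsi_device; it is accessed like this: */
static struct example_lun_data *example_sdev_data(struct scsi_device *sdev)
{
	return scsi_transport_device_data(sdev);
}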
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 3 +++ drivers/s390/scsi/zfcp_def.h | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 68df57157d48..5c4b874591ea 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -159,6 +159,9 @@ static int __init zfcp_module_init(void) fc_attach_transport(&zfcp_transport_functions); if (!zfcp_data.scsi_transport_template) goto out_transport; + scsi_transport_reserve_device(zfcp_data.scsi_transport_template, + sizeof(struct zfcp_scsi_dev)); + retval = misc_register(&zfcp_cfdc_misc); if (retval) { diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index e1c6b6e05a75..d31158c0de93 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -233,6 +233,45 @@ struct zfcp_unit { struct work_struct scsi_work; }; +/** + * struct zfcp_scsi_dev - zfcp data per SCSI device + * @status: zfcp internal status flags + * @lun_handle: handle from "open lun" for issuing FSF requests + * @erp_action: zfcp erp data for opening and recovering this LUN + * @erp_counter: zfcp erp counter for this LUN + * @latencies: FSF channel and fabric latencies + * @port: zfcp_port where this LUN belongs to + */ +struct zfcp_scsi_dev { + atomic_t status; + u32 lun_handle; + struct zfcp_erp_action erp_action; + atomic_t erp_counter; + struct zfcp_latencies latencies; + struct zfcp_port *port; +}; + +/** + * sdev_to_zfcp - Access zfcp LUN data for SCSI device + * @sdev: scsi_device where to get the zfcp_scsi_dev pointer + */ +static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev) +{ + return scsi_transport_device_data(sdev); +} + +/** + * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN + * @sdev: SCSI device where to get the LUN from + */ +static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev) +{ + u64 fcp_lun; + + int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun); + return fcp_lun; +} + /** * struct zfcp_fsf_req - basic FSF request structure * @list: list of FSF requests -- cgit v1.2.3 From 1daa4eb50fa5cd4c8f9c55452606e786fd42053b Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:52 +0200 Subject: [SCSI] zfcp: Move code for managing zfcp_unit devices to new file Move the code for managing zfcp_unit devices to the new file zfcp_unit.c. This is in preparation for the change that zfcp_unit will only track the LUNs configured via unit_add, other data will be moved from zfcp_unit to the new struct zfcp_scsi_dev. 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/Makefile | 5 +- drivers/s390/scsi/zfcp_aux.c | 122 +------------------- drivers/s390/scsi/zfcp_ext.h | 13 ++- drivers/s390/scsi/zfcp_scsi.c | 41 +------ drivers/s390/scsi/zfcp_sysfs.c | 56 ++------- drivers/s390/scsi/zfcp_unit.c | 255 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 279 insertions(+), 213 deletions(-) create mode 100644 drivers/s390/scsi/zfcp_unit.c (limited to 'drivers') diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile index cb301cc6178c..c454ffebb63e 100644 --- a/drivers/s390/scsi/Makefile +++ b/drivers/s390/scsi/Makefile @@ -2,7 +2,8 @@ # Makefile for the S/390 specific device drivers # -zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ - zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o +zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \ + zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \ + zfcp_unit.o obj-$(CONFIG_ZFCP) += zfcp.o diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 5c4b874591ea..044fb22718d2 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) struct ccw_device *cdev; struct zfcp_adapter *adapter; struct zfcp_port *port; - struct zfcp_unit *unit; cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid); if (!cdev) @@ -72,18 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) port = zfcp_get_port_by_wwpn(adapter, wwpn); if (!port) goto out_port; - flush_work(&port->rport_work); - unit = zfcp_unit_enqueue(port, lun); - if (IS_ERR(unit)) - goto out_unit; - - zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL); - zfcp_erp_wait(adapter); - zfcp_scsi_scan(unit); -out_unit: + zfcp_unit_add(port, lun); put_device(&port->dev); + out_port: zfcp_ccw_adapter_put(adapter); out_ccw_device: @@ -214,30 +206,6 @@ static void __exit zfcp_module_exit(void) module_exit(zfcp_module_exit); -/** - * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN - * @port: pointer to port to search for unit - * @fcp_lun: FCP LUN to search for - * - * Returns: pointer to zfcp_unit or NULL - */ -struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) -{ - unsigned long flags; - struct zfcp_unit *unit; - - read_lock_irqsave(&port->unit_list_lock, flags); - list_for_each_entry(unit, &port->unit_list, list) - if (unit->fcp_lun == fcp_lun) { - if (!get_device(&unit->dev)) - unit = NULL; - read_unlock_irqrestore(&port->unit_list_lock, flags); - return unit; - } - read_unlock_irqrestore(&port->unit_list_lock, flags); - return NULL; -} - /** * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn * @adapter: pointer to adapter to search for port @@ -263,92 +231,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, return NULL; } -/** - * zfcp_unit_release - dequeue unit - * @dev: pointer to device - * - * waits until all work is done on unit and removes it then from the unit->list - * of the associated port. - */ -static void zfcp_unit_release(struct device *dev) -{ - struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); - - put_device(&unit->port->dev); - kfree(unit); -} - -/** - * zfcp_unit_enqueue - enqueue unit to unit list of a port. 
- * @port: pointer to port where unit is added - * @fcp_lun: FCP LUN of unit to be enqueued - * Returns: pointer to enqueued unit on success, ERR_PTR on error - * - * Sets up some unit internal structures and creates sysfs entry. - */ -struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) -{ - struct zfcp_unit *unit; - int retval = -ENOMEM; - - get_device(&port->dev); - - unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (unit) { - put_device(&unit->dev); - retval = -EEXIST; - goto err_out; - } - - unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); - if (!unit) - goto err_out; - - unit->port = port; - unit->fcp_lun = fcp_lun; - unit->dev.parent = &port->dev; - unit->dev.release = zfcp_unit_release; - - if (dev_set_name(&unit->dev, "0x%016llx", - (unsigned long long) fcp_lun)) { - kfree(unit); - goto err_out; - } - retval = -EINVAL; - - INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work); - - spin_lock_init(&unit->latencies.lock); - unit->latencies.write.channel.min = 0xFFFFFFFF; - unit->latencies.write.fabric.min = 0xFFFFFFFF; - unit->latencies.read.channel.min = 0xFFFFFFFF; - unit->latencies.read.fabric.min = 0xFFFFFFFF; - unit->latencies.cmd.channel.min = 0xFFFFFFFF; - unit->latencies.cmd.fabric.min = 0xFFFFFFFF; - - if (device_register(&unit->dev)) { - put_device(&unit->dev); - goto err_out; - } - - if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) - goto err_out_put; - - write_lock_irq(&port->unit_list_lock); - list_add_tail(&unit->list, &port->unit_list); - write_unlock_irq(&port->unit_list_lock); - - atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); - - return unit; - -err_out_put: - device_unregister(&unit->dev); -err_out: - put_device(&port->dev); - return ERR_PTR(retval); -} - static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { adapter->pool.erp_req = diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3b93239c6f69..5c3966eaca51 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -15,12 +15,10 @@ #include "zfcp_fc.h" /* zfcp_aux.c */ -extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *); extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, u32); -extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); extern void zfcp_sg_free_table(struct scatterlist *, int); extern int zfcp_sg_setup_table(struct scatterlist *, int); extern void zfcp_device_unregister(struct device *, @@ -163,8 +161,6 @@ extern void zfcp_scsi_rport_work(struct work_struct *); extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *); extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *); -extern void zfcp_scsi_scan(struct zfcp_unit *); -extern void zfcp_scsi_scan_work(struct work_struct *); extern void zfcp_scsi_set_prot(struct zfcp_adapter *); extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int); @@ -175,4 +171,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs; extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; extern struct device_attribute *zfcp_sysfs_shost_attrs[]; +/* zfcp_unit.c */ +extern int zfcp_unit_add(struct zfcp_port *, u64); +extern int zfcp_unit_remove(struct zfcp_port *, u64); +extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64); +extern struct scsi_device 
*zfcp_unit_sdev(struct zfcp_unit *unit); +extern void zfcp_unit_scsi_scan(struct zfcp_unit *); +extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *); +extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *); + #endif /* ZFCP_EXT_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index cb000c9833bb..03837797c45e 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -144,7 +144,7 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, list_for_each_entry(port, &adapter->port_list, list) { if (!port->rport || (id != port->rport->scsi_target_id)) continue; - unit = zfcp_get_unit_by_lun(port, lun); + unit = zfcp_unit_find(port, lun); if (unit) break; } @@ -534,20 +534,6 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) } } -static void zfcp_scsi_queue_unit_register(struct zfcp_port *port) -{ - struct zfcp_unit *unit; - - read_lock_irq(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) { - get_device(&unit->dev); - if (scsi_queue_work(port->adapter->scsi_host, - &unit->scsi_work) <= 0) - put_device(&unit->dev); - } - read_unlock_irq(&port->unit_list_lock); -} - static void zfcp_scsi_rport_register(struct zfcp_port *port) { struct fc_rport_identifiers ids; @@ -574,7 +560,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) port->rport = rport; port->starget_id = rport->scsi_target_id; - zfcp_scsi_queue_unit_register(port); + zfcp_unit_queue_scsi_scan(port); } static void zfcp_scsi_rport_block(struct zfcp_port *port) @@ -637,29 +623,6 @@ void zfcp_scsi_rport_work(struct work_struct *work) put_device(&port->dev); } -/** - * zfcp_scsi_scan - Register LUN with SCSI midlayer - * @unit: The LUN/unit to register - */ -void zfcp_scsi_scan(struct zfcp_unit *unit) -{ - struct fc_rport *rport = unit->port->rport; - - if (rport && rport->port_state == FC_PORTSTATE_ONLINE) - scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, - scsilun_to_int((struct scsi_lun *) - &unit->fcp_lun), 0); -} - -void zfcp_scsi_scan_work(struct work_struct *work) -{ - struct zfcp_unit *unit = container_of(work, struct zfcp_unit, - scsi_work); - - zfcp_scsi_scan(unit); - put_device(&unit->dev); -} - /** * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host * @adapter: The adapter where to configure DIF/DIX for the SCSI host diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index b4561c86e230..56c46e09d2d4 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -257,28 +257,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); - struct zfcp_unit *unit; u64 fcp_lun; - int retval = -EINVAL; - - if (!(port && get_device(&port->dev))) - return -EBUSY; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) - goto out; + return -EINVAL; - unit = zfcp_unit_enqueue(port, fcp_lun); - if (IS_ERR(unit)) - goto out; - else - retval = 0; + if (zfcp_unit_add(port, fcp_lun)) + return -EINVAL; - zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); - zfcp_erp_wait(unit->port->adapter); - zfcp_scsi_scan(unit); -out: - put_device(&port->dev); - return retval ? 
retval : (ssize_t) count; + return count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); @@ -287,42 +274,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, const char *buf, size_t count) { struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); - struct zfcp_unit *unit; u64 fcp_lun; - int retval = -EINVAL; - struct scsi_device *sdev; - - if (!(port && get_device(&port->dev))) - return -EBUSY; if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) - goto out; - - unit = zfcp_get_unit_by_lun(port, fcp_lun); - if (!unit) - goto out; - else - retval = 0; + return -EINVAL; - sdev = scsi_device_lookup(port->adapter->scsi_host, 0, - port->starget_id, - scsilun_to_int((struct scsi_lun *)&fcp_lun)); - if (sdev) { - scsi_remove_device(sdev); - scsi_device_put(sdev); - } + if (zfcp_unit_remove(port, fcp_lun)) + return -EINVAL; - write_lock_irq(&port->unit_list_lock); - list_del(&unit->list); - write_unlock_irq(&port->unit_list_lock); - - put_device(&unit->dev); - - zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); - zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); -out: - put_device(&port->dev); - return retval ? retval : (ssize_t) count; + return count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c new file mode 100644 index 000000000000..e210c41ee389 --- /dev/null +++ b/drivers/s390/scsi/zfcp_unit.c @@ -0,0 +1,255 @@ +/* + * zfcp device driver + * + * Tracking of manually configured LUNs and helper functions to + * register the LUNs with the SCSI midlayer. + * + * Copyright IBM Corporation 2010 + */ + +#include "zfcp_def.h" +#include "zfcp_ext.h" + +/** + * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer + * @unit: The zfcp LUN/unit to register + * + * When the SCSI midlayer is not allowed to automatically scan and + * attach SCSI devices, zfcp has to register the single devices with + * the SCSI midlayer. + */ +void zfcp_unit_scsi_scan(struct zfcp_unit *unit) +{ + struct fc_rport *rport = unit->port->rport; + unsigned int lun; + + lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); + + if (rport && rport->port_state == FC_PORTSTATE_ONLINE) + scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1); +} + +static void zfcp_unit_scsi_scan_work(struct work_struct *work) +{ + struct zfcp_unit *unit = container_of(work, struct zfcp_unit, + scsi_work); + + zfcp_unit_scsi_scan(unit); + put_device(&unit->dev); +} + +/** + * zfcp_unit_queue_scsi_scan - Register configured units on port + * @port: The zfcp_port where to register units + * + * After opening a port, all units configured on this port have to be + * registered with the SCSI midlayer. This function should be called + * after calling fc_remote_port_add, so that the fc_rport is already + * ONLINE and the call to scsi_scan_target runs the same way as the + * call in the FC transport class. 
+ */ +void zfcp_unit_queue_scsi_scan(struct zfcp_port *port) +{ + struct zfcp_unit *unit; + + read_lock_irq(&port->unit_list_lock); + list_for_each_entry(unit, &port->unit_list, list) { + get_device(&unit->dev); + if (scsi_queue_work(port->adapter->scsi_host, + &unit->scsi_work) <= 0) + put_device(&unit->dev); + } + read_unlock_irq(&port->unit_list_lock); +} + +static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + + list_for_each_entry(unit, &port->unit_list, list) + if (unit->fcp_lun == fcp_lun) { + get_device(&unit->dev); + return unit; + } + + return NULL; +} + +/** + * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN + * @port: zfcp_port where to look for the unit + * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit + * + * If zfcp_unit is found, a reference is acquired that has to be + * released later. + * + * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit + * with the specified FCP LUN. + */ +struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + + read_lock_irq(&port->unit_list_lock); + unit = _zfcp_unit_find(port, fcp_lun); + read_unlock_irq(&port->unit_list_lock); + return unit; +} + +/** + * zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit. + * @dev: pointer to device in zfcp_unit + */ +static void zfcp_unit_release(struct device *dev) +{ + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + + put_device(&unit->port->dev); + kfree(unit); +} + +/** + * zfcp_unit_enqueue - enqueue unit to unit list of a port. + * @port: pointer to port where unit is added + * @fcp_lun: FCP LUN of unit to be enqueued + * Returns: 0 success + * + * Sets up some unit internal structures and creates sysfs entry. 
+ */ +int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + + unit = zfcp_unit_find(port, fcp_lun); + if (unit) { + put_device(&unit->dev); + return -EEXIST; + } + + unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); + if (!unit) + return -ENOMEM; + + unit->port = port; + unit->fcp_lun = fcp_lun; + unit->dev.parent = &port->dev; + unit->dev.release = zfcp_unit_release; + unit->latencies.write.channel.min = 0xFFFFFFFF; + unit->latencies.write.fabric.min = 0xFFFFFFFF; + unit->latencies.read.channel.min = 0xFFFFFFFF; + unit->latencies.read.fabric.min = 0xFFFFFFFF; + unit->latencies.cmd.channel.min = 0xFFFFFFFF; + unit->latencies.cmd.fabric.min = 0xFFFFFFFF; + INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work); + spin_lock_init(&unit->latencies.lock); + + if (dev_set_name(&unit->dev, "0x%016llx", + (unsigned long long) fcp_lun)) { + kfree(unit); + return -ENOMEM; + } + + if (device_register(&unit->dev)) { + put_device(&unit->dev); + return -ENOMEM; + } + + if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) { + device_unregister(&unit->dev); + return -EINVAL; + } + + get_device(&port->dev); + + write_lock_irq(&port->unit_list_lock); + list_add_tail(&unit->list, &port->unit_list); + write_unlock_irq(&port->unit_list_lock); + + atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); + zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); + zfcp_erp_wait(unit->port->adapter); + zfcp_unit_scsi_scan(unit); + + return 0; +} + +/** + * zfcp_unit_sdev - Return SCSI device for zfcp_unit + * @unit: The zfcp_unit where to get the SCSI device for + * + * Returns: scsi_device pointer on success, NULL if there is no SCSI + * device for this zfcp_unit + * + * On success, the caller also holds a reference to the SCSI device + * that must be released with scsi_device_put. + */ +struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit) +{ + struct Scsi_Host *shost; + struct zfcp_port *port; + unsigned int lun; + + lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun); + port = unit->port; + shost = port->adapter->scsi_host; + return scsi_device_lookup(shost, 0, port->starget_id, lun); +} + +/** + * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device + * @unit: The unit to lookup the SCSI device for + * + * Returns the zfcp LUN status field of the SCSI device if the SCSI device + * for the zfcp_unit exists, 0 otherwise. + */ +unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit) +{ + unsigned int status = 0; + struct scsi_device *sdev; + struct zfcp_scsi_dev *zfcp_sdev; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + zfcp_sdev = sdev_to_zfcp(sdev); + status = atomic_read(&zfcp_sdev->status); + scsi_device_put(sdev); + } + + return status; +} + +/** + * zfcp_unit_remove - Remove entry from list of configured units + * @port: The port where to remove the unit from the configuration + * @fcp_lun: The 64 bit LUN of the unit to remove + * + * Returns: -EINVAL if a unit with the specified LUN does not exist, + * 0 on success. 
+ */ +int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun) +{ + struct zfcp_unit *unit; + struct scsi_device *sdev; + + write_lock_irq(&port->unit_list_lock); + unit = _zfcp_unit_find(port, fcp_lun); + if (unit) + list_del(&unit->list); + write_unlock_irq(&port->unit_list_lock); + + if (!unit) + return -EINVAL; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } + + put_device(&unit->dev); + + zfcp_erp_unit_shutdown(unit, 0, "unrem_1", NULL); + zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); + + return 0; +} -- cgit v1.2.3 From e4b9857fe628b453983cac89473a11a76a1f5786 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:53 +0200 Subject: [SCSI] zfcp: Remove ZFCP_SYSFS_FAILED macro and implement fcp_lun_show without macro These sysfs attributes will require different functions. Implement them without using macros, so that the open coded functions can be changed later. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_sysfs.c | 121 ++++++++++++++++++++++++++--------------- 1 file changed, 77 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 56c46e09d2d4..6b43bc46bf97 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -82,49 +82,73 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) != 0); -#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id) \ -static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \ - struct device_attribute *attr, \ - char *buf) \ -{ \ - struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ - \ - if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ - return sprintf(buf, "1\n"); \ - else \ - return sprintf(buf, "0\n"); \ -} \ -static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ - struct device_attribute *attr,\ - const char *buf, size_t count)\ -{ \ - struct _feat_def *_feat = container_of(dev, struct _feat_def, dev); \ - unsigned long val; \ - int retval = 0; \ - \ - if (!(_feat && get_device(&_feat->dev))) \ - return -EBUSY; \ - \ - if (strict_strtoul(buf, 0, &val) || val != 0) { \ - retval = -EINVAL; \ - goto out; \ - } \ - \ - zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL, \ - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\ - zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED, \ - _reopen_id, NULL); \ - zfcp_erp_wait(_adapter); \ -out: \ - put_device(&_feat->dev); \ - return retval ? 
retval : (ssize_t) count; \ -} \ -static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ - zfcp_sysfs_##_feat##_failed_show, \ - zfcp_sysfs_##_feat##_failed_store); +static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); -ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); -ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); + if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + return sprintf(buf, "1\n"); + + return sprintf(buf, "0\n"); +} + +static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_port *port = container_of(dev, struct zfcp_port, dev); + unsigned long val; + + if (strict_strtoul(buf, 0, &val) || val != 0) + return -EINVAL; + + zfcp_erp_modify_port_status(port, "sypfai1", NULL, + ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2", + NULL); + zfcp_erp_wait(port->adapter); + + return count; +} +static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO, + zfcp_sysfs_port_failed_show, + zfcp_sysfs_port_failed_store); + +static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + + if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + return sprintf(buf, "1\n"); + + return sprintf(buf, "0\n"); +} + +static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + unsigned long val; + + if (strict_strtoul(buf, 0, &val) || val != 0) + return -EINVAL; + + zfcp_erp_modify_unit_status(unit, "syufai1", NULL, + ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, + "syufai2", NULL); + zfcp_erp_wait(unit->port->adapter); + + return count; +} +static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO, + zfcp_sysfs_unit_failed_show, + zfcp_sysfs_unit_failed_store); static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev, struct device_attribute *attr, @@ -394,8 +418,17 @@ ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", dev_name(&unit->port->adapter->ccw_device->dev)); ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", (unsigned long long) unit->port->wwpn); -ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n", - (unsigned long long) unit->fcp_lun); + +static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct zfcp_unit *unit = sdev->hostdata; + + return sprintf(buf, "0x%016llx\n", (unsigned long long) unit->fcp_lun); +} +static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); struct device_attribute *zfcp_sysfs_sdev_attrs[] = { &dev_attr_fcp_lun, -- cgit v1.2.3 From fdbd1c5e27dabfa950d4b0f52a20069aeaf67b9d Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:54 +0200 Subject: [SCSI] zfcp: Allow running unit/LUN shutdown without acquiring reference With the change for the LUN data to be part of the scsi_device, the slave_destroy callback will be the final call to the zfcp_erp_unit_shutdown function. The erp tries to acquire a reference to run the action asynchronously and fail, if it cannot get the reference. 
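The trade-off can be pictured with a small stand-alone model; every name in it is made up for illustration, and only the take-a-reference-or-fail check and the wait-instead-of-reference shortcut mirror the behaviour described here:

/* erp_ref_model.c - toy model, not zfcp code; build with: cc -Wall erp_ref_model.c */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a SCSI device: a refcount plus a "deleting" flag that
 * models the SDEV_DEL state in which taking a new reference fails. */
struct toy_sdev {
	int ref;
	bool deleting;
};

static int toy_sdev_get(struct toy_sdev *sdev)
{
	if (sdev->deleting)
		return -1;	/* like taking a reference on a dying device */
	sdev->ref++;
	return 0;
}

static void toy_sdev_put(struct toy_sdev *sdev)
{
	sdev->ref--;
}

/* Asynchronous recovery: the action outlives the caller, so the device
 * must be pinned first; once deletion has started this has to fail. */
static int toy_erp_async(struct toy_sdev *sdev)
{
	if (toy_sdev_get(sdev))
		return -1;
	/* ... queue the action; the worker would drop the reference later ... */
	toy_sdev_put(sdev);	/* stands in for the worker's put */
	return 0;
}

/* Synchronous recovery: no extra reference is taken; the caller's own
 * reference plus waiting for completion keep the device in place. */
static int toy_erp_wait(struct toy_sdev *sdev)
{
	/* ... queue the action with a "no reference" flag ... */
	/* ... wait until the recovery has finished ... */
	(void)sdev;
	return 0;
}

int main(void)
{
	struct toy_sdev sdev = { .ref = 1, .deleting = true };

	printf("async recovery: %d (fails, device cannot be pinned)\n",
	       toy_erp_async(&sdev));
	printf("waiting recovery: %d (safe, caller keeps its reference)\n",
	       toy_erp_wait(&sdev));
	toy_sdev_put(&sdev);	/* caller drops its own reference only now */
	return 0;
}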
But calling scsi_device_get from slave_destroy will fail, because the scsi_device is already in the state SDEV_DEL. Introduce a new call into the zfcp erp to solve this: The function zfcp_erp_unit_shutdown_wait will close the LUN and wait for the erp to finish without acquiring an additional reference. The wait allows to omit the reference; the caller waiting for the erp to finish already has a reference that holds the struct in place. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_erp.c | 98 ++++++++++++++++++++++++++++---------------- drivers/s390/scsi/zfcp_ext.h | 1 + 2 files changed, 63 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 160b432c907f..50514b4c8732 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -21,6 +21,7 @@ enum zfcp_erp_act_flags { ZFCP_STATUS_ERP_DISMISSING = 0x00100000, ZFCP_STATUS_ERP_DISMISSED = 0x00200000, ZFCP_STATUS_ERP_LOWMEM = 0x00400000, + ZFCP_STATUS_ERP_NO_REF = 0x00800000, }; enum zfcp_erp_steps { @@ -169,22 +170,22 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, return need; } -static struct zfcp_erp_action *zfcp_erp_setup_act(int need, +static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, struct zfcp_adapter *adapter, struct zfcp_port *port, struct zfcp_unit *unit) { struct zfcp_erp_action *erp_action; - u32 status = 0; switch (need) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (!get_device(&unit->dev)) - return NULL; + if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) + if (!get_device(&unit->dev)) + return NULL; atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); erp_action = &unit->erp_action; if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; case ZFCP_ERP_ACTION_REOPEN_PORT: @@ -195,7 +196,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: @@ -205,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, erp_action = &adapter->erp_action; if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) - status = ZFCP_STATUS_ERP_CLOSE_ONLY; + act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; default: @@ -217,14 +218,15 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, erp_action->port = port; erp_action->unit = unit; erp_action->action = need; - erp_action->status = status; + erp_action->status = act_status; return erp_action; } static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit, char *id, void *ref) + struct zfcp_unit *unit, char *id, void *ref, + u32 act_status) { int retval = 1, need; struct zfcp_erp_action *act = NULL; @@ -236,10 +238,10 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, if (!need) goto out; - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); - act = zfcp_erp_setup_act(need, adapter, port, unit); + act = zfcp_erp_setup_act(need, act_status, adapter, port, unit); if (!act) goto out; + atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 
++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); wake_up(&adapter->erp_ready_wq); @@ -262,7 +264,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, return -EIO; } return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, - adapter, NULL, NULL, id, ref); + adapter, NULL, NULL, id, ref, 0); } /** @@ -285,7 +287,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, zfcp_erp_adapter_failed(adapter, "erareo1", NULL); else zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, - NULL, NULL, id, ref); + NULL, NULL, id, ref, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } @@ -317,20 +319,6 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id, zfcp_erp_port_reopen(port, clear | flags, id, ref); } -/** - * zfcp_erp_unit_shutdown - Shutdown unit - * @unit: Unit to shut down. - * @clear: Status flags to clear. - * @id: Id for debug trace event. - * @ref: Reference for debug trace event. - */ -void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id, - void *ref) -{ - int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; - zfcp_erp_unit_reopen(unit, clear | flags, id, ref); -} - static void zfcp_erp_port_block(struct zfcp_port *port, int clear) { zfcp_erp_modify_port_status(port, "erpblk1", NULL, @@ -348,7 +336,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, return; zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, - port->adapter, port, NULL, id, ref); + port->adapter, port, NULL, id, ref, 0); } /** @@ -381,7 +369,7 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, } return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, - port->adapter, port, NULL, id, ref); + port->adapter, port, NULL, id, ref, 0); } /** @@ -412,7 +400,7 @@ static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) } static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, - void *ref) + void *ref, u32 act_status) { struct zfcp_adapter *adapter = unit->port->adapter; @@ -422,7 +410,7 @@ static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, return; zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, - adapter, unit->port, unit, id, ref); + adapter, unit->port, unit, id, ref, act_status); } /** @@ -439,8 +427,45 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, struct zfcp_adapter *adapter = port->adapter; write_lock_irqsave(&adapter->erp_lock, flags); - _zfcp_erp_unit_reopen(unit, clear, id, ref); + _zfcp_erp_unit_reopen(unit, clear, id, ref, 0); + write_unlock_irqrestore(&adapter->erp_lock, flags); +} + +/** + * zfcp_erp_unit_shutdown - Shutdown unit + * @unit: Unit to shut down. + * @clear: Status flags to clear. + * @id: Id for debug trace event. + * @ref: Reference for debug trace event. + */ +void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id, + void *ref) +{ + int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + zfcp_erp_unit_reopen(unit, clear | flags, id, ref); +} + +/** + * zfcp_erp_unit_shutdown_wait - Shutdown unit and wait for erp completion + * @unit: Unit to shut down. + * @id: Id for debug trace event. + * + * Do not acquire a reference for the unit when creating the ERP + * action. It is safe, because this function waits for the ERP to + * complete first. 
+ */ +void zfcp_erp_unit_shutdown_wait(struct zfcp_unit *unit, char *id) +{ + unsigned long flags; + struct zfcp_port *port = unit->port; + struct zfcp_adapter *adapter = port->adapter; + int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; + + write_lock_irqsave(&adapter->erp_lock, flags); + _zfcp_erp_unit_reopen(unit, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF); write_unlock_irqrestore(&adapter->erp_lock, flags); + + zfcp_erp_wait(adapter); } static int status_change_set(unsigned long mask, atomic_t *status) @@ -566,7 +591,7 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, read_lock(&port->unit_list_lock); list_for_each_entry(unit, &port->unit_list, list) - _zfcp_erp_unit_reopen(unit, clear, id, ref); + _zfcp_erp_unit_reopen(unit, clear, id, ref, 0); read_unlock(&port->unit_list_lock); } @@ -583,7 +608,7 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); break; case ZFCP_ERP_ACTION_REOPEN_UNIT: - _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL); + _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL, 0); break; } } @@ -1143,7 +1168,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { _zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, - "ersscg3", NULL); + "ersscg3", NULL, 0); return ZFCP_ERP_EXIT; } break; @@ -1191,7 +1216,8 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) switch (act->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: - put_device(&unit->dev); + if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) + put_device(&unit->dev); break; case ZFCP_ERP_ACTION_REOPEN_PORT: diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 5c3966eaca51..3c90604076e0 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -80,6 +80,7 @@ extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, int); extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *); extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *); +extern void zfcp_erp_unit_shutdown_wait(struct zfcp_unit *, char *); extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern void zfcp_erp_thread_kill(struct zfcp_adapter *); -- cgit v1.2.3 From b62a8d9b45b971a67a0f8413338c230e3117dff5 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:55 +0200 Subject: [SCSI] zfcp: Use SCSI device data zfcp_scsi_dev instead of zfcp_unit This is the large change to switch from using the data in zfcp_unit to zfcp_scsi_dev. Keeping everything working requires doing the switch in one piece. To ensure that no code keeps using the data in zfcp_unit, this patch also removes the data from zfcp_unit that is now being replaced with zfcp_scsi_dev. For zfcp, the scsi_device together with zfcp_scsi_dev exist from the call of slave_alloc to the call of slave_destroy. The data in zfcp_scsi_dev is initialized in zfcp_scsi_slave_alloc and the LUN is opened; the final shutdown for the LUN is run from slave_destroy. Where the scsi_device or zfcp_scsi_dev is needed, the pointer to the scsi_device is passed as function argument and inside the function converted to the pointer to zfcp_scsi_dev; this avoids back and forth conversion betweeen scsi_device and zfcp_scsi_dev. 
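The lifetime described above is the usual slave_alloc/slave_destroy pattern for per-LUN driver data. The sketch below illustrates only that general pattern; the demo_* names and the use of sdev->hostdata are assumptions made for the example and are not zfcp's actual storage or callbacks, which in addition open the LUN in slave_alloc and run the final shutdown in slave_destroy:

/* demo_lun.c - generic low-level-driver sketch, not zfcp code */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <scsi/scsi_device.h>

/* Per-LUN driver data that lives exactly as long as the scsi_device. */
struct demo_scsi_dev {
	atomic_t status;		/* recovery/IO state kept per LUN */
	unsigned int erp_counter;
};

static inline struct demo_scsi_dev *sdev_to_demo(struct scsi_device *sdev)
{
	return sdev->hostdata;	/* storage location chosen for the example */
}

static int demo_slave_alloc(struct scsi_device *sdev)
{
	struct demo_scsi_dev *ddev;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	atomic_set(&ddev->status, 0);
	sdev->hostdata = ddev;
	/* a real driver would open the LUN / kick off recovery here */
	return 0;
}

static void demo_slave_destroy(struct scsi_device *sdev)
{
	struct demo_scsi_dev *ddev = sdev_to_demo(sdev);

	/* the final shutdown for the LUN runs here, then the data goes away */
	kfree(ddev);
	sdev->hostdata = NULL;
}

In a real driver these two functions would be wired into the host template's .slave_alloc and .slave_destroy hooks, which bound the lifetime of the per-LUN data to the scsi_device exactly as this patch does for zfcp_scsi_dev.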
While changing the function arguments from zfcp_unit to scsi_device, the functions names are renamed form "unit" to "lun". This is to have a seperation between zfcp_scsi_dev/LUN and the zfcp_unit; only code referring to the remaining configuration information in zfcp_unit struct uses "unit". Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 28 ++-- drivers/s390/scsi/zfcp_dbf.h | 12 +- drivers/s390/scsi/zfcp_def.h | 34 ++-- drivers/s390/scsi/zfcp_erp.c | 369 ++++++++++++++++++++++------------------- drivers/s390/scsi/zfcp_ext.h | 32 ++-- drivers/s390/scsi/zfcp_fsf.c | 279 +++++++++++++++---------------- drivers/s390/scsi/zfcp_scsi.c | 114 ++++++------- drivers/s390/scsi/zfcp_sysfs.c | 66 +++++--- drivers/s390/scsi/zfcp_unit.c | 11 -- 9 files changed, 480 insertions(+), 465 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index a86117b0d6e1..2224caa8b92d 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -482,7 +482,7 @@ static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); - zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us); + zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls); break; case ZFCP_REC_DBF_ID_ACTION: zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); @@ -600,19 +600,20 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) } /** - * zfcp_dbf_rec_unit - trace event for unit state change + * zfcp_dbf_rec_lun - trace event for LUN state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. request) - * @unit: unit + * @sdev: SCSI device */ -void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) +void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev) { - struct zfcp_port *port = unit->port; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_port *port = zfcp_sdev->port; struct zfcp_dbf *dbf = port->adapter->dbf; - zfcp_dbf_rec_target(id, ref, dbf, &unit->status, - &unit->erp_counter, port->wwpn, port->d_id, - unit->fcp_lun); + zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status, + &zfcp_sdev->erp_counter, port->wwpn, port->d_id, + zfcp_scsi_dev_lun(sdev)); } /** @@ -624,11 +625,11 @@ void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) * @action: address of error recovery action struct * @adapter: adapter * @port: port - * @unit: unit + * @sdev: SCSI device */ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit) + struct scsi_device *sdev) { struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_dbf_rec_record *r = &dbf->rec_buf; @@ -647,9 +648,10 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, r->u.trigger.ps = atomic_read(&port->status); r->u.trigger.wwpn = port->wwpn; } - if (unit) - r->u.trigger.us = atomic_read(&unit->status); - r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN; + if (sdev) + r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status); + r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) : + ZFCP_DBF_INVALID_LUN; debug_event(dbf->rec, action ? 
1 : 4, r, sizeof(*r)); spin_unlock_irqrestore(&dbf->rec_lock, flags); } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 2bcc3403126a..6a48c197b45d 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -60,7 +60,7 @@ struct zfcp_dbf_rec_record_trigger { u8 need; u32 as; u32 ps; - u32 us; + u32 ls; u64 ref; u64 action; u64 wwpn; @@ -350,16 +350,16 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, /** * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset * @tag: tag indicating success or failure of reset operation + * @scmnd: SCSI command which caused this error recovery * @flag: indicates type of reset (Target Reset, Logical Unit Reset) - * @unit: unit that needs reset - * @scsi_cmnd: SCSI command which caused this error recovery */ static inline -void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) +void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag) { + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); + zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, - unit->port->adapter->dbf, scsi_cmnd, NULL, 0); + zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0); } #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index d31158c0de93..6dfae7091aa4 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -85,8 +85,8 @@ struct zfcp_reqlist; #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 /* logical unit status */ -#define ZFCP_STATUS_UNIT_SHARED 0x00000004 -#define ZFCP_STATUS_UNIT_READONLY 0x00000008 +#define ZFCP_STATUS_LUN_SHARED 0x00000004 +#define ZFCP_STATUS_LUN_READONLY 0x00000008 /* FSF request status (this does not have a common part) */ #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 @@ -118,7 +118,7 @@ struct zfcp_erp_action { int action; /* requested action code */ struct zfcp_adapter *adapter; /* device which should be recovered */ struct zfcp_port *port; - struct zfcp_unit *unit; + struct scsi_device *sdev; u32 status; /* recovery status */ u32 step; /* active step of this erp action */ unsigned long fsf_req_id; @@ -219,17 +219,23 @@ struct zfcp_port { unsigned int starget_id; }; +/** + * struct zfcp_unit - LUN configured via zfcp sysfs + * @dev: struct device for sysfs representation and reference counting + * @list: entry in LUN/unit list per zfcp_port + * @port: reference to zfcp_port where this LUN is configured + * @fcp_lun: 64 bit LUN value + * @scsi_work: for running scsi_scan_target + * + * This is the representation of a LUN that has been configured for + * usage. The main data here is the 64 bit LUN value, data for + * running I/O and recovery is in struct zfcp_scsi_dev. 
+ */ struct zfcp_unit { - struct device dev; - struct list_head list; /* list of logical units */ - struct zfcp_port *port; /* remote port of unit */ - atomic_t status; /* status of this logical unit */ - u64 fcp_lun; /* own FCP_LUN */ - u32 handle; /* handle assigned by FSF */ - struct scsi_device *device; /* scsi device struct pointer */ - struct zfcp_erp_action erp_action; /* pending error recovery */ - atomic_t erp_counter; - struct zfcp_latencies latencies; + struct device dev; + struct list_head list; + struct zfcp_port *port; + u64 fcp_lun; struct work_struct scsi_work; }; @@ -288,7 +294,6 @@ static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev) * @erp_action: reference to erp action if request issued on behalf of ERP * @pool: reference to memory pool if used for this request * @issued: time when request was send (STCK) - * @unit: reference to unit if this request is a SCSI request * @handler: handler which should be called to process response */ struct zfcp_fsf_req { @@ -306,7 +311,6 @@ struct zfcp_fsf_req { struct zfcp_erp_action *erp_action; mempool_t *pool; unsigned long long issued; - struct zfcp_unit *unit; void (*handler)(struct zfcp_fsf_req *); }; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 50514b4c8732..734fc838931f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -30,12 +30,12 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, ZFCP_ERP_STEP_PORT_OPENING = 0x0800, - ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, - ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, + ZFCP_ERP_STEP_LUN_CLOSING = 0x1000, + ZFCP_ERP_STEP_LUN_OPENING = 0x2000, }; enum zfcp_erp_act_type { - ZFCP_ERP_ACTION_REOPEN_UNIT = 1, + ZFCP_ERP_ACTION_REOPEN_LUN = 1, ZFCP_ERP_ACTION_REOPEN_PORT = 2, ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3, ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4, @@ -89,24 +89,24 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) zfcp_erp_action_ready(act); } -static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) +static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev) { - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE) - zfcp_erp_action_dismiss(&unit->erp_action); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE) + zfcp_erp_action_dismiss(&zfcp_sdev->erp_action); } static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) { - struct zfcp_unit *unit; + struct scsi_device *sdev; if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); - else { - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - zfcp_erp_action_dismiss_unit(unit); - read_unlock(&port->unit_list_lock); - } + else + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + zfcp_erp_action_dismiss_lun(sdev); } static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) @@ -125,15 +125,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit) + struct scsi_device *sdev) { int need = want; - int u_status, p_status, a_status; + int l_status, p_status, a_status; + struct zfcp_scsi_dev *zfcp_sdev; switch (want) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: - u_status = atomic_read(&unit->status); - if (u_status & 
ZFCP_STATUS_COMMON_ERP_INUSE) + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(sdev); + l_status = atomic_read(&zfcp_sdev->status); + if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE) return 0; p_status = atomic_read(&port->status); if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) || @@ -173,18 +175,22 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit) + struct scsi_device *sdev) { struct zfcp_erp_action *erp_action; + struct zfcp_scsi_dev *zfcp_sdev; switch (need) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(sdev); if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) - if (!get_device(&unit->dev)) + if (scsi_device_get(sdev)) return NULL; - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status); - erp_action = &unit->erp_action; - if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING)) + atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, + &zfcp_sdev->status); + erp_action = &zfcp_sdev->erp_action; + if (!(atomic_read(&zfcp_sdev->status) & + ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; @@ -216,7 +222,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, memset(erp_action, 0, sizeof(struct zfcp_erp_action)); erp_action->adapter = adapter; erp_action->port = port; - erp_action->unit = unit; + erp_action->sdev = sdev; erp_action->action = need; erp_action->status = act_status; @@ -225,8 +231,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, struct zfcp_port *port, - struct zfcp_unit *unit, char *id, void *ref, - u32 act_status) + struct scsi_device *sdev, + char *id, void *ref, u32 act_status) { int retval = 1, need; struct zfcp_erp_action *act = NULL; @@ -234,11 +240,11 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, if (!adapter->erp_thread) return -EIO; - need = zfcp_erp_required_act(want, adapter, port, unit); + need = zfcp_erp_required_act(want, adapter, port, sdev); if (!need) goto out; - act = zfcp_erp_setup_act(need, act_status, adapter, port, unit); + act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); if (!act) goto out; atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); @@ -248,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, zfcp_dbf_rec_thread("eracte1", adapter->dbf); retval = 0; out: - zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); + zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev); return retval; } @@ -392,77 +398,81 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) return retval; } -static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask) +static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask) { - zfcp_erp_modify_unit_status(unit, "erublk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, - ZFCP_CLEAR); + zfcp_erp_modify_lun_status(sdev, "erlblk1", NULL, + ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, + ZFCP_CLEAR); } -static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, - void *ref, u32 act_status) +static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, + void *ref, u32 act_status) { - struct zfcp_adapter *adapter = 
unit->port->adapter; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; - zfcp_erp_unit_block(unit, clear); + zfcp_erp_lun_block(sdev, clear); - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) return; - zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT, - adapter, unit->port, unit, id, ref, act_status); + zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, + zfcp_sdev->port, sdev, id, ref, act_status); } /** - * zfcp_erp_unit_reopen - initiate reopen of a unit - * @unit: unit to be reopened - * @clear_mask: specifies flags in unit status to be cleared + * zfcp_erp_lun_reopen - initiate reopen of a LUN + * @sdev: SCSI device / LUN to be reopened + * @clear_mask: specifies flags in LUN status to be cleared * Return: 0 on success, < 0 on error */ -void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id, - void *ref) +void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, + void *ref) { unsigned long flags; - struct zfcp_port *port = unit->port; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_port *port = zfcp_sdev->port; struct zfcp_adapter *adapter = port->adapter; write_lock_irqsave(&adapter->erp_lock, flags); - _zfcp_erp_unit_reopen(unit, clear, id, ref, 0); + _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); write_unlock_irqrestore(&adapter->erp_lock, flags); } /** - * zfcp_erp_unit_shutdown - Shutdown unit - * @unit: Unit to shut down. + * zfcp_erp_lun_shutdown - Shutdown LUN + * @sdev: SCSI device / LUN to shut down. * @clear: Status flags to clear. * @id: Id for debug trace event. * @ref: Reference for debug trace event. */ -void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id, - void *ref) +void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id, + void *ref) { int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; - zfcp_erp_unit_reopen(unit, clear | flags, id, ref); + zfcp_erp_lun_reopen(sdev, clear | flags, id, ref); } /** - * zfcp_erp_unit_shutdown_wait - Shutdown unit and wait for erp completion - * @unit: Unit to shut down. + * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion + * @sdev: SCSI device / LUN to shut down. * @id: Id for debug trace event. * - * Do not acquire a reference for the unit when creating the ERP + * Do not acquire a reference for the LUN when creating the ERP * action. It is safe, because this function waits for the ERP to - * complete first. + * complete first. This allows to shutdown the LUN, even when the SCSI + * device is in the state SDEV_DEL when scsi_device_get will fail. 
*/ -void zfcp_erp_unit_shutdown_wait(struct zfcp_unit *unit, char *id) +void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) { unsigned long flags; - struct zfcp_port *port = unit->port; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_port *port = zfcp_sdev->port; struct zfcp_adapter *adapter = port->adapter; int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; write_lock_irqsave(&adapter->erp_lock, flags); - _zfcp_erp_unit_reopen(unit, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF); + _zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF); write_unlock_irqrestore(&adapter->erp_lock, flags); zfcp_erp_wait(adapter); @@ -492,11 +502,13 @@ static void zfcp_erp_port_unblock(struct zfcp_port *port) atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } -static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) +static void zfcp_erp_lun_unblock(struct scsi_device *sdev) { - if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) - zfcp_dbf_rec_unit("eruubl1", NULL, unit); - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) + zfcp_dbf_rec_lun("erlubl1", NULL, sdev); + atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); } static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) @@ -584,15 +596,14 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, read_unlock(&adapter->port_list_lock); } -static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear, - char *id, void *ref) +static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, + char *id, void *ref) { - struct zfcp_unit *unit; + struct scsi_device *sdev; - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - _zfcp_erp_unit_reopen(unit, clear, id, ref, 0); - read_unlock(&port->unit_list_lock); + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); } static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) @@ -607,8 +618,8 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) case ZFCP_ERP_ACTION_REOPEN_PORT: _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); break; - case ZFCP_ERP_ACTION_REOPEN_UNIT: - _zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL, 0); + case ZFCP_ERP_ACTION_REOPEN_LUN: + _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0); break; } } @@ -623,7 +634,7 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); break; case ZFCP_ERP_ACTION_REOPEN_PORT: - _zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL); + _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL); break; } } @@ -767,7 +778,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; zfcp_fc_wka_ports_force_offline(adapter->gs); - /* all ports and units are closed */ + /* all ports and LUNs are closed */ zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); @@ -958,82 +969,86 @@ close_init_done: return zfcp_erp_port_strategy_open_common(erp_action); } -static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit) +static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev) { 
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_UNIT_SHARED | - ZFCP_STATUS_UNIT_READONLY, - &unit->status); + ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY, + &zfcp_sdev->status); } -static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action) +static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action) { - int retval = zfcp_fsf_close_unit(erp_action); + int retval = zfcp_fsf_close_lun(erp_action); if (retval == -ENOMEM) return ZFCP_ERP_NOMEM; - erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING; + erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING; if (retval) return ZFCP_ERP_FAILED; return ZFCP_ERP_CONTINUES; } -static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action) +static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action) { - int retval = zfcp_fsf_open_unit(erp_action); + int retval = zfcp_fsf_open_lun(erp_action); if (retval == -ENOMEM) return ZFCP_ERP_NOMEM; - erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING; + erp_action->step = ZFCP_ERP_STEP_LUN_OPENING; if (retval) return ZFCP_ERP_FAILED; return ZFCP_ERP_CONTINUES; } -static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action) +static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action) { - struct zfcp_unit *unit = erp_action->unit; + struct scsi_device *sdev = erp_action->sdev; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); switch (erp_action->step) { case ZFCP_ERP_STEP_UNINITIALIZED: - zfcp_erp_unit_strategy_clearstati(unit); - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) - return zfcp_erp_unit_strategy_close(erp_action); + zfcp_erp_lun_strategy_clearstati(sdev); + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) + return zfcp_erp_lun_strategy_close(erp_action); /* already closed, fall through */ - case ZFCP_ERP_STEP_UNIT_CLOSING: - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + case ZFCP_ERP_STEP_LUN_CLOSING: + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) return ZFCP_ERP_FAILED; if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY) return ZFCP_ERP_EXIT; - return zfcp_erp_unit_strategy_open(erp_action); + return zfcp_erp_lun_strategy_open(erp_action); - case ZFCP_ERP_STEP_UNIT_OPENING: - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN) + case ZFCP_ERP_STEP_LUN_OPENING: + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN) return ZFCP_ERP_SUCCEEDED; } return ZFCP_ERP_FAILED; } -static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result) +static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result) { + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + switch (result) { case ZFCP_ERP_SUCCEEDED : - atomic_set(&unit->erp_counter, 0); - zfcp_erp_unit_unblock(unit); + atomic_set(&zfcp_sdev->erp_counter, 0); + zfcp_erp_lun_unblock(sdev); break; case ZFCP_ERP_FAILED : - atomic_inc(&unit->erp_counter); - if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) { - dev_err(&unit->port->adapter->ccw_device->dev, - "ERP failed for unit 0x%016Lx on " + atomic_inc(&zfcp_sdev->erp_counter); + if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) { + dev_err(&zfcp_sdev->port->adapter->ccw_device->dev, + "ERP failed for LUN 0x%016Lx on " "port 0x%016Lx\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "erusck1", NULL); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + 
zfcp_erp_lun_failed(sdev, "ersckl1", NULL); } break; } - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_unit_block(unit, 0); + if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { + zfcp_erp_lun_block(sdev, 0); result = ZFCP_ERP_EXIT; } return result; @@ -1101,12 +1116,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action, { struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_port *port = erp_action->port; - struct zfcp_unit *unit = erp_action->unit; + struct scsi_device *sdev = erp_action->sdev; switch (erp_action->action) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: - result = zfcp_erp_strategy_check_unit(unit, result); + case ZFCP_ERP_ACTION_REOPEN_LUN: + result = zfcp_erp_strategy_check_lun(sdev, result); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: @@ -1141,7 +1156,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) int action = act->action; struct zfcp_adapter *adapter = act->adapter; struct zfcp_port *port = act->port; - struct zfcp_unit *unit = act->unit; + struct scsi_device *sdev = act->sdev; + struct zfcp_scsi_dev *zfcp_sdev; u32 erp_status = act->status; switch (action) { @@ -1164,11 +1180,12 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) } break; - case ZFCP_ERP_ACTION_REOPEN_UNIT: - if (zfcp_erp_strat_change_det(&unit->status, erp_status)) { - _zfcp_erp_unit_reopen(unit, - ZFCP_STATUS_COMMON_ERP_FAILED, - "ersscg3", NULL, 0); + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(sdev); + if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) { + _zfcp_erp_lun_reopen(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED, + "ersscg3", NULL, 0); return ZFCP_ERP_EXIT; } break; @@ -1179,6 +1196,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_scsi_dev *zfcp_sdev; adapter->erp_total_count--; if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) { @@ -1190,9 +1208,10 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) zfcp_dbf_rec_action("eractd1", erp_action); switch (erp_action->action) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: + case ZFCP_ERP_ACTION_REOPEN_LUN: + zfcp_sdev = sdev_to_zfcp(erp_action->sdev); atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, - &erp_action->unit->status); + &zfcp_sdev->status); break; case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: @@ -1212,12 +1231,12 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) { struct zfcp_adapter *adapter = act->adapter; struct zfcp_port *port = act->port; - struct zfcp_unit *unit = act->unit; + struct scsi_device *sdev = act->sdev; switch (act->action) { - case ZFCP_ERP_ACTION_REOPEN_UNIT: + case ZFCP_ERP_ACTION_REOPEN_LUN: if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) - put_device(&unit->dev); + scsi_device_put(sdev); break; case ZFCP_ERP_ACTION_REOPEN_PORT: @@ -1248,8 +1267,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action) return zfcp_erp_port_forced_strategy(erp_action); case ZFCP_ERP_ACTION_REOPEN_PORT: return zfcp_erp_port_strategy(erp_action); - case ZFCP_ERP_ACTION_REOPEN_UNIT: - return zfcp_erp_unit_strategy(erp_action); + case ZFCP_ERP_ACTION_REOPEN_LUN: + return zfcp_erp_lun_strategy(erp_action); } return ZFCP_ERP_FAILED; } @@ -1426,15 +1445,15 @@ void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref) } /** - * 
zfcp_erp_unit_failed - Set unit status to failed. - * @unit: Failed unit. + * zfcp_erp_lun_failed - Set LUN status to failed. + * @sdev: Failed SCSI device / LUN * @id: Event id for debug trace. * @ref: Reference for debug trace. */ -void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref) +void zfcp_erp_lun_failed(struct scsi_device *sdev, char *id, void *ref) { - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); + zfcp_erp_modify_lun_status(sdev, id, ref, + ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); } /** @@ -1456,7 +1475,7 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter) * @mask: status bits to change * @set_or_clear: ZFCP_SET or ZFCP_CLEAR * - * Changes in common status bits are propagated to attached ports and units. + * Changes in common status bits are propagated to attached ports and LUNs. */ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, void *ref, u32 mask, int set_or_clear) @@ -1494,13 +1513,12 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, * @mask: status bits to change * @set_or_clear: ZFCP_SET or ZFCP_CLEAR * - * Changes in common status bits are propagated to attached units. + * Changes in common status bits are propagated to attached LUNs. */ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, u32 mask, int set_or_clear) { - struct zfcp_unit *unit; - unsigned long flags; + struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; if (set_or_clear == ZFCP_SET) { @@ -1515,36 +1533,37 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, atomic_set(&port->erp_counter, 0); } - if (common_mask) { - read_lock_irqsave(&port->unit_list_lock, flags); - list_for_each_entry(unit, &port->unit_list, list) - zfcp_erp_modify_unit_status(unit, id, ref, common_mask, - set_or_clear); - read_unlock_irqrestore(&port->unit_list_lock, flags); - } + if (common_mask) + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + zfcp_erp_modify_lun_status(sdev, id, ref, + common_mask, + set_or_clear); } /** - * zfcp_erp_modify_unit_status - change unit status bits - * @unit: unit to change the status bits + * zfcp_erp_modify_lun_status - change LUN status bits + * @sdev: SCSI device / LUN where to change the status bits * @id: id for the debug trace * @ref: reference for the debug trace * @mask: status bits to change * @set_or_clear: ZFCP_SET or ZFCP_CLEAR */ -void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, - u32 mask, int set_or_clear) +void zfcp_erp_modify_lun_status(struct scsi_device *sdev, char *id, void *ref, + u32 mask, int set_or_clear) { + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &unit->status)) - zfcp_dbf_rec_unit(id, ref, unit); - atomic_set_mask(mask, &unit->status); + if (status_change_set(mask, &zfcp_sdev->status)) + zfcp_dbf_rec_lun(id, ref, sdev); + atomic_set_mask(mask, &zfcp_sdev->status); } else { - if (status_change_clear(mask, &unit->status)) - zfcp_dbf_rec_unit(id, ref, unit); - atomic_clear_mask(mask, &unit->status); + if (status_change_clear(mask, &zfcp_sdev->status)) + zfcp_dbf_rec_lun(id, ref, sdev); + atomic_clear_mask(mask, &zfcp_sdev->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { - atomic_set(&unit->erp_counter, 0); + atomic_set(&zfcp_sdev->erp_counter, 0); } } } @@ -1563,16 +1582,16 @@ void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, 
void *ref) } /** - * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP - * @port: The "boxed" unit. + * zfcp_erp_lun_boxed - Mark LUN as "boxed" and start ERP + * @sdev: The "boxed" SCSI device / LUN. * @id: The debug trace id. - * @id: Reference for the debug trace. + * @ref: Reference for the debug trace. */ -void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref) +void zfcp_erp_lun_boxed(struct scsi_device *sdev, char *id, void *ref) { - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); - zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); + zfcp_erp_modify_lun_status(sdev, id, ref, + ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } /** @@ -1582,7 +1601,7 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref) * @ref: reference for debug trace * * Since the adapter has denied access, stop using the port and the - * attached units. + * attached LUNs. */ void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) { @@ -1592,44 +1611,44 @@ void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) } /** - * zfcp_erp_unit_access_denied - Adapter denied access to unit. - * @unit: unit where access has been denied + * zfcp_erp_lun_access_denied - Adapter denied access to LUN. + * @sdev: SCSI device / LUN where access has been denied * @id: id for debug trace * @ref: reference for debug trace * - * Since the adapter has denied access, stop using the unit. + * Since the adapter has denied access, stop using the LUN. */ -void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref) +void zfcp_erp_lun_access_denied(struct scsi_device *sdev, char *id, void *ref) { - zfcp_erp_modify_unit_status(unit, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + zfcp_erp_modify_lun_status(sdev, id, ref, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); } -static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id, - void *ref) +static void zfcp_erp_lun_access_changed(struct scsi_device *sdev, char *id, + void *ref) { - int status = atomic_read(&unit->status); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + int status = atomic_read(&zfcp_sdev->status); + if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED))) return; - zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, void *ref) { - struct zfcp_unit *unit; - unsigned long flags; + struct scsi_device *sdev; int status = atomic_read(&port->status); if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED))) { - read_lock_irqsave(&port->unit_list_lock, flags); - list_for_each_entry(unit, &port->unit_list, list) - zfcp_erp_unit_access_changed(unit, id, ref); - read_unlock_irqrestore(&port->unit_list_lock, flags); + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + zfcp_erp_lun_access_changed(sdev, id, ref); return; } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3c90604076e0..80714679f5d6 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -42,10 +42,10 @@ extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); extern void 
zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); -extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); +extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *); extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, struct zfcp_adapter *, struct zfcp_port *, - struct zfcp_unit *); + struct scsi_device *); extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, struct zfcp_dbf *); @@ -76,20 +76,20 @@ extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, void *); extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); -extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32, - int); -extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *); -extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *); -extern void zfcp_erp_unit_shutdown_wait(struct zfcp_unit *, char *); -extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *); +extern void zfcp_erp_modify_lun_status(struct scsi_device *, char *, void *, + u32, int); +extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *); +extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *); +extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); +extern void zfcp_erp_lun_failed(struct scsi_device *, char *, void *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern void zfcp_erp_thread_kill(struct zfcp_adapter *); extern void zfcp_erp_wait(struct zfcp_adapter *); extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *); -extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *); +extern void zfcp_erp_lun_boxed(struct scsi_device *, char *, void *); extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *); -extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *); +extern void zfcp_erp_lun_access_denied(struct scsi_device *, char *, void *); extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, void *); extern void zfcp_erp_timeout_handler(unsigned long); @@ -117,8 +117,8 @@ extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_port(struct zfcp_erp_action *); extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); -extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); -extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); +extern int zfcp_fsf_open_lun(struct zfcp_erp_action *); +extern int zfcp_fsf_close_lun(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_config *); @@ -134,12 +134,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *, mempool_t *, unsigned int); extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32, struct zfcp_fsf_ct_els *, unsigned int); -extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, - struct scsi_cmnd *); +extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *); extern void 
zfcp_fsf_req_free(struct zfcp_fsf_req *); -extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); -extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, - struct zfcp_unit *); +extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8); +extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *); extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); /* zfcp_qdio.c */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 9d1d7d1842ce..2fbd80257bca 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -86,17 +86,19 @@ static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, req->status |= ZFCP_STATUS_FSFREQ_ERROR; } -static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req, - struct zfcp_unit *unit) +static void zfcp_fsf_access_denied_lun(struct zfcp_fsf_req *req, + struct scsi_device *sdev) { + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct fsf_qtcb_header *header = &req->qtcb->header; dev_warn(&req->adapter->ccw_device->dev, - "Access denied to unit 0x%016Lx on port 0x%016Lx\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); + "Access denied to LUN 0x%016Lx on port 0x%016Lx\n", + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); - zfcp_erp_unit_access_denied(unit, "fsuad_1", req); + zfcp_erp_lun_access_denied(sdev, "fsadl_1", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; } @@ -811,7 +813,8 @@ out: static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) { - struct zfcp_unit *unit = req->data; + struct scsi_device *sdev = req->data; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) @@ -820,14 +823,15 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: if (fsq->word[0] == fsq->word[1]) { - zfcp_erp_adapter_reopen(unit->port->adapter, 0, + zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fsafch1", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; } break; case FSF_LUN_HANDLE_NOT_VALID: if (fsq->word[0] == fsq->word[1]) { - zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req); + zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; } break; @@ -835,17 +839,17 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fsafch3", req); + zfcp_erp_port_boxed(zfcp_sdev->port, "fsafch3", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: - zfcp_erp_unit_boxed(unit, "fsafch4", req); + zfcp_erp_lun_boxed(sdev, "fsafch4", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -859,17 +863,18 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) } /** - * zfcp_fsf_abort_fcp_command - abort running SCSI command - * @old_req_id: unsigned 
long - * @unit: pointer to struct zfcp_unit + * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command + * @scmnd: The SCSI command to abort * Returns: pointer to struct zfcp_fsf_req */ -struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, - struct zfcp_unit *unit) +struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) { struct zfcp_fsf_req *req = NULL; - struct zfcp_qdio *qdio = unit->port->adapter->qdio; + struct scsi_device *sdev = scmnd->device; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; + unsigned long old_req_id = (unsigned long) scmnd->host_scribble; spin_lock_bh(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) @@ -882,16 +887,16 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, goto out; } - if (unlikely(!(atomic_read(&unit->status) & + if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) goto out_error_free; zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); - req->data = unit; + req->data = zfcp_sdev; req->handler = zfcp_fsf_abort_fcp_command_handler; - req->qtcb->header.lun_handle = unit->handle; - req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->qtcb->header.port_handle = zfcp_sdev->port->handle; req->qtcb->bottom.support.req_handle = (u64) old_req_id; zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); @@ -1666,7 +1671,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) { struct zfcp_port *port = req->data; struct fsf_qtcb_header *header = &req->qtcb->header; - struct zfcp_unit *unit; + struct scsi_device *sdev; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; @@ -1683,11 +1688,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) /* can't use generic zfcp_erp_modify_port_status because * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, - &unit->status); - read_unlock(&port->unit_list_lock); + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + &sdev_to_zfcp(sdev)->status); zfcp_erp_port_boxed(port, "fscpph2", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; @@ -1705,11 +1709,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); - read_lock(&port->unit_list_lock); - list_for_each_entry(unit, &port->unit_list, list) - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, - &unit->status); - read_unlock(&port->unit_list_lock); + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, + &sdev_to_zfcp(sdev)->status); break; } } @@ -1758,10 +1761,11 @@ out: return retval; } -static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) +static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; - struct zfcp_unit *unit = req->data; + struct scsi_device *sdev = req->data; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct fsf_qtcb_header *header = &req->qtcb->header; struct fsf_qtcb_bottom_support *bottom = 
&req->qtcb->bottom.support; struct fsf_queue_designator *queue_designator = @@ -1773,24 +1777,24 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | ZFCP_STATUS_COMMON_ACCESS_BOXED | - ZFCP_STATUS_UNIT_SHARED | - ZFCP_STATUS_UNIT_READONLY, - &unit->status); + ZFCP_STATUS_LUN_SHARED | + ZFCP_STATUS_LUN_READONLY, + &zfcp_sdev->status); switch (header->fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req); + zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req); /* fall through */ case FSF_LUN_ALREADY_OPEN: break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_unit(req, unit); - atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); - atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); + zfcp_fsf_access_denied_lun(req, sdev); + atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); + atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fsouh_2", req); + zfcp_erp_port_boxed(zfcp_sdev->port, "fsouh_2", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_SHARING_VIOLATION: @@ -1798,25 +1802,25 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) dev_warn(&adapter->ccw_device->dev, "LUN 0x%Lx on port 0x%Lx is already in " "use by CSS%d, MIF Image ID %x\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn, + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn, queue_designator->cssid, queue_designator->hla); else zfcp_act_eval_err(adapter, header->fsf_status_qual.word[2]); - zfcp_erp_unit_access_denied(unit, "fsouh_3", req); - atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status); - atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status); + zfcp_erp_lun_access_denied(sdev, "fsolh_3", req); + atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); + atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: dev_warn(&adapter->ccw_device->dev, "No handle is available for LUN " "0x%016Lx on port 0x%016Lx\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "fsouh_4", req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_lun_failed(sdev, "fsolh_4", req); /* fall through */ case FSF_INVALID_COMMAND_OPTION: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1824,7 +1828,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1833,8 +1837,8 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) break; case FSF_GOOD: - unit->handle = header->lun_handle; - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); + zfcp_sdev->lun_handle = header->lun_handle; + atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && @@ -1845,39 +1849,39 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); if 
(!exclusive) - atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, - &unit->status); + atomic_set_mask(ZFCP_STATUS_LUN_SHARED, + &zfcp_sdev->status); if (!readwrite) { - atomic_set_mask(ZFCP_STATUS_UNIT_READONLY, - &unit->status); + atomic_set_mask(ZFCP_STATUS_LUN_READONLY, + &zfcp_sdev->status); dev_info(&adapter->ccw_device->dev, "SCSI device at LUN 0x%016Lx on port " "0x%016Lx opened read-only\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); } if (exclusive && !readwrite) { dev_err(&adapter->ccw_device->dev, "Exclusive read-only access not " - "supported (unit 0x%016Lx, " + "supported (LUN 0x%016Lx, " "port 0x%016Lx)\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "fsouh_5", req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_lun_failed(sdev, "fsolh_5", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req); + zfcp_erp_lun_shutdown(sdev, 0, "fsolh_6", req); } else if (!exclusive && readwrite) { dev_err(&adapter->ccw_device->dev, "Shared read-write access not " - "supported (unit 0x%016Lx, port " + "supported (LUN 0x%016Lx, port " "0x%016Lx)\n", - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_unit_failed(unit, "fsouh_7", req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_lun_failed(sdev, "fsolh_7", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req); + zfcp_erp_lun_shutdown(sdev, 0, "fsolh_8", req); } } break; @@ -1885,11 +1889,11 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) } /** - * zfcp_fsf_open_unit - open unit + * zfcp_fsf_open_lun - open LUN * @erp_action: pointer to struct zfcp_erp_action * Returns: 0 on success, error otherwise */ -int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) +int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_qdio *qdio = adapter->qdio; @@ -1913,9 +1917,9 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->qtcb->header.port_handle = erp_action->port->handle; - req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun; - req->handler = zfcp_fsf_open_unit_handler; - req->data = erp_action->unit; + req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev); + req->handler = zfcp_fsf_open_lun_handler; + req->data = erp_action->sdev; req->erp_action = erp_action; erp_action->fsf_req_id = req->req_id; @@ -1933,30 +1937,32 @@ out: return retval; } -static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) +static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) { - struct zfcp_unit *unit = req->data; + struct scsi_device *sdev = req->data; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; switch (req->qtcb->header.fsf_status) { case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req); + zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_HANDLE_NOT_VALID: - zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req); + zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req); 
req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fscuh_3", req); + zfcp_erp_port_boxed(zfcp_sdev->port, "fscuh_3", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: switch (req->qtcb->header.fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1964,19 +1970,20 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) } break; case FSF_GOOD: - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); + atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); break; } } /** - * zfcp_fsf_close_unit - close zfcp unit - * @erp_action: pointer to struct zfcp_unit + * zfcp_fsf_close_LUN - close LUN + * @erp_action: pointer to erp_action triggering the "close LUN" * Returns: 0 on success, error otherwise */ -int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) +int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) { struct zfcp_qdio *qdio = erp_action->adapter->qdio; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev); struct zfcp_fsf_req *req; int retval = -EIO; @@ -1997,9 +2004,9 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); req->qtcb->header.port_handle = erp_action->port->handle; - req->qtcb->header.lun_handle = erp_action->unit->handle; - req->handler = zfcp_fsf_close_unit_handler; - req->data = erp_action->unit; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->handler = zfcp_fsf_close_lun_handler; + req->data = erp_action->sdev; req->erp_action = erp_action; erp_action->fsf_req_id = req->req_id; @@ -2025,7 +2032,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) { struct fsf_qual_latency_info *lat_in; struct latency_cont *lat = NULL; - struct zfcp_unit *unit = req->unit; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device); struct zfcp_blk_drv_data blktrc; int ticks = req->adapter->timer_ticks; @@ -2048,24 +2055,24 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) case FSF_DATADIR_DIF_READ_STRIP: case FSF_DATADIR_DIF_READ_CONVERT: case FSF_DATADIR_READ: - lat = &unit->latencies.read; + lat = &zfcp_sdev->latencies.read; break; case FSF_DATADIR_DIF_WRITE_INSERT: case FSF_DATADIR_DIF_WRITE_CONVERT: case FSF_DATADIR_WRITE: - lat = &unit->latencies.write; + lat = &zfcp_sdev->latencies.write; break; case FSF_DATADIR_CMND: - lat = &unit->latencies.cmd; + lat = &zfcp_sdev->latencies.cmd; break; } if (lat) { - spin_lock(&unit->latencies.lock); + spin_lock(&zfcp_sdev->latencies.lock); zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat); zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat); lat->counter++; - spin_unlock(&unit->latencies.lock); + spin_unlock(&zfcp_sdev->latencies.lock); } } @@ -2141,68 +2148,66 @@ static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) { - struct zfcp_unit *unit; + struct scsi_cmnd *scmnd = req->data; + struct scsi_device *sdev = scmnd->device; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct fsf_qtcb_header *header = &req->qtcb->header; - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) - unit = req->data; - else - unit = req->unit; - if (unlikely(req->status & 
ZFCP_STATUS_FSFREQ_ERROR)) goto skip_fsfstatus; switch (header->fsf_status) { case FSF_HANDLE_MISMATCH: case FSF_PORT_HANDLE_NOT_VALID: - zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req); + zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_FCPLUN_NOT_VALID: case FSF_LUN_HANDLE_NOT_VALID: - zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req); + zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: zfcp_fsf_class_not_supp(req); break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_unit(req, unit); + zfcp_fsf_access_denied_lun(req, sdev); break; case FSF_DIRECTION_INDICATOR_NOT_VALID: dev_err(&req->adapter->ccw_device->dev, - "Incorrect direction %d, unit 0x%016Lx on port " + "Incorrect direction %d, LUN 0x%016Lx on port " "0x%016Lx closed\n", req->qtcb->bottom.io.data_direction, - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3", - req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, + "fssfch3", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_CMND_LENGTH_NOT_VALID: dev_err(&req->adapter->ccw_device->dev, - "Incorrect CDB length %d, unit 0x%016Lx on " + "Incorrect CDB length %d, LUN 0x%016Lx on " "port 0x%016Lx closed\n", req->qtcb->bottom.io.fcp_cmnd_length, - (unsigned long long)unit->fcp_lun, - (unsigned long long)unit->port->wwpn); - zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4", - req); + (unsigned long long)zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, + "fssfch4", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(unit->port, "fssfch5", req); + zfcp_erp_port_boxed(zfcp_sdev->port, "fssfch5", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: - zfcp_erp_unit_boxed(unit, "fssfch6", req); + zfcp_erp_lun_boxed(sdev, "fssfch6", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: if (header->fsf_status_qual.word[0] == FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) - zfcp_fc_test_link(unit->port); + zfcp_fc_test_link(zfcp_sdev->port); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } @@ -2211,8 +2216,6 @@ skip_fsfstatus: zfcp_fsf_send_fcp_ctm_handler(req); else { zfcp_fsf_send_fcp_command_task_handler(req); - req->unit = NULL; - put_device(&unit->dev); } } @@ -2255,22 +2258,22 @@ static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) } /** - * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) - * @unit: unit where command is sent to + * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command) * @scsi_cmnd: scsi command to be sent */ -int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) +int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) { struct zfcp_fsf_req *req; struct fcp_cmnd *fcp_cmnd; unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; int real_bytes, retval = -EIO, dix_bytes = 0; - struct zfcp_adapter *adapter = unit->port->adapter; + struct scsi_device *sdev = scsi_cmnd->device; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct zfcp_qdio *qdio = 
adapter->qdio; struct fsf_qtcb_bottom_io *io; - if (unlikely(!(atomic_read(&unit->status) & + if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; @@ -2295,11 +2298,10 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, io = &req->qtcb->bottom.io; req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - req->unit = unit; req->data = scsi_cmnd; req->handler = zfcp_fsf_send_fcp_command_handler; - req->qtcb->header.lun_handle = unit->handle; - req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->qtcb->header.port_handle = zfcp_sdev->port->handle; io->service_class = FSF_CLASS_3; io->fcp_cmnd_length = FCP_CMND_LEN; @@ -2310,8 +2312,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); - get_device(&unit->dev); - fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); @@ -2338,7 +2338,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, goto out; failed_scsi_cmnd: - put_device(&unit->dev); zfcp_fsf_req_free(req); scsi_cmnd->host_scribble = NULL; out: @@ -2347,18 +2346,20 @@ out: } /** - * zfcp_fsf_send_fcp_ctm - send SCSI task management command - * @unit: pointer to struct zfcp_unit + * zfcp_fsf_fcp_task_mgmt - send SCSI task management command + * @scmnd: SCSI command to send the task management command for * @tm_flags: unsigned byte for task management flags * Returns: on success pointer to struct fsf_req, NULL otherwise */ -struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) +struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, + u8 tm_flags) { struct zfcp_fsf_req *req = NULL; struct fcp_cmnd *fcp_cmnd; - struct zfcp_qdio *qdio = unit->port->adapter->qdio; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); + struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; - if (unlikely(!(atomic_read(&unit->status) & + if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return NULL; @@ -2376,10 +2377,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) } req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; - req->data = unit; + req->data = scmnd; req->handler = zfcp_fsf_send_fcp_command_handler; - req->qtcb->header.lun_handle = unit->handle; - req->qtcb->header.port_handle = unit->port->handle; + req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; + req->qtcb->header.port_handle = zfcp_sdev->port->handle; req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; req->qtcb->bottom.io.service_class = FSF_CLASS_3; req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; @@ -2387,7 +2388,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags); + zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); if (!zfcp_fsf_req_send(req)) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 03837797c45e..bc7217b88989 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -49,11 +49,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, return sdev->queue_depth; } -static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) +static void 
zfcp_scsi_slave_destroy(struct scsi_device *sdev) { - struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; - unit->device = NULL; - put_device(&unit->dev); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + zfcp_erp_lun_shutdown_wait(sdev, "scssd_1"); + put_device(&zfcp_sdev->port->dev); } static int zfcp_scsi_slave_configure(struct scsi_device *sdp) @@ -78,23 +79,16 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, void (*done) (struct scsi_cmnd *)) { - struct zfcp_unit *unit; - struct zfcp_adapter *adapter; - int status, scsi_result, ret; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); + int status, scsi_result, ret; /* reset the status for this request */ scpnt->result = 0; scpnt->host_scribble = NULL; scpnt->scsi_done = done; - /* - * figure out adapter and target device - * (stored there by zfcp_scsi_slave_alloc) - */ - adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; - unit = scpnt->device->hostdata; - scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { scpnt->result = scsi_result; @@ -103,11 +97,11 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, return 0; } - status = atomic_read(&unit->status); + status = atomic_read(&zfcp_sdev->status); if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && - !(atomic_read(&unit->port->status) & + !(atomic_read(&zfcp_sdev->port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)) { - /* only unit access denied, but port is good + /* only LUN access denied, but port is good * not covered by FC transport, have to fail here */ zfcp_scsi_command_fail(scpnt, DID_ERROR); return 0; @@ -115,8 +109,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { /* This could be either - * open unit pending: this is temporary, will result in - * open unit or ERP_FAILED, so retry command + * open LUN pending: this is temporary, will result in + * open LUN or ERP_FAILED, so retry command * call to rport_delete pending: mimic retry from * fc_remote_port_chkready until rport is BLOCKED */ @@ -124,7 +118,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, return 0; } - ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); + ret = zfcp_fsf_fcp_cmnd(scpnt); if (unlikely(ret == -EBUSY)) return SCSI_MLQUEUE_DEVICE_BUSY; else if (unlikely(ret < 0)) @@ -133,45 +127,42 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, return ret; } -static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, - unsigned int id, u64 lun) +static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) { - unsigned long flags; + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) sdev->host->hostdata[0]; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_port *port; - struct zfcp_unit *unit = NULL; + struct zfcp_unit *unit; - read_lock_irqsave(&adapter->port_list_lock, flags); - list_for_each_entry(port, &adapter->port_list, list) { - if (!port->rport || (id != port->rport->scsi_target_id)) - continue; - unit = zfcp_unit_find(port, lun); - if (unit) - break; - } - read_unlock_irqrestore(&adapter->port_list_lock, flags); + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); + if (!port) + return -ENXIO; - return unit; -} + 
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); + if (unit) + put_device(&unit->dev); + else { + put_device(&port->dev); + return -ENXIO; + } -static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) -{ - struct zfcp_adapter *adapter; - struct zfcp_unit *unit; - u64 lun; + zfcp_sdev->port = port; + zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF; + zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF; + zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF; + zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF; + zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF; + zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF; + spin_lock_init(&zfcp_sdev->latencies.lock); - adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; - if (!adapter) - goto out; + zfcp_erp_modify_lun_status(sdev, "scsla_0", NULL, + ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL); + zfcp_erp_wait(port->adapter); - int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun); - unit = zfcp_unit_lookup(adapter, sdp->id, lun); - if (unit) { - sdp->hostdata = unit; - unit->device = sdp; - return 0; - } -out: - return -ENXIO; + return 0; } static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) @@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) struct Scsi_Host *scsi_host = scpnt->device->host; struct zfcp_adapter *adapter = (struct zfcp_adapter *) scsi_host->hostdata[0]; - struct zfcp_unit *unit = scpnt->device->hostdata; struct zfcp_fsf_req *old_req, *abrt_req; unsigned long flags; unsigned long old_reqid = (unsigned long) scpnt->host_scribble; @@ -203,7 +193,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) write_unlock_irqrestore(&adapter->abort_lock, flags); while (retry--) { - abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit); + abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt); if (abrt_req) break; @@ -238,14 +228,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) { - struct zfcp_unit *unit = scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct zfcp_fsf_req *fsf_req = NULL; int retval = SUCCESS, ret; int retry = 3; while (retry--) { - fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags); + fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags); if (fsf_req) break; @@ -256,7 +246,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags); return SUCCESS; } } @@ -266,10 +256,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { - zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); retval = FAILED; } else - zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); zfcp_fsf_req_free(fsf_req); return retval; @@ -287,8 +277,8 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { - struct zfcp_unit *unit = scpnt->device->hostdata; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_scsi_dev 
*zfcp_sdev = sdev_to_zfcp(scpnt->device); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; int ret; zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 6b43bc46bf97..4f59356b07bb 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -68,19 +68,19 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n", ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n", - atomic_read(&unit->status)); + zfcp_unit_sdev_status(unit)); ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n", - (atomic_read(&unit->status) & + (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ERP_INUSE) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", - (atomic_read(&unit->status) & + (zfcp_unit_sdev_status(unit) & ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n", - (atomic_read(&unit->status) & - ZFCP_STATUS_UNIT_SHARED) != 0); + (zfcp_unit_sdev_status(unit) & + ZFCP_STATUS_LUN_SHARED) != 0); ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n", - (atomic_read(&unit->status) & - ZFCP_STATUS_UNIT_READONLY) != 0); + (zfcp_unit_sdev_status(unit) & + ZFCP_STATUS_LUN_READONLY) != 0); static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, struct device_attribute *attr, @@ -121,11 +121,17 @@ static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev, char *buf) { struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); + struct scsi_device *sdev; + unsigned int status, failed = 1; + + sdev = zfcp_unit_sdev(unit); + if (sdev) { + status = atomic_read(&sdev_to_zfcp(sdev)->status); + failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0; + scsi_device_put(sdev); + } - if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - return sprintf(buf, "1\n"); - - return sprintf(buf, "0\n"); + return sprintf(buf, "%d\n", failed); } static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, @@ -134,15 +140,21 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, { struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev); unsigned long val; + struct scsi_device *sdev; if (strict_strtoul(buf, 0, &val) || val != 0) return -EINVAL; - zfcp_erp_modify_unit_status(unit, "syufai1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); - zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, - "syufai2", NULL); - zfcp_erp_wait(unit->port->adapter); + sdev = zfcp_unit_sdev(unit); + if (sdev) { + zfcp_erp_modify_lun_status(sdev, "syufai1", NULL, + ZFCP_STATUS_COMMON_RUNNING, + ZFCP_SET); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "syufai2", NULL); + zfcp_erp_wait(unit->port->adapter); + } else + zfcp_unit_scsi_scan(unit); return count; } @@ -347,9 +359,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) { \ struct scsi_device *sdev = to_scsi_device(dev); \ - struct zfcp_unit *unit = sdev->hostdata; \ - struct zfcp_latencies *lat = &unit->latencies; \ - struct zfcp_adapter *adapter = unit->port->adapter; \ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ + struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; \ unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc; \ \ spin_lock_bh(&lat->lock); \ @@ -378,8 +390,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev, \ const char *buf, size_t count) \ { \ struct 
scsi_device *sdev = to_scsi_device(dev); \ - struct zfcp_unit *unit = sdev->hostdata; \ - struct zfcp_latencies *lat = &unit->latencies; \ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ + struct zfcp_latencies *lat = &zfcp_sdev->latencies; \ unsigned long flags; \ \ spin_lock_irqsave(&lat->lock, flags); \ @@ -407,26 +419,26 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev, \ struct device_attribute *attr,\ char *buf) \ { \ - struct scsi_device *sdev = to_scsi_device(dev); \ - struct zfcp_unit *unit = sdev->hostdata; \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); \ + struct zfcp_port *port = zfcp_sdev->port; \ \ return sprintf(buf, _format, _value); \ } \ static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL); ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n", - dev_name(&unit->port->adapter->ccw_device->dev)); + dev_name(&port->adapter->ccw_device->dev)); ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n", - (unsigned long long) unit->port->wwpn); + (unsigned long long) port->wwpn); static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); - struct zfcp_unit *unit = sdev->hostdata; - return sprintf(buf, "0x%016llx\n", (unsigned long long) unit->fcp_lun); + return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev)); } static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL); diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index e210c41ee389..1119c535a667 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -134,14 +134,7 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) unit->fcp_lun = fcp_lun; unit->dev.parent = &port->dev; unit->dev.release = zfcp_unit_release; - unit->latencies.write.channel.min = 0xFFFFFFFF; - unit->latencies.write.fabric.min = 0xFFFFFFFF; - unit->latencies.read.channel.min = 0xFFFFFFFF; - unit->latencies.read.fabric.min = 0xFFFFFFFF; - unit->latencies.cmd.channel.min = 0xFFFFFFFF; - unit->latencies.cmd.fabric.min = 0xFFFFFFFF; INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work); - spin_lock_init(&unit->latencies.lock); if (dev_set_name(&unit->dev, "0x%016llx", (unsigned long long) fcp_lun)) { @@ -165,9 +158,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); - atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); - zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); - zfcp_erp_wait(unit->port->adapter); zfcp_unit_scsi_scan(unit); return 0; @@ -248,7 +238,6 @@ int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun) put_device(&unit->dev); - zfcp_erp_unit_shutdown(unit, 0, "unrem_1", NULL); zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs); return 0; -- cgit v1.2.3 From f8210e34887e1feb977a9b6b8caa086855af40c9 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:56 +0200 Subject: [SCSI] zfcp: Allow midlayer to scan for LUNs when running in NPIV mode Enable the LUN scanning mechanism in the SCSI midlayer: - Do not set the disable_target_scan bit in the FC transport class. - Set max_lun to 0xFFFFFFFF to allow the midlayer scan to include the two-level hierachical LUNs (like 0x40XX40XX00000000, but in SCSI midlayer LUN format). - Set max_id to a high value to allow triggering the SCSI device rescan from sysfs. When running in NPIV mode, zfcp accepts all LUNs in slave_attach. 
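A minimal sketch of the LUN conversion this scanning change relies on, assuming the 2.6.36-era int_to_scsilun() helper (the same call removed from zfcp_scsi_slave_alloc() in the diff above) and using example_fcp_lun() as an illustrative name only; a two-level hierarchical LUN reaches the midlayer as a 32-bit value roughly of the form 0x40XX40XX, which is why max_lun is raised to 0xFFFFFFFF:

	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>

	/* sketch: recover the 8-byte FCP LUN from the flat midlayer LUN */
	static u64 example_fcp_lun(struct scsi_device *sdev)
	{
		u64 fcp_lun;

		int_to_scsilun(sdev->lun, (struct scsi_lun *) &fcp_lun);
		return fcp_lun;	/* e.g. 0x40XX40XX00000000 for two-level LUNs */
	}
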
When running in non-NPIV mode, the list of zfcp_unit structs determines which SCSI devices are allowed on the current system. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_scsi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index bc7217b88989..1e8d0cc7e1df 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -143,7 +143,8 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); if (unit) put_device(&unit->dev); - else { + + if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { put_device(&port->dev); return -ENXIO; } @@ -309,8 +310,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) } /* tell the SCSI stack some characteristics of this adapter */ - adapter->scsi_host->max_id = 1; - adapter->scsi_host->max_lun = 1; + adapter->scsi_host->max_id = 511; + adapter->scsi_host->max_lun = 0xFFFFFFFF; adapter->scsi_host->max_channel = 0; adapter->scsi_host->unique_id = dev_id.devno; adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ @@ -687,7 +688,6 @@ struct fc_function_template zfcp_transport_functions = { .show_host_port_type = 1, .show_host_speed = 1, .show_host_port_id = 1, - .disable_target_scan = 1, .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), }; -- cgit v1.2.3 From 44a24cb3731495336d77f3a955a7004997270dfd Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:57 +0200 Subject: [SCSI] zfcp: Change spin_lock_bh to spin_lock_irq to fix lockdep warning With the change to use the data on the SCSI device, iterating through all LUNs/scsi_devices takes the SCSI host_lock. This triggers warnings from the lock dependency checker: ========================================================= [ INFO: possible irq lock inversion dependency detected ] 2.6.34.1 #97 --------------------------------------------------------- chchp/3224 just changed the state of lock: (&(shost->host_lock)->rlock){-.-...}, at: [<00000000003a73f4>] __scsi_iterate_devices+0x38/0xbc but this lock took another, HARDIRQ-unsafe lock in the past: (&(&qdio->req_q_lock)->rlock){+.-...} and interrupts could create inverse lock ordering between them. other info that might help us debug this: [ 24.972394] 2 locks held by chchp/3224: #0: (&(sch->lock)->rlock){-.-...}, at: [<0000000000401efa>] do_IRQ+0xb2/0x1e4 #1: (&adapter->port_list_lock){.-....}, at: [<0000000000490302>] zfcp_erp_modify_adapter_status+0x9e/0x16c [...] ========================================================= [ INFO: possible irq lock inversion dependency detected ] 2.6.34.1 #98 --------------------------------------------------------- chchp/3235 just changed the state of lock: (&(shost->host_lock)->rlock){-.-...}, at: [<00000000003a73f4>] __scsi_iterate_devices+0x38/0xbc but this lock took another, HARDIRQ-unsafe lock in the past: (&(&qdio->stat_lock)->rlock){+.-...} and interrupts could create inverse lock ordering between them. other info that might help us debug this: 2 locks held by chchp/3235: #0: (&(sch->lock)->rlock){-.-...}, at: [<0000000000401efa>] do_IRQ+0xb2/0x1e4 #1: (&adapter->port_list_lock){.-.-..}, at: [<00000000004902f6>] zfcp_erp_modify_adapter_status+0x9e/0x16c [...] To stop this warning, change the request queue lock to disable irqs, not only softirq. 
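A hedged sketch of the before/after locking pattern applied throughout the diff below; qdio and req_q_lock are names taken from the driver, and the comment in the middle is a placeholder for the request setup code, not code from this patch:

	/* before: the request queue lock only disables softirqs */
	spin_lock_bh(&qdio->req_q_lock);
	/* ... set up and send an FSF request (placeholder) ... */
	spin_unlock_bh(&qdio->req_q_lock);

	/* after: disable interrupts as well, avoiding the inversion with
	 * the hard-IRQ-taken host_lock reported by lockdep above */
	spin_lock_irq(&qdio->req_q_lock);
	/* ... set up and send an FSF request (placeholder) ... */
	spin_unlock_irq(&qdio->req_q_lock);
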
The changes are required only outside of the critical "send fcp command" path. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 72 +++++++++++++++++++++---------------------- drivers/s390/scsi/zfcp_qdio.c | 18 ++++++----- 2 files changed, 46 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 2fbd80257bca..48aa16a40d93 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -773,7 +773,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) struct fsf_status_read_buffer *sr_buf; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -807,7 +807,7 @@ failed_buf: zfcp_fsf_req_free(req); zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -876,7 +876,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; unsigned long old_req_id = (unsigned long) scmnd->host_scribble; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, @@ -907,7 +907,7 @@ out_error_free: zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return req; } @@ -1046,7 +1046,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, struct zfcp_fsf_req *req; int ret = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1078,7 +1078,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, failed_send: zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return ret; } @@ -1142,7 +1142,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, struct zfcp_qdio *qdio = adapter->qdio; int ret = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1178,7 +1178,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, failed_send: zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return ret; } @@ -1188,7 +1188,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) struct zfcp_qdio *qdio = erp_action->adapter->qdio; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1220,7 +1220,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1230,7 +1230,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, struct zfcp_fsf_req *req = NULL; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out_unlock; @@ -1256,7 +1256,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1264,7 +1264,7 @@ int 
zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, return retval; out_unlock: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1282,7 +1282,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1309,7 +1309,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1328,7 +1328,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out_unlock; @@ -1348,7 +1348,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, req->handler = zfcp_fsf_exchange_port_data_handler; zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1358,7 +1358,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, return retval; out_unlock: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1442,7 +1442,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1473,7 +1473,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) put_device(&port->dev); } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1510,7 +1510,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1539,7 +1539,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1585,7 +1585,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1610,7 +1610,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1638,7 +1638,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1663,7 +1663,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1728,7 +1728,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if 
(zfcp_qdio_sbal_get(qdio)) goto out; @@ -1757,7 +1757,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1900,7 +1900,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -1933,7 +1933,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -1987,7 +1987,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -2017,7 +2017,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action) erp_action->fsf_req_id = 0; } out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return retval; } @@ -2363,7 +2363,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, ZFCP_STATUS_COMMON_UNBLOCKED))) return NULL; - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -2397,7 +2397,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); return req; } @@ -2433,7 +2433,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, return ERR_PTR(-EINVAL); } - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (zfcp_qdio_sbal_get(qdio)) goto out; @@ -2460,7 +2460,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); out: - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); if (!retval) { wait_for_completion(&req->completion); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index b2635759721c..60e6e5714eb9 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) unsigned long long now, span; int used; - spin_lock(&qdio->stat_lock); now = get_clock_monotonic(); span = (now - qdio->req_q_time) >> 12; used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free); qdio->req_q_util += used * span; qdio->req_q_time = now; - spin_unlock(&qdio->stat_lock); } static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, @@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, /* cleanup all SBALs being program-owned now */ zfcp_qdio_zero_sbals(qdio->req_q, idx, count); + spin_lock_irq(&qdio->stat_lock); zfcp_qdio_account(qdio); + spin_unlock_irq(&qdio->stat_lock); atomic_add(count, &qdio->req_q_free); wake_up(&qdio->req_q_wq); } @@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) { - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); if (atomic_read(&qdio->req_q_free) || !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return 1; - spin_unlock_bh(&qdio->req_q_lock); + 
spin_unlock_irq(&qdio->req_q_lock); return 0; } @@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) { long ret; - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); ret = wait_event_interruptible_timeout(qdio->req_q_wq, zfcp_qdio_sbal_check(qdio), 5 * HZ); @@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); } - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); return -EIO; } @@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) int retval; u8 sbal_number = q_req->sbal_number; + spin_lock(&qdio->stat_lock); zfcp_qdio_account(qdio); + spin_unlock(&qdio->stat_lock); retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, q_req->sbal_first, sbal_number); @@ -328,9 +330,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) return; /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ - spin_lock_bh(&qdio->req_q_lock); + spin_lock_irq(&qdio->req_q_lock); atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); - spin_unlock_bh(&qdio->req_q_lock); + spin_unlock_irq(&qdio->req_q_lock); wake_up(&qdio->req_q_wq); -- cgit v1.2.3 From c61b536c97f225a74cf430716fdb243dfafe9d48 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:58 +0200 Subject: [SCSI] zfcp: Reorder FCP I/O and task management handler functions Instead of calling the same handler for both, I/O and task management commands, use different handlers that call a function for the common part. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 147 +++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 48aa16a40d93..2b9dfea9f254 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2080,73 +2080,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) sizeof(blktrc)); } -static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) -{ - struct scsi_cmnd *scpnt; - struct fcp_resp_with_ext *fcp_rsp; - unsigned long flags; - - read_lock_irqsave(&req->adapter->abort_lock, flags); - - scpnt = req->data; - if (unlikely(!scpnt)) { - read_unlock_irqrestore(&req->adapter->abort_lock, flags); - return; - } - - if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { - set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); - goto skip_fsfstatus; - } - - switch (req->qtcb->header.fsf_status) { - case FSF_INCONSISTENT_PROT_DATA: - case FSF_INVALID_PROT_PARM: - set_host_byte(scpnt, DID_ERROR); - goto skip_fsfstatus; - case FSF_BLOCK_GUARD_CHECK_FAILURE: - zfcp_scsi_dif_sense_error(scpnt, 0x1); - goto skip_fsfstatus; - case FSF_APP_TAG_CHECK_FAILURE: - zfcp_scsi_dif_sense_error(scpnt, 0x2); - goto skip_fsfstatus; - case FSF_REF_TAG_CHECK_FAILURE: - zfcp_scsi_dif_sense_error(scpnt, 0x3); - goto skip_fsfstatus; - } - fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; - zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); - -skip_fsfstatus: - zfcp_fsf_req_trace(req, scpnt); - zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); - - scpnt->host_scribble = NULL; - (scpnt->scsi_done) (scpnt); - /* - * We must hold this lock until scsi_done has been called. - * Otherwise we may call scsi_done after abort regarding this - * command has completed. 
- * Note: scsi_done must not block! - */ - read_unlock_irqrestore(&req->adapter->abort_lock, flags); -} - -static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) -{ - struct fcp_resp_with_ext *fcp_rsp; - struct fcp_resp_rsp_info *rsp_info; - - fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; - rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; - - if ((rsp_info->rsp_code != FCP_TMF_CMPL) || - (req->status & ZFCP_STATUS_FSFREQ_ERROR)) - req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; -} - - -static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) +static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) { struct scsi_cmnd *scmnd = req->data; struct scsi_device *sdev = scmnd->device; @@ -2154,7 +2088,7 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) struct fsf_qtcb_header *header = &req->qtcb->header; if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) - goto skip_fsfstatus; + return; switch (header->fsf_status) { case FSF_HANDLE_MISMATCH: @@ -2211,12 +2145,60 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } -skip_fsfstatus: - if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) - zfcp_fsf_send_fcp_ctm_handler(req); - else { - zfcp_fsf_send_fcp_command_task_handler(req); +} + +static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) +{ + struct scsi_cmnd *scpnt; + struct fcp_resp_with_ext *fcp_rsp; + unsigned long flags; + + zfcp_fsf_fcp_handler_common(req); + + read_lock_irqsave(&req->adapter->abort_lock, flags); + + scpnt = req->data; + if (unlikely(!scpnt)) { + read_unlock_irqrestore(&req->adapter->abort_lock, flags); + return; } + + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { + set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); + goto skip_fsfstatus; + } + + switch (req->qtcb->header.fsf_status) { + case FSF_INCONSISTENT_PROT_DATA: + case FSF_INVALID_PROT_PARM: + set_host_byte(scpnt, DID_ERROR); + goto skip_fsfstatus; + case FSF_BLOCK_GUARD_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x1); + goto skip_fsfstatus; + case FSF_APP_TAG_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x2); + goto skip_fsfstatus; + case FSF_REF_TAG_CHECK_FAILURE: + zfcp_scsi_dif_sense_error(scpnt, 0x3); + goto skip_fsfstatus; + } + fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt); + +skip_fsfstatus: + zfcp_fsf_req_trace(req, scpnt); + zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); + + scpnt->host_scribble = NULL; + (scpnt->scsi_done) (scpnt); + /* + * We must hold this lock until scsi_done has been called. + * Otherwise we may call scsi_done after abort regarding this + * command has completed. + * Note: scsi_done must not block! 
+ */ + read_unlock_irqrestore(&req->adapter->abort_lock, flags); } static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) @@ -2299,7 +2281,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) io = &req->qtcb->bottom.io; req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; req->data = scsi_cmnd; - req->handler = zfcp_fsf_send_fcp_command_handler; + req->handler = zfcp_fsf_fcp_cmnd_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; req->qtcb->header.port_handle = zfcp_sdev->port->handle; io->service_class = FSF_CLASS_3; @@ -2345,6 +2327,21 @@ out: return retval; } +static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) +{ + struct fcp_resp_with_ext *fcp_rsp; + struct fcp_resp_rsp_info *rsp_info; + + zfcp_fsf_fcp_handler_common(req); + + fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; + rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; + + if ((rsp_info->rsp_code != FCP_TMF_CMPL) || + (req->status & ZFCP_STATUS_FSFREQ_ERROR)) + req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; +} + /** * zfcp_fsf_fcp_task_mgmt - send SCSI task management command * @scmnd: SCSI command to send the task management command for @@ -2378,7 +2375,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; req->data = scmnd; - req->handler = zfcp_fsf_send_fcp_command_handler; + req->handler = zfcp_fsf_fcp_task_mgmt_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; req->qtcb->header.port_handle = zfcp_sdev->port->handle; req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; -- cgit v1.2.3 From a1ca48319a9aa1c5b57ce142f538e76050bb8972 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:39:59 +0200 Subject: [SCSI] zfcp: Move ACL/CFDC code to zfcp_cfdc.c Move the code evaluating the ACL/CFDC specific errors to the file zfcp_cfdc.c. With this change, all code related to the old access control feature is kept in one file, not split across zfcp_erp.c and zfcp_fsf.c. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_cfdc.c | 186 +++++++++++++++++++++++++++++++++++++++++- drivers/s390/scsi/zfcp_erp.c | 82 ------------------- drivers/s390/scsi/zfcp_ext.h | 12 ++- drivers/s390/scsi/zfcp_fsf.c | 129 ++++------------------------- 4 files changed, 207 insertions(+), 202 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index fcbd2b756da4..f952b89b108a 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -2,9 +2,10 @@ * zfcp device driver * * Userspace interface for accessing the - * Access Control Lists / Control File Data Channel + * Access Control Lists / Control File Data Channel; + * handling of response code and states for ports and LUNs. * - * Copyright IBM Corporation 2008, 2009 + * Copyright IBM Corporation 2008, 2010 */ #define KMSG_COMPONENT "zfcp" @@ -260,3 +261,184 @@ struct miscdevice zfcp_cfdc_misc = { .name = "zfcp_cfdc", .fops = &zfcp_cfdc_fops, }; + +/** + * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT + * @adapter: Adapter where the Access Control Table (ACT) changed + * + * After a change in the adapter ACT, check if access to any + * previously denied resources is now possible. 
+ */ +void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter) +{ + unsigned long flags; + struct zfcp_port *port; + struct scsi_device *sdev; + struct zfcp_scsi_dev *zfcp_sdev; + int status; + + if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) + return; + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { + status = atomic_read(&port->status); + if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || + (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) + zfcp_erp_port_reopen(port, + ZFCP_STATUS_COMMON_ERP_FAILED, + "cfaac_1", NULL); + } + read_unlock_irqrestore(&adapter->port_list_lock, flags); + + shost_for_each_device(sdev, port->adapter->scsi_host) { + zfcp_sdev = sdev_to_zfcp(sdev); + status = atomic_read(&zfcp_sdev->status); + if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || + (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) + zfcp_erp_lun_reopen(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED, + "cfaac_2", NULL); + } +} + +static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) +{ + u16 subtable = table >> 16; + u16 rule = table & 0xffff; + const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; + + if (subtable && subtable < ARRAY_SIZE(act_type)) + dev_warn(&adapter->ccw_device->dev, + "Access denied according to ACT rule type %s, " + "rule %d\n", act_type[subtable], rule); +} + +/** + * zfcp_cfdc_port_denied - Process "access denied" for port + * @port: The port where the acces has been denied + * @qual: The FSF status qualifier for the access denied FSF status + */ +void zfcp_cfdc_port_denied(struct zfcp_port *port, + union fsf_status_qual *qual) +{ + dev_warn(&port->adapter->ccw_device->dev, + "Access denied to port 0x%016Lx\n", + (unsigned long long)port->wwpn); + + zfcp_act_eval_err(port->adapter, qual->halfword[0]); + zfcp_act_eval_err(port->adapter, qual->halfword[1]); + zfcp_erp_modify_port_status(port, "cfadp_1", NULL, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); +} + +/** + * zfcp_cfdc_lun_denied - Process "access denied" for LUN + * @sdev: The SCSI device / LUN where the access has been denied + * @qual: The FSF status qualifier for the access denied FSF status + */ +void zfcp_cfdc_lun_denied(struct scsi_device *sdev, + union fsf_status_qual *qual) +{ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, + "Access denied to LUN 0x%016Lx on port 0x%016Lx\n", + zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]); + zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]); + zfcp_erp_modify_lun_status(sdev, "cfadl_1", NULL, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + + atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); + atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); +} + +/** + * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status + * @sdev: The LUN / SCSI device where sharing violation occurred + * @qual: The FSF status qualifier from the LUN sharing violation + */ +void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev, + union fsf_status_qual *qual) +{ + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + if (qual->word[0]) + dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev, + "LUN 0x%Lx on port 0x%Lx is already in " + "use by CSS%d, MIF Image ID %x\n", + zfcp_scsi_dev_lun(sdev), + (unsigned long 
long)zfcp_sdev->port->wwpn, + qual->fsf_queue_designator.cssid, + qual->fsf_queue_designator.hla); + else + zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]); + + zfcp_erp_modify_lun_status(sdev, "fsosh_3", NULL, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); + atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); +} + +/** + * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun" + * @sdev: The SCSI device / LUN where to evaluate the status + * @bottom: The qtcb bottom with the status from the "open lun" + * + * Returns: 0 if LUN is usable, -EACCES if the access control table + * reports an unsupported configuration. + */ +int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev, + struct fsf_qtcb_bottom_support *bottom) +{ + int shared, rw; + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; + + if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) || + !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) || + zfcp_ccw_priv_sch(adapter)) + return 0; + + shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE); + rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); + + if (shared) + atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); + + if (!rw) { + atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); + dev_info(&adapter->ccw_device->dev, "SCSI device at LUN " + "0x%016Lx on port 0x%016Lx opened read-only\n", + zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + } + + if (!shared && !rw) { + dev_err(&adapter->ccw_device->dev, "Exclusive read-only access " + "not supported (LUN 0x%016Lx, port 0x%016Lx)\n", + zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_lun_failed(sdev, "fsosh_5", NULL); + zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL); + return -EACCES; + } + + if (shared && rw) { + dev_err(&adapter->ccw_device->dev, + "Shared read-write access not supported " + "(LUN 0x%016Lx, port 0x%016Lx)\n", + zfcp_scsi_dev_lun(sdev), + (unsigned long long)zfcp_sdev->port->wwpn); + zfcp_erp_lun_failed(sdev, "fsosh_7", NULL); + zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL); + return -EACCES; + } + + return 0; +} diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 734fc838931f..9e7d029ac7a2 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -1593,85 +1593,3 @@ void zfcp_erp_lun_boxed(struct scsi_device *sdev, char *id, void *ref) ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); } - -/** - * zfcp_erp_port_access_denied - Adapter denied access to port. - * @port: port where access has been denied - * @id: id for debug trace - * @ref: reference for debug trace - * - * Since the adapter has denied access, stop using the port and the - * attached LUNs. - */ -void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) -{ - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); -} - -/** - * zfcp_erp_lun_access_denied - Adapter denied access to LUN. - * @sdev: SCSI device / LUN where access has been denied - * @id: id for debug trace - * @ref: reference for debug trace - * - * Since the adapter has denied access, stop using the LUN. 
- */ -void zfcp_erp_lun_access_denied(struct scsi_device *sdev, char *id, void *ref) -{ - zfcp_erp_modify_lun_status(sdev, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); -} - -static void zfcp_erp_lun_access_changed(struct scsi_device *sdev, char *id, - void *ref) -{ - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - int status = atomic_read(&zfcp_sdev->status); - - if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_COMMON_ACCESS_BOXED))) - return; - - zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); -} - -static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id, - void *ref) -{ - struct scsi_device *sdev; - int status = atomic_read(&port->status); - - if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | - ZFCP_STATUS_COMMON_ACCESS_BOXED))) { - shost_for_each_device(sdev, port->adapter->scsi_host) - if (sdev_to_zfcp(sdev)->port == port) - zfcp_erp_lun_access_changed(sdev, id, ref); - return; - } - - zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); -} - -/** - * zfcp_erp_adapter_access_changed - Process change in adapter ACT - * @adapter: Adapter where the Access Control Table (ACT) changed - * @id: Id for debug trace - * @ref: Reference for debug trace - */ -void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, - void *ref) -{ - unsigned long flags; - struct zfcp_port *port; - - if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) - return; - - read_lock_irqsave(&adapter->port_list_lock, flags); - list_for_each_entry(port, &adapter->port_list, list) - zfcp_erp_port_access_changed(port, id, ref); - read_unlock_irqrestore(&adapter->port_list_lock, flags); -} diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 80714679f5d6..7320132a430c 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -34,6 +34,14 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *); /* zfcp_cfdc.c */ extern struct miscdevice zfcp_cfdc_misc; +extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *); +extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *); +extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *, + union fsf_status_qual *); +extern int zfcp_cfdc_open_lun_eval(struct scsi_device *, + struct fsf_qtcb_bottom_support *); +extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *); + /* zfcp_dbf.c */ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); @@ -88,10 +96,6 @@ extern void zfcp_erp_wait(struct zfcp_adapter *); extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *); extern void zfcp_erp_lun_boxed(struct scsi_device *, char *, void *); -extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *); -extern void zfcp_erp_lun_access_denied(struct scsi_device *, char *, void *); -extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, - void *); extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 2b9dfea9f254..813c5b22565b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -61,47 +61,6 @@ static u32 fsf_qtcb_type[] = { [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND }; -static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table) -{ - u16 subtable = table >> 16; - u16 rule = table & 0xffff; 
- const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" }; - - if (subtable && subtable < ARRAY_SIZE(act_type)) - dev_warn(&adapter->ccw_device->dev, - "Access denied according to ACT rule type %s, " - "rule %d\n", act_type[subtable], rule); -} - -static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req, - struct zfcp_port *port) -{ - struct fsf_qtcb_header *header = &req->qtcb->header; - dev_warn(&req->adapter->ccw_device->dev, - "Access denied to port 0x%016Lx\n", - (unsigned long long)port->wwpn); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); - zfcp_erp_port_access_denied(port, "fspad_1", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; -} - -static void zfcp_fsf_access_denied_lun(struct zfcp_fsf_req *req, - struct scsi_device *sdev) -{ - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); - - struct fsf_qtcb_header *header = &req->qtcb->header; - dev_warn(&req->adapter->ccw_device->dev, - "Access denied to LUN 0x%016Lx on port 0x%016Lx\n", - (unsigned long long)zfcp_scsi_dev_lun(sdev), - (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]); - zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]); - zfcp_erp_lun_access_denied(sdev, "fsadl_1", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; -} - static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) { dev_err(&req->adapter->ccw_device->dev, "FCP device not " @@ -295,13 +254,12 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) break; case FSF_STATUS_READ_NOTIFICATION_LOST: if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) - zfcp_erp_adapter_access_changed(adapter, "fssrh_3", - req); + zfcp_cfdc_adapter_access_changed(adapter); if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) queue_work(adapter->work_queue, &adapter->scan_work); break; case FSF_STATUS_READ_CFDC_UPDATED: - zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); + zfcp_cfdc_adapter_access_changed(adapter); break; case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: adapter->adapter_features = sr_buf->payload.word[0]; @@ -1116,8 +1074,10 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) case FSF_RESPONSE_SIZE_TOO_LARGE: break; case FSF_ACCESS_DENIED: - if (port) - zfcp_fsf_access_denied_port(req, port); + if (port) { + zfcp_cfdc_port_denied(port, &header->fsf_status_qual); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; + } break; case FSF_SBAL_MISMATCH: /* should never occure, avoided in zfcp_fsf_send_els */ @@ -1375,7 +1335,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) case FSF_PORT_ALREADY_OPEN: break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_port(req, port); + zfcp_cfdc_port_denied(port, &header->fsf_status_qual); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: dev_warn(&req->adapter->ccw_device->dev, @@ -1682,7 +1643,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_port(req, port); + zfcp_cfdc_port_denied(port, &header->fsf_status_qual); break; case FSF_PORT_BOXED: /* can't use generic zfcp_erp_modify_port_status because @@ -1768,9 +1729,6 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct fsf_qtcb_header *header = &req->qtcb->header; struct 
fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; - struct fsf_queue_designator *queue_designator = - &header->fsf_status_qual.fsf_queue_designator; - int exclusive, readwrite; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) return; @@ -1789,29 +1747,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) case FSF_LUN_ALREADY_OPEN: break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_lun(req, sdev); - atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); - atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); + zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual); + req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: zfcp_erp_port_boxed(zfcp_sdev->port, "fsouh_2", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_SHARING_VIOLATION: - if (header->fsf_status_qual.word[0]) - dev_warn(&adapter->ccw_device->dev, - "LUN 0x%Lx on port 0x%Lx is already in " - "use by CSS%d, MIF Image ID %x\n", - (unsigned long long)zfcp_scsi_dev_lun(sdev), - (unsigned long long)zfcp_sdev->port->wwpn, - queue_designator->cssid, - queue_designator->hla); - else - zfcp_act_eval_err(adapter, - header->fsf_status_qual.word[2]); - zfcp_erp_lun_access_denied(sdev, "fsolh_3", req); - atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); - atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); + zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED: @@ -1839,51 +1783,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) case FSF_GOOD: zfcp_sdev->lun_handle = header->lun_handle; atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); - - if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && - (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && - !zfcp_ccw_priv_sch(adapter)) { - exclusive = (bottom->lun_access_info & - FSF_UNIT_ACCESS_EXCLUSIVE); - readwrite = (bottom->lun_access_info & - FSF_UNIT_ACCESS_OUTBOUND_TRANSFER); - - if (!exclusive) - atomic_set_mask(ZFCP_STATUS_LUN_SHARED, - &zfcp_sdev->status); - - if (!readwrite) { - atomic_set_mask(ZFCP_STATUS_LUN_READONLY, - &zfcp_sdev->status); - dev_info(&adapter->ccw_device->dev, - "SCSI device at LUN 0x%016Lx on port " - "0x%016Lx opened read-only\n", - (unsigned long long)zfcp_scsi_dev_lun(sdev), - (unsigned long long)zfcp_sdev->port->wwpn); - } - - if (exclusive && !readwrite) { - dev_err(&adapter->ccw_device->dev, - "Exclusive read-only access not " - "supported (LUN 0x%016Lx, " - "port 0x%016Lx)\n", - (unsigned long long)zfcp_scsi_dev_lun(sdev), - (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_lun_failed(sdev, "fsolh_5", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_lun_shutdown(sdev, 0, "fsolh_6", req); - } else if (!exclusive && readwrite) { - dev_err(&adapter->ccw_device->dev, - "Shared read-write access not " - "supported (LUN 0x%016Lx, port " - "0x%016Lx)\n", - (unsigned long long)zfcp_scsi_dev_lun(sdev), - (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_lun_failed(sdev, "fsolh_7", req); - req->status |= ZFCP_STATUS_FSFREQ_ERROR; - zfcp_erp_lun_shutdown(sdev, 0, "fsolh_8", req); - } - } + zfcp_cfdc_open_lun_eval(sdev, bottom); break; } } @@ -2106,7 +2006,8 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) zfcp_fsf_class_not_supp(req); break; case FSF_ACCESS_DENIED: - zfcp_fsf_access_denied_lun(req, sdev); + zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual); + req->status |= 
ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_DIRECTION_INDICATOR_NOT_VALID: dev_err(&req->adapter->ccw_device->dev, -- cgit v1.2.3 From c9ff5d0315231b133a43e5af842bc01fb474f0d7 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Wed, 8 Sep 2010 14:40:00 +0200 Subject: [SCSI] zfcp: Remove duplicated code from zfcp_ccw_set_online The steps for setting the zfcp_adapter online are the same in zfcp_ccw_set_online and zfcp_ccw_activate. Remove the code duplication by calling zfcp_ccw_activate from zfcp_ccw_set_online. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ccw.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index ce1cc7a11fb4..bdf2f38e4b13 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -164,14 +164,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev) BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); adapter->req_no = 0; - zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); - zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, - "ccsonl2", NULL); - zfcp_erp_wait(adapter); - - flush_work(&adapter->scan_work); - + zfcp_ccw_activate(cdev); zfcp_ccw_adapter_put(adapter); return 0; } -- cgit v1.2.3 From edaed859e63aac174fcc3fed81886b91bb124661 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Wed, 8 Sep 2010 14:40:01 +0200 Subject: [SCSI] zfcp: Replace status modifier functions. Replace the zfcp_erp_modify_*_status functions and their accompanying wrappers with dedicated status modifier functions. This eases code readability and maintenance. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ccw.c | 8 +- drivers/s390/scsi/zfcp_cfdc.c | 22 ++-- drivers/s390/scsi/zfcp_def.h | 5 - drivers/s390/scsi/zfcp_erp.c | 258 +++++++++++++++++++---------------------- drivers/s390/scsi/zfcp_ext.h | 17 +-- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_fsf.c | 72 +++++++----- drivers/s390/scsi/zfcp_scsi.c | 3 +- drivers/s390/scsi/zfcp_sysfs.c | 10 +- 9 files changed, 189 insertions(+), 208 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index bdf2f38e4b13..0833c2b51e39 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -46,8 +46,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev) if (!adapter) return 0; - zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccresu2", NULL); zfcp_erp_wait(adapter); @@ -217,9 +216,8 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event) break; case CIO_OPER: dev_info(&cdev->dev, "The FCP device is operational again\n"); - zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccnoti4", NULL); break; diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index f952b89b108a..a56d14166c99 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -328,9 +328,9 @@ void zfcp_cfdc_port_denied(struct zfcp_port *port,
zfcp_act_eval_err(port->adapter, qual->halfword[0]); zfcp_act_eval_err(port->adapter, qual->halfword[1]); - zfcp_erp_modify_port_status(port, "cfadp_1", NULL, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + zfcp_erp_set_port_status(port, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED); } /** @@ -349,9 +349,9 @@ void zfcp_cfdc_lun_denied(struct scsi_device *sdev, (unsigned long long)zfcp_sdev->port->wwpn); zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]); zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]); - zfcp_erp_modify_lun_status(sdev, "cfadl_1", NULL, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + zfcp_erp_set_lun_status(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED); atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); @@ -378,9 +378,9 @@ void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev, else zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]); - zfcp_erp_modify_lun_status(sdev, "fsosh_3", NULL, - ZFCP_STATUS_COMMON_ERP_FAILED | - ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); + zfcp_erp_set_lun_status(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED | + ZFCP_STATUS_COMMON_ACCESS_DENIED); atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status); atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status); } @@ -424,7 +424,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev, "not supported (LUN 0x%016Lx, port 0x%016Lx)\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_lun_failed(sdev, "fsosh_5", NULL); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL); return -EACCES; } @@ -435,7 +435,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev, "(LUN 0x%016Lx, port 0x%016Lx)\n", zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_lun_failed(sdev, "fsosh_7", NULL); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL); return -EACCES; } diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 6dfae7091aa4..9ae1d0a6f627 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -325,9 +325,4 @@ struct zfcp_data { struct kmem_cache *adisc_cache; }; -/********************** ZFCP SPECIFIC DEFINES ********************************/ - -#define ZFCP_SET 0x00000100 -#define ZFCP_CLEAR 0x00000200 - #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 9e7d029ac7a2..d37c7331f244 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -57,9 +57,8 @@ enum zfcp_erp_act_result { static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask) { - zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | mask, - ZFCP_CLEAR); + zfcp_erp_clear_adapter_status(adapter, + ZFCP_STATUS_COMMON_UNBLOCKED | mask); } static int zfcp_erp_action_exists(struct zfcp_erp_action *act) @@ -266,7 +265,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, /* ensure propagation of failed status to new devices */ if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { - zfcp_erp_adapter_failed(adapter, "erareo1", NULL); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED); return -EIO; } return 
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, @@ -290,7 +290,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, write_lock_irqsave(&adapter->erp_lock, flags); if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) - zfcp_erp_adapter_failed(adapter, "erareo1", NULL); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED); else zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, NULL, NULL, id, ref, 0); @@ -327,9 +328,8 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id, static void zfcp_erp_port_block(struct zfcp_port *port, int clear) { - zfcp_erp_modify_port_status(port, "erpblk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear, - ZFCP_CLEAR); + zfcp_erp_clear_port_status(port, + ZFCP_STATUS_COMMON_UNBLOCKED | clear); } static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, @@ -370,7 +370,7 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) { /* ensure propagation of failed status to new devices */ - zfcp_erp_port_failed(port, "erpreo1", NULL); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); return -EIO; } @@ -400,9 +400,8 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask) { - zfcp_erp_modify_lun_status(sdev, "erlblk1", NULL, - ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask, - ZFCP_CLEAR); + zfcp_erp_clear_lun_status(sdev, + ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask); } static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, @@ -483,11 +482,6 @@ static int status_change_set(unsigned long mask, atomic_t *status) return (atomic_read(status) ^ mask) & mask; } -static int status_change_clear(unsigned long mask, atomic_t *status) -{ - return atomic_read(status) & mask; -} - static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) @@ -779,8 +773,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) adapter->fsf_req_seq_no = 0; zfcp_fc_wka_ports_force_offline(adapter->gs); /* all ports and LUNs are closed */ - zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, - ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); + zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN); atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); @@ -897,7 +890,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) struct zfcp_port *port = act->port; if (port->wwpn != adapter->peer_wwpn) { - zfcp_erp_port_failed(port, "eroptp1", NULL); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); return ZFCP_ERP_FAILED; } port->d_id = adapter->peer_d_id; @@ -1042,7 +1035,8 @@ static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result) "port 0x%016Lx\n", (unsigned long long)zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_lun_failed(sdev, "ersckl1", NULL); + zfcp_erp_set_lun_status(sdev, + ZFCP_STATUS_COMMON_ERP_FAILED); } break; } @@ -1072,7 +1066,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result) dev_err(&port->adapter->ccw_device->dev, "ERP failed for remote port 0x%016Lx\n", (unsigned long long)port->wwpn); - zfcp_erp_port_failed(port, "erpsck1", NULL); + zfcp_erp_set_port_status(port, + ZFCP_STATUS_COMMON_ERP_FAILED); } break; } 
@@ -1099,7 +1094,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter, dev_err(&adapter->ccw_device->dev, "ERP cannot recover an error " "on the FCP device\n"); - zfcp_erp_adapter_failed(adapter, "erasck1", NULL); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_ERP_FAILED); } break; } @@ -1421,175 +1417,159 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) } /** - * zfcp_erp_adapter_failed - Set adapter status to failed. - * @adapter: Failed adapter. - * @id: Event id for debug trace. - * @ref: Reference for debug trace. + * zfcp_erp_wait - wait for completion of error recovery on an adapter + * @adapter: adapter for which to wait for completion of its error recovery */ -void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref) +void zfcp_erp_wait(struct zfcp_adapter *adapter) { - zfcp_erp_modify_adapter_status(adapter, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); + wait_event(adapter->erp_done_wqh, + !(atomic_read(&adapter->status) & + ZFCP_STATUS_ADAPTER_ERP_PENDING)); } /** - * zfcp_erp_port_failed - Set port status to failed. - * @port: Failed port. - * @id: Event id for debug trace. - * @ref: Reference for debug trace. + * zfcp_erp_set_adapter_status - set adapter status bits + * @adapter: adapter to change the status + * @mask: status bits to change + * + * Changes in common status bits are propagated to attached ports and LUNs. */ -void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref) +void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) { - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); -} + struct zfcp_port *port; + struct scsi_device *sdev; + unsigned long flags; + u32 common_mask = mask & ZFCP_COMMON_FLAGS; -/** - * zfcp_erp_lun_failed - Set LUN status to failed. - * @sdev: Failed SCSI device / LUN - * @id: Event id for debug trace. - * @ref: Reference for debug trace. - */ -void zfcp_erp_lun_failed(struct scsi_device *sdev, char *id, void *ref) -{ - zfcp_erp_modify_lun_status(sdev, id, ref, - ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET); -} + atomic_set_mask(mask, &adapter->status); -/** - * zfcp_erp_wait - wait for completion of error recovery on an adapter - * @adapter: adapter for which to wait for completion of its error recovery - */ -void zfcp_erp_wait(struct zfcp_adapter *adapter) -{ - wait_event(adapter->erp_done_wqh, - !(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_PENDING)); + if (!common_mask) + return; + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) + atomic_set_mask(common_mask, &port->status); + read_unlock_irqrestore(&adapter->port_list_lock, flags); + + shost_for_each_device(sdev, adapter->scsi_host) + atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); } /** - * zfcp_erp_modify_adapter_status - change adapter status bits + * zfcp_erp_clear_adapter_status - clear adapter status bits * @adapter: adapter to change the status - * @id: id for the debug trace - * @ref: reference for the debug trace * @mask: status bits to change - * @set_or_clear: ZFCP_SET or ZFCP_CLEAR * * Changes in common status bits are propagated to attached ports and LUNs. 
*/ -void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, - void *ref, u32 mask, int set_or_clear) +void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) { struct zfcp_port *port; + struct scsi_device *sdev; unsigned long flags; u32 common_mask = mask & ZFCP_COMMON_FLAGS; + u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; - if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &adapter->status)) - zfcp_dbf_rec_adapter(id, ref, adapter->dbf); - atomic_set_mask(mask, &adapter->status); - } else { - if (status_change_clear(mask, &adapter->status)) - zfcp_dbf_rec_adapter(id, ref, adapter->dbf); - atomic_clear_mask(mask, &adapter->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) - atomic_set(&adapter->erp_counter, 0); + atomic_clear_mask(mask, &adapter->status); + + if (!common_mask) + return; + + if (clear_counter) + atomic_set(&adapter->erp_counter, 0); + + read_lock_irqsave(&adapter->port_list_lock, flags); + list_for_each_entry(port, &adapter->port_list, list) { + atomic_clear_mask(common_mask, &port->status); + if (clear_counter) + atomic_set(&port->erp_counter, 0); } + read_unlock_irqrestore(&adapter->port_list_lock, flags); - if (common_mask) { - read_lock_irqsave(&adapter->port_list_lock, flags); - list_for_each_entry(port, &adapter->port_list, list) - zfcp_erp_modify_port_status(port, id, ref, common_mask, - set_or_clear); - read_unlock_irqrestore(&adapter->port_list_lock, flags); + shost_for_each_device(sdev, adapter->scsi_host) { + atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); + if (clear_counter) + atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } } /** - * zfcp_erp_modify_port_status - change port status bits - * @port: port to change the status bits - * @id: id for the debug trace - * @ref: reference for the debug trace + * zfcp_erp_set_port_status - set port status bits + * @port: port to change the status * @mask: status bits to change - * @set_or_clear: ZFCP_SET or ZFCP_CLEAR * * Changes in common status bits are propagated to attached LUNs. */ -void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, - u32 mask, int set_or_clear) +void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) { struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; - if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &port->status)) - zfcp_dbf_rec_port(id, ref, port); - atomic_set_mask(mask, &port->status); - } else { - if (status_change_clear(mask, &port->status)) - zfcp_dbf_rec_port(id, ref, port); - atomic_clear_mask(mask, &port->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) - atomic_set(&port->erp_counter, 0); - } + atomic_set_mask(mask, &port->status); - if (common_mask) - shost_for_each_device(sdev, port->adapter->scsi_host) - if (sdev_to_zfcp(sdev)->port == port) - zfcp_erp_modify_lun_status(sdev, id, ref, - common_mask, - set_or_clear); + if (!common_mask) + return; + + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) + atomic_set_mask(common_mask, + &sdev_to_zfcp(sdev)->status); } /** - * zfcp_erp_modify_lun_status - change LUN status bits - * @sdev: SCSI device / LUN where to change the status bits - * @id: id for the debug trace - * @ref: reference for the debug trace + * zfcp_erp_clear_port_status - clear port status bits + * @port: adapter to change the status * @mask: status bits to change - * @set_or_clear: ZFCP_SET or ZFCP_CLEAR + * + * Changes in common status bits are propagated to attached LUNs. 
*/ -void zfcp_erp_modify_lun_status(struct scsi_device *sdev, char *id, void *ref, - u32 mask, int set_or_clear) +void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) { - struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + struct scsi_device *sdev; + u32 common_mask = mask & ZFCP_COMMON_FLAGS; + u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; + + atomic_clear_mask(mask, &port->status); + + if (!common_mask) + return; - if (set_or_clear == ZFCP_SET) { - if (status_change_set(mask, &zfcp_sdev->status)) - zfcp_dbf_rec_lun(id, ref, sdev); - atomic_set_mask(mask, &zfcp_sdev->status); - } else { - if (status_change_clear(mask, &zfcp_sdev->status)) - zfcp_dbf_rec_lun(id, ref, sdev); - atomic_clear_mask(mask, &zfcp_sdev->status); - if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { - atomic_set(&zfcp_sdev->erp_counter, 0); + if (clear_counter) + atomic_set(&port->erp_counter, 0); + + shost_for_each_device(sdev, port->adapter->scsi_host) + if (sdev_to_zfcp(sdev)->port == port) { + atomic_clear_mask(common_mask, + &sdev_to_zfcp(sdev)->status); + if (clear_counter) + atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } - } } /** - * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP - * @port: The "boxed" port. - * @id: The debug trace id. - * @id: Reference for the debug trace. + * zfcp_erp_set_lun_status - set lun status bits + * @sdev: SCSI device / lun to set the status bits + * @mask: status bits to change */ -void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref) +void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask) { - zfcp_erp_modify_port_status(port, id, ref, - ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); - zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + atomic_set_mask(mask, &zfcp_sdev->status); } /** - * zfcp_erp_lun_boxed - Mark LUN as "boxed" and start ERP - * @sdev: The "boxed" SCSI device / LUN. - * @id: The debug trace id. - * @ref: Reference for the debug trace. 
+ * zfcp_erp_clear_lun_status - clear lun status bits + * @sdev: SCSi device / lun to clear the status bits + * @mask: status bits to change */ -void zfcp_erp_lun_boxed(struct scsi_device *sdev, char *id, void *ref) +void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask) { - zfcp_erp_modify_lun_status(sdev, id, ref, - ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET); - zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref); + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + + atomic_clear_mask(mask, &zfcp_sdev->status); + + if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) + atomic_set(&zfcp_sdev->erp_counter, 0); } + diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 7320132a430c..bf8f3e514839 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -71,31 +71,26 @@ extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, unsigned long); /* zfcp_erp.c */ -extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, - void *, u32, int); +extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); +extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, void *); -extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *); -extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32, - int); +extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); +extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, void *); -extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *); -extern void zfcp_erp_modify_lun_status(struct scsi_device *, char *, void *, - u32, int); +extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); +extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *); extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *); extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); -extern void zfcp_erp_lun_failed(struct scsi_device *, char *, void *); extern int zfcp_erp_thread_setup(struct zfcp_adapter *); extern void zfcp_erp_thread_kill(struct zfcp_adapter *); extern void zfcp_erp_wait(struct zfcp_adapter *); extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); -extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *); -extern void zfcp_erp_lun_boxed(struct scsi_device *, char *, void *); extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6f3ed2b9a349..86fd905df48b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -365,7 +365,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) } if (!port->d_id) { - zfcp_erp_port_failed(port, "fcgpn_2", NULL); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED); goto out; } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 813c5b22565b..beaf0916ceab 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -104,7 +104,7 @@ static void 
zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) read_unlock_irqrestore(&adapter->port_list_lock, flags); } -static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, +static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, struct fsf_link_down_info *link_down) { struct zfcp_adapter *adapter = req->adapter; @@ -184,7 +184,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, "the FC fabric is down\n"); } out: - zfcp_erp_adapter_failed(adapter, id, req); + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); } static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) @@ -195,13 +195,13 @@ static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) switch (sr_buf->status_subtype) { case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: - zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi); + zfcp_fsf_link_down_info_eval(req, ldi); break; case FSF_STATUS_READ_SUB_FDISC_FAILED: - zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi); + zfcp_fsf_link_down_info_eval(req, ldi); break; case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: - zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL); + zfcp_fsf_link_down_info_eval(req, NULL); }; } @@ -242,9 +242,8 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) dev_info(&adapter->ccw_device->dev, "The local link has been restored\n"); /* All ports should be marked as ready to run again */ - zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, @@ -359,16 +358,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); break; case FSF_PROT_LINK_DOWN: - zfcp_fsf_link_down_info_eval(req, "fspse_5", - &psq->link_down_info); + zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); /* go through reopen to flush pending requests */ zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); break; case FSF_PROT_REEST_QUEUE: /* All ports should be marked as ready to run again */ - zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, + ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, @@ -538,7 +535,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); - zfcp_fsf_link_down_info_eval(req, "fsecdh2", + zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); break; default: @@ -604,7 +601,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) break; case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: zfcp_fsf_exchange_port_evaluate(req); - zfcp_fsf_link_down_info_eval(req, "fsepdh1", + zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); break; } @@ -797,11 +794,17 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(zfcp_sdev->port, "fsafch3", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; 
break; case FSF_LUN_BOXED: - zfcp_erp_lun_boxed(sdev, "fsafch4", req); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "fsafch4", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -1343,7 +1346,8 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) "Not enough FCP adapter resources to open " "remote port 0x%016Lx\n", (unsigned long long)port->wwpn); - zfcp_erp_port_failed(port, "fsoph_1", req); + zfcp_erp_set_port_status(port, + ZFCP_STATUS_COMMON_ERP_FAILED); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -1453,9 +1457,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: break; case FSF_GOOD: - zfcp_erp_modify_port_status(port, "fscph_2", req, - ZFCP_STATUS_COMMON_OPEN, - ZFCP_CLEAR); + zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN); break; } } @@ -1653,7 +1655,9 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) if (sdev_to_zfcp(sdev)->port == port) atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &sdev_to_zfcp(sdev)->status); - zfcp_erp_port_boxed(port, "fscpph2", req); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, + "fscpph2", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -1751,7 +1755,11 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(zfcp_sdev->port, "fsouh_2", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_SHARING_VIOLATION: @@ -1764,7 +1772,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) "0x%016Lx on port 0x%016Lx\n", (unsigned long long)zfcp_scsi_dev_lun(sdev), (unsigned long long)zfcp_sdev->port->wwpn); - zfcp_erp_lun_failed(sdev, "fsolh_4", req); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); /* fall through */ case FSF_INVALID_COMMAND_OPTION: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1856,7 +1864,11 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(zfcp_sdev->port, "fscuh_3", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: @@ -2032,11 +2044,17 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_PORT_BOXED: - zfcp_erp_port_boxed(zfcp_sdev->port, "fssfch5", req); + zfcp_erp_set_port_status(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_port_reopen(zfcp_sdev->port, + ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5", + req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_LUN_BOXED: - zfcp_erp_lun_boxed(sdev, "fssfch6", req); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); + zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, + "fssfch6", req); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; case FSF_ADAPTER_STATUS_AVAILABLE: diff --git 
a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 1e8d0cc7e1df..ae10883a5b28 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -158,8 +158,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF; spin_lock_init(&zfcp_sdev->latencies.lock); - zfcp_erp_modify_lun_status(sdev, "scsla_0", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL); zfcp_erp_wait(port->adapter); diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 4f59356b07bb..2f2c54f4718f 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -104,8 +104,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, if (strict_strtoul(buf, 0, &val) || val != 0) return -EINVAL; - zfcp_erp_modify_port_status(port, "sypfai1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2", NULL); zfcp_erp_wait(port->adapter); @@ -147,9 +146,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, sdev = zfcp_unit_sdev(unit); if (sdev) { - zfcp_erp_modify_lun_status(sdev, "syufai1", NULL, - ZFCP_STATUS_COMMON_RUNNING, - ZFCP_SET); + zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, "syufai2", NULL); zfcp_erp_wait(unit->port->adapter); @@ -199,8 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, goto out; } - zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL, - ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); + zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "syafai2", NULL); zfcp_erp_wait(adapter); -- cgit v1.2.3 From a36c61f9025b8924f99f54d518763bee7aa84085 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 15 Sep 2010 11:50:55 -0700 Subject: [SCSI] bfa: cleanup driver We have flattened the BFA hierarchy and also reduced the number of source and header files we used to have earlier. 
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/Makefile | 17 +- drivers/scsi/bfa/bfa.h | 438 ++ drivers/scsi/bfa/bfa_callback_priv.h | 57 - drivers/scsi/bfa/bfa_cb_ioim.h | 169 + drivers/scsi/bfa/bfa_cb_ioim_macros.h | 181 - drivers/scsi/bfa/bfa_cee.c | 492 -- drivers/scsi/bfa/bfa_core.c | 1131 +++- drivers/scsi/bfa/bfa_cs.h | 364 ++ drivers/scsi/bfa/bfa_csdebug.c | 58 - drivers/scsi/bfa/bfa_defs.h | 466 ++ drivers/scsi/bfa/bfa_defs_fcs.h | 457 ++ drivers/scsi/bfa/bfa_defs_svc.h | 1081 ++++ drivers/scsi/bfa/bfa_drv.c | 107 + drivers/scsi/bfa/bfa_fc.h | 1916 +++++++ drivers/scsi/bfa/bfa_fcbuild.c | 1410 +++++ drivers/scsi/bfa/bfa_fcbuild.h | 316 ++ drivers/scsi/bfa/bfa_fcpim.c | 3460 ++++++++++++- drivers/scsi/bfa/bfa_fcpim.h | 401 ++ drivers/scsi/bfa/bfa_fcpim_priv.h | 192 - drivers/scsi/bfa/bfa_fcport.c | 1962 ------- drivers/scsi/bfa/bfa_fcs.c | 1611 +++++- drivers/scsi/bfa/bfa_fcs.h | 779 +++ drivers/scsi/bfa/bfa_fcs_fcpim.c | 783 +++ drivers/scsi/bfa/bfa_fcs_lport.c | 5413 ++++++++++++++++++- drivers/scsi/bfa/bfa_fcs_port.c | 61 - drivers/scsi/bfa/bfa_fcs_rport.c | 3126 +++++++++++ drivers/scsi/bfa/bfa_fcs_uf.c | 99 - drivers/scsi/bfa/bfa_fcxp.c | 774 --- drivers/scsi/bfa/bfa_fcxp_priv.h | 138 - drivers/scsi/bfa/bfa_fwimg_priv.h | 44 - drivers/scsi/bfa/bfa_hw_cb.c | 8 +- drivers/scsi/bfa/bfa_hw_ct.c | 11 +- drivers/scsi/bfa/bfa_intr.c | 270 - drivers/scsi/bfa/bfa_intr_priv.h | 117 - drivers/scsi/bfa/bfa_ioc.c | 1984 ++++--- drivers/scsi/bfa/bfa_ioc.h | 248 +- drivers/scsi/bfa/bfa_ioc_cb.c | 124 +- drivers/scsi/bfa/bfa_ioc_ct.c | 137 +- drivers/scsi/bfa/bfa_iocfc.c | 927 ---- drivers/scsi/bfa/bfa_iocfc.h | 184 - drivers/scsi/bfa/bfa_iocfc_q.c | 44 - drivers/scsi/bfa/bfa_ioim.c | 1364 ----- drivers/scsi/bfa/bfa_itnim.c | 1088 ---- drivers/scsi/bfa/bfa_log.c | 346 -- drivers/scsi/bfa/bfa_log_module.c | 537 -- drivers/scsi/bfa/bfa_lps.c | 892 ---- drivers/scsi/bfa/bfa_lps_priv.h | 38 - drivers/scsi/bfa/bfa_module.c | 90 - drivers/scsi/bfa/bfa_modules.h | 130 + drivers/scsi/bfa/bfa_modules_priv.h | 43 - drivers/scsi/bfa/bfa_os_inc.h | 144 +- drivers/scsi/bfa/bfa_plog.h | 159 + drivers/scsi/bfa/bfa_port.c | 134 +- drivers/scsi/bfa/bfa_port.h | 66 + drivers/scsi/bfa/bfa_port_priv.h | 94 - drivers/scsi/bfa/bfa_priv.h | 110 - drivers/scsi/bfa/bfa_rport.c | 906 ---- drivers/scsi/bfa/bfa_rport_priv.h | 45 - drivers/scsi/bfa/bfa_sgpg.c | 226 - drivers/scsi/bfa/bfa_sgpg_priv.h | 79 - drivers/scsi/bfa/bfa_sm.c | 38 - drivers/scsi/bfa/bfa_svc.c | 5423 ++++++++++++++++++++ drivers/scsi/bfa/bfa_svc.h | 657 +++ drivers/scsi/bfa/bfa_timer.c | 90 - drivers/scsi/bfa/bfa_trcmod_priv.h | 64 - drivers/scsi/bfa/bfa_tskim.c | 690 --- drivers/scsi/bfa/bfa_uf.c | 343 -- drivers/scsi/bfa/bfa_uf_priv.h | 47 - drivers/scsi/bfa/bfad.c | 1355 +++-- drivers/scsi/bfa/bfad_attr.c | 241 +- drivers/scsi/bfa/bfad_attr.h | 56 - drivers/scsi/bfa/bfad_debugfs.c | 10 +- drivers/scsi/bfa/bfad_drv.h | 254 +- drivers/scsi/bfa/bfad_fwimg.c | 131 - drivers/scsi/bfa/bfad_im.c | 257 +- drivers/scsi/bfa/bfad_im.h | 56 +- drivers/scsi/bfa/bfad_im_compat.h | 45 - drivers/scsi/bfa/bfad_intr.c | 222 - drivers/scsi/bfa/bfad_ipfc.h | 42 - drivers/scsi/bfa/bfad_os.c | 50 - drivers/scsi/bfa/bfad_tm.h | 59 - drivers/scsi/bfa/bfad_trcmod.h | 52 - drivers/scsi/bfa/bfi.h | 579 +++ drivers/scsi/bfa/bfi_cbreg.h | 304 ++ drivers/scsi/bfa/bfi_ctreg.h | 627 +++ drivers/scsi/bfa/bfi_ms.h | 765 +++ drivers/scsi/bfa/fab.c | 62 - drivers/scsi/bfa/fabric.c | 1323 ----- drivers/scsi/bfa/fcbuild.c | 1449 ------ 
drivers/scsi/bfa/fcbuild.h | 279 - drivers/scsi/bfa/fcpim.c | 824 --- drivers/scsi/bfa/fcptm.c | 68 - drivers/scsi/bfa/fcs.h | 30 - drivers/scsi/bfa/fcs_auth.h | 37 - drivers/scsi/bfa/fcs_fabric.h | 68 - drivers/scsi/bfa/fcs_fcpim.h | 39 - drivers/scsi/bfa/fcs_fcptm.h | 45 - drivers/scsi/bfa/fcs_fcxp.h | 29 - drivers/scsi/bfa/fcs_lport.h | 118 - drivers/scsi/bfa/fcs_ms.h | 35 - drivers/scsi/bfa/fcs_port.h | 31 - drivers/scsi/bfa/fcs_rport.h | 61 - drivers/scsi/bfa/fcs_trcmod.h | 56 - drivers/scsi/bfa/fcs_uf.h | 31 - drivers/scsi/bfa/fcs_vport.h | 32 - drivers/scsi/bfa/fdmi.c | 1230 ----- drivers/scsi/bfa/include/aen/bfa_aen.h | 96 - drivers/scsi/bfa/include/aen/bfa_aen_adapter.h | 31 - drivers/scsi/bfa/include/aen/bfa_aen_audit.h | 31 - drivers/scsi/bfa/include/aen/bfa_aen_ethport.h | 35 - drivers/scsi/bfa/include/aen/bfa_aen_ioc.h | 45 - drivers/scsi/bfa/include/aen/bfa_aen_itnim.h | 33 - drivers/scsi/bfa/include/aen/bfa_aen_lport.h | 51 - drivers/scsi/bfa/include/aen/bfa_aen_port.h | 57 - drivers/scsi/bfa/include/aen/bfa_aen_rport.h | 37 - drivers/scsi/bfa/include/bfa.h | 203 - drivers/scsi/bfa/include/bfa_fcpim.h | 177 - drivers/scsi/bfa/include/bfa_fcptm.h | 47 - drivers/scsi/bfa/include/bfa_svc.h | 338 -- drivers/scsi/bfa/include/bfa_timer.h | 53 - drivers/scsi/bfa/include/bfi/bfi.h | 174 - drivers/scsi/bfa/include/bfi/bfi_boot.h | 34 - drivers/scsi/bfa/include/bfi/bfi_cbreg.h | 319 -- drivers/scsi/bfa/include/bfi/bfi_cee.h | 119 - drivers/scsi/bfa/include/bfi/bfi_ctreg.h | 640 --- drivers/scsi/bfa/include/bfi/bfi_fabric.h | 92 - drivers/scsi/bfa/include/bfi/bfi_fcpim.h | 301 -- drivers/scsi/bfa/include/bfi/bfi_fcxp.h | 71 - drivers/scsi/bfa/include/bfi/bfi_ioc.h | 208 - drivers/scsi/bfa/include/bfi/bfi_iocfc.h | 179 - drivers/scsi/bfa/include/bfi/bfi_lport.h | 89 - drivers/scsi/bfa/include/bfi/bfi_lps.h | 104 - drivers/scsi/bfa/include/bfi/bfi_pbc.h | 62 - drivers/scsi/bfa/include/bfi/bfi_port.h | 115 - drivers/scsi/bfa/include/bfi/bfi_pport.h | 118 - drivers/scsi/bfa/include/bfi/bfi_rport.h | 104 - drivers/scsi/bfa/include/bfi/bfi_uf.h | 52 - drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h | 40 - drivers/scsi/bfa/include/cna/cee/bfa_cee.h | 77 - drivers/scsi/bfa/include/cna/port/bfa_port.h | 70 - drivers/scsi/bfa/include/cna/pstats/ethport_defs.h | 36 - drivers/scsi/bfa/include/cna/pstats/phyport_defs.h | 218 - drivers/scsi/bfa/include/cs/bfa_checksum.h | 60 - drivers/scsi/bfa/include/cs/bfa_debug.h | 45 - drivers/scsi/bfa/include/cs/bfa_log.h | 184 - drivers/scsi/bfa/include/cs/bfa_perf.h | 34 - drivers/scsi/bfa/include/cs/bfa_plog.h | 167 - drivers/scsi/bfa/include/cs/bfa_q.h | 81 - drivers/scsi/bfa/include/cs/bfa_sm.h | 77 - drivers/scsi/bfa/include/cs/bfa_trc.h | 176 - drivers/scsi/bfa/include/cs/bfa_wc.h | 68 - drivers/scsi/bfa/include/defs/bfa_defs_adapter.h | 83 - drivers/scsi/bfa/include/defs/bfa_defs_aen.h | 83 - drivers/scsi/bfa/include/defs/bfa_defs_audit.h | 38 - drivers/scsi/bfa/include/defs/bfa_defs_auth.h | 134 - drivers/scsi/bfa/include/defs/bfa_defs_boot.h | 81 - drivers/scsi/bfa/include/defs/bfa_defs_cee.h | 157 - drivers/scsi/bfa/include/defs/bfa_defs_driver.h | 41 - drivers/scsi/bfa/include/defs/bfa_defs_ethport.h | 99 - drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h | 45 - drivers/scsi/bfa/include/defs/bfa_defs_fcport.h | 88 - drivers/scsi/bfa/include/defs/bfa_defs_ioc.h | 158 - drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h | 322 -- drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h | 70 - drivers/scsi/bfa/include/defs/bfa_defs_itnim.h | 136 - 
drivers/scsi/bfa/include/defs/bfa_defs_led.h | 35 - drivers/scsi/bfa/include/defs/bfa_defs_lport.h | 68 - drivers/scsi/bfa/include/defs/bfa_defs_mfg.h | 144 - drivers/scsi/bfa/include/defs/bfa_defs_pci.h | 48 - drivers/scsi/bfa/include/defs/bfa_defs_pm.h | 33 - drivers/scsi/bfa/include/defs/bfa_defs_pom.h | 56 - drivers/scsi/bfa/include/defs/bfa_defs_port.h | 248 - drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 393 -- drivers/scsi/bfa/include/defs/bfa_defs_qos.h | 99 - drivers/scsi/bfa/include/defs/bfa_defs_rport.h | 199 - drivers/scsi/bfa/include/defs/bfa_defs_status.h | 282 - drivers/scsi/bfa/include/defs/bfa_defs_tin.h | 118 - drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h | 43 - drivers/scsi/bfa/include/defs/bfa_defs_types.h | 30 - drivers/scsi/bfa/include/defs/bfa_defs_version.h | 22 - drivers/scsi/bfa/include/defs/bfa_defs_vf.h | 74 - drivers/scsi/bfa/include/defs/bfa_defs_vport.h | 91 - drivers/scsi/bfa/include/fcb/bfa_fcb.h | 33 - drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h | 75 - drivers/scsi/bfa/include/fcb/bfa_fcb_port.h | 113 - drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h | 80 - drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h | 47 - drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h | 48 - drivers/scsi/bfa/include/fcs/bfa_fcs.h | 76 - drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h | 82 - drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h | 112 - drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h | 132 - drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h | 63 - drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h | 219 - drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h | 105 - drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h | 67 - drivers/scsi/bfa/include/log/bfa_log_fcs.h | 28 - drivers/scsi/bfa/include/log/bfa_log_hal.h | 36 - drivers/scsi/bfa/include/log/bfa_log_linux.h | 62 - drivers/scsi/bfa/include/log/bfa_log_wdrv.h | 36 - drivers/scsi/bfa/include/protocol/ct.h | 492 -- drivers/scsi/bfa/include/protocol/fc.h | 1111 ---- drivers/scsi/bfa/include/protocol/fc_sp.h | 224 - drivers/scsi/bfa/include/protocol/fcp.h | 184 - drivers/scsi/bfa/include/protocol/fdmi.h | 163 - drivers/scsi/bfa/include/protocol/scsi.h | 1648 ------ drivers/scsi/bfa/include/protocol/types.h | 42 - drivers/scsi/bfa/loop.c | 213 - drivers/scsi/bfa/lport_api.c | 303 -- drivers/scsi/bfa/lport_priv.h | 82 - drivers/scsi/bfa/ms.c | 759 --- drivers/scsi/bfa/n2n.c | 105 - drivers/scsi/bfa/ns.c | 1242 ----- drivers/scsi/bfa/plog.c | 184 - drivers/scsi/bfa/rport.c | 2676 ---------- drivers/scsi/bfa/rport_api.c | 185 - drivers/scsi/bfa/rport_ftrs.c | 379 -- drivers/scsi/bfa/scn.c | 482 -- drivers/scsi/bfa/vfapi.c | 292 -- drivers/scsi/bfa/vport.c | 903 ---- 220 files changed, 34823 insertions(+), 43478 deletions(-) create mode 100644 drivers/scsi/bfa/bfa.h delete mode 100644 drivers/scsi/bfa/bfa_callback_priv.h create mode 100644 drivers/scsi/bfa/bfa_cb_ioim.h delete mode 100644 drivers/scsi/bfa/bfa_cb_ioim_macros.h delete mode 100644 drivers/scsi/bfa/bfa_cee.c create mode 100644 drivers/scsi/bfa/bfa_cs.h delete mode 100644 drivers/scsi/bfa/bfa_csdebug.c create mode 100644 drivers/scsi/bfa/bfa_defs.h create mode 100644 drivers/scsi/bfa/bfa_defs_fcs.h create mode 100644 drivers/scsi/bfa/bfa_defs_svc.h create mode 100644 drivers/scsi/bfa/bfa_drv.c create mode 100644 drivers/scsi/bfa/bfa_fc.h create mode 100644 drivers/scsi/bfa/bfa_fcbuild.c create mode 100644 drivers/scsi/bfa/bfa_fcbuild.h create mode 100644 drivers/scsi/bfa/bfa_fcpim.h delete mode 100644 drivers/scsi/bfa/bfa_fcpim_priv.h delete mode 100644 drivers/scsi/bfa/bfa_fcport.c create mode 100644 
drivers/scsi/bfa/bfa_fcs.h create mode 100644 drivers/scsi/bfa/bfa_fcs_fcpim.c delete mode 100644 drivers/scsi/bfa/bfa_fcs_port.c create mode 100644 drivers/scsi/bfa/bfa_fcs_rport.c delete mode 100644 drivers/scsi/bfa/bfa_fcs_uf.c delete mode 100644 drivers/scsi/bfa/bfa_fcxp.c delete mode 100644 drivers/scsi/bfa/bfa_fcxp_priv.h delete mode 100644 drivers/scsi/bfa/bfa_fwimg_priv.h delete mode 100644 drivers/scsi/bfa/bfa_intr.c delete mode 100644 drivers/scsi/bfa/bfa_intr_priv.h delete mode 100644 drivers/scsi/bfa/bfa_iocfc.c delete mode 100644 drivers/scsi/bfa/bfa_iocfc.h delete mode 100644 drivers/scsi/bfa/bfa_iocfc_q.c delete mode 100644 drivers/scsi/bfa/bfa_ioim.c delete mode 100644 drivers/scsi/bfa/bfa_itnim.c delete mode 100644 drivers/scsi/bfa/bfa_log.c delete mode 100644 drivers/scsi/bfa/bfa_log_module.c delete mode 100644 drivers/scsi/bfa/bfa_lps.c delete mode 100644 drivers/scsi/bfa/bfa_lps_priv.h delete mode 100644 drivers/scsi/bfa/bfa_module.c create mode 100644 drivers/scsi/bfa/bfa_modules.h delete mode 100644 drivers/scsi/bfa/bfa_modules_priv.h create mode 100644 drivers/scsi/bfa/bfa_plog.h create mode 100644 drivers/scsi/bfa/bfa_port.h delete mode 100644 drivers/scsi/bfa/bfa_port_priv.h delete mode 100644 drivers/scsi/bfa/bfa_priv.h delete mode 100644 drivers/scsi/bfa/bfa_rport.c delete mode 100644 drivers/scsi/bfa/bfa_rport_priv.h delete mode 100644 drivers/scsi/bfa/bfa_sgpg.c delete mode 100644 drivers/scsi/bfa/bfa_sgpg_priv.h delete mode 100644 drivers/scsi/bfa/bfa_sm.c create mode 100644 drivers/scsi/bfa/bfa_svc.c create mode 100644 drivers/scsi/bfa/bfa_svc.h delete mode 100644 drivers/scsi/bfa/bfa_timer.c delete mode 100644 drivers/scsi/bfa/bfa_trcmod_priv.h delete mode 100644 drivers/scsi/bfa/bfa_tskim.c delete mode 100644 drivers/scsi/bfa/bfa_uf.c delete mode 100644 drivers/scsi/bfa/bfa_uf_priv.h delete mode 100644 drivers/scsi/bfa/bfad_attr.h delete mode 100644 drivers/scsi/bfa/bfad_fwimg.c delete mode 100644 drivers/scsi/bfa/bfad_im_compat.h delete mode 100644 drivers/scsi/bfa/bfad_intr.c delete mode 100644 drivers/scsi/bfa/bfad_ipfc.h delete mode 100644 drivers/scsi/bfa/bfad_os.c delete mode 100644 drivers/scsi/bfa/bfad_tm.h delete mode 100644 drivers/scsi/bfa/bfad_trcmod.h create mode 100644 drivers/scsi/bfa/bfi.h create mode 100644 drivers/scsi/bfa/bfi_cbreg.h create mode 100644 drivers/scsi/bfa/bfi_ctreg.h create mode 100644 drivers/scsi/bfa/bfi_ms.h delete mode 100644 drivers/scsi/bfa/fab.c delete mode 100644 drivers/scsi/bfa/fabric.c delete mode 100644 drivers/scsi/bfa/fcbuild.c delete mode 100644 drivers/scsi/bfa/fcbuild.h delete mode 100644 drivers/scsi/bfa/fcpim.c delete mode 100644 drivers/scsi/bfa/fcptm.c delete mode 100644 drivers/scsi/bfa/fcs.h delete mode 100644 drivers/scsi/bfa/fcs_auth.h delete mode 100644 drivers/scsi/bfa/fcs_fabric.h delete mode 100644 drivers/scsi/bfa/fcs_fcpim.h delete mode 100644 drivers/scsi/bfa/fcs_fcptm.h delete mode 100644 drivers/scsi/bfa/fcs_fcxp.h delete mode 100644 drivers/scsi/bfa/fcs_lport.h delete mode 100644 drivers/scsi/bfa/fcs_ms.h delete mode 100644 drivers/scsi/bfa/fcs_port.h delete mode 100644 drivers/scsi/bfa/fcs_rport.h delete mode 100644 drivers/scsi/bfa/fcs_trcmod.h delete mode 100644 drivers/scsi/bfa/fcs_uf.h delete mode 100644 drivers/scsi/bfa/fcs_vport.h delete mode 100644 drivers/scsi/bfa/fdmi.c delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_adapter.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_audit.h delete mode 100644 
drivers/scsi/bfa/include/aen/bfa_aen_ethport.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_ioc.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_itnim.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_lport.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_port.h delete mode 100644 drivers/scsi/bfa/include/aen/bfa_aen_rport.h delete mode 100644 drivers/scsi/bfa/include/bfa.h delete mode 100644 drivers/scsi/bfa/include/bfa_fcpim.h delete mode 100644 drivers/scsi/bfa/include/bfa_fcptm.h delete mode 100644 drivers/scsi/bfa/include/bfa_svc.h delete mode 100644 drivers/scsi/bfa/include/bfa_timer.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_boot.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_cbreg.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_cee.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_ctreg.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_fabric.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_fcpim.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_fcxp.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_ioc.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_iocfc.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_lport.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_lps.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_pbc.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_port.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_pport.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_rport.h delete mode 100644 drivers/scsi/bfa/include/bfi/bfi_uf.h delete mode 100644 drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h delete mode 100644 drivers/scsi/bfa/include/cna/cee/bfa_cee.h delete mode 100644 drivers/scsi/bfa/include/cna/port/bfa_port.h delete mode 100644 drivers/scsi/bfa/include/cna/pstats/ethport_defs.h delete mode 100644 drivers/scsi/bfa/include/cna/pstats/phyport_defs.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_checksum.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_debug.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_log.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_perf.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_plog.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_q.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_sm.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_trc.h delete mode 100644 drivers/scsi/bfa/include/cs/bfa_wc.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_adapter.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_aen.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_audit.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_auth.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_boot.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_cee.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_driver.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_ethport.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_fcport.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_ioc.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_itnim.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_led.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_lport.h delete mode 
100644 drivers/scsi/bfa/include/defs/bfa_defs_mfg.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_pci.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_pm.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_pom.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_port.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_pport.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_qos.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_rport.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_status.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_tin.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_types.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_version.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_vf.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_vport.h delete mode 100644 drivers/scsi/bfa/include/fcb/bfa_fcb.h delete mode 100644 drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h delete mode 100644 drivers/scsi/bfa/include/fcb/bfa_fcb_port.h delete mode 100644 drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h delete mode 100644 drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h delete mode 100644 drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h delete mode 100644 drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h delete mode 100644 drivers/scsi/bfa/include/log/bfa_log_fcs.h delete mode 100644 drivers/scsi/bfa/include/log/bfa_log_hal.h delete mode 100644 drivers/scsi/bfa/include/log/bfa_log_linux.h delete mode 100644 drivers/scsi/bfa/include/log/bfa_log_wdrv.h delete mode 100644 drivers/scsi/bfa/include/protocol/ct.h delete mode 100644 drivers/scsi/bfa/include/protocol/fc.h delete mode 100644 drivers/scsi/bfa/include/protocol/fc_sp.h delete mode 100644 drivers/scsi/bfa/include/protocol/fcp.h delete mode 100644 drivers/scsi/bfa/include/protocol/fdmi.h delete mode 100644 drivers/scsi/bfa/include/protocol/scsi.h delete mode 100644 drivers/scsi/bfa/include/protocol/types.h delete mode 100644 drivers/scsi/bfa/loop.c delete mode 100644 drivers/scsi/bfa/lport_api.c delete mode 100644 drivers/scsi/bfa/lport_priv.h delete mode 100644 drivers/scsi/bfa/ms.c delete mode 100644 drivers/scsi/bfa/n2n.c delete mode 100644 drivers/scsi/bfa/ns.c delete mode 100644 drivers/scsi/bfa/plog.c delete mode 100644 drivers/scsi/bfa/rport.c delete mode 100644 drivers/scsi/bfa/rport_api.c delete mode 100644 drivers/scsi/bfa/rport_ftrs.c delete mode 100644 drivers/scsi/bfa/scn.c delete mode 100644 drivers/scsi/bfa/vfapi.c delete mode 100644 drivers/scsi/bfa/vport.c (limited to 'drivers') diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile index ac3fdf02d5f6..d2eefd3e3bd5 100644 --- a/drivers/scsi/bfa/Makefile +++ b/drivers/scsi/bfa/Makefile @@ -1,15 +1,8 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o -bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o -bfa-y += bfad_debugfs.o -bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o -bfa-y += bfa_lps.o bfa_hw_cb.o 
bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o -bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o -bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o -bfa-y += bfa_csdebug.o bfa_sm.o plog.o +bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o +bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o +bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o +bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o -bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o -bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o -bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o - -ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD +ccflags-y := -DBFA_PERF_BUILD diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h new file mode 100644 index 000000000000..ceaac65a91ff --- /dev/null +++ b/drivers/scsi/bfa/bfa.h @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __BFA_H__ +#define __BFA_H__ + +#include "bfa_os_inc.h" +#include "bfa_cs.h" +#include "bfa_plog.h" +#include "bfa_defs_svc.h" +#include "bfi.h" +#include "bfa_ioc.h" + +struct bfa_s; + +typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); +typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); + +/** + * Interrupt message handlers + */ +void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); +void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); + +/** + * Request and response queue related defines + */ +#define BFA_REQQ_NELEMS_MIN (4) +#define BFA_RSPQ_NELEMS_MIN (4) + +#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq]) +#define bfa_reqq_ci(__bfa, __reqq) \ + (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)) + +#define bfa_reqq_full(__bfa, __reqq) \ + (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ + ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \ + bfa_reqq_ci(__bfa, __reqq)) + +#define bfa_reqq_next(__bfa, __reqq) \ + (bfa_reqq_full(__bfa, __reqq) ? 
NULL : \ + ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \ + + bfa_reqq_pi((__bfa), (__reqq))))) + +#define bfa_reqq_produce(__bfa, __reqq) do { \ + (__bfa)->iocfc.req_cq_pi[__reqq]++; \ + (__bfa)->iocfc.req_cq_pi[__reqq] &= \ + ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ + bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \ + (__bfa)->iocfc.req_cq_pi[__reqq]); \ + mmiowb(); \ + } while (0) + +#define bfa_rspq_pi(__bfa, __rspq) \ + (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)) + +#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq]) +#define bfa_rspq_elem(__bfa, __rspq, __ci) \ + (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]) + +#define CQ_INCR(__index, __size) do { \ + (__index)++; \ + (__index) &= ((__size) - 1); \ +} while (0) + +/** + * Queue element to wait for room in request queue. FIFO order is + * maintained when fullfilling requests. + */ +struct bfa_reqq_wait_s { + struct list_head qe; + void (*qresume) (void *cbarg); + void *cbarg; +}; + +/** + * Circular queue usage assignments + */ +enum { + BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */ + BFA_REQQ_FCXP = 0, /* all FCXP messages */ + BFA_REQQ_LPS = 0, /* all lport service msgs */ + BFA_REQQ_PORT = 0, /* all port messages */ + BFA_REQQ_FLASH = 0, /* for flash module */ + BFA_REQQ_DIAG = 0, /* for diag module */ + BFA_REQQ_RPORT = 0, /* all port messages */ + BFA_REQQ_SBOOT = 0, /* all san boot messages */ + BFA_REQQ_QOS_LO = 1, /* all low priority IO */ + BFA_REQQ_QOS_MD = 2, /* all medium priority IO */ + BFA_REQQ_QOS_HI = 3, /* all high priority IO */ +}; + +static inline void +bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), + void *cbarg) +{ + wqe->qresume = qresume; + wqe->cbarg = cbarg; +} + +#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) + +/** + * static inline void + * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe) + */ +#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \ + \ + struct list_head *waitq = bfa_reqq(__bfa, __reqq); \ + \ + bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \ + bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \ + \ + list_add_tail(&(__wqe)->qe, waitq); \ + } while (0) + +#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) + + +/** + * Generic BFA callback element. 
+ */ +struct bfa_cb_qe_s { + struct list_head qe; + bfa_cb_cbfn_t cbfn; + bfa_boolean_t once; + u32 rsvd; + void *cbarg; +}; + +#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ + (__hcb_qe)->cbfn = (__cbfn); \ + (__hcb_qe)->cbarg = (__cbarg); \ + list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ + } while (0) + +#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe) + +#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ + (__hcb_qe)->cbfn = (__cbfn); \ + (__hcb_qe)->cbarg = (__cbarg); \ + if (!(__hcb_qe)->once) { \ + list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ + (__hcb_qe)->once = BFA_TRUE; \ + } \ + } while (0) + +#define bfa_cb_queue_done(__hcb_qe) do { \ + (__hcb_qe)->once = BFA_FALSE; \ + } while (0) + + +/** + * PCI devices supported by the current BFA + */ +struct bfa_pciid_s { + u16 device_id; + u16 vendor_id; +}; + +extern char bfa_version[]; + +/** + * BFA memory resources + */ +enum bfa_mem_type { + BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */ + BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */ + BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA, +}; + +struct bfa_mem_elem_s { + enum bfa_mem_type mem_type; /* see enum bfa_mem_type */ + u32 mem_len; /* Total Length in Bytes */ + u8 *kva; /* kernel virtual address */ + u64 dma; /* dma address if DMA memory */ + u8 *kva_curp; /* kva allocation cursor */ + u64 dma_curp; /* dma allocation cursor */ +}; + +struct bfa_meminfo_s { + struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; +}; +#define bfa_meminfo_kva(_m) \ + ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp) +#define bfa_meminfo_dma_virt(_m) \ + ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp) +#define bfa_meminfo_dma_phys(_m) \ + ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) + +struct bfa_iocfc_regs_s { + bfa_os_addr_t intr_status; + bfa_os_addr_t intr_mask; + bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS]; + bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS]; + bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS]; + bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS]; + bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS]; + bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS]; + bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS]; + bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS]; +}; + +/** + * MSIX vector handlers + */ +#define BFA_MSIX_MAX_VECTORS 22 +typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec); +struct bfa_msix_s { + int nvecs; + bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS]; +}; + +/** + * Chip specific interfaces + */ +struct bfa_hwif_s { + void (*hw_reginit)(struct bfa_s *bfa); + void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); + void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); + void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); + void (*hw_msix_install)(struct bfa_s *bfa); + void (*hw_msix_uninstall)(struct bfa_s *bfa); + void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); + void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, + u32 *nvecs, u32 *maxvec); + void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start, + u32 *end); +}; +typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); + +struct bfa_iocfc_s { + struct bfa_s *bfa; + struct bfa_iocfc_cfg_s cfg; + int action; + u32 req_cq_pi[BFI_IOC_MAX_CQS]; + u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; + struct bfa_cb_qe_s init_hcb_qe; + struct bfa_cb_qe_s stop_hcb_qe; + struct bfa_cb_qe_s dis_hcb_qe; + struct bfa_cb_qe_s stats_hcb_qe; + bfa_boolean_t cfgdone; + + struct bfa_dma_s cfg_info; + struct bfi_iocfc_cfg_s *cfginfo; + struct bfa_dma_s cfgrsp_dma; + struct bfi_iocfc_cfgrsp_s *cfgrsp; + struct 
bfi_iocfc_cfg_reply_s *cfg_reply; + struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS]; + struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS]; + struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS]; + struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS]; + struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */ + struct bfa_hwif_s hwif; + bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */ + void *updateq_cbarg; /* bios callback arg */ + u32 intr_mask; +}; + +#define bfa_lpuid(__bfa) \ + bfa_ioc_portid(&(__bfa)->ioc) +#define bfa_msix_init(__bfa, __nvecs) \ + ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) +#define bfa_msix_install(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_install(__bfa)) +#define bfa_msix_uninstall(__bfa) \ + ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) +#define bfa_isr_mode_set(__bfa, __msix) \ + ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) +#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ + ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \ + __nvecs, __maxvec)) +#define bfa_msix_get_rme_range(__bfa, __start, __end) \ + ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end)) +#define bfa_msix(__bfa, __vec) \ + ((__bfa)->msix.handler[__vec](__bfa, __vec)) + +/* + * FC specific IOC functions. + */ +void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, + u32 *dm_len); +void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, + struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_pcidev_s *pcidev); +void bfa_iocfc_detach(struct bfa_s *bfa); +void bfa_iocfc_init(struct bfa_s *bfa); +void bfa_iocfc_start(struct bfa_s *bfa); +void bfa_iocfc_stop(struct bfa_s *bfa); +void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg); +void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa); +bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa); +void bfa_iocfc_reset_queues(struct bfa_s *bfa); + +void bfa_msix_all(struct bfa_s *bfa, int vec); +void bfa_msix_reqq(struct bfa_s *bfa, int vec); +void bfa_msix_rspq(struct bfa_s *bfa, int vec); +void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); + +void bfa_hwcb_reginit(struct bfa_s *bfa); +void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); +void bfa_hwcb_msix_install(struct bfa_s *bfa); +void bfa_hwcb_msix_uninstall(struct bfa_s *bfa); +void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); +void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, + u32 *maxvec); +void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, + u32 *end); +void bfa_hwct_reginit(struct bfa_s *bfa); +void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); +void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); +void bfa_hwct_msix_install(struct bfa_s *bfa); +void bfa_hwct_msix_uninstall(struct bfa_s *bfa); +void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); +void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, + u32 *maxvec); +void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, + u32 *end); +void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi); +void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); +wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa); +wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa); +void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, + struct bfa_boot_pbc_s *pbcfg); +int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, + 
struct bfi_pbc_vport_s *pbc_vport); + + +/** + *---------------------------------------------------------------------- + * BFA public interfaces + *---------------------------------------------------------------------- + */ +#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++) +#define bfa_ioc_get_stats(__bfa, __ioc_stats) \ + bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) +#define bfa_ioc_clear_stats(__bfa) \ + bfa_ioc_clr_stats(&(__bfa)->ioc) +#define bfa_get_nports(__bfa) \ + bfa_ioc_get_nports(&(__bfa)->ioc) +#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \ + bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer) +#define bfa_get_adapter_model(__bfa, __model) \ + bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model) +#define bfa_get_adapter_serial_num(__bfa, __serial_num) \ + bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num) +#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \ + bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver) +#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \ + bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver) +#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \ + bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev) +#define bfa_get_ioc_state(__bfa) \ + bfa_ioc_get_state(&(__bfa)->ioc) +#define bfa_get_type(__bfa) \ + bfa_ioc_get_type(&(__bfa)->ioc) +#define bfa_get_mac(__bfa) \ + bfa_ioc_get_mac(&(__bfa)->ioc) +#define bfa_get_mfg_mac(__bfa) \ + bfa_ioc_get_mfg_mac(&(__bfa)->ioc) +#define bfa_get_fw_clock_res(__bfa) \ + ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) + +void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); +void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); +void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); +void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo); +void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_pcidev_s *pcidev); +void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod); +void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog); +void bfa_detach(struct bfa_s *bfa); +void bfa_init(struct bfa_s *bfa); +void bfa_start(struct bfa_s *bfa); +void bfa_stop(struct bfa_s *bfa); +void bfa_attach_fcs(struct bfa_s *bfa); +void bfa_cb_init(void *bfad, bfa_status_t status); +void bfa_cb_updateq(void *bfad, bfa_status_t status); + +bfa_boolean_t bfa_intx(struct bfa_s *bfa); +void bfa_intx_disable(struct bfa_s *bfa); +void bfa_intx_enable(struct bfa_s *bfa); +void bfa_isr_enable(struct bfa_s *bfa); +void bfa_isr_disable(struct bfa_s *bfa); + +void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); +void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); +void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q); + +typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status); +void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr); +void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr); + +void bfa_adapter_get_attr(struct bfa_s *bfa, + struct bfa_adapter_attr_s *ad_attr); +u64 bfa_adapter_get_id(struct bfa_s *bfa); + +bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, + struct bfa_iocfc_intr_attr_s *attr); + +void bfa_iocfc_enable(struct bfa_s *bfa); +void bfa_iocfc_disable(struct bfa_s *bfa); +void bfa_chip_reset(struct bfa_s *bfa); +void bfa_timer_tick(struct bfa_s *bfa); +#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ + bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, 
_timeout) + +/* + * BFA debug API functions + */ +bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); +bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); +bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf, + u32 *offset, int *buflen); +void bfa_debug_fwsave_clear(struct bfa_s *bfa); +bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data); +bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa); + +#endif /* __BFA_H__ */ diff --git a/drivers/scsi/bfa/bfa_callback_priv.h b/drivers/scsi/bfa/bfa_callback_priv.h deleted file mode 100644 index 1e3265c9f7d4..000000000000 --- a/drivers/scsi/bfa/bfa_callback_priv.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_CALLBACK_PRIV_H__ -#define __BFA_CALLBACK_PRIV_H__ - -#include - -typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); - -/** - * Generic BFA callback element. - */ -struct bfa_cb_qe_s { - struct list_head qe; - bfa_cb_cbfn_t cbfn; - bfa_boolean_t once; - u32 rsvd; - void *cbarg; -}; - -#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ - (__hcb_qe)->cbfn = (__cbfn); \ - (__hcb_qe)->cbarg = (__cbarg); \ - list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \ -} while (0) - -#define bfa_cb_dequeue(__hcb_qe) list_del(&(__hcb_qe)->qe) - -#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do { \ - (__hcb_qe)->cbfn = (__cbfn); \ - (__hcb_qe)->cbarg = (__cbarg); \ - if (!(__hcb_qe)->once) { \ - list_add_tail((__hcb_qe), &(__bfa)->comp_q); \ - (__hcb_qe)->once = BFA_TRUE; \ - } \ -} while (0) - -#define bfa_cb_queue_done(__hcb_qe) do { \ - (__hcb_qe)->once = BFA_FALSE; \ -} while (0) - -#endif /* __BFA_CALLBACK_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h new file mode 100644 index 000000000000..a989a94c38da --- /dev/null +++ b/drivers/scsi/bfa/bfa_cb_ioim.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef __BFA_HCB_IOIM_H__ +#define __BFA_HCB_IOIM_H__ + +#include "bfa_os_inc.h" +/* + * task attribute values in FCP-2 FCP_CMND IU + */ +#define SIMPLE_Q 0 +#define HEAD_OF_Q 1 +#define ORDERED_Q 2 +#define ACA_Q 4 +#define UNTAGGED 5 + +static inline lun_t +bfad_int_to_lun(u32 luno) +{ + union { + u16 scsi_lun[4]; + lun_t bfa_lun; + } lun; + + lun.bfa_lun = 0; + lun.scsi_lun[0] = bfa_os_htons(luno); + + return lun.bfa_lun; +} + +/** + * Get LUN for the I/O request + */ +#define bfa_cb_ioim_get_lun(__dio) \ + bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun) + +/** + * Get CDB for the I/O request + */ +static inline u8 * +bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + + return (u8 *) cmnd->cmnd; +} + +/** + * Get I/O direction (read/write) for the I/O request + */ +static inline enum fcp_iodir +bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + enum dma_data_direction dmadir; + + dmadir = cmnd->sc_data_direction; + if (dmadir == DMA_TO_DEVICE) + return FCP_IODIR_WRITE; + else if (dmadir == DMA_FROM_DEVICE) + return FCP_IODIR_READ; + else + return FCP_IODIR_NONE; +} + +/** + * Get IO size in bytes for the I/O request + */ +static inline u32 +bfa_cb_ioim_get_size(struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + + return scsi_bufflen(cmnd); +} + +/** + * Get timeout for the I/O request + */ +static inline u8 +bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + /* + * TBD: need a timeout for scsi passthru + */ + if (cmnd->device->host == NULL) + return 4; + + return 0; +} + +/** + * Get Command Reference Number for the I/O request. 0 if none. + */ +static inline u8 +bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio) +{ + return 0; +} + +/** + * Get SAM-3 priority for the I/O request. 0 is default. + */ +static inline u8 +bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio) +{ + return 0; +} + +/** + * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0). + */ +static inline u8 +bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + u8 task_attr = UNTAGGED; + + if (cmnd->device->tagged_supported) { + switch (cmnd->tag) { + case HEAD_OF_QUEUE_TAG: + task_attr = HEAD_OF_Q; + break; + case ORDERED_QUEUE_TAG: + task_attr = ORDERED_Q; + break; + default: + task_attr = SIMPLE_Q; + break; + } + } + + return task_attr; +} + +/** + * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16). + */ +static inline u8 +bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio) +{ + struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; + + return cmnd->cmd_len; +} + +/** + * Assign queue to be used for the I/O request. This value depends on whether + * the driver wants to use the queues via any specific algorithm. Currently, + * this is not supported. + */ +#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE + +#endif /* __BFA_HCB_IOIM_H__ */ diff --git a/drivers/scsi/bfa/bfa_cb_ioim_macros.h b/drivers/scsi/bfa/bfa_cb_ioim_macros.h deleted file mode 100644 index 3906ed926966..000000000000 --- a/drivers/scsi/bfa/bfa_cb_ioim_macros.h +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_cb_ioim_macros.h BFA IOIM driver interface macros. - */ - -#ifndef __BFA_HCB_IOIM_MACROS_H__ -#define __BFA_HCB_IOIM_MACROS_H__ - -#include -/* - * #include - * - * #include #include #include - * #include - */ -#include "bfad_im_compat.h" - -/* - * task attribute values in FCP-2 FCP_CMND IU - */ -#define SIMPLE_Q 0 -#define HEAD_OF_Q 1 -#define ORDERED_Q 2 -#define ACA_Q 4 -#define UNTAGGED 5 - -static inline lun_t -bfad_int_to_lun(u32 luno) -{ - union { - u16 scsi_lun[4]; - lun_t bfa_lun; - } lun; - - lun.bfa_lun = 0; - lun.scsi_lun[0] = bfa_os_htons(luno); - - return lun.bfa_lun; -} - -/** - * Get LUN for the I/O request - */ -#define bfa_cb_ioim_get_lun(__dio) \ - bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun) - -/** - * Get CDB for the I/O request - */ -static inline u8 * -bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - - return (u8 *) cmnd->cmnd; -} - -/** - * Get I/O direction (read/write) for the I/O request - */ -static inline enum fcp_iodir -bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - enum dma_data_direction dmadir; - - dmadir = cmnd->sc_data_direction; - if (dmadir == DMA_TO_DEVICE) - return FCP_IODIR_WRITE; - else if (dmadir == DMA_FROM_DEVICE) - return FCP_IODIR_READ; - else - return FCP_IODIR_NONE; -} - -/** - * Get IO size in bytes for the I/O request - */ -static inline u32 -bfa_cb_ioim_get_size(struct bfad_ioim_s *dio) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - - return scsi_bufflen(cmnd); -} - -/** - * Get timeout for the I/O request - */ -static inline u8 -bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - /* - * TBD: need a timeout for scsi passthru - */ - if (cmnd->device->host == NULL) - return 4; - - return 0; -} - -/** - * Get Command Reference Number for the I/O request. 0 if none. - */ -static inline u8 -bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio) -{ - return 0; -} - -/** - * Get SAM-3 priority for the I/O request. 0 is default. - */ -static inline u8 -bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio) -{ - return 0; -} - -/** - * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0). - */ -static inline u8 -bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - u8 task_attr = UNTAGGED; - - if (cmnd->device->tagged_supported) { - switch (cmnd->tag) { - case HEAD_OF_QUEUE_TAG: - task_attr = HEAD_OF_Q; - break; - case ORDERED_QUEUE_TAG: - task_attr = ORDERED_Q; - break; - default: - task_attr = SIMPLE_Q; - break; - } - } - - return task_attr; -} - -/** - * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16). - */ -static inline u8 -bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio) -{ - struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; - - return cmnd->cmd_len; -} - -/** - * Assign queue to be used for the I/O request. This value depends on whether - * the driver wants to use the queues via any specific algorithm. 
Currently, - * this is not supported. - */ -#define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE - -#endif /* __BFA_HCB_IOIM_MACROS_H__ */ diff --git a/drivers/scsi/bfa/bfa_cee.c b/drivers/scsi/bfa/bfa_cee.c deleted file mode 100644 index 2b917792c6bc..000000000000 --- a/drivers/scsi/bfa/bfa_cee.c +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -BFA_TRC_FILE(CNA, CEE); - -#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) -#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc) - -static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg); -static void bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s - *dcbcx_stats); -static void bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s - *lldp_stats); -static void bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats); -static void bfa_cee_format_cee_cfg(void *buffer); -static void bfa_cee_format_cee_stats(void *buffer); - -static void -bfa_cee_format_cee_stats(void *buffer) -{ - struct bfa_cee_stats_s *cee_stats = buffer; - bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats); - bfa_cee_format_lldp_stats(&cee_stats->lldp_stats); - bfa_cee_format_cfg_stats(&cee_stats->cfg_stats); -} - -static void -bfa_cee_format_cee_cfg(void *buffer) -{ - struct bfa_cee_attr_s *cee_cfg = buffer; - bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote); -} - -static void -bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats) -{ - dcbcx_stats->subtlvs_unrecognized = - bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized); - dcbcx_stats->negotiation_failed = - bfa_os_ntohl(dcbcx_stats->negotiation_failed); - dcbcx_stats->remote_cfg_changed = - bfa_os_ntohl(dcbcx_stats->remote_cfg_changed); - dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received); - dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid); - dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno); - dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno); - dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno); - dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno); -} - -static void -bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats) -{ - lldp_stats->frames_transmitted = - bfa_os_ntohl(lldp_stats->frames_transmitted); - lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out); - lldp_stats->frames_discarded = - bfa_os_ntohl(lldp_stats->frames_discarded); - lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error); - lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd); - lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded); - lldp_stats->tlvs_unrecognized = - bfa_os_ntohl(lldp_stats->tlvs_unrecognized); -} - -static void -bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats) -{ - cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down); 
- cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up); - cfg_stats->cee_hw_cfg_changed = - bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed); - cfg_stats->recvd_invalid_cfg = - bfa_os_ntohl(cfg_stats->recvd_invalid_cfg); -} - -static void -bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg) -{ - lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval); - lldp_cfg->enabled_system_cap = - bfa_os_ntohs(lldp_cfg->enabled_system_cap); -} - -/** - * bfa_cee_attr_meminfo() - * - * - * @param[in] void - * - * @return Size of DMA region - */ -static u32 -bfa_cee_attr_meminfo(void) -{ - return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ); -} - -/** - * bfa_cee_stats_meminfo() - * - * - * @param[in] void - * - * @return Size of DMA region - */ -static u32 -bfa_cee_stats_meminfo(void) -{ - return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ); -} - -/** - * bfa_cee_get_attr_isr() - * - * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w - * - * @return void - */ -static void -bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status) -{ - cee->get_attr_status = status; - bfa_trc(cee, 0); - if (status == BFA_STATUS_OK) { - bfa_trc(cee, 0); - /* - * The requested data has been copied to the DMA area, *process - * it. - */ - memcpy(cee->attr, cee->attr_dma.kva, - sizeof(struct bfa_cee_attr_s)); - bfa_cee_format_cee_cfg(cee->attr); - } - cee->get_attr_pending = BFA_FALSE; - if (cee->cbfn.get_attr_cbfn) { - bfa_trc(cee, 0); - cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); - } - bfa_trc(cee, 0); -} - -/** - * bfa_cee_get_attr_isr() - * - * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w - * - * @return void - */ -static void -bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) -{ - cee->get_stats_status = status; - bfa_trc(cee, 0); - if (status == BFA_STATUS_OK) { - bfa_trc(cee, 0); - /* - * The requested data has been copied to the DMA area, process - * it. 
- */ - memcpy(cee->stats, cee->stats_dma.kva, - sizeof(struct bfa_cee_stats_s)); - bfa_cee_format_cee_stats(cee->stats); - } - cee->get_stats_pending = BFA_FALSE; - bfa_trc(cee, 0); - if (cee->cbfn.get_stats_cbfn) { - bfa_trc(cee, 0); - cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); - } - bfa_trc(cee, 0); -} - -/** - * bfa_cee_get_attr_isr() - * - * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w - * - * @return void - */ -static void -bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status) -{ - cee->reset_stats_status = status; - cee->reset_stats_pending = BFA_FALSE; - if (cee->cbfn.reset_stats_cbfn) - cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); -} - -/** - * bfa_cee_meminfo() - * - * - * @param[in] void - * - * @return Size of DMA region - */ -u32 -bfa_cee_meminfo(void) -{ - return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo(); -} - -/** - * bfa_cee_mem_claim() - * - * - * @param[in] cee CEE module pointer - * dma_kva Kernel Virtual Address of CEE DMA Memory - * dma_pa Physical Address of CEE DMA Memory - * - * @return void - */ -void -bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa) -{ - cee->attr_dma.kva = dma_kva; - cee->attr_dma.pa = dma_pa; - cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo(); - cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo(); - cee->attr = (struct bfa_cee_attr_s *)dma_kva; - cee->stats = - (struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo()); -} - -/** - * bfa_cee_get_attr() - * - * Send the request to the f/w to fetch CEE attributes. - * - * @param[in] Pointer to the CEE module data structure. - * - * @return Status - */ - -bfa_status_t -bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr, - bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) -{ - struct bfi_cee_get_req_s *cmd; - - bfa_assert((cee != NULL) && (cee->ioc != NULL)); - bfa_trc(cee, 0); - if (!bfa_ioc_is_operational(cee->ioc)) { - bfa_trc(cee, 0); - return BFA_STATUS_IOC_FAILURE; - } - if (cee->get_attr_pending == BFA_TRUE) { - bfa_trc(cee, 0); - return BFA_STATUS_DEVBUSY; - } - cee->get_attr_pending = BFA_TRUE; - cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg; - cee->attr = attr; - cee->cbfn.get_attr_cbfn = cbfn; - cee->cbfn.get_attr_cbarg = cbarg; - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, - bfa_ioc_portid(cee->ioc)); - bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); - bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb); - bfa_trc(cee, 0); - - return BFA_STATUS_OK; -} - -/** - * bfa_cee_get_stats() - * - * Send the request to the f/w to fetch CEE statistics. - * - * @param[in] Pointer to the CEE module data structure. 
- * - * @return Status - */ - -bfa_status_t -bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats, - bfa_cee_get_stats_cbfn_t cbfn, void *cbarg) -{ - struct bfi_cee_get_req_s *cmd; - - bfa_assert((cee != NULL) && (cee->ioc != NULL)); - - if (!bfa_ioc_is_operational(cee->ioc)) { - bfa_trc(cee, 0); - return BFA_STATUS_IOC_FAILURE; - } - if (cee->get_stats_pending == BFA_TRUE) { - bfa_trc(cee, 0); - return BFA_STATUS_DEVBUSY; - } - cee->get_stats_pending = BFA_TRUE; - cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg; - cee->stats = stats; - cee->cbfn.get_stats_cbfn = cbfn; - cee->cbfn.get_stats_cbarg = cbarg; - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ, - bfa_ioc_portid(cee->ioc)); - bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa); - bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb); - bfa_trc(cee, 0); - - return BFA_STATUS_OK; -} - -/** - * bfa_cee_reset_stats() - * - * - * @param[in] Pointer to the CEE module data structure. - * - * @return Status - */ - -bfa_status_t -bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn, - void *cbarg) -{ - struct bfi_cee_reset_stats_s *cmd; - - bfa_assert((cee != NULL) && (cee->ioc != NULL)); - if (!bfa_ioc_is_operational(cee->ioc)) { - bfa_trc(cee, 0); - return BFA_STATUS_IOC_FAILURE; - } - if (cee->reset_stats_pending == BFA_TRUE) { - bfa_trc(cee, 0); - return BFA_STATUS_DEVBUSY; - } - cee->reset_stats_pending = BFA_TRUE; - cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg; - cee->cbfn.reset_stats_cbfn = cbfn; - cee->cbfn.reset_stats_cbarg = cbarg; - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS, - bfa_ioc_portid(cee->ioc)); - bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb); - bfa_trc(cee, 0); - return BFA_STATUS_OK; -} - -/** - * bfa_cee_isrs() - * - * - * @param[in] Pointer to the CEE module data structure. - * - * @return void - */ - -void -bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m) -{ - union bfi_cee_i2h_msg_u *msg; - struct bfi_cee_get_rsp_s *get_rsp; - struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg; - msg = (union bfi_cee_i2h_msg_u *)m; - get_rsp = (struct bfi_cee_get_rsp_s *)m; - bfa_trc(cee, msg->mh.msg_id); - switch (msg->mh.msg_id) { - case BFI_CEE_I2H_GET_CFG_RSP: - bfa_trc(cee, get_rsp->cmd_status); - bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); - break; - case BFI_CEE_I2H_GET_STATS_RSP: - bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); - break; - case BFI_CEE_I2H_RESET_STATS_RSP: - bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); - break; - default: - bfa_assert(0); - } -} - -/** - * bfa_cee_hbfail() - * - * - * @param[in] Pointer to the CEE module data structure. 
- * - * @return void - */ - -void -bfa_cee_hbfail(void *arg) -{ - struct bfa_cee_s *cee; - cee = (struct bfa_cee_s *)arg; - - if (cee->get_attr_pending == BFA_TRUE) { - cee->get_attr_status = BFA_STATUS_FAILED; - cee->get_attr_pending = BFA_FALSE; - if (cee->cbfn.get_attr_cbfn) { - cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, - BFA_STATUS_FAILED); - } - } - if (cee->get_stats_pending == BFA_TRUE) { - cee->get_stats_status = BFA_STATUS_FAILED; - cee->get_stats_pending = BFA_FALSE; - if (cee->cbfn.get_stats_cbfn) { - cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, - BFA_STATUS_FAILED); - } - } - if (cee->reset_stats_pending == BFA_TRUE) { - cee->reset_stats_status = BFA_STATUS_FAILED; - cee->reset_stats_pending = BFA_FALSE; - if (cee->cbfn.reset_stats_cbfn) { - cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, - BFA_STATUS_FAILED); - } - } -} - -/** - * bfa_cee_attach() - * - * - * @param[in] cee - Pointer to the CEE module data structure - * ioc - Pointer to the ioc module data structure - * dev - Pointer to the device driver module data structure - * The device driver specific mbox ISR functions have - * this pointer as one of the parameters. - * trcmod - - * logmod - - * - * @return void - */ -void -bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev, - struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod) -{ - bfa_assert(cee != NULL); - cee->dev = dev; - cee->trcmod = trcmod; - cee->logmod = logmod; - cee->ioc = ioc; - - bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); - bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee); - bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail); - bfa_trc(cee, 0); -} - -/** - * bfa_cee_detach() - * - * - * @param[in] cee - Pointer to the CEE module data structure - * - * @return void - */ -void -bfa_cee_detach(struct bfa_cee_s *cee) -{ - /* - * For now, just check if there is some ioctl pending and mark that as - * failed? - */ - /* bfa_cee_hbfail(cee); */ -} diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 76fa5c5b40dd..c2fa07f2485d 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,27 +15,992 @@ * General Public License for more details. 
*/ -#include -#include -#include -#include +#include "bfa_modules.h" +#include "bfi_ctreg.h" +#include "bfad_drv.h" -#define DEF_CFG_NUM_FABRICS 1 -#define DEF_CFG_NUM_LPORTS 256 -#define DEF_CFG_NUM_CQS 4 -#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) -#define DEF_CFG_NUM_TSKIM_REQS 128 -#define DEF_CFG_NUM_FCXP_REQS 64 -#define DEF_CFG_NUM_UF_BUFS 64 -#define DEF_CFG_NUM_RPORTS 1024 -#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) -#define DEF_CFG_NUM_TINS 256 +BFA_TRC_FILE(HAL, CORE); -#define DEF_CFG_NUM_SGPGS 2048 -#define DEF_CFG_NUM_REQQ_ELEMS 256 -#define DEF_CFG_NUM_RSPQ_ELEMS 64 -#define DEF_CFG_NUM_SBOOT_TGTS 16 -#define DEF_CFG_NUM_SBOOT_LUNS 16 +/** + * BFA IOC FC related definitions + */ + +/** + * IOC local definitions + */ +#define BFA_IOCFC_TOV 5000 /* msecs */ + +enum { + BFA_IOCFC_ACT_NONE = 0, + BFA_IOCFC_ACT_INIT = 1, + BFA_IOCFC_ACT_STOP = 2, + BFA_IOCFC_ACT_DISABLE = 3, +}; + +#define DEF_CFG_NUM_FABRICS 1 +#define DEF_CFG_NUM_LPORTS 256 +#define DEF_CFG_NUM_CQS 4 +#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX) +#define DEF_CFG_NUM_TSKIM_REQS 128 +#define DEF_CFG_NUM_FCXP_REQS 64 +#define DEF_CFG_NUM_UF_BUFS 64 +#define DEF_CFG_NUM_RPORTS 1024 +#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS) +#define DEF_CFG_NUM_TINS 256 + +#define DEF_CFG_NUM_SGPGS 2048 +#define DEF_CFG_NUM_REQQ_ELEMS 256 +#define DEF_CFG_NUM_RSPQ_ELEMS 64 +#define DEF_CFG_NUM_SBOOT_TGTS 16 +#define DEF_CFG_NUM_SBOOT_LUNS 16 + +/** + * forward declaration for IOC FC functions + */ +static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); +static void bfa_iocfc_disable_cbfn(void *bfa_arg); +static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); +static void bfa_iocfc_reset_cbfn(void *bfa_arg); +static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; + +/** + * BFA Interrupt handling functions + */ +static void +bfa_msix_errint(struct bfa_s *bfa, u32 intr) +{ + bfa_ioc_error_isr(&bfa->ioc); +} + +static void +bfa_msix_lpu(struct bfa_s *bfa) +{ + bfa_ioc_mbox_isr(&bfa->ioc); +} + +static void +bfa_reqq_resume(struct bfa_s *bfa, int qid) +{ + struct list_head *waitq, *qe, *qen; + struct bfa_reqq_wait_s *wqe; + + waitq = bfa_reqq(bfa, qid); + list_for_each_safe(qe, qen, waitq) { + /** + * Callback only as long as there is room in request queue + */ + if (bfa_reqq_full(bfa, qid)) + break; + + list_del(qe); + wqe = (struct bfa_reqq_wait_s *) qe; + wqe->qresume(wqe->cbarg); + } +} + +void +bfa_msix_all(struct bfa_s *bfa, int vec) +{ + bfa_intx(bfa); +} + +/** + * hal_intr_api + */ +bfa_boolean_t +bfa_intx(struct bfa_s *bfa) +{ + u32 intr, qintr; + int queue; + + intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); + if (!intr) + return BFA_FALSE; + + /** + * RME completion queue interrupt + */ + qintr = intr & __HFN_INT_RME_MASK; + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); + + for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { + if (intr & (__HFN_INT_RME_Q0 << queue)) + bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); + } + intr &= ~qintr; + if (!intr) + return BFA_TRUE; + + /** + * CPE completion queue interrupt + */ + qintr = intr & __HFN_INT_CPE_MASK; + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); + + for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { + if (intr & (__HFN_INT_CPE_Q0 << queue)) + bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); + } + intr &= ~qintr; + if (!intr) + return BFA_TRUE; + + bfa_msix_lpu_err(bfa, intr); + + return BFA_TRUE; +} + +void +bfa_intx_enable(struct bfa_s *bfa) +{ + bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask); 
+} + +void +bfa_intx_disable(struct bfa_s *bfa) +{ + bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); +} + +void +bfa_isr_enable(struct bfa_s *bfa) +{ + u32 intr_unmask; + int pci_func = bfa_ioc_pcifn(&bfa->ioc); + + bfa_trc(bfa, pci_func); + + bfa_msix_install(bfa); + intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | + __HFN_INT_LL_HALT); + + if (pci_func == 0) + intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | + __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | + __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | + __HFN_INT_MBOX_LPU0); + else + intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | + __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | + __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | + __HFN_INT_MBOX_LPU1); + + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); + bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); + bfa->iocfc.intr_mask = ~intr_unmask; + bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); +} + +void +bfa_isr_disable(struct bfa_s *bfa) +{ + bfa_isr_mode_set(bfa, BFA_FALSE); + bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); + bfa_msix_uninstall(bfa); +} + +void +bfa_msix_reqq(struct bfa_s *bfa, int qid) +{ + struct list_head *waitq; + + qid &= (BFI_IOC_MAX_CQS - 1); + + bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); + + /** + * Resume any pending requests in the corresponding reqq. + */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); +} + +void +bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + bfa_trc(bfa, m->mhdr.msg_class); + bfa_trc(bfa, m->mhdr.msg_id); + bfa_trc(bfa, m->mhdr.mtag.i2htok); + bfa_assert(0); + bfa_trc_stop(bfa->trcmod); +} + +void +bfa_msix_rspq(struct bfa_s *bfa, int qid) +{ + struct bfi_msg_s *m; + u32 pi, ci; + struct list_head *waitq; + + bfa_trc_fp(bfa, qid); + + qid &= (BFI_IOC_MAX_CQS - 1); + + bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); + + ci = bfa_rspq_ci(bfa, qid); + pi = bfa_rspq_pi(bfa, qid); + + bfa_trc_fp(bfa, ci); + bfa_trc_fp(bfa, pi); + + if (bfa->rme_process) { + while (ci != pi) { + m = bfa_rspq_elem(bfa, qid, ci); + bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); + + bfa_isrs[m->mhdr.msg_class] (bfa, m); + + CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); + } + } + + /** + * update CI + */ + bfa_rspq_ci(bfa, qid) = pi; + bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); + mmiowb(); + + /** + * Resume any pending requests in the corresponding reqq. + */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); +} + +void +bfa_msix_lpu_err(struct bfa_s *bfa, int vec) +{ + u32 intr, curr_value; + + intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); + + if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) + bfa_msix_lpu(bfa); + + intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); + + if (intr) { + if (intr & __HFN_INT_LL_HALT) { + /** + * If LL_HALT bit is set then FW Init Halt LL Port + * Register needs to be cleared as well so Interrupt + * Status Register will be cleared. + */ + curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); + curr_value &= ~__FW_INIT_HALT_P; + bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); + } + + if (intr & __HFN_INT_ERR_PSS) { + /** + * ERR_PSS bit needs to be cleared as well in case + * interrups are shared so driver's interrupt handler is + * still called eventhough it is already masked out. 
+ */ + curr_value = bfa_reg_read( + bfa->ioc.ioc_regs.pss_err_status_reg); + curr_value &= __PSS_ERR_STATUS_SET; + bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, + curr_value); + } + + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); + bfa_msix_errint(bfa, intr); + } +} + +void +bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func) +{ + bfa_isrs[mc] = isr_func; +} + +/** + * BFA IOC FC related functions + */ + +/** + * hal_ioc_pvt BFA IOC private functions + */ + +static void +bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) +{ + int i, per_reqq_sz, per_rspq_sz; + + per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + + /* + * Calculate CQ size + */ + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + *dm_len = *dm_len + per_reqq_sz; + *dm_len = *dm_len + per_rspq_sz; + } + + /* + * Calculate Shadow CI/PI size + */ + for (i = 0; i < cfg->fwcfg.num_cqs; i++) + *dm_len += (2 * BFA_CACHELINE_SZ); +} + +static void +bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) +{ + *dm_len += + BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + *dm_len += + BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); +} + +/** + * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ + */ +static void +bfa_iocfc_send_cfg(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfg_req_s cfg_req; + struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; + struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; + int i; + + bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); + bfa_trc(bfa, cfg->fwcfg.num_cqs); + + bfa_iocfc_reset_queues(bfa); + + /** + * initialize IOC configuration info + */ + cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; + cfg_info->num_cqs = cfg->fwcfg.num_cqs; + + bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); + /** + * dma map REQ and RSP circular queues and shadow pointers + */ + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], + iocfc->req_cq_ba[i].pa); + bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], + iocfc->req_cq_shadow_ci[i].pa); + cfg_info->req_cq_elems[i] = + bfa_os_htons(cfg->drvcfg.num_reqq_elems); + + bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], + iocfc->rsp_cq_ba[i].pa); + bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], + iocfc->rsp_cq_shadow_pi[i].pa); + cfg_info->rsp_cq_elems[i] = + bfa_os_htons(cfg->drvcfg.num_rspq_elems); + } + + /** + * Enable interrupt coalescing if it is driver init path + * and not ioc disable/enable path. + */ + if (!iocfc->cfgdone) + cfg_info->intr_attr.coalesce = BFA_TRUE; + + iocfc->cfgdone = BFA_FALSE; + + /** + * dma map IOC configuration itself + */ + bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, + bfa_lpuid(bfa)); + bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); + + bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, + sizeof(struct bfi_iocfc_cfg_req_s)); +} + +static void +bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_pcidev_s *pcidev) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + + bfa->bfad = bfad; + iocfc->bfa = bfa; + iocfc->action = BFA_IOCFC_ACT_NONE; + + bfa_os_assign(iocfc->cfg, *cfg); + + /** + * Initialize chip specific handlers. 
+ */ + if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { + iocfc->hwif.hw_reginit = bfa_hwct_reginit; + iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; + iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; + iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; + iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; + iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; + iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; + iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; + iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; + } else { + iocfc->hwif.hw_reginit = bfa_hwcb_reginit; + iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; + iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; + iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; + iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; + iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; + iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; + iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; + iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; + } + + iocfc->hwif.hw_reginit(bfa); + bfa->msix.nvecs = 0; +} + +static void +bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo) +{ + u8 *dm_kva; + u64 dm_pa; + int i, per_reqq_sz, per_rspq_sz; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + int dbgsz; + + dm_kva = bfa_meminfo_dma_virt(meminfo); + dm_pa = bfa_meminfo_dma_phys(meminfo); + + /* + * First allocate dma memory for IOC. + */ + bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); + dm_kva += bfa_ioc_meminfo(); + dm_pa += bfa_ioc_meminfo(); + + /* + * Claim DMA-able memory for the request/response queues and for shadow + * ci/pi registers + */ + per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), + BFA_DMA_ALIGN_SZ); + + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + iocfc->req_cq_ba[i].kva = dm_kva; + iocfc->req_cq_ba[i].pa = dm_pa; + bfa_os_memset(dm_kva, 0, per_reqq_sz); + dm_kva += per_reqq_sz; + dm_pa += per_reqq_sz; + + iocfc->rsp_cq_ba[i].kva = dm_kva; + iocfc->rsp_cq_ba[i].pa = dm_pa; + bfa_os_memset(dm_kva, 0, per_rspq_sz); + dm_kva += per_rspq_sz; + dm_pa += per_rspq_sz; + } + + for (i = 0; i < cfg->fwcfg.num_cqs; i++) { + iocfc->req_cq_shadow_ci[i].kva = dm_kva; + iocfc->req_cq_shadow_ci[i].pa = dm_pa; + dm_kva += BFA_CACHELINE_SZ; + dm_pa += BFA_CACHELINE_SZ; + + iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; + iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; + dm_kva += BFA_CACHELINE_SZ; + dm_pa += BFA_CACHELINE_SZ; + } + + /* + * Claim DMA-able memory for the config info page + */ + bfa->iocfc.cfg_info.kva = dm_kva; + bfa->iocfc.cfg_info.pa = dm_pa; + bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; + dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); + + /* + * Claim DMA-able memory for the config response + */ + bfa->iocfc.cfgrsp_dma.kva = dm_kva; + bfa->iocfc.cfgrsp_dma.pa = dm_pa; + bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; + + dm_kva += + BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); + dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), + BFA_CACHELINE_SZ); + + + bfa_meminfo_dma_virt(meminfo) = dm_kva; + bfa_meminfo_dma_phys(meminfo) = dm_pa; + + dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover); + if (dbgsz > 0) { + bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); + bfa_meminfo_kva(meminfo) += dbgsz; + } +} + +/** + * Start 
BFA submodules. + */ +static void +bfa_iocfc_start_submod(struct bfa_s *bfa) +{ + int i; + + bfa->rme_process = BFA_TRUE; + + for (i = 0; hal_mods[i]; i++) + hal_mods[i]->start(bfa); +} + +/** + * Disable BFA submodules. + */ +static void +bfa_iocfc_disable_submod(struct bfa_s *bfa) +{ + int i; + + for (i = 0; hal_mods[i]; i++) + hal_mods[i]->iocdisable(bfa); +} + +static void +bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) +{ + struct bfa_s *bfa = bfa_arg; + + if (complete) { + if (bfa->iocfc.cfgdone) + bfa_cb_init(bfa->bfad, BFA_STATUS_OK); + else + bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); + } else { + if (bfa->iocfc.cfgdone) + bfa->iocfc.action = BFA_IOCFC_ACT_NONE; + } +} + +static void +bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) +{ + struct bfa_s *bfa = bfa_arg; + struct bfad_s *bfad = bfa->bfad; + + if (compl) + complete(&bfad->comp); + else + bfa->iocfc.action = BFA_IOCFC_ACT_NONE; +} + +static void +bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) +{ + struct bfa_s *bfa = bfa_arg; + struct bfad_s *bfad = bfa->bfad; + + if (compl) + complete(&bfad->disable_comp); +} + +/** + * Update BFA configuration from firmware configuration. + */ +static void +bfa_iocfc_cfgrsp(struct bfa_s *bfa) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; + + fwcfg->num_cqs = fwcfg->num_cqs; + fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); + fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); + fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); + fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); + fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); + + iocfc->cfgdone = BFA_TRUE; + + /** + * Configuration is complete - initialize/start submodules + */ + bfa_fcport_init(bfa); + + if (iocfc->action == BFA_IOCFC_ACT_INIT) + bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); + else + bfa_iocfc_start_submod(bfa); +} +void +bfa_iocfc_reset_queues(struct bfa_s *bfa) +{ + int q; + + for (q = 0; q < BFI_IOC_MAX_CQS; q++) { + bfa_reqq_ci(bfa, q) = 0; + bfa_reqq_pi(bfa, q) = 0; + bfa_rspq_ci(bfa, q) = 0; + bfa_rspq_pi(bfa, q) = 0; + } +} + +/** + * IOC enable request is complete + */ +static void +bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) +{ + struct bfa_s *bfa = bfa_arg; + + if (status != BFA_STATUS_OK) { + bfa_isr_disable(bfa); + if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) + bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, + bfa_iocfc_init_cb, bfa); + return; + } + + bfa_iocfc_send_cfg(bfa); +} + +/** + * IOC disable request is complete + */ +static void +bfa_iocfc_disable_cbfn(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + + bfa_isr_disable(bfa); + bfa_iocfc_disable_submod(bfa); + + if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) + bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, + bfa); + else { + bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE); + bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, + bfa); + } +} + +/** + * Notify sub-modules of hardware failure. + */ +static void +bfa_iocfc_hbfail_cbfn(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + + bfa->rme_process = BFA_FALSE; + + bfa_isr_disable(bfa); + bfa_iocfc_disable_submod(bfa); + + if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) + bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, + bfa); +} + +/** + * Actions on chip-reset completion. 
+ */ +static void +bfa_iocfc_reset_cbfn(void *bfa_arg) +{ + struct bfa_s *bfa = bfa_arg; + + bfa_iocfc_reset_queues(bfa); + bfa_isr_enable(bfa); +} + +/** + * hal_ioc_public + */ + +/** + * Query IOC memory requirement information. + */ +void +bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, + u32 *dm_len) +{ + /* dma memory for IOC */ + *dm_len += bfa_ioc_meminfo(); + + bfa_iocfc_fw_cfg_sz(cfg, dm_len); + bfa_iocfc_cqs_sz(cfg, dm_len); + *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); +} + +/** + * Query IOC memory requirement information. + */ +void +bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + int i; + struct bfa_ioc_s *ioc = &bfa->ioc; + + bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; + bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; + bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; + bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; + + ioc->trcmod = bfa->trcmod; + bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); + + /** + * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. + */ + if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) + bfa_ioc_set_fcmode(&bfa->ioc); + + bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); + bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); + + bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); + bfa_iocfc_mem_claim(bfa, cfg, meminfo); + bfa_timer_init(&bfa->timer_mod); + + INIT_LIST_HEAD(&bfa->comp_q); + for (i = 0; i < BFI_IOC_MAX_CQS; i++) + INIT_LIST_HEAD(&bfa->reqq_waitq[i]); +} + +/** + * Query IOC memory requirement information. + */ +void +bfa_iocfc_detach(struct bfa_s *bfa) +{ + bfa_ioc_detach(&bfa->ioc); +} + +/** + * Query IOC memory requirement information. + */ +void +bfa_iocfc_init(struct bfa_s *bfa) +{ + bfa->iocfc.action = BFA_IOCFC_ACT_INIT; + bfa_ioc_enable(&bfa->ioc); +} + +/** + * IOC start called from bfa_start(). Called to start IOC operations + * at driver instantiation for this instance. + */ +void +bfa_iocfc_start(struct bfa_s *bfa) +{ + if (bfa->iocfc.cfgdone) + bfa_iocfc_start_submod(bfa); +} + +/** + * IOC stop called from bfa_stop(). Called only when driver is unloaded + * for this instance. + */ +void +bfa_iocfc_stop(struct bfa_s *bfa) +{ + bfa->iocfc.action = BFA_IOCFC_ACT_STOP; + + bfa->rme_process = BFA_FALSE; + bfa_ioc_disable(&bfa->ioc); +} + +void +bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) +{ + struct bfa_s *bfa = bfaarg; + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + union bfi_iocfc_i2h_msg_u *msg; + + msg = (union bfi_iocfc_i2h_msg_u *) m; + bfa_trc(bfa, msg->mh.msg_id); + + switch (msg->mh.msg_id) { + case BFI_IOCFC_I2H_CFG_REPLY: + iocfc->cfg_reply = &msg->cfg_reply; + bfa_iocfc_cfgrsp(bfa); + break; + case BFI_IOCFC_I2H_UPDATEQ_RSP: + iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); + break; + default: + bfa_assert(0); + } +} + +void +bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr) +{ + bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr); +} + +u64 +bfa_adapter_get_id(struct bfa_s *bfa) +{ + return bfa_ioc_get_adid(&bfa->ioc); +} + +void +bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + + attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; + + attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? + bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) : + bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay); + + attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 
+ bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) : + bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency); + + attr->config = iocfc->cfg; +} + +bfa_status_t +bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_set_intr_req_s *m; + + iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; + iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay); + iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency); + + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_OK; + + m = bfa_reqq_next(bfa, BFA_REQQ_IOC); + if (!m) + return BFA_STATUS_DEVBUSY; + + bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, + bfa_lpuid(bfa)); + m->coalesce = iocfc->cfginfo->intr_attr.coalesce; + m->delay = iocfc->cfginfo->intr_attr.delay; + m->latency = iocfc->cfginfo->intr_attr.latency; + + bfa_trc(bfa, attr->delay); + bfa_trc(bfa, attr->latency); + + bfa_reqq_produce(bfa, BFA_REQQ_IOC); + return BFA_STATUS_OK; +} + +void +bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + + iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); + bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); +} +/** + * Enable IOC after it is disabled. + */ +void +bfa_iocfc_enable(struct bfa_s *bfa) +{ + bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, + "IOC Enable"); + bfa_ioc_enable(&bfa->ioc); +} + +void +bfa_iocfc_disable(struct bfa_s *bfa) +{ + bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, + "IOC Disable"); + bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; + + bfa->rme_process = BFA_FALSE; + bfa_ioc_disable(&bfa->ioc); +} + + +bfa_boolean_t +bfa_iocfc_is_operational(struct bfa_s *bfa) +{ + return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; +} + +/** + * Return boot target port wwns -- read from boot information in flash. + */ +void +bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + int i; + + if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { + bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); + *nwwns = cfgrsp->pbc_cfg.nbluns; + for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) + wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; + + return; + } + + *nwwns = cfgrsp->bootwwns.nwwns; + memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); +} + +void +bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; + pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; + pbcfg->speed = cfgrsp->pbc_cfg.port_speed; + memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); +} + +int +bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) +{ + struct bfa_iocfc_s *iocfc = &bfa->iocfc; + struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; + + memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); + return cfgrsp->pbc_cfg.nvports; +} + +/** + * hal_api + */ /** * Use this function query the memory requirement of the BFA library. @@ -45,16 +1010,16 @@ * This call will fail, if the cap is out of range compared to pre-defined * values within the BFA library * - * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate - * its configuration in this structure. + * @param[in] cfg - pointer to bfa_ioc_cfg_t. 
Driver layer should indicate + * its configuration in this structure. * The default values for struct bfa_iocfc_cfg_s can be * fetched using bfa_cfg_get_default() API. * - * If cap's boundary check fails, the library will use + * If cap's boundary check fails, the library will use * the default bfa_cap_t values (and log a warning msg). * * @param[out] meminfo - pointer to bfa_meminfo_t. This content - * indicates the memory type (see bfa_mem_type_t) and + * indicates the memory type (see bfa_mem_type_t) and * amount of memory required. * * Driver should allocate the memory, populate the @@ -68,8 +1033,8 @@ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) { - int i; - u32 km_len = 0, dm_len = 0; + int i; + u32 km_len = 0, dm_len = 0; bfa_assert((cfg != NULL) && (meminfo != NULL)); @@ -90,26 +1055,6 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; } -static void -bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) -{ - struct bfa_port_s *port = &bfa->modules.port; - uint32_t dm_len; - uint8_t *dm_kva; - uint64_t dm_pa; - - dm_len = bfa_port_meminfo(); - dm_kva = bfa_meminfo_dma_virt(mi); - dm_pa = bfa_meminfo_dma_phys(mi); - - memset(port, 0, sizeof(struct bfa_port_s)); - bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm); - bfa_port_mem_claim(port, dm_kva, dm_pa); - - bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; - bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; -} - /** * Use this function to do attach the driver instance with the BFA * library. This function will not trigger any HW initialization @@ -119,14 +1064,14 @@ bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) * pre-defined values within the BFA library * * @param[out] bfa Pointer to bfa_t. - * @param[in] bfad Opaque handle back to the driver's IOC structure + * @param[in] bfad Opaque handle back to the driver's IOC structure * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure - * that was used in bfa_cfg_get_meminfo(). - * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should - * use the bfa_cfg_get_meminfo() call to - * find the memory blocks required, allocate the - * required memory and provide the starting addresses. - * @param[in] pcidev pointer to struct bfa_pcidev_s + * that was used in bfa_cfg_get_meminfo(). + * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should + * use the bfa_cfg_get_meminfo() call to + * find the memory blocks required, allocate the + * required memory and provide the starting addresses. + * @param[in] pcidev pointer to struct bfa_pcidev_s * * @return * void @@ -140,8 +1085,8 @@ void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { - int i; - struct bfa_mem_elem_s *melem; + int i; + struct bfa_mem_elem_s *melem; bfa->fcs = BFA_FALSE; @@ -195,20 +1140,6 @@ bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod) bfa->trcmod = trcmod; } - -void -bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod) -{ - bfa->logm = logmod; -} - - -void -bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen) -{ - bfa->aen = aen; -} - void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) { @@ -254,14 +1185,14 @@ bfa_start(struct bfa_s *bfa) /** * Use this function quiese the IOC. This function will return immediately, - * when the IOC is actually stopped, the bfa_cb_stop() will be called. 
+ * when the IOC is actually stopped, the bfad->comp will be set. * - * @param[in] bfa - pointer to bfa_t. + * @param[in]bfa - pointer to bfa_t. * * @return None * * Special Considerations: - * bfa_cb_stop() could be called before or after bfa_stop() returns. + * bfad->comp can be set before or after bfa_stop() returns. * * @note * In case of any failure, we could handle it automatically by doing a @@ -283,9 +1214,9 @@ bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q) void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) { - struct list_head *qe; - struct list_head *qen; - struct bfa_cb_qe_s *hcb_qe; + struct list_head *qe; + struct list_head *qen; + struct bfa_cb_qe_s *hcb_qe; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; @@ -296,8 +1227,8 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q) void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) { - struct list_head *qe; - struct bfa_cb_qe_s *hcb_qe; + struct list_head *qe; + struct bfa_cb_qe_s *hcb_qe; while (!list_empty(comp_q)) { bfa_q_deq(comp_q, &qe); @@ -321,7 +1252,6 @@ bfa_timer_tick(struct bfa_s *bfa) bfa_timer_beat(&bfa->timer_mod); } -#ifndef BFA_BIOS_BUILD /** * Return the list of PCI vendor/device id lists supported by this * BFA instance. @@ -336,7 +1266,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC}, }; - *npciids = ARRAY_SIZE(__pciids); + *npciids = sizeof(__pciids) / sizeof(__pciids[0]); *pciids = __pciids; } @@ -351,7 +1281,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) * void * * Special Considerations: - * note + * note */ void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg) @@ -389,7 +1319,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN; cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; - cfg->drvcfg.min_cfg = BFA_TRUE; + cfg->drvcfg.min_cfg = BFA_TRUE; } void @@ -417,7 +1347,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa) } /** - * Fetch firmware trace data. + * Fetch firmware trace data. * * @param[in] bfa BFA instance * @param[out] trcdata Firmware trace buffer @@ -432,6 +1362,22 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen) return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); } +/** + * Dump firmware memory. + * + * @param[in] bfa BFA instance + * @param[out] buf buffer for dump + * @param[in,out] offset smem offset to start read + * @param[in,out] buflen length of buffer + * + * @retval BFA_STATUS_OK Firmware memory is dumped. + * @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress. + */ +bfa_status_t +bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen) +{ + return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); +} /** * Reset hw semaphore & usage cnt regs and initialize. */ @@ -441,4 +1387,23 @@ bfa_chip_reset(struct bfa_s *bfa) bfa_ioc_ownership_reset(&bfa->ioc); bfa_ioc_pll_init(&bfa->ioc); } -#endif + +/** + * Fetch firmware statistics data. + * + * @param[in] bfa BFA instance + * @param[out] data Firmware stats buffer + * + * @retval BFA_STATUS_OK Firmware trace is fetched. 
+ */ +bfa_status_t +bfa_fw_stats_get(struct bfa_s *bfa, void *data) +{ + return bfa_ioc_fw_stats_get(&bfa->ioc, data); +} + +bfa_status_t +bfa_fw_stats_clear(struct bfa_s *bfa) +{ + return bfa_ioc_fw_stats_clear(&bfa->ioc); +} diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h new file mode 100644 index 000000000000..7260c74620f8 --- /dev/null +++ b/drivers/scsi/bfa/bfa_cs.h @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/** + * bfa_cs.h BFA common services + */ + +#ifndef __BFA_CS_H__ +#define __BFA_CS_H__ + +#include "bfa_os_inc.h" + +/** + * BFA TRC + */ + +#ifndef BFA_TRC_MAX +#define BFA_TRC_MAX (4 * 1024) +#endif + +#ifndef BFA_TRC_TS +#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) +#endif + +struct bfa_trc_s { +#ifdef __BIGENDIAN + u16 fileno; + u16 line; +#else + u16 line; + u16 fileno; +#endif + u32 timestamp; + union { + struct { + u32 rsvd; + u32 u32; + } u32; + u64 u64; + } data; +}; + +struct bfa_trc_mod_s { + u32 head; + u32 tail; + u32 ntrc; + u32 stopped; + u32 ticks; + u32 rsvd[3]; + struct bfa_trc_s trc[BFA_TRC_MAX]; +}; + +enum { + BFA_TRC_HAL = 1, /* BFA modules */ + BFA_TRC_FCS = 2, /* BFA FCS modules */ + BFA_TRC_LDRV = 3, /* Linux driver modules */ + BFA_TRC_CNA = 4, /* Common modules */ +}; +#define BFA_TRC_MOD_SH 10 +#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) + +/** + * Define a new tracing file (module). Module should match one defined above. 
+ */ +#define BFA_TRC_FILE(__mod, __submod) \ + static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \ + BFA_TRC_MOD(__mod)) + + +#define bfa_trc32(_trcp, _data) \ + __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data) +#define bfa_trc(_trcp, _data) \ + __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data) + +static inline void +bfa_trc_init(struct bfa_trc_mod_s *trcm) +{ + trcm->head = trcm->tail = trcm->stopped = 0; + trcm->ntrc = BFA_TRC_MAX; +} + +static inline void +bfa_trc_stop(struct bfa_trc_mod_s *trcm) +{ + trcm->stopped = 1; +} + +#ifdef FWTRC +extern void dc_flush(void *data); +#else +#define dc_flush(data) +#endif + + +static inline void +__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) +{ + int tail = trcm->tail; + struct bfa_trc_s *trc = &trcm->trc[tail]; + + if (trcm->stopped) + return; + + trc->fileno = (u16) fileno; + trc->line = (u16) line; + trc->data.u64 = data; + trc->timestamp = BFA_TRC_TS(trcm); + dc_flush(trc); + + trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); + if (trcm->tail == trcm->head) + trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); + dc_flush(trcm); +} + + +static inline void +__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) +{ + int tail = trcm->tail; + struct bfa_trc_s *trc = &trcm->trc[tail]; + + if (trcm->stopped) + return; + + trc->fileno = (u16) fileno; + trc->line = (u16) line; + trc->data.u32.u32 = data; + trc->timestamp = BFA_TRC_TS(trcm); + dc_flush(trc); + + trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); + if (trcm->tail == trcm->head) + trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); + dc_flush(trcm); +} + +#ifndef BFA_PERF_BUILD +#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data) +#else +#define bfa_trc_fp(_trcp, _data) +#endif + +/** + * @ BFA LOG interfaces + */ +#define bfa_assert(__cond) do { \ + if (!(__cond)) { \ + printk(KERN_ERR "assert(%s) failed at %s:%d\\n", \ + #__cond, __FILE__, __LINE__); \ + } \ +} while (0) + +#define bfa_sm_fault(__mod, __event) do { \ + bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \ + printk(KERN_ERR "Assertion failure: %s:%d: %d", \ + __FILE__, __LINE__, (__event)); \ +} while (0) + +#ifndef BFA_PERF_BUILD +#define bfa_assert_fp(__cond) bfa_assert(__cond) +#else +#define bfa_assert_fp(__cond) +#endif + +/* BFA queue definitions */ +#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) +#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) +#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev) + +/* + * bfa_q_qe_init - to initialize a queue element + */ +#define bfa_q_qe_init(_qe) { \ + bfa_q_next(_qe) = (struct list_head *) NULL; \ + bfa_q_prev(_qe) = (struct list_head *) NULL; \ +} + +/* + * bfa_q_deq - dequeue an element from head of the queue + */ +#define bfa_q_deq(_q, _qe) { \ + if (!list_empty(_q)) { \ + (*((struct list_head **) (_qe))) = bfa_q_next(_q); \ + bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\ + BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \ + } else { \ + *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ + } \ +} + +/* + * bfa_q_deq_tail - dequeue an element from tail of the queue + */ +#define bfa_q_deq_tail(_q, _qe) { \ + if (!list_empty(_q)) { \ + *((struct list_head **) (_qe)) = bfa_q_prev(_q); \ + bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ + (struct list_head *) (_q); \ + bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\ + 
BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \ + } else { \ + *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ + } \ +} + +static inline int +bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) +{ + struct list_head *tqe; + + tqe = bfa_q_next(q); + while (tqe != q) { + if (tqe == qe) + return 1; + tqe = bfa_q_next(tqe); + if (tqe == NULL) + break; + } + return 0; +} + +/* + * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not + * consistent across modules) + */ +#ifndef BFA_PERF_BUILD +#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe) +#else +#define BFA_Q_DBG_INIT(_qe) +#endif + +#define bfa_q_is_on_q(_q, _qe) \ + bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) + +/** + * @ BFA state machine interfaces + */ + +typedef void (*bfa_sm_t)(void *sm, int event); + +/** + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc_s + * etype - object type, eg. enum ioc_event + */ +#define bfa_sm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event) + +#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) +#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) +#define bfa_sm_get_state(_sm) ((_sm)->sm) +#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) + +/** + * For converting from state machine function to state encoding. + */ +struct bfa_sm_table_s { + bfa_sm_t sm; /* state machine function */ + int state; /* state machine encoding */ + char *name; /* state name for display */ +}; +#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) + +/** + * State machine with entry actions. + */ +typedef void (*bfa_fsm_t)(void *fsm, int event); + +/** + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc_s + * etype - object type, eg. enum ioc_event + */ +#define bfa_fsm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event); \ + static void oc ## _sm_ ## st ## _entry(otype * fsm) + +#define bfa_fsm_set_state(_fsm, _state) do { \ + (_fsm)->fsm = (bfa_fsm_t)(_state); \ + _state ## _entry(_fsm); \ +} while (0) + +#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) +#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm) +#define bfa_fsm_cmp_state(_fsm, _state) \ + ((_fsm)->fsm == (bfa_fsm_t)(_state)) + +static inline int +bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm) +{ + int i = 0; + + while (smt[i].sm && smt[i].sm != sm) + i++; + return smt[i].state; +} + +/** + * @ Generic wait counter. + */ + +typedef void (*bfa_wc_resume_t) (void *cbarg); + +struct bfa_wc_s { + bfa_wc_resume_t wc_resume; + void *wc_cbarg; + int wc_count; +}; + +static inline void +bfa_wc_up(struct bfa_wc_s *wc) +{ + wc->wc_count++; +} + +static inline void +bfa_wc_down(struct bfa_wc_s *wc) +{ + wc->wc_count--; + if (wc->wc_count == 0) + wc->wc_resume(wc->wc_cbarg); +} + +/** + * Initialize a waiting counter. 
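(Editorial aside, not part of this commit: the generic wait counter above runs a resume callback once a set of parallel operations has completed, using bfa_wc_up()/bfa_wc_down() together with bfa_wc_init() and bfa_wc_wait() defined just below. A minimal usage sketch; foo_s, all_done() and the worker hand-off are made-up names.)

/* Editor's sketch: fire all_done() only after both workers call bfa_wc_down(). */
struct foo_s {
	struct bfa_wc_s wc;
};

static void all_done(void *cbarg)
{
	/* runs when wc_count drops to zero */
}

static void foo_start(struct foo_s *foo)
{
	bfa_wc_init(&foo->wc, all_done, foo);	/* takes the "init" reference */
	bfa_wc_up(&foo->wc);			/* first pending worker */
	bfa_wc_up(&foo->wc);			/* second pending worker */
	/* ... kick off the workers; each calls bfa_wc_down(&foo->wc) when done ... */
	bfa_wc_wait(&foo->wc);	/* drop the init reference; may invoke all_done() */
}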
+ */ +static inline void +bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) +{ + wc->wc_resume = wc_resume; + wc->wc_cbarg = wc_cbarg; + wc->wc_count = 0; + bfa_wc_up(wc); +} + +/** + * Wait for counter to reach zero + */ +static inline void +bfa_wc_wait(struct bfa_wc_s *wc) +{ + bfa_wc_down(wc); +} + +#endif /* __BFA_CS_H__ */ diff --git a/drivers/scsi/bfa/bfa_csdebug.c b/drivers/scsi/bfa/bfa_csdebug.c deleted file mode 100644 index caeb1143a4e6..000000000000 --- a/drivers/scsi/bfa/bfa_csdebug.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include - -/** - * cs_debug_api - */ - - -void -bfa_panic(int line, char *file, char *panicstr) -{ - bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr); - bfa_os_panic(); -} - -void -bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event) -{ - bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event); - bfa_os_panic(); -} - -int -bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) -{ - struct list_head *tqe; - - tqe = bfa_q_next(q); - while (tqe != q) { - if (tqe == qe) - return 1; - tqe = bfa_q_next(tqe); - if (tqe == NULL) - break; - } - return 0; -} - - diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h new file mode 100644 index 000000000000..d49877ff5140 --- /dev/null +++ b/drivers/scsi/bfa/bfa_defs.h @@ -0,0 +1,466 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef __BFA_DEFS_H__ +#define __BFA_DEFS_H__ + +#include "bfa_fc.h" +#include "bfa_os_inc.h" + +#define BFA_MFG_SERIALNUM_SIZE 11 +#define STRSZ(_n) (((_n) + 4) & ~3) + +/** + * Manufacturing card type + */ +enum { + BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */ + BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */ + BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */ + BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */ + BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */ + BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */ + BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */ + BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */ + BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */ + BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */ + BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */ + BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */ + BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ +}; + +#pragma pack(1) + +/** + * Check if Mezz card + */ +#define bfa_mfg_is_mezz(type) (( \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE || \ + (type) == BFA_MFG_TYPE_ASTRA || \ + (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ + (type) == BFA_MFG_TYPE_LIGHTNING)) + +/** + * Check if the card having old wwn/mac handling + */ +#define bfa_mfg_is_old_wwn_mac_model(type) (( \ + (type) == BFA_MFG_TYPE_FC8P2 || \ + (type) == BFA_MFG_TYPE_FC8P1 || \ + (type) == BFA_MFG_TYPE_FC4P2 || \ + (type) == BFA_MFG_TYPE_FC4P1 || \ + (type) == BFA_MFG_TYPE_CNA10P2 || \ + (type) == BFA_MFG_TYPE_CNA10P1 || \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE)) + +#define bfa_mfg_increment_wwn_mac(m, i) \ +do { \ + u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \ + (u32)(m)[2]; \ + t += (i); \ + (m)[0] = (t >> 16) & 0xFF; \ + (m)[1] = (t >> 8) & 0xFF; \ + (m)[2] = t & 0xFF; \ +} while (0) + +/** + * VPD data length + */ +#define BFA_MFG_VPD_LEN 512 + +/** + * VPD vendor tag + */ +enum { + BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */ + BFA_MFG_VPD_IBM = 1, /* vendor IBM */ + BFA_MFG_VPD_HP = 2, /* vendor HP */ + BFA_MFG_VPD_DELL = 3, /* vendor DELL */ + BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */ + BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */ + BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */ + BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ +}; + +/** + * All numerical fields are in big-endian format. 
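(Editorial note, not part of this commit: a worked example of the bfa_mfg_increment_wwn_mac() macro defined above.)

/*
 * Editor's example: the macro treats the last three bytes of the WWN/MAC
 * as a 24-bit big-endian counter.  With m[] = {0x00, 0x0F, 0xFF} and i = 2:
 *	t = 0x000FFF + 2 = 0x001001
 * so m[] becomes {0x00, 0x10, 0x01}, i.e. the address is advanced by two.
 */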
+ */ +struct bfa_mfg_vpd_s { + u8 version; /* vpd data version */ + u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ + u8 chksum; /* u8 checksum */ + u8 vendor; /* vendor */ + u8 len; /* vpd data length excluding header */ + u8 rsv; + u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ +}; + +#pragma pack() + +/** + * Status return values + */ +enum bfa_status { + BFA_STATUS_OK = 0, /* Success */ + BFA_STATUS_FAILED = 1, /* Operation failed */ + BFA_STATUS_EINVAL = 2, /* Invalid params Check input + * parameters */ + BFA_STATUS_ENOMEM = 3, /* Out of resources */ + BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists, + * contact support */ + BFA_STATUS_EPROTOCOL = 6, /* Protocol error */ + BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */ + BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */ + BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */ + BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */ + BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */ + BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */ + BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ + BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ + BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ + BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */ + BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists + * contact support */ + BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */ + BFA_STATUS_DIAG_BUSY = 71, /* diag busy */ + BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ + BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ + BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ + BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot + * configuration */ + BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on + * this adapter */ + BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on + * the adapter */ + BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */ + BFA_STATUS_MAX_VAL /* Unknown error code */ +}; +#define bfa_status_t enum bfa_status + +enum bfa_eproto_status { + BFA_EPROTO_BAD_ACCEPT = 0, + BFA_EPROTO_UNKNOWN_RSP = 1 +}; +#define bfa_eproto_status_t enum bfa_eproto_status + +enum bfa_boolean { + BFA_FALSE = 0, + BFA_TRUE = 1 +}; +#define bfa_boolean_t enum bfa_boolean + +#define BFA_STRING_32 32 +#define BFA_VERSION_LEN 64 + +/** + * ---------------------- adapter definitions ------------ + */ + +/** + * BFA adapter level attributes. 
+ */ +enum { + BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), + /* + *!< adapter serial num length + */ + BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */ + BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */ + BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */ + BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */ + BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */ +}; + +struct bfa_adapter_attr_s { + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 card_type; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + wwn_t pwwn; + char node_symname[FC_SYMNAME_MAX]; + char hw_ver[BFA_VERSION_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + char os_type[BFA_ADAPTER_OS_TYPE_LEN]; + struct bfa_mfg_vpd_s vpd; + struct mac_s mac; + + u8 nports; + u8 max_speed; + u8 prototype; + char asic_rev; + + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 cna_capable; + + u8 is_mezz; + u8 trunk_capable; +}; + +/** + * ---------------------- IOC definitions ------------ + */ + +enum { + BFA_IOC_DRIVER_LEN = 16, + BFA_IOC_CHIP_REV_LEN = 8, +}; + +/** + * Driver and firmware versions. + */ +struct bfa_ioc_driver_attr_s { + char driver[BFA_IOC_DRIVER_LEN]; /* driver name */ + char driver_ver[BFA_VERSION_LEN]; /* driver version */ + char fw_ver[BFA_VERSION_LEN]; /* firmware version */ + char bios_ver[BFA_VERSION_LEN]; /* bios version */ + char efi_ver[BFA_VERSION_LEN]; /* EFI version */ + char ob_ver[BFA_VERSION_LEN]; /* openboot version */ +}; + +/** + * IOC PCI device attributes + */ +struct bfa_ioc_pci_attr_s { + u16 vendor_id; /* PCI vendor ID */ + u16 device_id; /* PCI device ID */ + u16 ssid; /* subsystem ID */ + u16 ssvid; /* subsystem vendor ID */ + u32 pcifn; /* PCI device function */ + u32 rsvd; /* padding */ + char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ +}; + +/** + * IOC states + */ +enum bfa_ioc_state { + BFA_IOC_UNINIT = 1, /* IOC is in uninit state */ + BFA_IOC_RESET = 2, /* IOC is in reset state */ + BFA_IOC_SEMWAIT = 3, /* Waiting for IOC h/w semaphore */ + BFA_IOC_HWINIT = 4, /* IOC h/w is being initialized */ + BFA_IOC_GETATTR = 5, /* IOC is being configured */ + BFA_IOC_OPERATIONAL = 6, /* IOC is operational */ + BFA_IOC_INITFAIL = 7, /* IOC hardware failure */ + BFA_IOC_FAIL = 8, /* IOC heart-beat failure */ + BFA_IOC_DISABLING = 9, /* IOC is being disabled */ + BFA_IOC_DISABLED = 10, /* IOC is disabled */ + BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */ + BFA_IOC_ENABLING = 12, /* IOC is being enabled */ +}; + +/** + * IOC firmware stats + */ +struct bfa_fw_ioc_stats_s { + u32 enable_reqs; + u32 disable_reqs; + u32 get_attr_reqs; + u32 dbg_sync; + u32 dbg_dump; + u32 unknown_reqs; +}; + +/** + * IOC driver stats + */ +struct bfa_ioc_drv_stats_s { + u32 ioc_isrs; + u32 ioc_enables; + u32 ioc_disables; + u32 ioc_hbfails; + u32 ioc_boots; + u32 stats_tmos; + u32 hb_count; + u32 disable_reqs; + u32 enable_reqs; + u32 disable_replies; + u32 enable_replies; +}; + +/** + * IOC statistics + */ +struct bfa_ioc_stats_s { + struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */ + struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */ +}; + +enum bfa_ioc_type_e { + BFA_IOC_TYPE_FC = 1, + BFA_IOC_TYPE_FCoE = 2, + BFA_IOC_TYPE_LL = 3, +}; + +/** + * IOC attributes returned in queries + */ +struct bfa_ioc_attr_s { + enum bfa_ioc_type_e ioc_type; + enum bfa_ioc_state state; /* IOC 
state */ + struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */ + struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ + struct bfa_ioc_pci_attr_s pci_attr; + u8 port_id; /* port number */ + u8 rsvd[7]; /* 64bit align */ +}; + +/** + * ---------------------- mfg definitions ------------ + */ + +/** + * Checksum size + */ +#define BFA_MFG_CHKSUM_SIZE 16 + +#define BFA_MFG_PARTNUM_SIZE 14 +#define BFA_MFG_SUPPLIER_ID_SIZE 10 +#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 + +#pragma pack(1) + +/** + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_block_s { + u8 version; /* manufacturing block version */ + u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ + u16 mfgsize; /* mfg block size */ + u16 u16_chksum; /* old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + wwn_t mfg_wwn; /* wwn base for this adapter */ + u8 num_wwn; /* number of wwns assigned */ + u8 mfg_speeds; /* speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char + supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char + supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + mac_t mfg_mac; /* mac address */ + u8 num_mac; /* number of mac addresses */ + u8 rsv2; + u32 mfg_type; /* card type */ + u8 rsv3[108]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ +}; + +#pragma pack() + +/** + * ---------------------- pci definitions ------------ + */ + +/** + * PCI device and vendor ID information + */ +enum { + BFA_PCI_VENDOR_ID_BROCADE = 0x1657, + BFA_PCI_DEVICE_ID_FC_8G2P = 0x13, + BFA_PCI_DEVICE_ID_FC_8G1P = 0x17, + BFA_PCI_DEVICE_ID_CT = 0x14, + BFA_PCI_DEVICE_ID_CT_FC = 0x21, +}; + +#define bfa_asic_id_ct(devid) \ + ((devid) == BFA_PCI_DEVICE_ID_CT || \ + (devid) == BFA_PCI_DEVICE_ID_CT_FC) + +/** + * PCI sub-system device and vendor ID information + */ +enum { + BFA_PCI_FCOE_SSDEVICE_ID = 0x14, +}; + +/** + * Maximum number of device address ranges mapped through different BAR(s) + */ +#define BFA_PCI_ACCESS_RANGES 1 + +/* + * Port speed settings. Each specific speed is a bit field. Use multiple + * bits to specify speeds to be selected for auto-negotiation. + */ +enum bfa_port_speed { + BFA_PORT_SPEED_UNKNOWN = 0, + BFA_PORT_SPEED_1GBPS = 1, + BFA_PORT_SPEED_2GBPS = 2, + BFA_PORT_SPEED_4GBPS = 4, + BFA_PORT_SPEED_8GBPS = 8, + BFA_PORT_SPEED_10GBPS = 10, + BFA_PORT_SPEED_16GBPS = 16, + BFA_PORT_SPEED_AUTO = + (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS | + BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS), +}; +#define bfa_port_speed_t enum bfa_port_speed + +enum { + BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */ + BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */ +}; + +#define BOOT_CFG_REV1 1 +#define BOOT_CFG_VLAN 1 + +/** + * Boot options setting. Boot options setting determines from where + * to get the boot lun information + */ +enum bfa_boot_bootopt { + BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */ + BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */ + BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */ + BFA_BOOT_PBC = 3, /* Boot from pbc configured blun */ +}; + +#pragma pack(1) +/** + * Boot lun information. 
+ */
+struct bfa_boot_bootlun_s {
+ wwn_t pwwn; /* port wwn of target */
+ lun_t lun; /* 64-bit lun */
+};
+#pragma pack()
+
+/**
+ * BOOT boot configuration
+ */
+struct bfa_boot_pbc_s {
+ u8 enable; /* enable/disable SAN boot */
+ u8 speed; /* boot speed settings */
+ u8 topology; /* boot topology setting */
+ u8 rsvd1;
+ u32 nbluns; /* number of boot luns */
+ struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
+};
+
+#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
new file mode 100644
index 000000000000..96905d301828
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_FCS_H__
+#define __BFA_DEFS_FCS_H__
+
+#include "bfa_fc.h"
+#include "bfa_defs_svc.h"
+
+/**
+ * VF states
+ */
+enum bfa_vf_state {
+ BFA_VF_UNINIT = 0, /* fabric is not yet initialized */
+ BFA_VF_LINK_DOWN = 1, /* link is down */
+ BFA_VF_FLOGI = 2, /* flogi is in progress */
+ BFA_VF_AUTH = 3, /* authentication in progress */
+ BFA_VF_NOFABRIC = 4, /* fabric is not present */
+ BFA_VF_ONLINE = 5, /* login to fabric is complete */
+ BFA_VF_EVFP = 6, /* EVFP is in progress */
+ BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */
+};
+
+/**
+ * VF statistics
+ */
+struct bfa_vf_stats_s {
+ u32 flogi_sent; /* Num FLOGIs sent */
+ u32 flogi_rsp_err; /* FLOGI response errors */
+ u32 flogi_acc_err; /* FLOGI accept errors */
+ u32 flogi_accepts; /* FLOGI accepts received */
+ u32 flogi_rejects; /* FLOGI rejects received */
+ u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */
+ u32 flogi_alloc_wait; /* Allocation waits prior to sending FLOGI */
+ u32 flogi_rcvd; /* FLOGIs received */
+ u32 flogi_rejected; /* Incoming FLOGIs rejected */
+ u32 fabric_onlines; /* Internal fabric online notification sent
+ * to other modules */
+ u32 fabric_offlines; /* Internal fabric offline notification sent
+ * to other modules */
+ u32 resvd; /* padding for 64 bit alignment */
+};
+
+/**
+ * VF attributes returned in queries
+ */
+struct bfa_vf_attr_s {
+ enum bfa_vf_state state; /* VF state */
+ u32 rsvd;
+ wwn_t fabric_name; /* fabric name */
+};
+
+#define BFA_FCS_MAX_LPORTS 256
+#define BFA_FCS_FABRIC_IPADDR_SZ 16
+
+/**
+ * symbolic names for base port/virtual port
+ */
+#define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */
+struct bfa_lport_symname_s {
+ char symname[BFA_SYMNAME_MAXLEN];
+};
+
+/**
+ * Roles of FCS port:
+ * - FCP IM and FCP TM roles cannot be enabled together for a FCS port
+ * - Create multiple ports if both IM and TM functions are required.
+ * - At least one role must be specified.
+ */
+enum bfa_lport_role {
+ BFA_LPORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */
+ BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM,
+};
+
+/**
+ * FCS port configuration. 
+ */
+struct bfa_lport_cfg_s {
+ wwn_t pwwn; /* port wwn */
+ wwn_t nwwn; /* node wwn */
+ struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
+ bfa_boolean_t preboot_vp; /* vport created from PBC */
+ enum bfa_lport_role roles; /* FCS port roles */
+ u8 tag[16]; /* opaque tag from application */
+};
+
+/**
+ * FCS port states
+ */
+enum bfa_lport_state {
+ BFA_LPORT_UNINIT = 0, /* PORT is not yet initialized */
+ BFA_LPORT_FDISC = 1, /* FDISC is in progress */
+ BFA_LPORT_ONLINE = 2, /* login to fabric is complete */
+ BFA_LPORT_OFFLINE = 3, /* No login to fabric */
+};
+
+/**
+ * FCS port type.
+ */
+enum bfa_lport_type {
+ BFA_LPORT_TYPE_PHYSICAL = 0,
+ BFA_LPORT_TYPE_VIRTUAL,
+};
+
+/**
+ * FCS port offline reason.
+ */
+enum bfa_lport_offline_reason {
+ BFA_LPORT_OFFLINE_UNKNOWN = 0,
+ BFA_LPORT_OFFLINE_LINKDOWN,
+ BFA_LPORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the
+ * fabric */
+ BFA_LPORT_OFFLINE_FAB_NORESOURCES,
+ BFA_LPORT_OFFLINE_FAB_LOGOUT,
+};
+
+/**
+ * FCS lport info.
+ */
+struct bfa_lport_info_s {
+ u8 port_type; /* bfa_lport_type_t : physical or
+ * virtual */
+ u8 port_state; /* one of bfa_lport_state values */
+ u8 offline_reason; /* one of bfa_lport_offline_reason_t
+ * values */
+ wwn_t port_wwn;
+ wwn_t node_wwn;
+
+ /*
+ * following 4 fields are valid for Physical Ports only
+ */
+ u32 max_vports_supp; /* Max supported vports */
+ u32 num_vports_inuse; /* Num of in use vports */
+ u32 max_rports_supp; /* Max supported rports */
+ u32 num_rports_inuse; /* Num of discovered rports */
+
+};
+
+/**
+ * FCS port statistics
+ */
+struct bfa_lport_stats_s {
+ u32 ns_plogi_sent;
+ u32 ns_plogi_rsp_err;
+ u32 ns_plogi_acc_err;
+ u32 ns_plogi_accepts;
+ u32 ns_rejects; /* NS command rejects */
+ u32 ns_plogi_unknown_rsp;
+ u32 ns_plogi_alloc_wait;
+
+ u32 ns_retries; /* NS command retries */
+ u32 ns_timeouts; /* NS command timeouts */
+
+ u32 ns_rspnid_sent;
+ u32 ns_rspnid_accepts;
+ u32 ns_rspnid_rsp_err;
+ u32 ns_rspnid_rejects;
+ u32 ns_rspnid_alloc_wait;
+
+ u32 ns_rftid_sent;
+ u32 ns_rftid_accepts;
+ u32 ns_rftid_rsp_err;
+ u32 ns_rftid_rejects;
+ u32 ns_rftid_alloc_wait;
+
+ u32 ns_rffid_sent;
+ u32 ns_rffid_accepts;
+ u32 ns_rffid_rsp_err;
+ u32 ns_rffid_rejects;
+ u32 ns_rffid_alloc_wait;
+
+ u32 ns_gidft_sent;
+ u32 ns_gidft_accepts;
+ u32 ns_gidft_rsp_err;
+ u32 ns_gidft_rejects;
+ u32 ns_gidft_unknown_rsp;
+ u32 ns_gidft_alloc_wait;
+
+ /*
+ * Mgmt Server stats
+ */
+ u32 ms_retries; /* MS command retries */
+ u32 ms_timeouts; /* MS command timeouts */
+ u32 ms_plogi_sent;
+ u32 ms_plogi_rsp_err;
+ u32 ms_plogi_acc_err;
+ u32 ms_plogi_accepts;
+ u32 ms_rejects; /* MS command rejects */
+ u32 ms_plogi_unknown_rsp;
+ u32 ms_plogi_alloc_wait;
+
+ u32 num_rscn; /* Num of RSCN received */
+ u32 num_portid_rscn;/* Num portid format RSCN
+ * received */
+
+ u32 uf_recvs; /* Unsolicited recv frames */
+ u32 uf_recv_drops; /* Dropped received frames */
+
+ u32 plogi_rcvd; /* Received plogi */
+ u32 prli_rcvd; /* Received prli */
+ u32 adisc_rcvd; /* Received adisc */
+ u32 prlo_rcvd; /* Received prlo */
+ u32 logo_rcvd; /* Received logo */
+ u32 rpsc_rcvd; /* Received rpsc */
+ u32 un_handled_els_rcvd; /* Received unhandled ELS */
+ u32 rport_plogi_timeouts; /* Rport plogi retry timeout count */
+ u32 rport_del_max_plogi_retry; /* Deleted rport
+ * (max retry of plogi) */
+};
+
+/**
+ * BFA port attribute returned in queries
+ */
+struct bfa_lport_attr_s {
+ enum bfa_lport_state state; /* port state */
+ u32 pid; /* port ID */
+ struct 
bfa_lport_cfg_s port_cfg; /* port configuration */ + enum bfa_port_type port_type; /* current topology */ + u32 loopback; /* cable is externally looped back */ + wwn_t fabric_name; /* attached switch's nwwn */ + u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached + * fabric's ip addr */ + mac_t fpma_mac; /* Lport's FPMA Mac address */ + u16 authfail; /* auth failed state */ +}; + + +/** + * VPORT states + */ +enum bfa_vport_state { + BFA_FCS_VPORT_UNINIT = 0, + BFA_FCS_VPORT_CREATED = 1, + BFA_FCS_VPORT_OFFLINE = 1, + BFA_FCS_VPORT_FDISC_SEND = 2, + BFA_FCS_VPORT_FDISC = 3, + BFA_FCS_VPORT_FDISC_RETRY = 4, + BFA_FCS_VPORT_ONLINE = 5, + BFA_FCS_VPORT_DELETING = 6, + BFA_FCS_VPORT_CLEANUP = 6, + BFA_FCS_VPORT_LOGO_SEND = 7, + BFA_FCS_VPORT_LOGO = 8, + BFA_FCS_VPORT_ERROR = 9, + BFA_FCS_VPORT_MAX_STATE, +}; + +/** + * vport statistics + */ +struct bfa_vport_stats_s { + struct bfa_lport_stats_s port_stats; /* base class (port) stats */ + /* + * TODO - remove + */ + + u32 fdisc_sent; /* num fdisc sent */ + u32 fdisc_accepts; /* fdisc accepts */ + u32 fdisc_retries; /* fdisc retries */ + u32 fdisc_timeouts; /* fdisc timeouts */ + u32 fdisc_rsp_err; /* fdisc response error */ + u32 fdisc_acc_bad; /* bad fdisc accepts */ + u32 fdisc_rejects; /* fdisc rejects */ + u32 fdisc_unknown_rsp; + /* + *!< fdisc rsp unknown error + */ + u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */ + + u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */ + u32 logo_sent; /* logo sent */ + u32 logo_accepts; /* logo accepts */ + u32 logo_rejects; /* logo rejects */ + u32 logo_rsp_err; /* logo rsp errors */ + u32 logo_unknown_rsp; + /* logo rsp unknown errors */ + + u32 fab_no_npiv; /* fabric does not support npiv */ + + u32 fab_offline; /* offline events from fab SM */ + u32 fab_online; /* online events from fab SM */ + u32 fab_cleanup; /* cleanup request from fab SM */ + u32 rsvd; +}; + +/** + * BFA vport attribute returned in queries + */ +struct bfa_vport_attr_s { + struct bfa_lport_attr_s port_attr; /* base class (port) attributes */ + enum bfa_vport_state vport_state; /* vport state */ + u32 rsvd; +}; + +/** + * FCS remote port states + */ +enum bfa_rport_state { + BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */ + BFA_RPORT_OFFLINE = 1, /* rport is offline */ + BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */ + BFA_RPORT_ONLINE = 3, /* login to rport is complete */ + BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */ + BFA_RPORT_NSQUERY = 5, /* nameserver query */ + BFA_RPORT_ADISC = 6, /* ADISC authentication */ + BFA_RPORT_LOGO = 7, /* logging out with rport */ + BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */ + BFA_RPORT_NSDISC = 9, /* re-discover rport */ +}; + +/** + * Rport Scsi Function : Initiator/Target. 
+ */ +enum bfa_rport_function { + BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */ + BFA_RPORT_TARGET = 0x02, /* SCSI Target */ +}; + +/** + * port/node symbolic names for rport + */ +#define BFA_RPORT_SYMNAME_MAXLEN 255 +struct bfa_rport_symname_s { + char symname[BFA_RPORT_SYMNAME_MAXLEN]; +}; + +/** + * FCS remote port statistics + */ +struct bfa_rport_stats_s { + u32 offlines; /* remote port offline count */ + u32 onlines; /* remote port online count */ + u32 rscns; /* RSCN affecting rport */ + u32 plogis; /* plogis sent */ + u32 plogi_accs; /* plogi accepts */ + u32 plogi_timeouts; /* plogi timeouts */ + u32 plogi_rejects; /* rcvd plogi rejects */ + u32 plogi_failed; /* local failure */ + u32 plogi_rcvd; /* plogis rcvd */ + u32 prli_rcvd; /* inbound PRLIs */ + u32 adisc_rcvd; /* ADISCs received */ + u32 adisc_rejects; /* recvd ADISC rejects */ + u32 adisc_sent; /* ADISC requests sent */ + u32 adisc_accs; /* ADISC accepted by rport */ + u32 adisc_failed; /* ADISC failed (no response) */ + u32 adisc_rejected; /* ADISC rejected by us */ + u32 logos; /* logos sent */ + u32 logo_accs; /* LOGO accepts from rport */ + u32 logo_failed; /* LOGO failures */ + u32 logo_rejected; /* LOGO rejects from rport */ + u32 logo_rcvd; /* LOGO from remote port */ + + u32 rpsc_rcvd; /* RPSC received */ + u32 rpsc_rejects; /* recvd RPSC rejects */ + u32 rpsc_sent; /* RPSC requests sent */ + u32 rpsc_accs; /* RPSC accepted by rport */ + u32 rpsc_failed; /* RPSC failed (no response) */ + u32 rpsc_rejected; /* RPSC rejected by us */ + + u32 rjt_insuff_res; /* LS RJT with insuff resources */ + struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */ +}; + +/** + * FCS remote port attributes returned in queries + */ +struct bfa_rport_attr_s { + wwn_t nwwn; /* node wwn */ + wwn_t pwwn; /* port wwn */ + enum fc_cos cos_supported; /* supported class of services */ + u32 pid; /* port ID */ + u32 df_sz; /* Max payload size */ + enum bfa_rport_state state; /* Rport State machine state */ + enum fc_cos fc_cos; /* FC classes of services */ + bfa_boolean_t cisc; /* CISC capable device */ + struct bfa_rport_symname_s symname; /* Symbolic Name */ + enum bfa_rport_function scsi_function; /* Initiator/Target */ + struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */ + enum bfa_port_speed curr_speed; /* operating speed got from + * RPSC ELS. UNKNOWN, if RPSC + * is not supported */ + bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */ + enum bfa_port_speed assigned_speed; /* Speed assigned by the user. 
+ * will be used if RPSC is not + * supported by the rport */ +}; + +struct bfa_rport_remote_link_stats_s { + u32 lfc; /* Link Failure Count */ + u32 lsyc; /* Loss of Synchronization Count */ + u32 lsic; /* Loss of Signal Count */ + u32 pspec; /* Primitive Sequence Protocol Error Count */ + u32 itwc; /* Invalid Transmission Word Count */ + u32 icc; /* Invalid CRC Count */ +}; + + +#define BFA_MAX_IO_INDEX 7 +#define BFA_NO_IO_INDEX 9 + +/** + * FCS itnim states + */ +enum bfa_itnim_state { + BFA_ITNIM_OFFLINE = 0, /* offline */ + BFA_ITNIM_PRLI_SEND = 1, /* prli send */ + BFA_ITNIM_PRLI_SENT = 2, /* prli sent */ + BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */ + BFA_ITNIM_HCB_ONLINE = 4, /* online callback */ + BFA_ITNIM_ONLINE = 5, /* online */ + BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */ + BFA_ITNIM_INITIATIOR = 7, /* initiator */ +}; + +/** + * FCS remote port statistics + */ +struct bfa_itnim_stats_s { + u32 onlines; /* num rport online */ + u32 offlines; /* num rport offline */ + u32 prli_sent; /* num prli sent out */ + u32 fcxp_alloc_wait;/* num fcxp alloc waits */ + u32 prli_rsp_err; /* num prli rsp errors */ + u32 prli_rsp_acc; /* num prli rsp accepts */ + u32 initiator; /* rport is an initiator */ + u32 prli_rsp_parse_err; /* prli rsp parsing errors */ + u32 prli_rsp_rjt; /* num prli rsp rejects */ + u32 timeout; /* num timeouts detected */ + u32 sler; /* num sler notification from BFA */ + u32 rsvd; /* padding for 64 bit alignment */ +}; + +/** + * FCS itnim attributes returned in queries + */ +struct bfa_itnim_attr_s { + enum bfa_itnim_state state; /* FCS itnim state */ + u8 retry; /* data retransmision support */ + u8 task_retry_id; /* task retry ident support */ + u8 rec_support; /* REC supported */ + u8 conf_comp; /* confirmed completion supp */ +}; + +#endif /* __BFA_DEFS_FCS_H__ */ diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h new file mode 100644 index 000000000000..56226fcf9470 --- /dev/null +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -0,0 +1,1081 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __BFA_DEFS_SVC_H__ +#define __BFA_DEFS_SVC_H__ + +#include "bfa_defs.h" +#include "bfa_fc.h" +#include "bfi.h" + +#define BFA_IOCFC_INTR_DELAY 1125 +#define BFA_IOCFC_INTR_LATENCY 225 +#define BFA_IOCFCOE_INTR_DELAY 25 +#define BFA_IOCFCOE_INTR_LATENCY 5 + +/** + * Interrupt coalescing configuration. 
+ */
+#pragma pack(1)
+struct bfa_iocfc_intr_attr_s {
+ u8 coalesce; /* enable/disable coalescing */
+ u8 rsvd[3];
+ u16 latency; /* latency in microseconds */
+ u16 delay; /* delay in microseconds */
+};
+
+/**
+ * IOC firmware configuration
+ */
+struct bfa_iocfc_fwcfg_s {
+ u16 num_fabrics; /* number of fabrics */
+ u16 num_lports; /* number of local lports */
+ u16 num_rports; /* number of remote ports */
+ u16 num_ioim_reqs; /* number of IO reqs */
+ u16 num_tskim_reqs; /* task management requests */
+ u16 num_iotm_reqs; /* number of TM IO reqs */
+ u16 num_tsktm_reqs; /* TM task management requests*/
+ u16 num_fcxp_reqs; /* unassisted FC exchanges */
+ u16 num_uf_bufs; /* unsolicited recv buffers */
+ u8 num_cqs;
+ u8 fw_tick_res; /* FW clock resolution in ms */
+ u8 rsvd[4];
+};
+#pragma pack()
+
+struct bfa_iocfc_drvcfg_s {
+ u16 num_reqq_elems; /* number of req queue elements */
+ u16 num_rspq_elems; /* number of rsp queue elements */
+ u16 num_sgpgs; /* number of total SG pages */
+ u16 num_sboot_tgts; /* number of SAN boot targets */
+ u16 num_sboot_luns; /* number of SAN boot luns */
+ u16 ioc_recover; /* IOC recovery mode */
+ u16 min_cfg; /* minimum configuration */
+ u16 path_tov; /* device path timeout */
+ bfa_boolean_t delay_comp; /* delay completion of
+ failed inflight IOs */
+ u32 rsvd;
+};
+
+/**
+ * IOC configuration
+ */
+struct bfa_iocfc_cfg_s {
+ struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */
+ struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */
+};
+
+/**
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+ u32 host_abort; /* IO aborted by host driver*/
+ u32 host_cleanup; /* IO clean up by host driver */
+
+ u32 fw_io_timeout; /* IOs timedout */
+ u32 fw_frm_parse; /* frame parsed by f/w */
+ u32 fw_frm_data; /* fcp_data frame parsed by f/w */
+ u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */
+ u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */
+ u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */
+ u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */
+ u32 fw_frm_unknown; /* unknown parsed by f/w */
+ u32 fw_data_dma; /* f/w DMA'ed the data frame */
+ u32 fw_frm_drop; /* f/w drop the frame */
+
+ u32 rec_timeout; /* FW rec timed out */
+ u32 error_rec; /* FW sending rec on
+ * an error condition*/
+ u32 wait_for_si; /* FW wait for SI */
+ u32 rec_rsp_inval; /* REC rsp invalid */
+ u32 seqr_io_abort; /* target does not know cmd so abort */
+ u32 seqr_io_retry; /* SEQR failed so retry IO */
+
+ u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */
+ u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */
+ u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on fcp_data */
+
+ u32 fcp_data_lost; /* fcp data lost */
+
+ u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */
+ u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */
+ u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */
+
+ u32 io_abort_timeout; /* ABTS timedout */
+ u32 sler_initiated; /* SLER initiated */
+
+ u32 unexp_fcp_rsp; /* fcp response in wrong state */
+
+ u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
+ u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
+ u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
+ u32 fcp_rsp_resid_inval; /* invalid residue */
+ u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
+ u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
+ u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
+ u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */
+ u32 fcp_conf_req; /* FCP conf requested */
+
+ u32 tgt_aborted_io; /* target initiated abort */
+
+ u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */
+ u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */
+ u32 ioh_fcp_conf_event; /* IOH FCP_CONF */
+ u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */
+ u32 ioh_hit_class2_event; /* IOH hit class2 */
+ u32 ioh_miss_other_event; /* IOH miss other */
+ u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
+ u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
+ * bytes xfered */
+ u32 ioh_seq_len_err_event; /* IOH seq len error */
+ u32 ioh_data_oor_event; /* Data out of range */
+ u32 ioh_ro_ooo_event; /* Relative offset out of range */
+ u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
+ u32 ioh_unexp_frame_event; /* unexpected frame received
+ * count */
+ u32 ioh_err_int; /* IOH error int during data-phase
+ * for scsi write
+ */
+};
+
+/**
+ * IOC port firmware stats
+ */
+
+struct bfa_fw_port_fpg_stats_s {
+ u32 intr_evt;
+ u32 intr;
+ u32 intr_excess;
+ u32 intr_cause0;
+ u32 intr_other;
+ u32 intr_other_ign;
+ u32 sig_lost;
+ u32 sig_regained;
+ u32 sync_lost;
+ u32 sync_to;
+ u32 sync_regained;
+ u32 div2_overflow;
+ u32 div2_underflow;
+ u32 efifo_overflow;
+ u32 efifo_underflow;
+ u32 idle_rx;
+ u32 lrr_rx;
+ u32 lr_rx;
+ u32 ols_rx;
+ u32 nos_rx;
+ u32 lip_rx;
+ u32 arbf0_rx;
+ u32 arb_rx;
+ u32 mrk_rx;
+ u32 const_mrk_rx;
+ u32 prim_unknown;
+};
+
+
+struct bfa_fw_port_lksm_stats_s {
+ u32 hwsm_success; /* hwsm state machine success */
+ u32 hwsm_fails; /* hwsm fails */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_fails; /* swsm fails */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 busybufs; /* link init failed due to busybuf */
+ u32 buf_waits; /* bufwait state entries */
+ u32 link_fails; /* link failures */
+ u32 psp_errors; /* primitive sequence protocol errors */
+ u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
+ u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
+ u32 lr_tx; /* No. of times LR tx started */
+ u32 lrr_tx; /* No. of times LRR tx started */
+ u32 ols_tx; /* No. of times OLS tx started */
+ u32 nos_tx; /* No. of times NOS tx started */
+ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
+ u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
+};
+
+struct bfa_fw_port_snsm_stats_s {
+ u32 hwsm_success; /* Successful hwsm terminations */
+ u32 hwsm_fails; /* hwsm fail count */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 error_resets; /* error resets initiated by upsm */
+ u32 sync_lost; /* Sync loss count */
+ u32 sig_lost; /* Signal loss count */
+};
+
+struct bfa_fw_port_physm_stats_s {
+ u32 module_inserts; /* Module insert count */
+ u32 module_xtracts; /* Module extracts count */
+ u32 module_invalids; /* Invalid module inserted count */
+ u32 module_read_ign; /* Module validation status ignored */
+ u32 laser_faults; /* Laser fault count */
+ u32 rsvd;
+};
+
+struct bfa_fw_fip_stats_s {
+ u32 vlan_req; /* vlan discovery requests */
+ u32 vlan_notify; /* vlan notifications */
+ u32 vlan_err; /* vlan response error */
+ u32 vlan_timeouts; /* vlan discovery timeouts */
+ u32 vlan_invalids; /* invalid vlan in discovery advert. */
+ u32 disc_req; /* Discovery solicit requests */
+ u32 disc_rsp; /* Discovery solicit response */
+ u32 disc_err; /* Discovery advt.
parse errors */ + u32 disc_unsol; /* Discovery unsolicited */ + u32 disc_timeouts; /* Discovery timeouts */ + u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */ + u32 linksvc_unsupp; /* Unsupported link service req */ + u32 linksvc_err; /* Parse error in link service req */ + u32 logo_req; /* FIP logos received */ + u32 clrvlink_req; /* Clear virtual link req */ + u32 op_unsupp; /* Unsupported FIP operation */ + u32 untagged; /* Untagged frames (ignored) */ + u32 invalid_version; /* Invalid FIP version */ +}; + +struct bfa_fw_lps_stats_s { + u32 mac_invalids; /* Invalid mac assigned */ + u32 rsvd; +}; + +struct bfa_fw_fcoe_stats_s { + u32 cee_linkups; /* CEE link up count */ + u32 cee_linkdns; /* CEE link down count */ + u32 fip_linkups; /* FIP link up count */ + u32 fip_linkdns; /* FIP link up count */ + u32 fip_fails; /* FIP fail count */ + u32 mac_invalids; /* Invalid mac assigned */ +}; + +/** + * IOC firmware FCoE port stats + */ +struct bfa_fw_fcoe_port_stats_s { + struct bfa_fw_fcoe_stats_s fcoe_stats; + struct bfa_fw_fip_stats_s fip_stats; +}; + +/** + * IOC firmware FC uport stats + */ +struct bfa_fw_fc_uport_stats_s { + struct bfa_fw_port_snsm_stats_s snsm_stats; + struct bfa_fw_port_lksm_stats_s lksm_stats; +}; + +/** + * IOC firmware FC port stats + */ +union bfa_fw_fc_port_stats_s { + struct bfa_fw_fc_uport_stats_s fc_stats; + struct bfa_fw_fcoe_port_stats_s fcoe_stats; +}; + +/** + * IOC firmware port stats + */ +struct bfa_fw_port_stats_s { + struct bfa_fw_port_fpg_stats_s fpg_stats; + struct bfa_fw_port_physm_stats_s physm_stats; + union bfa_fw_fc_port_stats_s fc_port; +}; + +/** + * fcxchg module statistics + */ +struct bfa_fw_fcxchg_stats_s { + u32 ua_tag_inv; + u32 ua_state_inv; +}; + +struct bfa_fw_lpsm_stats_s { + u32 cls_rx; + u32 cls_tx; +}; + +/** + * Trunk statistics + */ +struct bfa_fw_trunk_stats_s { + u32 emt_recvd; /* Trunk EMT received */ + u32 emt_accepted; /* Trunk EMT Accepted */ + u32 emt_rejected; /* Trunk EMT rejected */ + u32 etp_recvd; /* Trunk ETP received */ + u32 etp_accepted; /* Trunk ETP Accepted */ + u32 etp_rejected; /* Trunk ETP rejected */ + u32 lr_recvd; /* Trunk LR received */ + u32 rsvd; /* padding for 64 bit alignment */ +}; + +struct bfa_fw_advsm_stats_s { + u32 flogi_sent; /* Flogi sent */ + u32 flogi_acc_recvd; /* Flogi Acc received */ + u32 flogi_rjt_recvd; /* Flogi rejects received */ + u32 flogi_retries; /* Flogi retries */ + + u32 elp_recvd; /* ELP received */ + u32 elp_accepted; /* ELP Accepted */ + u32 elp_rejected; /* ELP rejected */ + u32 elp_dropped; /* ELP dropped */ +}; + +/** + * IOCFC firmware stats + */ +struct bfa_fw_iocfc_stats_s { + u32 cfg_reqs; /* cfg request */ + u32 updq_reqs; /* update queue request */ + u32 ic_reqs; /* interrupt coalesce reqs */ + u32 unknown_reqs; + u32 set_intr_reqs; /* set interrupt reqs */ +}; + +/** + * IOC attributes returned in queries + */ +struct bfa_iocfc_attr_s { + struct bfa_iocfc_cfg_s config; /* IOCFC config */ + struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */ +}; + +/** + * Eth_sndrcv mod stats + */ +struct bfa_fw_eth_sndrcv_stats_s { + u32 crc_err; + u32 rsvd; /* 64bit align */ +}; + +/** + * CT MAC mod stats + */ +struct bfa_fw_mac_mod_stats_s { + u32 mac_on; /* MAC got turned-on */ + u32 link_up; /* link-up */ + u32 signal_off; /* lost signal */ + u32 dfe_on; /* DFE on */ + u32 mac_reset; /* # of MAC reset to bring lnk up */ + u32 pcs_reset; /* # of PCS reset to bring lnk up */ + u32 loopback; /* MAC got into serdes loopback */ + u32 lb_mac_reset; + /* # of MAC 
reset to bring link up in loopback */ + u32 lb_pcs_reset; + /* # of PCS reset to bring link up in loopback */ + u32 rsvd; /* 64bit align */ +}; + +/** + * CT MOD stats + */ +struct bfa_fw_ct_mod_stats_s { + u32 rxa_rds_undrun; /* RxA RDS underrun */ + u32 rad_bpc_ovfl; /* RAD BPC overflow */ + u32 rad_rlb_bpc_ovfl; /* RAD RLB BPC overflow */ + u32 bpc_fcs_err; /* BPC FCS_ERR */ + u32 txa_tso_hdr; /* TxA TSO header too long */ + u32 rsvd; /* 64bit align */ +}; + +/** + * IOC firmware stats + */ +struct bfa_fw_stats_s { + struct bfa_fw_ioc_stats_s ioc_stats; + struct bfa_fw_iocfc_stats_s iocfc_stats; + struct bfa_fw_io_stats_s io_stats; + struct bfa_fw_port_stats_s port_stats; + struct bfa_fw_fcxchg_stats_s fcxchg_stats; + struct bfa_fw_lpsm_stats_s lpsm_stats; + struct bfa_fw_lps_stats_s lps_stats; + struct bfa_fw_trunk_stats_s trunk_stats; + struct bfa_fw_advsm_stats_s advsm_stats; + struct bfa_fw_mac_mod_stats_s macmod_stats; + struct bfa_fw_ct_mod_stats_s ctmod_stats; + struct bfa_fw_eth_sndrcv_stats_s ethsndrcv_stats; +}; + +#define BFA_IOCFC_PATHTOV_MAX 60 +#define BFA_IOCFC_QDEPTH_MAX 2000 + +/** + * QoS states + */ +enum bfa_qos_state { + BFA_QOS_ONLINE = 1, /* QoS is online */ + BFA_QOS_OFFLINE = 2, /* QoS is offline */ +}; + +/** + * QoS Priority levels. + */ +enum bfa_qos_priority { + BFA_QOS_UNKNOWN = 0, + BFA_QOS_HIGH = 1, /* QoS Priority Level High */ + BFA_QOS_MED = 2, /* QoS Priority Level Medium */ + BFA_QOS_LOW = 3, /* QoS Priority Level Low */ +}; + +/** + * QoS bandwidth allocation for each priority level + */ +enum bfa_qos_bw_alloc { + BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */ + BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */ + BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ +}; +#pragma pack(1) +/** + * QoS attribute returned in QoS Query + */ +struct bfa_qos_attr_s { + u8 state; /* QoS current state */ + u8 rsvd[3]; + u32 total_bb_cr; /* Total BB Credits */ +}; + +/** + * These fields should be displayed only from the CLI. + * There will be a separate BFAL API (get_qos_vc_attr ?) + * to retrieve this. 
+ * + */ +#define BFA_QOS_MAX_VC 16 + +struct bfa_qos_vc_info_s { + u8 vc_credit; + u8 borrow_credit; + u8 priority; + u8 resvd; +}; + +struct bfa_qos_vc_attr_s { + u16 total_vc_count; /* Total VC Count */ + u16 shared_credit; + u32 elp_opmode_flags; + struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as + * total_vc_count */ +}; + +/** + * QoS statistics + */ +struct bfa_qos_stats_s { + u32 flogi_sent; /* QoS Flogi sent */ + u32 flogi_acc_recvd; /* QoS Flogi Acc received */ + u32 flogi_rjt_recvd; /* QoS Flogi rejects received */ + u32 flogi_retries; /* QoS Flogi retries */ + + u32 elp_recvd; /* QoS ELP received */ + u32 elp_accepted; /* QoS ELP Accepted */ + u32 elp_rejected; /* QoS ELP rejected */ + u32 elp_dropped; /* QoS ELP dropped */ + + u32 qos_rscn_recvd; /* QoS RSCN received */ + u32 rsvd; /* padding for 64 bit alignment */ +}; + +/** + * FCoE statistics + */ +struct bfa_fcoe_stats_s { + u64 secs_reset; /* Seconds since stats reset */ + u64 cee_linkups; /* CEE link up */ + u64 cee_linkdns; /* CEE link down */ + u64 fip_linkups; /* FIP link up */ + u64 fip_linkdns; /* FIP link down */ + u64 fip_fails; /* FIP failures */ + u64 mac_invalids; /* Invalid mac assignments */ + u64 vlan_req; /* Vlan requests */ + u64 vlan_notify; /* Vlan notifications */ + u64 vlan_err; /* Vlan notification errors */ + u64 vlan_timeouts; /* Vlan request timeouts */ + u64 vlan_invalids; /* Vlan invalids */ + u64 disc_req; /* Discovery requests */ + u64 disc_rsp; /* Discovery responses */ + u64 disc_err; /* Discovery error frames */ + u64 disc_unsol; /* Discovery unsolicited */ + u64 disc_timeouts; /* Discovery timeouts */ + u64 disc_fcf_unavail; /* Discovery FCF not avail */ + u64 linksvc_unsupp; /* FIP link service req unsupp. */ + u64 linksvc_err; /* FIP link service req errors */ + u64 logo_req; /* FIP logos received */ + u64 clrvlink_req; /* Clear virtual link requests */ + u64 op_unsupp; /* FIP operation unsupp. 
*/ + u64 untagged; /* FIP untagged frames */ + u64 txf_ucast; /* Tx FCoE unicast frames */ + u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */ + u64 txf_ucast_octets; /* Tx FCoE unicast octets */ + u64 txf_mcast; /* Tx FCoE multicast frames */ + u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */ + u64 txf_mcast_octets; /* Tx FCoE multicast octets */ + u64 txf_bcast; /* Tx FCoE broadcast frames */ + u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */ + u64 txf_bcast_octets; /* Tx FCoE broadcast octets */ + u64 txf_timeout; /* Tx timeouts */ + u64 txf_parity_errors; /* Transmit parity err */ + u64 txf_fid_parity_errors; /* Transmit FID parity err */ + u64 rxf_ucast_octets; /* Rx FCoE unicast octets */ + u64 rxf_ucast; /* Rx FCoE unicast frames */ + u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */ + u64 rxf_mcast_octets; /* Rx FCoE multicast octets */ + u64 rxf_mcast; /* Rx FCoE multicast frames */ + u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */ + u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */ + u64 rxf_bcast; /* Rx FCoE broadcast frames */ + u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */ +}; + +/** + * QoS or FCoE stats (fcport stats excluding physical FC port stats) + */ +union bfa_fcport_stats_u { + struct bfa_qos_stats_s fcqos; + struct bfa_fcoe_stats_s fcoe; +}; +#pragma pack() + +struct bfa_fcpim_del_itn_stats_s { + u32 del_itn_iocomp_aborted; /* Aborted IO requests */ + u32 del_itn_iocomp_timedout; /* IO timeouts */ + u32 del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery */ + u32 del_itn_iocom_res_free; /* Delayed freeing of IO resources */ + u32 del_itn_iocom_hostabrts; /* Host IO abort requests */ + u32 del_itn_total_ios; /* Total IO count */ + u32 del_io_iocdowns; /* IO cleaned-up due to IOC down */ + u32 del_tm_iocdowns; /* TM cleaned-up due to IOC down */ +}; + +struct bfa_itnim_iostats_s { + + u32 total_ios; /* Total IO Requests */ + u32 input_reqs; /* Data in-bound requests */ + u32 output_reqs; /* Data out-bound requests */ + u32 io_comps; /* Total IO Completions */ + u32 wr_throughput; /* Write data transfered in bytes */ + u32 rd_throughput; /* Read data transfered in bytes */ + + u32 iocomp_ok; /* Slowpath IO completions */ + u32 iocomp_underrun; /* IO underrun */ + u32 iocomp_overrun; /* IO overrun */ + u32 qwait; /* IO Request-Q wait */ + u32 qresumes; /* IO Request-Q wait done */ + u32 no_iotags; /* No free IO tag */ + u32 iocomp_timedout; /* IO timeouts */ + u32 iocom_nexus_abort; /* IO failure due to target offline */ + u32 iocom_proto_err; /* IO protocol errors */ + u32 iocom_dif_err; /* IO SBC-3 protection errors */ + + u32 iocom_sqer_needed; /* fcp-2 error recovery failed */ + u32 iocom_res_free; /* Delayed freeing of IO tag */ + + + u32 io_aborts; /* Host IO abort requests */ + u32 iocom_hostabrts; /* Host IO abort completions */ + u32 io_cleanups; /* IO clean-up requests */ + u32 path_tov_expired; /* IO path tov expired */ + u32 iocomp_aborted; /* IO abort completions */ + u32 io_iocdowns; /* IO cleaned-up due to IOC down */ + u32 iocom_utags; /* IO comp with unknown tags */ + + u32 io_tmaborts; /* Abort request due to TM command */ + u32 tm_io_comps; /* Abort completion due to TM command */ + + u32 creates; /* IT Nexus create requests */ + u32 fw_create; /* IT Nexus FW create requests */ + u32 create_comps; /* IT Nexus FW create completions */ + u32 onlines; /* IT Nexus onlines */ + u32 offlines; /* IT Nexus offlines */ + u32 fw_delete; /* IT Nexus FW delete requests */ + u32 delete_comps; /* IT Nexus FW delete 
completions */ + u32 deletes; /* IT Nexus delete requests */ + u32 sler_events; /* SLER events */ + u32 ioc_disabled; /* Num IOC disables */ + u32 cleanup_comps; /* IT Nexus cleanup completions */ + + u32 tm_cmnds; /* TM Requests */ + u32 tm_fw_rsps; /* TM Completions */ + u32 tm_success; /* TM initiated IO cleanup success */ + u32 tm_failures; /* TM initiated IO cleanup failure */ + u32 no_tskims; /* No free TM tag */ + u32 tm_qwait; /* TM Request-Q wait */ + u32 tm_qresumes; /* TM Request-Q wait done */ + + u32 tm_iocdowns; /* TM cleaned-up due to IOC down */ + u32 tm_cleanups; /* TM cleanup requests */ + u32 tm_cleanup_comps; /* TM cleanup completions */ +}; + +/* Modify char* port_stt[] in bfal_port.c if a new state was added */ +enum bfa_port_states { + BFA_PORT_ST_UNINIT = 1, + BFA_PORT_ST_ENABLING_QWAIT = 2, + BFA_PORT_ST_ENABLING = 3, + BFA_PORT_ST_LINKDOWN = 4, + BFA_PORT_ST_LINKUP = 5, + BFA_PORT_ST_DISABLING_QWAIT = 6, + BFA_PORT_ST_DISABLING = 7, + BFA_PORT_ST_DISABLED = 8, + BFA_PORT_ST_STOPPED = 9, + BFA_PORT_ST_IOCDOWN = 10, + BFA_PORT_ST_IOCDIS = 11, + BFA_PORT_ST_FWMISMATCH = 12, + BFA_PORT_ST_PREBOOT_DISABLED = 13, + BFA_PORT_ST_TOGGLING_QWAIT = 14, + BFA_PORT_ST_MAX_STATE, +}; + +/** + * Port operational type (in sync with SNIA port type). + */ +enum bfa_port_type { + BFA_PORT_TYPE_UNKNOWN = 1, /* port type is unknown */ + BFA_PORT_TYPE_NPORT = 5, /* P2P with switched fabric */ + BFA_PORT_TYPE_NLPORT = 6, /* public loop */ + BFA_PORT_TYPE_LPORT = 20, /* private loop */ + BFA_PORT_TYPE_P2P = 21, /* P2P with no switched fabric */ + BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */ +}; + +/** + * Port topology setting. A port's topology and fabric login status + * determine its operational type. + */ +enum bfa_port_topology { + BFA_PORT_TOPOLOGY_NONE = 0, /* No valid topology */ + BFA_PORT_TOPOLOGY_P2P = 1, /* P2P only */ + BFA_PORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ + BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ +}; + +/** + * Physical port loopback types. 
+ */ +enum bfa_port_opmode { + BFA_PORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */ + BFA_PORT_OPMODE_LB_INT = 0x01, /* internal loop back */ + BFA_PORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */ + BFA_PORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */ + BFA_PORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */ + BFA_PORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */ +}; + +#define BFA_PORT_OPMODE_LB_HARD(_mode) \ + ((_mode == BFA_PORT_OPMODE_LB_INT) || \ + (_mode == BFA_PORT_OPMODE_LB_SLW) || \ + (_mode == BFA_PORT_OPMODE_LB_EXT)) + +/** + * Port link state + */ +enum bfa_port_linkstate { + BFA_PORT_LINKUP = 1, /* Physical port/Trunk link up */ + BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */ +}; + +/** + * Port link state reason code + */ +enum bfa_port_linkstate_rsn { + BFA_PORT_LINKSTATE_RSN_NONE = 0, + BFA_PORT_LINKSTATE_RSN_DISABLED = 1, + BFA_PORT_LINKSTATE_RSN_RX_NOS = 2, + BFA_PORT_LINKSTATE_RSN_RX_OLS = 3, + BFA_PORT_LINKSTATE_RSN_RX_LIP = 4, + BFA_PORT_LINKSTATE_RSN_RX_LIPF7 = 5, + BFA_PORT_LINKSTATE_RSN_SFP_REMOVED = 6, + BFA_PORT_LINKSTATE_RSN_PORT_FAULT = 7, + BFA_PORT_LINKSTATE_RSN_RX_LOS = 8, + BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT = 9, + BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT = 10, + BFA_PORT_LINKSTATE_RSN_TIMEOUT = 11, + + + + /* CEE related reason codes/errors */ + CEE_LLDP_INFO_AGED_OUT = 20, + CEE_LLDP_SHUTDOWN_TLV_RCVD = 21, + CEE_PEER_NOT_ADVERTISE_DCBX = 22, + CEE_PEER_NOT_ADVERTISE_PG = 23, + CEE_PEER_NOT_ADVERTISE_PFC = 24, + CEE_PEER_NOT_ADVERTISE_FCOE = 25, + CEE_PG_NOT_COMPATIBLE = 26, + CEE_PFC_NOT_COMPATIBLE = 27, + CEE_FCOE_NOT_COMPATIBLE = 28, + CEE_BAD_PG_RCVD = 29, + CEE_BAD_BW_RCVD = 30, + CEE_BAD_PFC_RCVD = 31, + CEE_BAD_APP_PRI_RCVD = 32, + CEE_FCOE_PRI_PFC_OFF = 33, + CEE_DUP_CONTROL_TLV_RCVD = 34, + CEE_DUP_FEAT_TLV_RCVD = 35, + CEE_APPLY_NEW_CFG = 36, /* reason, not error */ + CEE_PROTOCOL_INIT = 37, /* reason, not error */ + CEE_PHY_LINK_DOWN = 38, + CEE_LLS_FCOE_ABSENT = 39, + CEE_LLS_FCOE_DOWN = 40, + CEE_ISCSI_NOT_COMPATIBLE = 41, + CEE_ISCSI_PRI_PFC_OFF = 42, + CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 +}; +#pragma pack(1) +/** + * Physical port configuration + */ +struct bfa_port_cfg_s { + u8 topology; /* bfa_port_topology */ + u8 speed; /* enum bfa_port_speed */ + u8 trunked; /* trunked or not */ + u8 qos_enabled; /* qos enabled or not */ + u8 cfg_hardalpa; /* is hard alpa configured */ + u8 hardalpa; /* configured hard alpa */ + u16 maxfrsize; /* maximum frame size */ + u8 rx_bbcredit; /* receive buffer credits */ + u8 tx_bbcredit; /* transmit buffer credits */ + u8 ratelimit; /* ratelimit enabled or not */ + u8 trl_def_speed; /* ratelimit default speed */ + u16 path_tov; /* device path timeout */ + u16 q_depth; /* SCSI Queue depth */ +}; +#pragma pack() + +/** + * Port attribute values. 
+ */ +struct bfa_port_attr_s { + /* + * Static fields + */ + wwn_t nwwn; /* node wwn */ + wwn_t pwwn; /* port wwn */ + wwn_t factorynwwn; /* factory node wwn */ + wwn_t factorypwwn; /* factory port wwn */ + enum fc_cos cos_supported; /* supported class of services */ + u32 rsvd; + struct fc_symname_s port_symname; /* port symbolic name */ + enum bfa_port_speed speed_supported; /* supported speeds */ + bfa_boolean_t pbind_enabled; + + /* + * Configured values + */ + struct bfa_port_cfg_s pport_cfg; /* pport cfg */ + + /* + * Dynamic field - info from BFA + */ + enum bfa_port_states port_state; /* current port state */ + enum bfa_port_speed speed; /* current speed */ + enum bfa_port_topology topology; /* current topology */ + bfa_boolean_t beacon; /* current beacon status */ + bfa_boolean_t link_e2e_beacon; /* link beacon is on */ + bfa_boolean_t plog_enabled; /* portlog is enabled */ + + /* + * Dynamic field - info from FCS + */ + u32 pid; /* port ID */ + enum bfa_port_type port_type; /* current topology */ + u32 loopback; /* external loopback */ + u32 authfail; /* auth fail state */ + bfa_boolean_t io_profile; /* get it from fcpim mod */ + u8 pad[4]; /* for 64-bit alignement */ + + /* FCoE specific */ + u16 fcoe_vlan; + u8 rsvd1[6]; +}; + +/** + * Port FCP mappings. + */ +struct bfa_port_fcpmap_s { + char osdevname[256]; + u32 bus; + u32 target; + u32 oslun; + u32 fcid; + wwn_t nwwn; + wwn_t pwwn; + u64 fcplun; + char luid[256]; +}; + +/** + * Port RNID info. + */ +struct bfa_port_rnid_s { + wwn_t wwn; + u32 unittype; + u32 portid; + u32 attached_nodes_num; + u16 ip_version; + u16 udp_port; + u8 ipaddr[16]; + u16 rsvd; + u16 topologydiscoveryflags; +}; + +#pragma pack(1) +struct bfa_fcport_fcf_s { + wwn_t name; /* FCF name */ + wwn_t fabric_name; /* Fabric Name */ + u8 fipenabled; /* FIP enabled or not */ + u8 fipfailed; /* FIP failed or not */ + u8 resv[2]; + u8 pri; /* FCF priority */ + u8 version; /* FIP version used */ + u8 available; /* Available for login */ + u8 fka_disabled; /* FKA is disabled */ + u8 maxsz_verified; /* FCoE max size verified */ + u8 fc_map[3]; /* FC map */ + u16 vlan; /* FCoE vlan tag/priority */ + u32 fka_adv_per; /* FIP ka advert. 
period */ + mac_t mac; /* FCF mac */ +}; + +/** + * Trunk states for BCU/BFAL + */ +enum bfa_trunk_state { + BFA_TRUNK_DISABLED = 0, /* Trunk is not configured */ + BFA_TRUNK_ONLINE = 1, /* Trunk is online */ + BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */ +}; + +/** + * VC attributes for trunked link + */ +struct bfa_trunk_vc_attr_s { + u32 bb_credit; + u32 elp_opmode_flags; + u32 req_credit; + u16 vc_credits[8]; +}; + +/** + * Link state information + */ +struct bfa_port_link_s { + u8 linkstate; /* Link state bfa_port_linkstate */ + u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */ + u8 topology; /* P2P/LOOP bfa_port_topology */ + u8 speed; /* Link speed (1/2/4/8 G) */ + u32 linkstate_opt; /* Linkstate optional data (debug) */ + u8 trunked; /* Trunked or not (1 or 0) */ + u8 resvd[3]; + struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ + union { + struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ + struct bfa_trunk_vc_attr_s trunk_vc_attr; + struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */ + } vc_fcf; +}; +#pragma pack() + +enum bfa_trunk_link_fctl { + BFA_TRUNK_LINK_FCTL_NORMAL, + BFA_TRUNK_LINK_FCTL_VC, + BFA_TRUNK_LINK_FCTL_VC_QOS, +}; + +enum bfa_trunk_link_state { + BFA_TRUNK_LINK_STATE_UP = 1, /* link part of trunk */ + BFA_TRUNK_LINK_STATE_DN_LINKDN = 2, /* physical link down */ + BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3, /* trunk group different */ + BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4, /* speed mismatch */ + BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5, /* remote port not trunked */ +}; + +#define BFA_TRUNK_MAX_PORTS 2 +struct bfa_trunk_link_attr_s { + wwn_t trunk_wwn; + enum bfa_trunk_link_fctl fctl; + enum bfa_trunk_link_state link_state; + enum bfa_port_speed speed; + u32 deskew; +}; + +struct bfa_trunk_attr_s { + enum bfa_trunk_state state; + enum bfa_port_speed speed; + u32 port_id; + u32 rsvd; + struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS]; +}; + +struct bfa_rport_hal_stats_s { + u32 sm_un_cr; /* uninit: create events */ + u32 sm_un_unexp; /* uninit: exception events */ + u32 sm_cr_on; /* created: online events */ + u32 sm_cr_del; /* created: delete events */ + u32 sm_cr_hwf; /* created: IOC down */ + u32 sm_cr_unexp; /* created: exception events */ + u32 sm_fwc_rsp; /* fw create: f/w responses */ + u32 sm_fwc_del; /* fw create: delete events */ + u32 sm_fwc_off; /* fw create: offline events */ + u32 sm_fwc_hwf; /* fw create: IOC down */ + u32 sm_fwc_unexp; /* fw create: exception events*/ + u32 sm_on_off; /* online: offline events */ + u32 sm_on_del; /* online: delete events */ + u32 sm_on_hwf; /* online: IOC down events */ + u32 sm_on_unexp; /* online: exception events */ + u32 sm_fwd_rsp; /* fw delete: fw responses */ + u32 sm_fwd_del; /* fw delete: delete events */ + u32 sm_fwd_hwf; /* fw delete: IOC down events */ + u32 sm_fwd_unexp; /* fw delete: exception events*/ + u32 sm_off_del; /* offline: delete events */ + u32 sm_off_on; /* offline: online events */ + u32 sm_off_hwf; /* offline: IOC down events */ + u32 sm_off_unexp; /* offline: exception events */ + u32 sm_del_fwrsp; /* delete: fw responses */ + u32 sm_del_hwf; /* delete: IOC down events */ + u32 sm_del_unexp; /* delete: exception events */ + u32 sm_delp_fwrsp; /* delete pend: fw responses */ + u32 sm_delp_hwf; /* delete pend: IOC downs */ + u32 sm_delp_unexp; /* delete pend: exceptions */ + u32 sm_offp_fwrsp; /* off-pending: fw responses */ + u32 sm_offp_del; /* off-pending: deletes */ + u32 sm_offp_hwf; /* off-pending: IOC downs */ + u32 sm_offp_unexp; /* off-pending: exceptions 
*/ + u32 sm_iocd_off; /* IOC down: offline events */ + u32 sm_iocd_del; /* IOC down: delete events */ + u32 sm_iocd_on; /* IOC down: online events */ + u32 sm_iocd_unexp; /* IOC down: exceptions */ + u32 rsvd; +}; +#pragma pack(1) +/** + * Rport's QoS attributes + */ +struct bfa_rport_qos_attr_s { + u8 qos_priority; /* rport's QoS priority */ + u8 rsvd[3]; + u32 qos_flow_id; /* QoS flow Id */ +}; +#pragma pack() + +#define BFA_IOBUCKET_MAX 14 + +struct bfa_itnim_latency_s { + u32 min[BFA_IOBUCKET_MAX]; + u32 max[BFA_IOBUCKET_MAX]; + u32 count[BFA_IOBUCKET_MAX]; + u32 avg[BFA_IOBUCKET_MAX]; +}; + +struct bfa_itnim_ioprofile_s { + u32 clock_res_mul; + u32 clock_res_div; + u32 index; + u32 io_profile_start_time; /* IO profile start time */ + u32 iocomps[BFA_IOBUCKET_MAX]; /* IO completed */ + struct bfa_itnim_latency_s io_latency; +}; + +/** + * FC physical port statistics. + */ +struct bfa_port_fc_stats_s { + u64 secs_reset; /* Seconds since stats is reset */ + u64 tx_frames; /* Tx frames */ + u64 tx_words; /* Tx words */ + u64 tx_lip; /* Tx LIP */ + u64 tx_nos; /* Tx NOS */ + u64 tx_ols; /* Tx OLS */ + u64 tx_lr; /* Tx LR */ + u64 tx_lrr; /* Tx LRR */ + u64 rx_frames; /* Rx frames */ + u64 rx_words; /* Rx words */ + u64 lip_count; /* Rx LIP */ + u64 nos_count; /* Rx NOS */ + u64 ols_count; /* Rx OLS */ + u64 lr_count; /* Rx LR */ + u64 lrr_count; /* Rx LRR */ + u64 invalid_crcs; /* Rx CRC err frames */ + u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */ + u64 undersized_frm; /* Rx undersized frames */ + u64 oversized_frm; /* Rx oversized frames */ + u64 bad_eof_frm; /* Rx frames with bad EOF */ + u64 error_frames; /* Errored frames */ + u64 dropped_frames; /* Dropped frames */ + u64 link_failures; /* Link Failure (LF) count */ + u64 loss_of_syncs; /* Loss of sync count */ + u64 loss_of_signals; /* Loss of signal count */ + u64 primseq_errs; /* Primitive sequence protocol err. */ + u64 bad_os_count; /* Invalid ordered sets */ + u64 err_enc_out; /* Encoding err nonframe_8b10b */ + u64 err_enc; /* Encoding err frame_8b10b */ +}; + +/** + * Eth Physical Port statistics. 
+ */ +struct bfa_port_eth_stats_s { + u64 secs_reset; /* Seconds since stats is reset */ + u64 frame_64; /* Frames 64 bytes */ + u64 frame_65_127; /* Frames 65-127 bytes */ + u64 frame_128_255; /* Frames 128-255 bytes */ + u64 frame_256_511; /* Frames 256-511 bytes */ + u64 frame_512_1023; /* Frames 512-1023 bytes */ + u64 frame_1024_1518; /* Frames 1024-1518 bytes */ + u64 frame_1519_1522; /* Frames 1519-1522 bytes */ + u64 tx_bytes; /* Tx bytes */ + u64 tx_packets; /* Tx packets */ + u64 tx_mcast_packets; /* Tx multicast packets */ + u64 tx_bcast_packets; /* Tx broadcast packets */ + u64 tx_control_frame; /* Tx control frame */ + u64 tx_drop; /* Tx drops */ + u64 tx_jabber; /* Tx jabber */ + u64 tx_fcs_error; /* Tx FCS errors */ + u64 tx_fragments; /* Tx fragments */ + u64 rx_bytes; /* Rx bytes */ + u64 rx_packets; /* Rx packets */ + u64 rx_mcast_packets; /* Rx multicast packets */ + u64 rx_bcast_packets; /* Rx broadcast packets */ + u64 rx_control_frames; /* Rx control frames */ + u64 rx_unknown_opcode; /* Rx unknown opcode */ + u64 rx_drop; /* Rx drops */ + u64 rx_jabber; /* Rx jabber */ + u64 rx_fcs_error; /* Rx FCS errors */ + u64 rx_alignment_error; /* Rx alignment errors */ + u64 rx_frame_length_error; /* Rx frame len errors */ + u64 rx_code_error; /* Rx code errors */ + u64 rx_fragments; /* Rx fragments */ + u64 rx_pause; /* Rx pause */ + u64 rx_zero_pause; /* Rx zero pause */ + u64 tx_pause; /* Tx pause */ + u64 tx_zero_pause; /* Tx zero pause */ + u64 rx_fcoe_pause; /* Rx FCoE pause */ + u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */ + u64 tx_fcoe_pause; /* Tx FCoE pause */ + u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */ + u64 rx_iscsi_pause; /* Rx iSCSI pause */ + u64 rx_iscsi_zero_pause; /* Rx iSCSI zero pause */ + u64 tx_iscsi_pause; /* Tx iSCSI pause */ + u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */ +}; + +/** + * Port statistics. + */ +union bfa_port_stats_u { + struct bfa_port_fc_stats_s fc; + struct bfa_port_eth_stats_s eth; +}; + +#endif /* __BFA_DEFS_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c new file mode 100644 index 000000000000..14127646dc54 --- /dev/null +++ b/drivers/scsi/bfa/bfa_drv.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include "bfa_modules.h" + +/** + * BFA module list terminated by NULL + */ +struct bfa_module_s *hal_mods[] = { + &hal_mod_sgpg, + &hal_mod_fcport, + &hal_mod_fcxp, + &hal_mod_lps, + &hal_mod_uf, + &hal_mod_rport, + &hal_mod_fcpim, + NULL +}; + +/** + * Message handlers for various modules. 
+ */ +bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { + bfa_isr_unhandled, /* NONE */ + bfa_isr_unhandled, /* BFI_MC_IOC */ + bfa_isr_unhandled, /* BFI_MC_DIAG */ + bfa_isr_unhandled, /* BFI_MC_FLASH */ + bfa_isr_unhandled, /* BFI_MC_CEE */ + bfa_fcport_isr, /* BFI_MC_FCPORT */ + bfa_isr_unhandled, /* BFI_MC_IOCFC */ + bfa_isr_unhandled, /* BFI_MC_LL */ + bfa_uf_isr, /* BFI_MC_UF */ + bfa_fcxp_isr, /* BFI_MC_FCXP */ + bfa_lps_isr, /* BFI_MC_LPS */ + bfa_rport_isr, /* BFI_MC_RPORT */ + bfa_itnim_isr, /* BFI_MC_ITNIM */ + bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ + bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ + bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ + bfa_ioim_isr, /* BFI_MC_IOIM */ + bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ + bfa_tskim_isr, /* BFI_MC_TSKIM */ + bfa_isr_unhandled, /* BFI_MC_SBOOT */ + bfa_isr_unhandled, /* BFI_MC_IPFC */ + bfa_isr_unhandled, /* BFI_MC_PORT */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ + bfa_isr_unhandled, /* --------- */ +}; + + +/** + * Message handlers for mailbox command classes + */ +bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { + NULL, + NULL, /* BFI_MC_IOC */ + NULL, /* BFI_MC_DIAG */ + NULL, /* BFI_MC_FLASH */ + NULL, /* BFI_MC_CEE */ + NULL, /* BFI_MC_PORT */ + bfa_iocfc_isr, /* BFI_MC_IOCFC */ + NULL, +}; + + + +void +bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) +{ + struct bfa_port_s *port = &bfa->modules.port; + u32 dm_len; + u8 *dm_kva; + u64 dm_pa; + + dm_len = bfa_port_meminfo(); + dm_kva = bfa_meminfo_dma_virt(mi); + dm_pa = bfa_meminfo_dma_phys(mi); + + memset(port, 0, sizeof(struct bfa_port_s)); + bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); + bfa_port_mem_claim(port, dm_kva, dm_pa); + + bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; + bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; +} diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h new file mode 100644 index 000000000000..6eff705564eb --- /dev/null +++ b/drivers/scsi/bfa/bfa_fc.h @@ -0,0 +1,1916 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef __BFA_FC_H__ +#define __BFA_FC_H__ + +#include "bfa_os_inc.h" + +typedef u64 wwn_t; +typedef u64 lun_t; + +#define WWN_NULL (0) +#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ +#define FC_ALPA_MAX 128 + +#pragma pack(1) + +#define MAC_ADDRLEN (6) +struct mac_s { u8 mac[MAC_ADDRLEN]; }; +#define mac_t struct mac_s + +/* + * generic SCSI cdb definition + */ +#define SCSI_MAX_CDBLEN 16 +struct scsi_cdb_s { + u8 scsi_cdb[SCSI_MAX_CDBLEN]; +}; +#define scsi_cdb_t struct scsi_cdb_s + +/* ------------------------------------------------------------ + * SCSI status byte values + * ------------------------------------------------------------ + */ +#define SCSI_STATUS_GOOD 0x00 +#define SCSI_STATUS_CHECK_CONDITION 0x02 +#define SCSI_STATUS_CONDITION_MET 0x04 +#define SCSI_STATUS_BUSY 0x08 +#define SCSI_STATUS_INTERMEDIATE 0x10 +#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */ +#define SCSI_STATUS_RESERVATION_CONFLICT 0x18 +#define SCSI_STATUS_COMMAND_TERMINATED 0x22 +#define SCSI_STATUS_QUEUE_FULL 0x28 +#define SCSI_STATUS_ACA_ACTIVE 0x30 + +#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */ + +/* + * Fibre Channel Header Structure (FCHS) definition + */ +struct fchs_s { +#ifdef __BIGENDIAN + u32 routing:4; /* routing bits */ + u32 cat_info:4; /* category info */ +#else + u32 cat_info:4; /* category info */ + u32 routing:4; /* routing bits */ +#endif + u32 d_id:24; /* destination identifier */ + + u32 cs_ctl:8; /* class specific control */ + u32 s_id:24; /* source identifier */ + + u32 type:8; /* data structure type */ + u32 f_ctl:24; /* initial frame control */ + + u8 seq_id; /* sequence identifier */ + u8 df_ctl; /* data field control */ + u16 seq_cnt; /* sequence count */ + + u16 ox_id; /* originator exchange ID */ + u16 rx_id; /* responder exchange ID */ + + u32 ro; /* relative offset */ +}; + +#define FC_SOF_LEN 4 +#define FC_EOF_LEN 4 +#define FC_CRC_LEN 4 + +/* + * Fibre Channel BB_E Header Structure + */ +struct fcbbehs_s { + u16 ver_rsvd; + u32 rsvd[2]; + u32 rsvd__sof; +}; + +#define FC_SEQ_ID_MAX 256 + +/* + * routing bit definitions + */ +enum { + FC_RTG_FC4_DEV_DATA = 0x0, /* FC-4 Device Data */ + FC_RTG_EXT_LINK = 0x2, /* Extended Link Data */ + FC_RTG_FC4_LINK_DATA = 0x3, /* FC-4 Link Data */ + FC_RTG_VIDEO_DATA = 0x4, /* Video Data */ + FC_RTG_EXT_HDR = 0x5, /* VFT, IFR or Encapsuled */ + FC_RTG_BASIC_LINK = 0x8, /* Basic Link data */ + FC_RTG_LINK_CTRL = 0xC, /* Link Control */ +}; + +/* + * information category for extended link data and FC-4 Link Data + */ +enum { + FC_CAT_LD_REQUEST = 0x2, /* Request */ + FC_CAT_LD_REPLY = 0x3, /* Reply */ + FC_CAT_LD_DIAG = 0xF, /* for DIAG use only */ +}; + +/* + * information category for extended headers (VFT, IFR or encapsulation) + */ +enum { + FC_CAT_VFT_HDR = 0x0, /* Virtual fabric tagging header */ + FC_CAT_IFR_HDR = 0x1, /* Inter-Fabric routing header */ + FC_CAT_ENC_HDR = 0x2, /* Encapsulation header */ +}; + +/* + * information category for FC-4 device data + */ +enum { + FC_CAT_UNCATEG_INFO = 0x0, /* Uncategorized information */ + FC_CAT_SOLICIT_DATA = 0x1, /* Solicited Data */ + FC_CAT_UNSOLICIT_CTRL = 0x2, /* Unsolicited Control */ + FC_CAT_SOLICIT_CTRL = 0x3, /* Solicited Control */ + FC_CAT_UNSOLICIT_DATA = 0x4, /* Unsolicited Data */ + FC_CAT_DATA_DESC = 0x5, /* Data Descriptor */ + FC_CAT_UNSOLICIT_CMD = 0x6, /* Unsolicited Command */ + FC_CAT_CMD_STATUS = 0x7, /* Command Status */ +}; + +/* + * information category for Link Control + */ +enum { + FC_CAT_ACK_1 = 
0x00, + FC_CAT_ACK_0_N = 0x01, + FC_CAT_P_RJT = 0x02, + FC_CAT_F_RJT = 0x03, + FC_CAT_P_BSY = 0x04, + FC_CAT_F_BSY_DATA = 0x05, + FC_CAT_F_BSY_LINK_CTL = 0x06, + FC_CAT_F_LCR = 0x07, + FC_CAT_NTY = 0x08, + FC_CAT_END = 0x09, +}; + +/* + * Type Field Definitions. FC-PH Section 18.5 pg. 165 + */ +enum { + FC_TYPE_BLS = 0x0, /* Basic Link Service */ + FC_TYPE_ELS = 0x1, /* Extended Link Service */ + FC_TYPE_IP = 0x5, /* IP */ + FC_TYPE_FCP = 0x8, /* SCSI-FCP */ + FC_TYPE_GPP = 0x9, /* SCSI_GPP */ + FC_TYPE_SERVICES = 0x20, /* Fibre Channel Services */ + FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */ + FC_TYPE_FC_AL = 0x23, /* FC-AL */ + FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */ + FC_TYPE_FC_SPINFAB = 0xEE, /* SPINFAB */ + FC_TYPE_FC_DIAG = 0xEF, /* DIAG */ + FC_TYPE_MAX = 256, /* 256 FC-4 types */ +}; + +struct fc_fc4types_s { + u8 bits[FC_TYPE_MAX / 8]; +}; + +/* + * Frame Control Definitions. FC-PH Table-45. pg. 168 + */ +enum { + FCTL_EC_ORIG = 0x000000, /* exchange originator */ + FCTL_EC_RESP = 0x800000, /* exchange responder */ + FCTL_SEQ_INI = 0x000000, /* sequence initiator */ + FCTL_SEQ_REC = 0x400000, /* sequence recipient */ + FCTL_FS_EXCH = 0x200000, /* first sequence of xchg */ + FCTL_LS_EXCH = 0x100000, /* last sequence of xchg */ + FCTL_END_SEQ = 0x080000, /* last frame of sequence */ + FCTL_SI_XFER = 0x010000, /* seq initiative transfer */ + FCTL_RO_PRESENT = 0x000008, /* relative offset present */ + FCTL_FILLBYTE_MASK = 0x000003 /* , fill byte mask */ +}; + +/* + * Fabric Well Known Addresses + */ +enum { + FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0, + FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00, + FC_ALIAS_SERVER = 0xFFFFF8, + FC_MGMT_SERVER = 0xFFFFFA, + FC_TIME_SERVER = 0xFFFFFB, + FC_NAME_SERVER = 0xFFFFFC, + FC_FABRIC_CONTROLLER = 0xFFFFFD, + FC_FABRIC_PORT = 0xFFFFFE, + FC_BROADCAST_SERVER = 0xFFFFFF +}; + +/* + * domain/area/port defines + */ +#define FC_DOMAIN_MASK 0xFF0000 +#define FC_DOMAIN_SHIFT 16 +#define FC_AREA_MASK 0x00FF00 +#define FC_AREA_SHIFT 8 +#define FC_PORT_MASK 0x0000FF +#define FC_PORT_SHIFT 0 + +#define FC_GET_DOMAIN(p) (((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT) +#define FC_GET_AREA(p) (((p) & FC_AREA_MASK) >> FC_AREA_SHIFT) +#define FC_GET_PORT(p) (((p) & FC_PORT_MASK) >> FC_PORT_SHIFT) + +#define FC_DOMAIN_CTRLR(p) (FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p))) + +enum { + FC_RXID_ANY = 0xFFFFU, +}; + +/* + * generic ELS command + */ +struct fc_els_cmd_s { + u32 els_code:8; /* ELS Command Code */ + u32 reserved:24; +}; + +/* + * ELS Command Codes. FC-PH Table-75. pg. 223 + */ +enum { + FC_ELS_LS_RJT = 0x1, /* Link Service Reject. */ + FC_ELS_ACC = 0x02, /* Accept */ + FC_ELS_PLOGI = 0x03, /* N_Port Login. */ + FC_ELS_FLOGI = 0x04, /* F_Port Login. */ + FC_ELS_LOGO = 0x05, /* Logout. */ + FC_ELS_ABTX = 0x06, /* Abort Exchange */ + FC_ELS_RES = 0x08, /* Read Exchange status */ + FC_ELS_RSS = 0x09, /* Read sequence status block */ + FC_ELS_RSI = 0x0A, /* Request Sequence Initiative */ + FC_ELS_ESTC = 0x0C, /* Estimate Credit. */ + FC_ELS_RTV = 0x0E, /* Read Timeout Value. */ + FC_ELS_RLS = 0x0F, /* Read Link Status. */ + FC_ELS_ECHO = 0x10, /* Echo */ + FC_ELS_TEST = 0x11, /* Test */ + FC_ELS_RRQ = 0x12, /* Reinstate Recovery Qualifier. */ + FC_ELS_REC = 0x13, /* Add this for TAPE support in FCR */ + FC_ELS_PRLI = 0x20, /* Process Login */ + FC_ELS_PRLO = 0x21, /* Process Logout. */ + FC_ELS_SCN = 0x22, /* State Change Notification. */ + FC_ELS_TPRLO = 0x24, /* Third Party Process Logout. */ + FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. 
*/
+ FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */
+ FC_ELS_ADISC = 0x52, /* Discover Address. */
+ FC_ELS_FARP_REQ = 0x54, /* FARP Request. */
+ FC_ELS_FARP_REP = 0x55, /* FARP Reply. */
+ FC_ELS_FAN = 0x60, /* Fabric Address Notification */
+ FC_ELS_RSCN = 0x61, /* Reg State Change Notification */
+ FC_ELS_SCR = 0x62, /* State Change Registration. */
+ FC_ELS_RTIN = 0x77, /* Management server request */
+ FC_ELS_RNID = 0x78, /* Management server request */
+ FC_ELS_RLIR = 0x79, /* Registered Link Incident Record */
+
+ FC_ELS_RPSC = 0x7D, /* Report Port Speed Capabilities */
+ FC_ELS_QSA = 0x7E, /* Query Security Attributes. Ref FC-SP */
+ FC_ELS_E2E_LBEACON = 0x81,
+ /* End-to-End Link Beacon */
+ FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */
+ FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref
+ *FC-SP */
+
+};
+
+/*
+ * Version numbers for FC-PH standards,
+ * used in login to indicate what port
+ * supports. See FC-PH-X table 158.
+ */
+enum {
+ FC_PH_VER_4_3 = 0x09,
+ FC_PH_VER_PH_3 = 0x20,
+};
+
+/*
+ * PDU size defines
+ */
+enum {
+ FC_MIN_PDUSZ = 512,
+ FC_MAX_PDUSZ = 2112,
+};
+
+/*
+ * N_Port PLOGI Common Service Parameters.
+ * FC-PH-x. Figure-76. pg. 308.
+ */
+struct fc_plogi_csp_s {
+ u8 verhi; /* FC-PH high version */
+ u8 verlo; /* FC-PH low version */
+ u16 bbcred; /* BB_Credit */
+
+#ifdef __BIGENDIAN
+ u8 ciro:1, /* continuously increasing RO */
+ rro:1, /* random relative offset */
+ npiv_supp:1, /* NPIV supported */
+ port_type:1, /* N_Port/F_port */
+ altbbcred:1, /* alternate BB_Credit */
+ resolution:1, /* ms/ns ED_TOV resolution */
+ vvl_info:1, /* VVL Info included */
+ reserved1:1;
+
+ u8 hg_supp:1,
+ query_dbc:1,
+ security:1,
+ sync_cap:1,
+ r_t_tov:1,
+ dh_dup_supp:1,
+ cisc:1, /* continuously increasing seq count */
+ payload:1;
+#else
+ u8 reserved2:2,
+ resolution:1, /* ms/ns ED_TOV resolution */
+ altbbcred:1, /* alternate BB_Credit */
+ port_type:1, /* N_Port/F_port */
+ npiv_supp:1, /* NPIV supported */
+ rro:1, /* random relative offset */
+ ciro:1; /* continuously increasing RO */
+
+ u8 payload:1,
+ cisc:1, /* continuously increasing seq count */
+ dh_dup_supp:1,
+ r_t_tov:1,
+ sync_cap:1,
+ security:1,
+ query_dbc:1,
+ hg_supp:1;
+#endif
+
+ u16 rxsz; /* receive data_field size */
+
+ u16 conseq;
+ u16 ro_bitmap;
+
+ u32 e_d_tov;
+};
+
+/*
+ * N_Port PLOGI Class Specific Parameters.
+ * FC-PH-x. Figure 78. pg. 318.
+ */
+struct fc_plogi_clp_s {
+#ifdef __BIGENDIAN
+ u32 class_valid:1;
+ u32 intermix:1; /* class intermix supported if set =1.
+ * valid only for class1. Reserved for
+ * class2 & class3
+ */
+ u32 reserved1:2;
+ u32 sequential:1;
+ u32 reserved2:3;
+#else
+ u32 reserved2:3;
+ u32 sequential:1;
+ u32 reserved1:2;
+ u32 intermix:1; /* class intermix supported if set =1.
+ * valid only for class1.
Reserved for + * class2 & class3 + */ + u32 class_valid:1; +#endif + + u32 reserved3:24; + + u32 reserved4:16; + u32 rxsz:16; /* Receive data_field size */ + + u32 reserved5:8; + u32 conseq:8; + u32 e2e_credit:16; /* end to end credit */ + + u32 reserved7:8; + u32 ospx:8; + u32 reserved8:16; +}; + +/* ASCII value for each character in string "BRCD" */ +#define FLOGI_VVL_BRCD 0x42524344 + +/* + * PLOGI els command and reply payload + */ +struct fc_logi_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + struct fc_plogi_csp_s csp; /* common service params */ + wwn_t port_name; + wwn_t node_name; + struct fc_plogi_clp_s class1; /* class 1 service parameters */ + struct fc_plogi_clp_s class2; /* class 2 service parameters */ + struct fc_plogi_clp_s class3; /* class 3 service parameters */ + struct fc_plogi_clp_s class4; /* class 4 service parameters */ + u8 vvl[16]; /* vendor version level */ +}; + +/* + * LOGO els command payload + */ +struct fc_logo_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 nport_id:24; /* N_Port identifier of source */ + wwn_t orig_port_name; /* Port name of the LOGO originator */ +}; + +/* + * ADISC els command payload + */ +struct fc_adisc_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 orig_HA:24; /* originator hard address */ + wwn_t orig_port_name; /* originator port name */ + wwn_t orig_node_name; /* originator node name */ + u32 res2:8; + u32 nport_id:24; /* originator NPortID */ +}; + +/* + * Exchange status block + */ +struct fc_exch_status_blk_s { + u32 oxid:16; + u32 rxid:16; + u32 res1:8; + u32 orig_np:24; /* originator NPortID */ + u32 res2:8; + u32 resp_np:24; /* responder NPortID */ + u32 es_bits; + u32 res3; + /* + * un modified section of the fields + */ +}; + +/* + * RES els command payload + */ +struct fc_res_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 nport_id:24; /* N_Port identifier of source */ + u32 oxid:16; + u32 rxid:16; + u8 assoc_hdr[32]; +}; + +/* + * RES els accept payload + */ +struct fc_res_acc_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */ +}; + +/* + * REC els command payload + */ +struct fc_rec_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 nport_id:24; /* N_Port identifier of source */ + u32 oxid:16; + u32 rxid:16; +}; + +#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */ +#define FC_REC_ESB_SI 0x40000000 /* SI is owned */ +#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */ +#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */ +#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */ +#define FC_REC_ESB_ERRP_MSK 0x03000000 +#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */ +#define FC_REC_ESB_RXID_INV 0x00400000 /* invalid RXID */ +#define FC_REC_ESB_PRIO_INUSE 0x00200000 + +/* + * REC els accept payload + */ +struct fc_rec_acc_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 oxid:16; + u32 rxid:16; + u32 res1:8; + u32 orig_id:24; /* N_Port id of exchange originator */ + u32 res2:8; + u32 resp_id:24; /* N_Port id of exchange responder */ + u32 count; /* data transfer count */ + u32 e_stat; /* exchange status */ +}; + +/* + * RSI els payload + */ +struct fc_rsi_s { + struct fc_els_cmd_s els_cmd; + u32 res1:8; + u32 orig_sid:24; + u32 oxid:16; + u32 rxid:16; +}; + +/* + * structure for PRLI paramater pages, both request & response + * see FC-PH-X table 113 & 115 for 
explanation also FCP table 8 + */ +struct fc_prli_params_s { + u32 reserved:16; +#ifdef __BIGENDIAN + u32 reserved1:5; + u32 rec_support:1; + u32 task_retry_id:1; + u32 retry:1; + + u32 confirm:1; + u32 doverlay:1; + u32 initiator:1; + u32 target:1; + u32 cdmix:1; + u32 drmix:1; + u32 rxrdisab:1; + u32 wxrdisab:1; +#else + u32 retry:1; + u32 task_retry_id:1; + u32 rec_support:1; + u32 reserved1:5; + + u32 wxrdisab:1; + u32 rxrdisab:1; + u32 drmix:1; + u32 cdmix:1; + u32 target:1; + u32 initiator:1; + u32 doverlay:1; + u32 confirm:1; +#endif +}; + +/* + * valid values for rspcode in PRLI ACC payload + */ +enum { + FC_PRLI_ACC_XQTD = 0x1, /* request executed */ + FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */ +}; + +struct fc_prli_params_page_s { + u32 type:8; + u32 codext:8; +#ifdef __BIGENDIAN + u32 origprocasv:1; + u32 rsppav:1; + u32 imagepair:1; + u32 reserved1:1; + u32 rspcode:4; +#else + u32 rspcode:4; + u32 reserved1:1; + u32 imagepair:1; + u32 rsppav:1; + u32 origprocasv:1; +#endif + u32 reserved2:8; + + u32 origprocas; + u32 rspprocas; + struct fc_prli_params_s servparams; +}; + +/* + * PRLI request and accept payload, FC-PH-X tables 112 & 114 + */ +struct fc_prli_s { + u32 command:8; + u32 pglen:8; + u32 pagebytes:16; + struct fc_prli_params_page_s parampage; +}; + +/* + * PRLO logout params page + */ +struct fc_prlo_params_page_s { + u32 type:8; + u32 type_ext:8; +#ifdef __BIGENDIAN + u32 opa_valid:1; /* originator process associator + * valid + */ + u32 rpa_valid:1; /* responder process associator valid */ + u32 res1:14; +#else + u32 res1:14; + u32 rpa_valid:1; /* responder process associator valid */ + u32 opa_valid:1; /* originator process associator + * valid + */ +#endif + u32 orig_process_assc; + u32 resp_process_assc; + + u32 res2; +}; + +/* + * PRLO els command payload + */ +struct fc_prlo_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + struct fc_prlo_params_page_s prlo_params[1]; +}; + +/* + * PRLO Logout response parameter page + */ +struct fc_prlo_acc_params_page_s { + u32 type:8; + u32 type_ext:8; + +#ifdef __BIGENDIAN + u32 opa_valid:1; /* originator process associator + * valid + */ + u32 rpa_valid:1; /* responder process associator valid */ + u32 res1:14; +#else + u32 res1:14; + u32 rpa_valid:1; /* responder process associator valid */ + u32 opa_valid:1; /* originator process associator + * valid + */ +#endif + u32 orig_process_assc; + u32 resp_process_assc; + + u32 fc4type_csp; +}; + +/* + * PRLO els command ACC payload + */ +struct fc_prlo_acc_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + struct fc_prlo_acc_params_page_s prlo_acc_params[1]; +}; + +/* + * SCR els command payload + */ +enum { + FC_SCR_REG_FUNC_FABRIC_DETECTED = 0x01, + FC_SCR_REG_FUNC_N_PORT_DETECTED = 0x02, + FC_SCR_REG_FUNC_FULL = 0x03, + FC_SCR_REG_FUNC_CLEAR_REG = 0xFF, +}; + +/* SCR VU registrations */ +enum { + FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01 +}; + +struct fc_scr_s { + u32 command:8; + u32 res:24; + u32 vu_reg_func:8; /* Vendor Unique Registrations */ + u32 res1:16; + u32 reg_func:8; +}; + +/* + * Information category for Basic link data + */ +enum { + FC_CAT_NOP = 0x0, + FC_CAT_ABTS = 0x1, + FC_CAT_RMC = 0x2, + FC_CAT_BA_ACC = 0x4, + FC_CAT_BA_RJT = 0x5, + FC_CAT_PRMT = 0x6, +}; + +/* + * LS_RJT els reply payload + */ +struct fc_ls_rjt_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 reason_code:8; /* Reason code for reject */ + u32 reason_code_expl:8; /* Reason code explanation */ + u32 
vendor_unique:8; /* Vendor specific */ +}; + +/* + * LS_RJT reason codes + */ +enum { + FC_LS_RJT_RSN_INV_CMD_CODE = 0x01, + FC_LS_RJT_RSN_LOGICAL_ERROR = 0x03, + FC_LS_RJT_RSN_LOGICAL_BUSY = 0x05, + FC_LS_RJT_RSN_PROTOCOL_ERROR = 0x07, + FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD = 0x09, + FC_LS_RJT_RSN_CMD_NOT_SUPP = 0x0B, +}; + +/* + * LS_RJT reason code explanation + */ +enum { + FC_LS_RJT_EXP_NO_ADDL_INFO = 0x00, + FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS = 0x01, + FC_LS_RJT_EXP_SPARMS_ERR_INI_CTL = 0x03, + FC_LS_RJT_EXP_SPARMS_ERR_REC_CTL = 0x05, + FC_LS_RJT_EXP_SPARMS_ERR_RXSZ = 0x07, + FC_LS_RJT_EXP_SPARMS_ERR_CONSEQ = 0x09, + FC_LS_RJT_EXP_SPARMS_ERR_CREDIT = 0x0B, + FC_LS_RJT_EXP_INV_PORT_NAME = 0x0D, + FC_LS_RJT_EXP_INV_NODE_FABRIC_NAME = 0x0E, + FC_LS_RJT_EXP_INV_CSP = 0x0F, + FC_LS_RJT_EXP_INV_ASSOC_HDR = 0x11, + FC_LS_RJT_EXP_ASSOC_HDR_REQD = 0x13, + FC_LS_RJT_EXP_INV_ORIG_S_ID = 0x15, + FC_LS_RJT_EXP_INV_OXID_RXID_COMB = 0x17, + FC_LS_RJT_EXP_CMD_ALREADY_IN_PROG = 0x19, + FC_LS_RJT_EXP_LOGIN_REQUIRED = 0x1E, + FC_LS_RJT_EXP_INVALID_NPORT_ID = 0x1F, + FC_LS_RJT_EXP_INSUFF_RES = 0x29, + FC_LS_RJT_EXP_CMD_NOT_SUPP = 0x2C, + FC_LS_RJT_EXP_INV_PAYLOAD_LEN = 0x2D, +}; + +/* + * RRQ els command payload + */ +struct fc_rrq_s { + struct fc_els_cmd_s els_cmd; /* ELS command code */ + u32 res1:8; + u32 s_id:24; /* exchange originator S_ID */ + + u32 ox_id:16; /* originator exchange ID */ + u32 rx_id:16; /* responder exchange ID */ + + u32 res2[8]; /* optional association header */ +}; + +/* + * ABTS BA_ACC reply payload + */ +struct fc_ba_acc_s { + u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ + u32 seq_id:8; /* invalid for Abort Exchange */ + u32 res2:16; + u32 ox_id:16; /* OX_ID from ABTS frame */ + u32 rx_id:16; /* RX_ID from ABTS frame */ + u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */ + u32 high_seq_cnt:16;/* set to 0xFFFF for Abort Exchange */ +}; + +/* + * ABTS BA_RJT reject payload + */ +struct fc_ba_rjt_s { + u32 res1:8; /* Reserved */ + u32 reason_code:8; /* reason code for reject */ + u32 reason_expl:8; /* reason code explanation */ + u32 vendor_unique:8;/* vendor unique reason code,set to 0 */ +}; + +/* + * TPRLO logout parameter page + */ +struct fc_tprlo_params_page_s { +u32 type:8; +u32 type_ext:8; + +#ifdef __BIGENDIAN + u32 opa_valid:1; + u32 rpa_valid:1; + u32 tpo_nport_valid:1; + u32 global_process_logout:1; + u32 res1:12; +#else + u32 res1:12; + u32 global_process_logout:1; + u32 tpo_nport_valid:1; + u32 rpa_valid:1; + u32 opa_valid:1; +#endif + + u32 orig_process_assc; + u32 resp_process_assc; + + u32 res2:8; + u32 tpo_nport_id; +}; + +/* + * TPRLO ELS command payload + */ +struct fc_tprlo_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + + struct fc_tprlo_params_page_s tprlo_params[1]; +}; + +enum fc_tprlo_type { + FC_GLOBAL_LOGO = 1, + FC_TPR_LOGO +}; + +/* + * TPRLO els command ACC payload + */ +struct fc_tprlo_acc_s { + u32 command:8; + u32 page_len:8; + u32 payload_len:16; + struct fc_prlo_acc_params_page_s tprlo_acc_params[1]; +}; + +/* + * RSCN els command req payload + */ +#define FC_RSCN_PGLEN 0x4 + +enum fc_rscn_format { + FC_RSCN_FORMAT_PORTID = 0x0, + FC_RSCN_FORMAT_AREA = 0x1, + FC_RSCN_FORMAT_DOMAIN = 0x2, + FC_RSCN_FORMAT_FABRIC = 0x3, +}; + +struct fc_rscn_event_s { + u32 format:2; + u32 qualifier:4; + u32 resvd:2; + u32 portid:24; +}; + +struct fc_rscn_pl_s { + u8 command; + u8 pagelen; + u16 payldlen; + struct fc_rscn_event_s event[1]; +}; + +/* + * ECHO els command req payload + */ +struct fc_echo_s { + struct fc_els_cmd_s 
els_cmd; +}; + +/* + * RNID els command + */ + +#define RNID_NODEID_DATA_FORMAT_COMMON 0x00 +#define RNID_NODEID_DATA_FORMAT_FCP3 0x08 +#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF + +#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 +#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 +#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 +#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 +#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005 +#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009 +#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A +#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B +#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E +#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011 +#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002 +#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 +#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF + +/* + * RNID els command payload + */ +struct fc_rnid_cmd_s { + struct fc_els_cmd_s els_cmd; + u32 node_id_data_format:8; + u32 reserved:24; +}; + +/* + * RNID els response payload + */ + +struct fc_rnid_common_id_data_s { + wwn_t port_name; + wwn_t node_name; +}; + +struct fc_rnid_general_topology_data_s { + u32 vendor_unique[4]; + u32 asso_type; + u32 phy_port_num; + u32 num_attached_nodes; + u32 node_mgmt:8; + u32 ip_version:8; + u32 udp_tcp_port_num:16; + u32 ip_address[4]; + u32 reserved:16; + u32 vendor_specific:16; +}; + +struct fc_rnid_acc_s { + struct fc_els_cmd_s els_cmd; + u32 node_id_data_format:8; + u32 common_id_data_length:8; + u32 reserved:8; + u32 specific_id_data_length:8; + struct fc_rnid_common_id_data_s common_id_data; + struct fc_rnid_general_topology_data_s gen_topology_data; +}; + +#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 +#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 +#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 +#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 +#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005 +#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009 +#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A +#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B +#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E +#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011 +#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002 +#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 +#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF + +enum fc_rpsc_speed_cap { + RPSC_SPEED_CAP_1G = 0x8000, + RPSC_SPEED_CAP_2G = 0x4000, + RPSC_SPEED_CAP_4G = 0x2000, + RPSC_SPEED_CAP_10G = 0x1000, + RPSC_SPEED_CAP_8G = 0x0800, + RPSC_SPEED_CAP_16G = 0x0400, + + RPSC_SPEED_CAP_UNKNOWN = 0x0001, +}; + +enum fc_rpsc_op_speed { + RPSC_OP_SPEED_1G = 0x8000, + RPSC_OP_SPEED_2G = 0x4000, + RPSC_OP_SPEED_4G = 0x2000, + RPSC_OP_SPEED_10G = 0x1000, + RPSC_OP_SPEED_8G = 0x0800, + RPSC_OP_SPEED_16G = 0x0400, + + RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */ +}; + +struct fc_rpsc_speed_info_s { + u16 port_speed_cap; /*! see enum fc_rpsc_speed_cap */ + u16 port_op_speed; /*! see enum fc_rpsc_op_speed */ +}; + +enum link_e2e_beacon_subcmd { + LINK_E2E_BEACON_ON = 1, + LINK_E2E_BEACON_OFF = 2 +}; + +enum beacon_type { + BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */ + BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */ + BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */ +}; + +struct link_e2e_beacon_param_s { + u8 beacon_type; /* Beacon Type. See enum beacon_type */ + u8 beacon_frequency; + /* Beacon frequency. 
Number of blinks + * per 10 seconds + */ + u16 beacon_duration;/* Beacon duration (in Seconds). The + * command operation should be + * terminated at the end of this + * timeout value. + * + * Ignored if diag_sub_cmd is + * LINK_E2E_BEACON_OFF. + * + * If 0, beaconing will continue till a + * BEACON OFF request is received + */ +}; + +/* + * Link E2E beacon request/good response format. + * For LS_RJTs use struct fc_ls_rjt_s + */ +struct link_e2e_beacon_req_s { + u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests * + *or FC_ELS_ACC in good replies */ + u32 ls_sub_cmd; /*! See enum link_e2e_beacon_subcmd */ + struct link_e2e_beacon_param_s beacon_parm; +}; + +/** + * If RPSC request is sent to the Domain Controller, the request is for + * all the ports within that domain (TODO - I don't think FOS implements + * this...). + */ +struct fc_rpsc_cmd_s { + struct fc_els_cmd_s els_cmd; +}; + +/* + * RPSC Acc + */ +struct fc_rpsc_acc_s { + u32 command:8; + u32 rsvd:8; + u32 num_entries:16; + + struct fc_rpsc_speed_info_s speed_info[1]; +}; + +/** + * If RPSC2 request is sent to the Domain Controller, + */ +#define FC_BRCD_TOKEN 0x42524344 + +struct fc_rpsc2_cmd_s { + struct fc_els_cmd_s els_cmd; + u32 token; + u16 resvd; + u16 num_pids; /* Number of pids in the request */ + struct { + u32 rsvd1:8; + u32 pid:24; /* port identifier */ + } pid_list[1]; +}; + +enum fc_rpsc2_port_type { + RPSC2_PORT_TYPE_UNKNOWN = 0, + RPSC2_PORT_TYPE_NPORT = 1, + RPSC2_PORT_TYPE_NLPORT = 2, + RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, + RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, +}; +/* + * RPSC2 portInfo entry structure + */ +struct fc_rpsc2_port_info_s { + u32 pid; /* PID */ + u16 resvd1; + u16 index; /* port number / index */ + u8 resvd2; + u8 type; /* port type N/NL/... */ + u16 speed; /* port Operating Speed */ +}; + +/* + * RPSC2 Accept payload + */ +struct fc_rpsc2_acc_s { + u8 els_cmd; + u8 resvd; + u16 num_pids; /* Number of pids in the request */ + struct fc_rpsc2_port_info_s port_info[1]; /* port information */ +}; + +/** + * bit fields so that multiple classes can be specified + */ +enum fc_cos { + FC_CLASS_2 = 0x04, + FC_CLASS_3 = 0x08, + FC_CLASS_2_3 = 0x0C, +}; + +/* + * symbolic name + */ +struct fc_symname_s { + u8 symname[FC_SYMNAME_MAX]; +}; + +struct fc_alpabm_s { + u8 alpa_bm[FC_ALPA_MAX / 8]; +}; + +/* + * protocol default timeout values + */ +#define FC_ED_TOV 2 +#define FC_REC_TOV (FC_ED_TOV + 1) +#define FC_RA_TOV 10 +#define FC_ELS_TOV (2 * FC_RA_TOV) +#define FC_FCCT_TOV (3 * FC_RA_TOV) + +/* + * virtual fabric related defines + */ +#define FC_VF_ID_NULL 0 /* must not be used as VF_ID */ +#define FC_VF_ID_MIN 1 +#define FC_VF_ID_MAX 0xEFF +#define FC_VF_ID_CTL 0xFEF /* control VF_ID */ + +/** + * Virtual Fabric Tagging header format + * @caution This is defined only in BIG ENDIAN format. + */ +struct fc_vft_s { + u32 r_ctl:8; + u32 ver:2; + u32 type:4; + u32 res_a:2; + u32 priority:3; + u32 vf_id:12; + u32 res_b:1; + u32 hopct:8; + u32 res_c:24; +}; + +/* + * FCP + */ +enum { + FCP_RJT = 0x01000000, /* SRR reject */ + FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */ + FCP_SRR = 0x14000000, /* Sequence Retransmission Request */ +}; + +/* + * SRR FC-4 LS payload + */ +struct fc_srr_s { + u32 ls_cmd; + u32 ox_id:16; /* ox-id */ + u32 rx_id:16; /* rx-id */ + u32 ro; /* relative offset */ + u32 r_ctl:8; /* R_CTL for I.U. 
*/ + u32 res:24; +}; + + +/* + * FCP_CMND definitions + */ +#define FCP_CMND_CDB_LEN 16 +#define FCP_CMND_LUN_LEN 8 + +struct fcp_cmnd_s { + lun_t lun; /* 64-bit LU number */ + u8 crn; /* command reference number */ +#ifdef __BIGENDIAN + u8 resvd:1, + priority:4, /* FCP-3: SAM-3 priority */ + taskattr:3; /* scsi task attribute */ +#else + u8 taskattr:3, /* scsi task attribute */ + priority:4, /* FCP-3: SAM-3 priority */ + resvd:1; +#endif + u8 tm_flags; /* task management flags */ +#ifdef __BIGENDIAN + u8 addl_cdb_len:6, /* additional CDB length words */ + iodir:2; /* read/write FCP_DATA IUs */ +#else + u8 iodir:2, /* read/write FCP_DATA IUs */ + addl_cdb_len:6; /* additional CDB length */ +#endif + scsi_cdb_t cdb; + + /* + * !!! additional cdb bytes follows here!!! + */ + u32 fcp_dl; /* bytes to be transferred */ +}; + +#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN) +#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len]) + +/* + * struct fcp_cmnd_s .iodir field values + */ +enum fcp_iodir { + FCP_IODIR_NONE = 0, + FCP_IODIR_WRITE = 1, + FCP_IODIR_READ = 2, + FCP_IODIR_RW = 3, +}; + +/* + * Task attribute field + */ +enum { + FCP_TASK_ATTR_SIMPLE = 0, + FCP_TASK_ATTR_HOQ = 1, + FCP_TASK_ATTR_ORDERED = 2, + FCP_TASK_ATTR_ACA = 4, + FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */ +}; + +/* + * Task management flags field - only one bit shall be set + */ +enum fcp_tm_cmnd { + FCP_TM_ABORT_TASK_SET = BIT(1), + FCP_TM_CLEAR_TASK_SET = BIT(2), + FCP_TM_LUN_RESET = BIT(4), + FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */ + FCP_TM_CLEAR_ACA = BIT(6), +}; + +/* + * FCP_XFER_RDY IU defines + */ +struct fcp_xfer_rdy_s { + u32 data_ro; + u32 burst_len; + u32 reserved; +}; + +/* + * FCP_RSP residue flags + */ +enum fcp_residue { + FCP_NO_RESIDUE = 0, /* no residue */ + FCP_RESID_OVER = 1, /* more data left that was not sent */ + FCP_RESID_UNDER = 2, /* less data than requested */ +}; + +enum { + FCP_RSPINFO_GOOD = 0, + FCP_RSPINFO_DATALEN_MISMATCH = 1, + FCP_RSPINFO_CMND_INVALID = 2, + FCP_RSPINFO_ROLEN_MISMATCH = 3, + FCP_RSPINFO_TM_NOT_SUPP = 4, + FCP_RSPINFO_TM_FAILED = 5, +}; + +struct fcp_rspinfo_s { + u32 res0:24; + u32 rsp_code:8; /* response code (as above) */ + u32 res1; +}; + +struct fcp_resp_s { + u32 reserved[2]; /* 2 words reserved */ + u16 reserved2; +#ifdef __BIGENDIAN + u8 reserved3:3; + u8 fcp_conf_req:1; /* FCP_CONF is requested */ + u8 resid_flags:2; /* underflow/overflow */ + u8 sns_len_valid:1;/* sense len is valid */ + u8 rsp_len_valid:1;/* response len is valid */ +#else + u8 rsp_len_valid:1;/* response len is valid */ + u8 sns_len_valid:1;/* sense len is valid */ + u8 resid_flags:2; /* underflow/overflow */ + u8 fcp_conf_req:1; /* FCP_CONF is requested */ + u8 reserved3:3; +#endif + u8 scsi_status; /* one byte SCSI status */ + u32 residue; /* residual data bytes */ + u32 sns_len; /* length od sense info */ + u32 rsp_len; /* length of response info */ +}; + +#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \ + (__fcprsp)->sns_len : 0) +#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? 
\ + (__fcprsp)->rsp_len : 0) +#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1)) +#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \ + fcp_rsplen(__fcprsp)) + +struct fcp_cmnd_fr_s { + struct fchs_s fchs; + struct fcp_cmnd_s fcp; +}; + +/* + * CT + */ +struct ct_hdr_s { + u32 rev_id:8; /* Revision of the CT */ + u32 in_id:24; /* Initiator Id */ + u32 gs_type:8; /* Generic service Type */ + u32 gs_sub_type:8; /* Generic service sub type */ + u32 options:8; /* options */ + u32 rsvrd:8; /* reserved */ + u32 cmd_rsp_code:16;/* ct command/response code */ + u32 max_res_size:16;/* maximum/residual size */ + u32 frag_id:8; /* fragment ID */ + u32 reason_code:8; /* reason code */ + u32 exp_code:8; /* explanation code */ + u32 vendor_unq:8; /* vendor unique */ +}; + +/* + * defines for the Revision + */ +enum { + CT_GS3_REVISION = 0x01, +}; + +/* + * defines for gs_type + */ +enum { + CT_GSTYPE_KEYSERVICE = 0xF7, + CT_GSTYPE_ALIASSERVICE = 0xF8, + CT_GSTYPE_MGMTSERVICE = 0xFA, + CT_GSTYPE_TIMESERVICE = 0xFB, + CT_GSTYPE_DIRSERVICE = 0xFC, +}; + +/* + * defines for gs_sub_type for gs type directory service + */ +enum { + CT_GSSUBTYPE_NAMESERVER = 0x02, +}; + +/* + * defines for gs_sub_type for gs type management service + */ +enum { + CT_GSSUBTYPE_CFGSERVER = 0x01, + CT_GSSUBTYPE_UNZONED_NS = 0x02, + CT_GSSUBTYPE_ZONESERVER = 0x03, + CT_GSSUBTYPE_LOCKSERVER = 0x04, + CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */ +}; + +/* + * defines for CT response code field + */ +enum { + CT_RSP_REJECT = 0x8001, + CT_RSP_ACCEPT = 0x8002, +}; + +/* + * defintions for CT reason code + */ +enum { + CT_RSN_INV_CMD = 0x01, + CT_RSN_INV_VER = 0x02, + CT_RSN_LOGIC_ERR = 0x03, + CT_RSN_INV_SIZE = 0x04, + CT_RSN_LOGICAL_BUSY = 0x05, + CT_RSN_PROTO_ERR = 0x07, + CT_RSN_UNABLE_TO_PERF = 0x09, + CT_RSN_NOT_SUPP = 0x0B, + CT_RSN_SERVER_NOT_AVBL = 0x0D, + CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E, + CT_RSN_VENDOR_SPECIFIC = 0xFF, + +}; + +/* + * definitions for explanations code for Name server + */ +enum { + CT_NS_EXP_NOADDITIONAL = 0x00, + CT_NS_EXP_ID_NOT_REG = 0x01, + CT_NS_EXP_PN_NOT_REG = 0x02, + CT_NS_EXP_NN_NOT_REG = 0x03, + CT_NS_EXP_CS_NOT_REG = 0x04, + CT_NS_EXP_IPN_NOT_REG = 0x05, + CT_NS_EXP_IPA_NOT_REG = 0x06, + CT_NS_EXP_FT_NOT_REG = 0x07, + CT_NS_EXP_SPN_NOT_REG = 0x08, + CT_NS_EXP_SNN_NOT_REG = 0x09, + CT_NS_EXP_PT_NOT_REG = 0x0A, + CT_NS_EXP_IPP_NOT_REG = 0x0B, + CT_NS_EXP_FPN_NOT_REG = 0x0C, + CT_NS_EXP_HA_NOT_REG = 0x0D, + CT_NS_EXP_FD_NOT_REG = 0x0E, + CT_NS_EXP_FF_NOT_REG = 0x0F, + CT_NS_EXP_ACCESSDENIED = 0x10, + CT_NS_EXP_UNACCEPTABLE_ID = 0x11, + CT_NS_EXP_DATABASEEMPTY = 0x12, + CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13, + CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14, + CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15, + CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16 +}; + +/* + * defintions for the explanation code for all servers + */ +enum { + CT_EXP_AUTH_EXCEPTION = 0xF1, + CT_EXP_DB_FULL = 0xF2, + CT_EXP_DB_EMPTY = 0xF3, + CT_EXP_PROCESSING_REQ = 0xF4, + CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5, + CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6 +}; + +/* + * Command codes for Name server + */ +enum { + GS_GID_PN = 0x0121, /* Get Id on port name */ + GS_GPN_ID = 0x0112, /* Get port name on ID */ + GS_GNN_ID = 0x0113, /* Get node name on ID */ + GS_GID_FT = 0x0171, /* Get Id on FC4 type */ + GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */ + GS_RFT_ID = 0x0217, /* Register fc4type on ID */ + GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */ + GS_RPN_ID = 0x0212, /* Register port name */ + GS_RNN_ID = 0x0213, 
/* Register node name */ + GS_RCS_ID = 0x0214, /* Register class of service */ + GS_RPT_ID = 0x021A, /* Register port type */ + GS_GA_NXT = 0x0100, /* Get all next */ + GS_RFF_ID = 0x021F, /* Register FC4 Feature */ +}; + +struct fcgs_id_req_s{ + u32 rsvd:8; + u32 dap:24; /* port identifier */ +}; +#define fcgs_gpnid_req_t struct fcgs_id_req_s +#define fcgs_gnnid_req_t struct fcgs_id_req_s +#define fcgs_gspnid_req_t struct fcgs_id_req_s + +struct fcgs_gidpn_req_s { + wwn_t port_name; /* port wwn */ +}; + +struct fcgs_gidpn_resp_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ +}; + +/** + * RFT_ID + */ +struct fcgs_rftid_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ + u32 fc4_type[8]; /* fc4 types */ +}; + +/** + * RFF_ID : Register FC4 features. + */ + +#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02 +#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 + +struct fcgs_rffid_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ + u32 rsvd1:16; + u32 fc4ftr_bits:8; /* fc4 feature bits */ + u32 fc4_type:8; /* corresponding FC4 Type */ +}; + +/** + * GID_FT Request + */ +struct fcgs_gidft_req_s { + u8 reserved; + u8 domain_id; /* domain, 0 - all fabric */ + u8 area_id; /* area, 0 - whole domain */ + u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ +}; /* GID_FT Request */ + +/** + * GID_FT Response + */ +struct fcgs_gidft_resp_s { + u8 last:1; /* last port identifier flag */ + u8 reserved:7; + u32 pid:24; /* port identifier */ +}; /* GID_FT Response */ + +/** + * RSPN_ID + */ +struct fcgs_rspnid_req_s { + u32 rsvd:8; + u32 dap:24; /* port identifier */ + u8 spn_len; /* symbolic port name length */ + u8 spn[256]; /* symbolic port name */ +}; + +/** + * RPN_ID + */ +struct fcgs_rpnid_req_s { + u32 rsvd:8; + u32 port_id:24; + wwn_t port_name; +}; + +/** + * RNN_ID + */ +struct fcgs_rnnid_req_s { + u32 rsvd:8; + u32 port_id:24; + wwn_t node_name; +}; + +/** + * RCS_ID + */ +struct fcgs_rcsid_req_s { + u32 rsvd:8; + u32 port_id:24; + u32 cos; +}; + +/** + * RPT_ID + */ +struct fcgs_rptid_req_s { + u32 rsvd:8; + u32 port_id:24; + u32 port_type:8; + u32 rsvd1:24; +}; + +/** + * GA_NXT Request + */ +struct fcgs_ganxt_req_s { + u32 rsvd:8; + u32 port_id:24; +}; + +/** + * GA_NXT Response + */ +struct fcgs_ganxt_rsp_s { + u32 port_type:8; /* Port Type */ + u32 port_id:24; /* Port Identifier */ + wwn_t port_name; /* Port Name */ + u8 spn_len; /* Length of Symbolic Port Name */ + char spn[255]; /* Symbolic Port Name */ + wwn_t node_name; /* Node Name */ + u8 snn_len; /* Length of Symbolic Node Name */ + char snn[255]; /* Symbolic Node Name */ + u8 ipa[8]; /* Initial Process Associator */ + u8 ip[16]; /* IP Address */ + u32 cos; /* Class of Service */ + u32 fc4types[8]; /* FC-4 TYPEs */ + wwn_t fabric_port_name; + /* Fabric Port Name */ + u32 rsvd:8; /* Reserved */ + u32 hard_addr:24; /* Hard Address */ +}; + +/* + * Fabric Config Server + */ + +/* + * Command codes for Fabric Configuration Server + */ +enum { + GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */ + GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */ + GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */ + GS_FC_PING_CMD = 0x0401, /* GS FC Ping */ +}; + +/* + * Source or Destination Port Tags. + */ +enum { + GS_FTRACE_TAG_NPORT_ID = 1, + GS_FTRACE_TAG_NPORT_NAME = 2, +}; + +/* +* Port Value : Could be a Port id or wwn + */ +union fcgs_port_val_u { + u32 nport_id; + wwn_t nport_wwn; +}; + +#define GS_FTRACE_MAX_HOP_COUNT 20 +#define GS_FTRACE_REVISION 1 + +/* + * Ftrace Related Structures. + */ + +/* + * STR (Switch Trace) Reject Reason Codes. 
From FC-SW. + */ +enum { + GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0, + GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH, + GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH, + GS_FTRACE_STR_MAX_HOP_CNT_REACHED, + GS_FTRACE_STR_SRC_PORT_NOT_FOUND, + GS_FTRACE_STR_DST_PORT_NOT_FOUND, + GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE, + GS_FTRACE_STR_NO_ROUTE_BW_PORTS, + GS_FTRACE_STR_NO_ADDL_EXPLN, + GS_FTRACE_STR_FABRIC_BUSY, + GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS, + GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0, + GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff, +}; + +/* + * Ftrace Request + */ +struct fcgs_ftrace_req_s { + u32 revision; + u16 src_port_tag; /* Source Port tag */ + u16 src_port_len; /* Source Port len */ + union fcgs_port_val_u src_port_val; /* Source Port value */ + u16 dst_port_tag; /* Destination Port tag */ + u16 dst_port_len; /* Destination Port len */ + union fcgs_port_val_u dst_port_val; /* Destination Port value */ + u32 token; + u8 vendor_id[8]; /* T10 Vendor Identifier */ + u8 vendor_info[8]; /* Vendor specific Info */ + u32 max_hop_cnt; /* Max Hop Count */ +}; + +/* + * Path info structure + */ +struct fcgs_ftrace_path_info_s { + wwn_t switch_name; /* Switch WWN */ + u32 domain_id; + wwn_t ingress_port_name; /* Ingress ports wwn */ + u32 ingress_phys_port_num; /* Ingress ports physical port + * number + */ + wwn_t egress_port_name; /* Ingress ports wwn */ + u32 egress_phys_port_num; /* Ingress ports physical port + * number + */ +}; + +/* + * Ftrace Acc Response + */ +struct fcgs_ftrace_resp_s { + u32 revision; + u32 token; + u8 vendor_id[8]; /* T10 Vendor Identifier */ + u8 vendor_info[8]; /* Vendor specific Info */ + u32 str_rej_reason_code; /* STR Reject Reason Code */ + u32 num_path_info_entries; /* No. of path info entries */ + /* + * path info entry/entries. + */ + struct fcgs_ftrace_path_info_s path_info[1]; + +}; + +/* +* Fabric Config Server : FCPing + */ + +/* + * FC Ping Request + */ +struct fcgs_fcping_req_s { + u32 revision; + u16 port_tag; + u16 port_len; /* Port len */ + union fcgs_port_val_u port_val; /* Port value */ + u32 token; +}; + +/* + * FC Ping Response + */ +struct fcgs_fcping_resp_s { + u32 token; +}; + +/* + * Command codes for zone server query. + */ +enum { + ZS_GZME = 0x0124, /* Get zone member extended */ +}; + +/* + * ZS GZME request + */ +#define ZS_GZME_ZNAMELEN 32 +struct zs_gzme_req_s { + u8 znamelen; + u8 rsvd[3]; + u8 zname[ZS_GZME_ZNAMELEN]; +}; + +enum zs_mbr_type { + ZS_MBR_TYPE_PWWN = 1, + ZS_MBR_TYPE_DOMPORT = 2, + ZS_MBR_TYPE_PORTID = 3, + ZS_MBR_TYPE_NWWN = 4, +}; + +struct zs_mbr_wwn_s { + u8 mbr_type; + u8 rsvd[3]; + wwn_t wwn; +}; + +struct zs_query_resp_s { + u32 nmbrs; /* number of zone members */ + struct zs_mbr_wwn_s mbr[1]; +}; + +/* + * GMAL Command ( Get ( interconnect Element) Management Address List) + * To retrieve the IP Address of a Switch. 
+ */ + +#define CT_GMAL_RESP_PREFIX_TELNET "telnet://" +#define CT_GMAL_RESP_PREFIX_HTTP "http://" + +/* GMAL/GFN request */ +struct fcgs_req_s { + wwn_t wwn; /* PWWN/NWWN */ +}; + +#define fcgs_gmal_req_t struct fcgs_req_s +#define fcgs_gfn_req_t struct fcgs_req_s + +/* Accept Response to GMAL */ +struct fcgs_gmal_resp_s { + u32 ms_len; /* Num of entries */ + u8 ms_ma[256]; +}; + +struct fcgs_gmal_entry_s { + u8 len; + u8 prefix[7]; /* like "http://" */ + u8 ip_addr[248]; +}; + +/* + * FDMI + */ +/* + * FDMI Command Codes + */ +#define FDMI_GRHL 0x0100 +#define FDMI_GHAT 0x0101 +#define FDMI_GRPL 0x0102 +#define FDMI_GPAT 0x0110 +#define FDMI_RHBA 0x0200 +#define FDMI_RHAT 0x0201 +#define FDMI_RPRT 0x0210 +#define FDMI_RPA 0x0211 +#define FDMI_DHBA 0x0300 +#define FDMI_DPRT 0x0310 + +/* + * FDMI reason codes + */ +#define FDMI_NO_ADDITIONAL_EXP 0x00 +#define FDMI_HBA_ALREADY_REG 0x10 +#define FDMI_HBA_ATTRIB_NOT_REG 0x11 +#define FDMI_HBA_ATTRIB_MULTIPLE 0x12 +#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13 +#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14 +#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15 +#define FDMI_PORT_HBA_NOT_IN_LIST 0x16 +#define FDMI_PORT_ATTRIB_NOT_REG 0x20 +#define FDMI_PORT_NOT_REG 0x21 +#define FDMI_PORT_ATTRIB_MULTIPLE 0x22 +#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23 +#define FDMI_PORT_ALREADY_REGISTEREED 0x24 + +/* + * FDMI Transmission Speed Mask values + */ +#define FDMI_TRANS_SPEED_1G 0x00000001 +#define FDMI_TRANS_SPEED_2G 0x00000002 +#define FDMI_TRANS_SPEED_10G 0x00000004 +#define FDMI_TRANS_SPEED_4G 0x00000008 +#define FDMI_TRANS_SPEED_8G 0x00000010 +#define FDMI_TRANS_SPEED_16G 0x00000020 +#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000 + +/* + * FDMI HBA attribute types + */ +enum fdmi_hba_attribute_type { + FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */ + FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */ + FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */ + FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */ + FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */ + FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */ + FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */ + FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */ + FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */ + FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */ + FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */ + + FDMI_HBA_ATTRIB_MAX_TYPE +}; + +/* + * FDMI Port attribute types + */ +enum fdmi_port_attribute_type { + FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */ + FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */ + FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */ + FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */ + FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */ + FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */ + + FDMI_PORT_ATTR_MAX_TYPE +}; + +/* + * FDMI attribute + */ +struct fdmi_attr_s { + u16 type; + u16 len; + u8 value[1]; +}; + +/* + * HBA Attribute Block + */ +struct fdmi_hba_attr_s { + u32 attr_count; /* # of attributes */ + struct fdmi_attr_s hba_attr; /* n attributes */ +}; + +/* + * Registered Port List + */ +struct fdmi_port_list_s { + u32 num_ports; /* number Of Port Entries */ + wwn_t port_entry; /* one or more */ +}; + +/* + * Port Attribute Block + */ +struct fdmi_port_attr_s { + u32 attr_count; /* # of attributes */ + struct fdmi_attr_s port_attr; /* n attributes */ +}; + +/* + * FDMI Register HBA Attributes + */ +struct fdmi_rhba_s { + wwn_t hba_id; /* HBA Identifier */ + struct fdmi_port_list_s port_list; /* Registered Port List */ + struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */ +}; + +/* + * FDMI Register Port + */ +struct fdmi_rprt_s { + wwn_t hba_id; /* HBA Identifier */ + wwn_t port_name; /* Port wwn */ + 
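	/*
	 * Layout note: the attribute block below is not a fixed-size array.
	 * It is an attr_count word followed by a packed run of variable-length
	 * struct fdmi_attr_s entries (u16 type, u16 len, then the value), so
	 * consumers walk it by advancing according to each entry's len field
	 * rather than by indexing port_attr.
	 */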
struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ +}; + +/* + * FDMI Register Port Attributes + */ +struct fdmi_rpa_s { + wwn_t port_name; /* port wwn */ + struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ +}; + +#pragma pack() + +#endif /* __BFA_FC_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c new file mode 100644 index 000000000000..b7d2657ca82a --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -0,0 +1,1410 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * fcbuild.c - FC link service frame building and parsing routines + */ + +#include "bfa_os_inc.h" +#include "bfa_fcbuild.h" + +/* + * static build functions + */ +static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id); +static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id); +static struct fchs_s fc_els_req_tmpl; +static struct fchs_s fc_els_rsp_tmpl; +static struct fchs_s fc_bls_req_tmpl; +static struct fchs_s fc_bls_rsp_tmpl; +static struct fc_ba_acc_s ba_acc_tmpl; +static struct fc_logi_s plogi_tmpl; +static struct fc_prli_s prli_tmpl; +static struct fc_rrq_s rrq_tmpl; +static struct fchs_s fcp_fchs_tmpl; + +void +fcbuild_init(void) +{ + /* + * fc_els_req_tmpl + */ + fc_els_req_tmpl.routing = FC_RTG_EXT_LINK; + fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; + fc_els_req_tmpl.type = FC_TYPE_ELS; + fc_els_req_tmpl.f_ctl = + bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | + FCTL_SI_XFER); + fc_els_req_tmpl.rx_id = FC_RXID_ANY; + + /* + * fc_els_rsp_tmpl + */ + fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK; + fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; + fc_els_rsp_tmpl.type = FC_TYPE_ELS; + fc_els_rsp_tmpl.f_ctl = + bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | + FCTL_END_SEQ | FCTL_SI_XFER); + fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; + + /* + * fc_bls_req_tmpl + */ + fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; + fc_bls_req_tmpl.type = FC_TYPE_BLS; + fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); + fc_bls_req_tmpl.rx_id = FC_RXID_ANY; + + /* + * fc_bls_rsp_tmpl + */ + fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK; + fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; + fc_bls_rsp_tmpl.type = FC_TYPE_BLS; + fc_bls_rsp_tmpl.f_ctl = + bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | + FCTL_END_SEQ | FCTL_SI_XFER); + fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; + + /* + * ba_acc_tmpl + */ + ba_acc_tmpl.seq_id_valid = 0; + ba_acc_tmpl.low_seq_cnt = 0; + ba_acc_tmpl.high_seq_cnt = 0xFFFF; + + /* + * plogi_tmpl + */ + plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; + plogi_tmpl.csp.verlo = FC_PH_VER_4_3; + plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004); + plogi_tmpl.csp.ciro = 0x1; + plogi_tmpl.csp.cisc = 0x0; + plogi_tmpl.csp.altbbcred = 0x0; + plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF); + plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002); + plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000); + + plogi_tmpl.class3.class_valid = 1; + 
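	/*
	 * Worked example for the template values above: csp.e_d_tov is carried
	 * in milliseconds at the default resolution, so bfa_os_htonl(2000) is
	 * consistent with the 2-second FC_ED_TOV protocol default, i.e. it
	 * could equally be written as bfa_os_htonl(FC_ED_TOV * 1000).  The
	 * placeholder bbcred of 0x0004 is overwritten with the real local
	 * buffer-to-buffer credit in fc_flogi_build()/fc_flogi_acc_build().
	 */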
plogi_tmpl.class3.sequential = 1; + plogi_tmpl.class3.conseq = 0xFF; + plogi_tmpl.class3.ospx = 1; + + /* + * prli_tmpl + */ + prli_tmpl.command = FC_ELS_PRLI; + prli_tmpl.pglen = 0x10; + prli_tmpl.pagebytes = bfa_os_htons(0x0014); + prli_tmpl.parampage.type = FC_TYPE_FCP; + prli_tmpl.parampage.imagepair = 1; + prli_tmpl.parampage.servparams.rxrdisab = 1; + + /* + * rrq_tmpl + */ + rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; + + /* + * fcp_struct fchs_s mpl + */ + fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; + fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; + fcp_fchs_tmpl.type = FC_TYPE_FCP; + fcp_fchs_tmpl.f_ctl = + bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); + fcp_fchs_tmpl.seq_id = 1; + fcp_fchs_tmpl.rx_id = FC_RXID_ANY; +} + +static void +fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) +{ + bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); + + fchs->routing = FC_RTG_FC4_DEV_DATA; + fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; + fchs->type = FC_TYPE_SERVICES; + fchs->f_ctl = + bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | + FCTL_SI_XFER); + fchs->rx_id = FC_RXID_ANY; + fchs->d_id = (d_id); + fchs->s_id = (s_id); + fchs->ox_id = bfa_os_htons(ox_id); + + /** + * @todo no need to set ox_id for request + * no need to set rx_id for response + */ +} + +void +fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); + fchs->d_id = (d_id); + fchs->s_id = (s_id); + fchs->ox_id = bfa_os_htons(ox_id); +} + +static void +fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); + fchs->d_id = d_id; + fchs->s_id = s_id; + fchs->ox_id = ox_id; +} + +enum fc_parse_status +fc_els_rsp_parse(struct fchs_s *fchs, int len) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd; + + len = len; + + switch (els_cmd->els_code) { + case FC_ELS_LS_RJT: + if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) + return FC_PARSE_BUSY; + else + return FC_PARSE_FAILURE; + + case FC_ELS_ACC: + return FC_PARSE_OK; + } + return FC_PARSE_OK; +} + +static void +fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); + fchs->d_id = d_id; + fchs->s_id = s_id; + fchs->ox_id = ox_id; +} + +static u16 +fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u8 els_code) +{ + struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); + + bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + + plogi->els_cmd.els_code = els_code; + if (els_code == FC_ELS_PLOGI) + fc_els_req_build(fchs, d_id, s_id, ox_id); + else + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size); + + bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); + bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); + + return sizeof(struct fc_logi_s); +} + +u16 +fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, + u8 set_npiv, u8 set_auth, u16 local_bb_credits) +{ + u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); + u32 *vvl_info; + + bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + + flogi->els_cmd.els_code = FC_ELS_FLOGI; + fc_els_req_build(fchs, d_id, s_id, ox_id); + + 
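	/*
	 * Note: the FLOGI is addressed to the well-known fabric login port
	 * (FC_FABRIC_PORT, conventionally 0xFFFFFE).  The vendor version
	 * level filled in below carries FLOGI_VVL_BRCD, which is just the
	 * string "BRCD" in ASCII ('B' 0x42, 'R' 0x52, 'C' 0x43, 'D' 0x44),
	 * stored in network byte order by the bfa_os_htonl() call below.
	 */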
flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); + flogi->port_name = port_name; + flogi->node_name = node_name; + + /* + * Set the NPIV Capability Bit ( word 1, bit 31) of Common + * Service Parameters. + */ + flogi->csp.ciro = set_npiv; + + /* set AUTH capability */ + flogi->csp.security = set_auth; + + flogi->csp.bbcred = bfa_os_htons(local_bb_credits); + + /* Set brcd token in VVL */ + vvl_info = (u32 *)&flogi->vvl[0]; + + /* set the flag to indicate the presence of VVL */ + flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ + vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); + + return sizeof(struct fc_logi_s); +} + +u16 +fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u16 local_bb_credits) +{ + u32 d_id = 0; + + bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + flogi->els_cmd.els_code = FC_ELS_ACC; + flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); + flogi->port_name = port_name; + flogi->node_name = node_name; + + flogi->csp.bbcred = bfa_os_htons(local_bb_credits); + + return sizeof(struct fc_logi_s); +} + +u16 +fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) +{ + u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); + + bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + + flogi->els_cmd.els_code = FC_ELS_FDISC; + fc_els_req_build(fchs, d_id, s_id, ox_id); + + flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); + flogi->port_name = port_name; + flogi->node_name = node_name; + + return sizeof(struct fc_logi_s); +} + +u16 +fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size) +{ + return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, + node_name, pdu_size, FC_ELS_PLOGI); +} + +u16 +fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size) +{ + return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, + node_name, pdu_size, FC_ELS_ACC); +} + +enum fc_parse_status +fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + struct fc_logi_s *plogi; + struct fc_ls_rjt_s *ls_rjt; + + switch (els_cmd->els_code) { + case FC_ELS_LS_RJT: + ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); + if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) + return FC_PARSE_BUSY; + else + return FC_PARSE_FAILURE; + case FC_ELS_ACC: + plogi = (struct fc_logi_s *) (fchs + 1); + if (len < sizeof(struct fc_logi_s)) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(plogi->port_name, port_name)) + return FC_PARSE_FAILURE; + + if (!plogi->class3.class_valid) + return FC_PARSE_FAILURE; + + if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; + default: + return FC_PARSE_FAILURE; + } +} + +enum fc_parse_status +fc_plogi_parse(struct fchs_s *fchs) +{ + struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); + + if (plogi->class3.class_valid != 1) + return FC_PARSE_FAILURE; + + if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) + || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) + || (plogi->class3.rxsz == 0)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + 
u16 ox_id) +{ + struct fc_prli_s *prli = (struct fc_prli_s *) (pld); + + fc_els_req_build(fchs, d_id, s_id, ox_id); + bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); + + prli->command = FC_ELS_PRLI; + prli->parampage.servparams.initiator = 1; + prli->parampage.servparams.retry = 1; + prli->parampage.servparams.rec_support = 1; + prli->parampage.servparams.task_retry_id = 0; + prli->parampage.servparams.confirm = 1; + + return sizeof(struct fc_prli_s); +} + +u16 +fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id, enum bfa_lport_role role) +{ + struct fc_prli_s *prli = (struct fc_prli_s *) (pld); + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); + + prli->command = FC_ELS_ACC; + + prli->parampage.servparams.initiator = 1; + + prli->parampage.rspcode = FC_PRLI_ACC_XQTD; + + return sizeof(struct fc_prli_s); +} + +enum fc_parse_status +fc_prli_rsp_parse(struct fc_prli_s *prli, int len) +{ + if (len < sizeof(struct fc_prli_s)) + return FC_PARSE_FAILURE; + + if (prli->command != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) + && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) + return FC_PARSE_FAILURE; + + if (prli->parampage.servparams.target != 1) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +enum fc_parse_status +fc_prli_parse(struct fc_prli_s *prli) +{ + if (prli->parampage.type != FC_TYPE_FCP) + return FC_PARSE_FAILURE; + + if (!prli->parampage.imagepair) + return FC_PARSE_FAILURE; + + if (!prli->parampage.servparams.initiator) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s)); + logo->els_cmd.els_code = FC_ELS_LOGO; + logo->nport_id = (s_id); + logo->orig_port_name = port_name; + + return sizeof(struct fc_logo_s); +} + +static u16 +fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name, + wwn_t node_name, u8 els_code) +{ + bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s)); + + adisc->els_cmd.els_code = els_code; + + if (els_code == FC_ELS_ADISC) + fc_els_req_build(fchs, d_id, s_id, ox_id); + else + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + adisc->orig_HA = 0; + adisc->orig_port_name = port_name; + adisc->orig_node_name = node_name; + adisc->nport_id = (s_id); + + return sizeof(struct fc_adisc_s); +} + +u16 +fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name) +{ + return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, + node_name, FC_ELS_ADISC); +} + +u16 +fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name, + wwn_t node_name) +{ + return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, + node_name, FC_ELS_ACC); +} + +enum fc_parse_status +fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, + wwn_t node_name) +{ + + if (len < sizeof(struct fc_adisc_s)) + return FC_PARSE_FAILURE; + + if (adisc->els_cmd.els_code != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(adisc->orig_port_name, port_name)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +enum fc_parse_status +fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name, + 
wwn_t port_name) +{ + struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; + + if (adisc->els_cmd.els_code != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + if ((adisc->nport_id == (host_dap)) + && wwn_is_equal(adisc->orig_port_name, port_name) + && wwn_is_equal(adisc->orig_node_name, node_name)) + return FC_PARSE_OK; + + return FC_PARSE_FAILURE; +} + +enum fc_parse_status +fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) +{ + struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); + + if (pdisc->class3.class_valid != 1) + return FC_PARSE_FAILURE; + + if ((bfa_os_ntohs(pdisc->class3.rxsz) < + (FC_MIN_PDUSZ - sizeof(struct fchs_s))) + || (pdisc->class3.rxsz == 0)) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(pdisc->port_name, port_name)) + return FC_PARSE_FAILURE; + + if (!wwn_is_equal(pdisc->node_name, node_name)) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) +{ + bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); + fchs->cat_info = FC_CAT_ABTS; + fchs->d_id = (d_id); + fchs->s_id = (s_id); + fchs->ox_id = bfa_os_htons(ox_id); + + return sizeof(struct fchs_s); +} + +enum fc_parse_status +fc_abts_rsp_parse(struct fchs_s *fchs, int len) +{ + if ((fchs->cat_info == FC_CAT_BA_ACC) + || (fchs->cat_info == FC_CAT_BA_RJT)) + return FC_PARSE_OK; + + return FC_PARSE_FAILURE; +} + +u16 +fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, + u16 ox_id, u16 rrq_oxid) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + /* + * build rrq payload + */ + bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); + rrq->s_id = (s_id); + rrq->ox_id = bfa_os_htons(rrq_oxid); + rrq->rx_id = FC_RXID_ANY; + + return sizeof(struct fc_rrq_s); +} + +u16 +fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, + u16 ox_id) +{ + struct fc_els_cmd_s *acc = pld; + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s)); + acc->els_code = FC_ELS_ACC; + + return sizeof(struct fc_els_cmd_s); +} + +u16 +fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, + u32 s_id, u16 ox_id, u8 reason_code, + u8 reason_code_expl) +{ + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); + + ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; + ls_rjt->reason_code = reason_code; + ls_rjt->reason_code_expl = reason_code_expl; + ls_rjt->vendor_unique = 0x00; + + return sizeof(struct fc_ls_rjt_s); +} + +u16 +fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, + u32 s_id, u16 ox_id, u16 rx_id) +{ + fc_bls_rsp_build(fchs, d_id, s_id, ox_id); + + bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); + + fchs->rx_id = rx_id; + + ba_acc->ox_id = fchs->ox_id; + ba_acc->rx_id = fchs->rx_id; + + return sizeof(struct fc_ba_acc_s); +} + +u16 +fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, + u32 s_id, u16 ox_id) +{ + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); + els_cmd->els_code = FC_ELS_ACC; + + return sizeof(struct fc_els_cmd_s); +} + +int +fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) +{ + int num_pages = 0; + struct fc_prlo_s *prlo; + struct fc_tprlo_s *tprlo; + + if (els_code == FC_ELS_PRLO) { + prlo = (struct fc_prlo_s *) (fc_frame + 1); + num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16; + } else { + tprlo = (struct fc_tprlo_s *) (fc_frame + 1); + 
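		/*
		 * Worked example: a PRLO/TPRLO payload is a 4-byte header
		 * (command, page_len, payload_len) followed by N 16-byte
		 * parameter pages, hence the (payload_len - 4) / 16 page
		 * count used here.  A single-page PRLO built by
		 * fc_prlo_build() carries payload_len 20, giving
		 * (20 - 4) / 16 = 1.
		 */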
num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; + } + return num_pages; +} + +u16 +fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, + u32 d_id, u32 s_id, u16 ox_id, int num_pages) +{ + int page; + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4); + tprlo_acc->command = FC_ELS_ACC; + + tprlo_acc->page_len = 0x10; + tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + tprlo_acc->tprlo_acc_params[page].opa_valid = 0; + tprlo_acc->tprlo_acc_params[page].rpa_valid = 0; + tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; + tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; + tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; + } + return bfa_os_ntohs(tprlo_acc->payload_len); +} + +u16 +fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, + u32 s_id, u16 ox_id, int num_pages) +{ + int page; + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4); + prlo_acc->command = FC_ELS_ACC; + prlo_acc->page_len = 0x10; + prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + prlo_acc->prlo_acc_params[page].opa_valid = 0; + prlo_acc->prlo_acc_params[page].rpa_valid = 0; + prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; + prlo_acc->prlo_acc_params[page].orig_process_assc = 0; + prlo_acc->prlo_acc_params[page].resp_process_assc = 0; + } + + return bfa_os_ntohs(prlo_acc->payload_len); +} + +u16 +fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, + u32 s_id, u16 ox_id, u32 data_format) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); + + rnid->els_cmd.els_code = FC_ELS_RNID; + rnid->node_id_data_format = data_format; + + return sizeof(struct fc_rnid_cmd_s); +} + +u16 +fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, + u32 s_id, u16 ox_id, u32 data_format, + struct fc_rnid_common_id_data_s *common_id_data, + struct fc_rnid_general_topology_data_s *gen_topo_data) +{ + bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + rnid_acc->els_cmd.els_code = FC_ELS_ACC; + rnid_acc->node_id_data_format = data_format; + rnid_acc->common_id_data_length = + sizeof(struct fc_rnid_common_id_data_s); + rnid_acc->common_id_data = *common_id_data; + + if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { + rnid_acc->specific_id_data_length = + sizeof(struct fc_rnid_general_topology_data_s); + bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); + return sizeof(struct fc_rnid_acc_s); + } else { + return sizeof(struct fc_rnid_acc_s) - + sizeof(struct fc_rnid_general_topology_data_s); + } + +} + +u16 +fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, + u32 s_id, u16 ox_id) +{ + fc_els_req_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); + + rpsc->els_cmd.els_code = FC_ELS_RPSC; + return sizeof(struct fc_rpsc_cmd_s); +} + +u16 +fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, + u32 s_id, u32 *pid_list, u16 npids) +{ + u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id)); + int i = 0; + + fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); + + bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); + + rpsc2->els_cmd.els_code = FC_ELS_RPSC; + rpsc2->token = 
bfa_os_htonl(FC_BRCD_TOKEN); + rpsc2->num_pids = bfa_os_htons(npids); + for (i = 0; i < npids; i++) + rpsc2->pid_list[i].pid = pid_list[i]; + + return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32))); +} + +u16 +fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, + u32 d_id, u32 s_id, u16 ox_id, + struct fc_rpsc_speed_info_s *oper_speed) +{ + bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); + + fc_els_rsp_build(fchs, d_id, s_id, ox_id); + + rpsc_acc->command = FC_ELS_ACC; + rpsc_acc->num_entries = bfa_os_htons(1); + + rpsc_acc->speed_info[0].port_speed_cap = + bfa_os_htons(oper_speed->port_speed_cap); + + rpsc_acc->speed_info[0].port_op_speed = + bfa_os_htons(oper_speed->port_op_speed); + + return sizeof(struct fc_rpsc_acc_s); +} + +/* + * TBD - + * . get rid of unnecessary memsets + */ + +u16 +fc_logo_rsp_parse(struct fchs_s *fchs, int len) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + len = len; + if (els_cmd->els_code != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + wwn_t port_name, wwn_t node_name, u16 pdu_size) +{ + struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); + + bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); + + pdisc->els_cmd.els_code = FC_ELS_PDISC; + fc_els_req_build(fchs, d_id, s_id, ox_id); + + pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size); + pdisc->port_name = port_name; + pdisc->node_name = node_name; + + return sizeof(struct fc_logi_s); +} + +u16 +fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) +{ + struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); + + if (len < sizeof(struct fc_logi_s)) + return FC_PARSE_LEN_INVAL; + + if (pdisc->els_cmd.els_code != FC_ELS_ACC) + return FC_PARSE_ACC_INVAL; + + if (!wwn_is_equal(pdisc->port_name, port_name)) + return FC_PARSE_PWWN_NOT_EQUAL; + + if (!pdisc->class3.class_valid) + return FC_PARSE_NWWN_NOT_EQUAL; + + if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) + return FC_PARSE_RXSZ_INVAL; + + return FC_PARSE_OK; +} + +u16 +fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + int num_pages) +{ + struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); + int page; + + fc_els_req_build(fchs, d_id, s_id, ox_id); + bfa_os_memset(prlo, 0, (num_pages * 16) + 4); + prlo->command = FC_ELS_PRLO; + prlo->page_len = 0x10; + prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + prlo->prlo_params[page].type = FC_TYPE_FCP; + prlo->prlo_params[page].opa_valid = 0; + prlo->prlo_params[page].rpa_valid = 0; + prlo->prlo_params[page].orig_process_assc = 0; + prlo->prlo_params[page].resp_process_assc = 0; + } + + return bfa_os_ntohs(prlo->payload_len); +} + +u16 +fc_prlo_rsp_parse(struct fchs_s *fchs, int len) +{ + struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1); + int num_pages = 0; + int page = 0; + + len = len; + + if (prlo->command != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; + + for (page = 0; page < num_pages; page++) { + if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP) + return FC_PARSE_FAILURE; + + if (prlo->prlo_acc_params[page].opa_valid != 0) + return FC_PARSE_FAILURE; + + if (prlo->prlo_acc_params[page].rpa_valid != 0) + return FC_PARSE_FAILURE; + + if (prlo->prlo_acc_params[page].orig_process_assc != 0) + return FC_PARSE_FAILURE; + + if 
(prlo->prlo_acc_params[page].resp_process_assc != 0) + return FC_PARSE_FAILURE; + } + return FC_PARSE_OK; + +} + +u16 +fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id) +{ + struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); + int page; + + fc_els_req_build(fchs, d_id, s_id, ox_id); + bfa_os_memset(tprlo, 0, (num_pages * 16) + 4); + tprlo->command = FC_ELS_TPRLO; + tprlo->page_len = 0x10; + tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); + + for (page = 0; page < num_pages; page++) { + tprlo->tprlo_params[page].type = FC_TYPE_FCP; + tprlo->tprlo_params[page].opa_valid = 0; + tprlo->tprlo_params[page].rpa_valid = 0; + tprlo->tprlo_params[page].orig_process_assc = 0; + tprlo->tprlo_params[page].resp_process_assc = 0; + if (tprlo_type == FC_GLOBAL_LOGO) { + tprlo->tprlo_params[page].global_process_logout = 1; + } else if (tprlo_type == FC_TPR_LOGO) { + tprlo->tprlo_params[page].tpo_nport_valid = 1; + tprlo->tprlo_params[page].tpo_nport_id = (tpr_id); + } + } + + return bfa_os_ntohs(tprlo->payload_len); +} + +u16 +fc_tprlo_rsp_parse(struct fchs_s *fchs, int len) +{ + struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1); + int num_pages = 0; + int page = 0; + + len = len; + + if (tprlo->command != FC_ELS_ACC) + return FC_PARSE_ACC_INVAL; + + num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; + + for (page = 0; page < num_pages; page++) { + if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) + return FC_PARSE_NOT_FCP; + if (tprlo->tprlo_acc_params[page].opa_valid != 0) + return FC_PARSE_OPAFLAG_INVAL; + if (tprlo->tprlo_acc_params[page].rpa_valid != 0) + return FC_PARSE_RPAFLAG_INVAL; + if (tprlo->tprlo_acc_params[page].orig_process_assc != 0) + return FC_PARSE_OPA_INVAL; + if (tprlo->tprlo_acc_params[page].resp_process_assc != 0) + return FC_PARSE_RPA_INVAL; + } + return FC_PARSE_OK; +} + +enum fc_parse_status +fc_rrq_rsp_parse(struct fchs_s *fchs, int len) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + len = len; + if (els_cmd->els_code != FC_ELS_ACC) + return FC_PARSE_FAILURE; + + return FC_PARSE_OK; +} + +u16 +fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, + u32 reason_code, u32 reason_expl) +{ + struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); + + fc_bls_rsp_build(fchs, d_id, s_id, ox_id); + + fchs->cat_info = FC_CAT_BA_RJT; + ba_rjt->reason_code = reason_code; + ba_rjt->reason_expl = reason_expl; + return sizeof(struct fc_ba_rjt_s); +} + +static void +fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) +{ + bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); + cthdr->rev_id = CT_GS3_REVISION; + cthdr->gs_type = CT_GSTYPE_DIRSERVICE; + cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; + cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); +} + +static void +fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) +{ + bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); + cthdr->rev_id = CT_GS3_REVISION; + cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; + cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; + cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); +} + +static void +fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, + u8 sub_type) +{ + bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); + cthdr->rev_id = CT_GS3_REVISION; + cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; + cthdr->gs_sub_type = sub_type; + cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); +} + +u16 +fc_gidpn_build(struct fchs_s 
*fchs, void *pyld, u32 s_id, u16 ox_id, + wwn_t port_name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); + + bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); + gidpn->port_name = port_name; + return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u32 port_id) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); + + bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); + gpnid->dap = port_id; + return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); +} + +u16 +fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u32 port_id) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); + + bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); + gnnid->dap = port_id; + return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); +} + +u16 +fc_ct_rsp_parse(struct ct_hdr_s *cthdr) +{ + if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { + if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) + return FC_PARSE_BUSY; + else + return FC_PARSE_FAILURE; + } + + return FC_PARSE_OK; +} + +u16 +fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, + u8 set_br_reg, u32 s_id, u16 ox_id) +{ + u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); + + fc_els_req_build(fchs, d_id, s_id, ox_id); + + bfa_os_memset(scr, 0, sizeof(struct fc_scr_s)); + scr->command = FC_ELS_SCR; + scr->reg_func = FC_SCR_REG_FUNC_FULL; + if (set_br_reg) + scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; + + return sizeof(struct fc_scr_s); +} + +u16 +fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, + u32 s_id, u16 ox_id) +{ + u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); + u16 payldlen; + + fc_els_req_build(fchs, d_id, s_id, ox_id); + rscn->command = FC_ELS_RSCN; + rscn->pagelen = sizeof(rscn->event[0]); + + payldlen = sizeof(u32) + rscn->pagelen; + rscn->payldlen = bfa_os_htons(payldlen); + + rscn->event[0].format = FC_RSCN_FORMAT_PORTID; + rscn->event[0].portid = s_id; + + return sizeof(struct fc_rscn_pl_s); +} + +u16 +fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + enum bfa_lport_role roles) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); + u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER); + u8 index; + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); + + bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); + + rftid->dap = s_id; + + /* By default, FCP FC4 Type is registered */ + index = FC_TYPE_FCP >> 5; + type_value = 1 << (FC_TYPE_FCP % 32); + rftid->fc4_type[index] = bfa_os_htonl(type_value); + + return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u8 *fc4_bitmap, u32 bitmap_size) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; 
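	/*
	 * Note on the FC-4 type bitmap: fc4_type[] in the RFT_ID request is a
	 * 256-bit map, one bit per FC-4 TYPE code, packed 32 codes per word.
	 * fc_rftid_build() above registers only FCP by setting word
	 * FC_TYPE_FCP >> 5, bit FC_TYPE_FCP % 32 (bit 8 of word 0, assuming
	 * the standard FCP type code 0x08).  This variant instead copies a
	 * caller-supplied bitmap verbatim, capped at the 32-byte field size.
	 */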
+ struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); + + bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); + + rftid->dap = s_id; + bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, + (bitmap_size < 32 ? bitmap_size : 32)); + + return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u8 fc4_type, u8 fc4_ftrs) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); + + bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); + + rffid->dap = s_id; + rffid->fc4ftr_bits = fc4_ftrs; + rffid->fc4_type = fc4_type; + + return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u8 *name) +{ + + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rspnid_req_s *rspnid = + (struct fcgs_rspnid_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); + fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); + + bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); + + rspnid->dap = s_id; + rspnid->spn_len = (u8) strlen((char *)name); + strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); + + return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) +{ + + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + + fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); + + bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); + gidft->fc4_type = fc4_type; + gidft->domain_id = 0; + gidft->area_id = 0; + + return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + wwn_t port_name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); + + bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); + rpnid->port_id = port_id; + rpnid->port_name = port_name; + + return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + wwn_t node_name) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); + + bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); + rnnid->port_id = port_id; + rnnid->node_name = node_name; + + return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + u32 cos) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rcsid_req_s 
*rcsid = + (struct fcgs_rcsid_req_s *) (cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); + + bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); + rcsid->port_id = port_id; + rcsid->cos = cos; + + return sizeof(struct fcgs_rcsid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, + u8 port_type) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); + + bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); + rptid->port_id = port_id; + rptid->port_type = port_type; + + return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); +} + +u16 +fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); + + bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); + ganxt->port_id = port_id; + + return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); +} + +/* + * Builds fc hdr and ct hdr for FDMI requests. + */ +u16 +fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 cmd_code) +{ + + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); + + return sizeof(struct ct_hdr_s); +} + +/* + * Given a FC4 Type, this function returns a fc4 type bitmask + */ +void +fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) +{ + u8 index; + u32 *ptr = (u32 *) bit_mask; + u32 type_value; + + /* + * @todo : Check for bitmask size + */ + + index = fc4_type >> 5; + type_value = 1 << (fc4_type % 32); + ptr[index] = bfa_os_htonl(type_value); + +} + +/* + * GMAL Request + */ +u16 +fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, + CT_GSSUBTYPE_CFGSERVER); + + bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); + gmal->wwn = wwn; + + return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); +} + +/* + * GFN (Get Fabric Name) Request + */ +u16 +fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) +{ + struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; + fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); + u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); + + fc_gs_fchdr_build(fchs, d_id, s_id, 0); + fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, + CT_GSSUBTYPE_CFGSERVER); + + bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); + gfn->wwn = wwn; + + return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); +} diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h new file mode 100644 index 000000000000..73abd02e53cc --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcbuild.h @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +/* + * fcbuild.h - FC link service frame building and parsing routines + */ + +#ifndef __FCBUILD_H__ +#define __FCBUILD_H__ + +#include "bfa_os_inc.h" +#include "bfa_fc.h" +#include "bfa_defs_fcs.h" + +/* + * Utility Macros/functions + */ + +#define wwn_is_equal(_wwn1, _wwn2) \ + (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0) + +#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) + +/* + * Given the fc response length, this routine will return + * the length of the actual payload bytes following the CT header. + * + * Assumes the input response length does not include the crc, eof, etc. + */ +static inline u32 +fc_get_ctresp_pyld_len(u32 resp_len) +{ + return resp_len - sizeof(struct ct_hdr_s); +} + +/* + * Convert bfa speed to rpsc speed value. + */ +static inline enum bfa_port_speed +fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed) +{ + switch (speed) { + + case RPSC_OP_SPEED_1G: + return BFA_PORT_SPEED_1GBPS; + + case RPSC_OP_SPEED_2G: + return BFA_PORT_SPEED_2GBPS; + + case RPSC_OP_SPEED_4G: + return BFA_PORT_SPEED_4GBPS; + + case RPSC_OP_SPEED_8G: + return BFA_PORT_SPEED_8GBPS; + + case RPSC_OP_SPEED_10G: + return BFA_PORT_SPEED_10GBPS; + + default: + return BFA_PORT_SPEED_UNKNOWN; + } +} + +/* + * Convert RPSC speed to bfa speed value. 
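+ * (Note: this helper maps a bfa_port_speed value to its RPSC operating-speed
+ * encoding; unrecognized speeds return RPSC_OP_SPEED_NOT_EST.)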
+ */ +static inline enum fc_rpsc_op_speed +fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed) +{ + switch (op_speed) { + + case BFA_PORT_SPEED_1GBPS: + return RPSC_OP_SPEED_1G; + + case BFA_PORT_SPEED_2GBPS: + return RPSC_OP_SPEED_2G; + + case BFA_PORT_SPEED_4GBPS: + return RPSC_OP_SPEED_4G; + + case BFA_PORT_SPEED_8GBPS: + return RPSC_OP_SPEED_8G; + + case BFA_PORT_SPEED_10GBPS: + return RPSC_OP_SPEED_10G; + + default: + return RPSC_OP_SPEED_NOT_EST; + } +} + +enum fc_parse_status { + FC_PARSE_OK = 0, + FC_PARSE_FAILURE = 1, + FC_PARSE_BUSY = 2, + FC_PARSE_LEN_INVAL, + FC_PARSE_ACC_INVAL, + FC_PARSE_PWWN_NOT_EQUAL, + FC_PARSE_NWWN_NOT_EQUAL, + FC_PARSE_RXSZ_INVAL, + FC_PARSE_NOT_FCP, + FC_PARSE_OPAFLAG_INVAL, + FC_PARSE_RPAFLAG_INVAL, + FC_PARSE_OPA_INVAL, + FC_PARSE_RPA_INVAL, + +}; + +struct fc_templates_s { + struct fchs_s fc_els_req; + struct fchs_s fc_bls_req; + struct fc_logi_s plogi; + struct fc_rrq_s rrq; +}; + +void fcbuild_init(void); + +u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, + u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size, u8 set_npiv, u8 set_auth, + u16 local_bb_credits); + +u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size); + +u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, + u32 s_id, u16 ox_id, + wwn_t port_name, wwn_t node_name, + u16 pdu_size, + u16 local_bb_credits); + +u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name, + wwn_t node_name, u16 pdu_size); + +enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs); + +u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id, + u16 ox_id); + +enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len); + +u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id, + u32 s_id, u16 ox_id, u16 rrq_oxid); +enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len); + +u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, + u16 ox_id, u8 *name); + +u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id, + u16 ox_id, enum bfa_lport_role role); + +u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 ox_id, u8 *fc4_bitmap, + u32 bitmap_size); + +u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 ox_id, u8 fc4_type, u8 fc4_ftrs); + +u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 ox_id, wwn_t port_name); + +u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id, + u16 ox_id, u32 port_id); + +u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, + u8 set_br_reg, u32 s_id, u16 ox_id); + +u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id, + wwn_t port_name, wwn_t node_name, + u16 pdu_size); + +u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, + u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, + wwn_t node_name); + +enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, + u32 host_dap, wwn_t node_name, wwn_t port_name); + +enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, + wwn_t port_name, wwn_t node_name); + +u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, + u32 d_id, u32 s_id, u16 ox_id, + wwn_t port_name, wwn_t node_name); +u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, + u32 d_id, u32 s_id, u16 ox_id, + u8 reason_code, u8 reason_code_expl); +u16 fc_ls_acc_build(struct fchs_s *fchs, 
struct fc_els_cmd_s *els_cmd, + u32 d_id, u32 s_id, u16 ox_id); +u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id); + +enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len); + +u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id, + enum bfa_lport_role role); + +u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, + u32 d_id, u32 s_id, u16 ox_id, + u32 data_format); + +u16 fc_rnid_acc_build(struct fchs_s *fchs, + struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, + u16 ox_id, u32 data_format, + struct fc_rnid_common_id_data_s *common_id_data, + struct fc_rnid_general_topology_data_s *gen_topo_data); + +u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c, + u32 d_id, u32 s_id, u32 *pid_list, u16 npids); +u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, + u32 d_id, u32 s_id, u16 ox_id); +u16 fc_rpsc_acc_build(struct fchs_s *fchs, + struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, + u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed); +u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, + u8 fc4_type); + +u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, wwn_t port_name); + +u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, wwn_t node_name); + +u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, u32 cos); + +u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id, u8 port_type); + +u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u32 port_id); + +u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, + u32 s_id, u16 ox_id, wwn_t port_name); + +u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, + u32 s_id, u16 ox_id); + +u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, + u16 cmd_code); +u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn); +u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn); + +void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); + +void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id); + +enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len); + +enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, + wwn_t port_name); + +enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli); + +enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, + wwn_t port_name); + +u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, + u32 s_id, u16 ox_id, u16 rx_id); + +int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); + +u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, + u32 d_id, u32 s_id, u16 ox_id, int num_pages); + +u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, + u32 d_id, u32 s_id, u16 ox_id, int num_pages); + +u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len); + +u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, wwn_t port_name, wwn_t node_name, + u16 pdu_size); + +u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name); + +u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, int num_pages); + +u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len); + +u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type, + u32 tpr_id); + +u16 
fc_tprlo_rsp_parse(struct fchs_s *fchs, int len); + +u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, + u16 ox_id, u32 reason_code, u32 reason_expl); + +u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, + u32 port_id); + +u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr); + +u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, + u16 ox_id); +#endif diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 8c703d8dc94b..33c8dd51f474 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,18 +15,291 @@ * General Public License for more details. */ -#include -#include +#include "bfa_modules.h" +#include "bfa_cb_ioim.h" BFA_TRC_FILE(HAL, FCPIM); BFA_MODULE(fcpim); + +#define bfa_fcpim_add_iostats(__l, __r, __stats) \ + (__l->__stats += __r->__stats) + + +/** + * BFA ITNIM Related definitions + */ +static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); + +#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ + (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) + +#define bfa_fcpim_additn(__itnim) \ + list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) +#define bfa_fcpim_delitn(__itnim) do { \ + bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ + bfa_itnim_update_del_itn_stats(__itnim); \ + list_del(&(__itnim)->qe); \ + bfa_assert(list_empty(&(__itnim)->io_q)); \ + bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \ + bfa_assert(list_empty(&(__itnim)->pending_q)); \ +} while (0) + +#define bfa_itnim_online_cb(__itnim) do { \ + if ((__itnim)->bfa->fcs) \ + bfa_cb_itnim_online((__itnim)->ditn); \ + else { \ + bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ + __bfa_cb_itnim_online, (__itnim)); \ + } \ +} while (0) + +#define bfa_itnim_offline_cb(__itnim) do { \ + if ((__itnim)->bfa->fcs) \ + bfa_cb_itnim_offline((__itnim)->ditn); \ + else { \ + bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ + __bfa_cb_itnim_offline, (__itnim)); \ + } \ +} while (0) + +#define bfa_itnim_sler_cb(__itnim) do { \ + if ((__itnim)->bfa->fcs) \ + bfa_cb_itnim_sler((__itnim)->ditn); \ + else { \ + bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ + __bfa_cb_itnim_sler, (__itnim)); \ + } \ +} while (0) + +/** + * bfa_itnim_sm BFA itnim state machine + */ + + +enum bfa_itnim_event { + BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ + BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ + BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */ + BFA_ITNIM_SM_FWRSP = 4, /* firmware response */ + BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */ + BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */ + BFA_ITNIM_SM_SLER = 7, /* second level error recovery */ + BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */ + BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ +}; + +/** + * BFA IOIM related definitions + */ +#define bfa_ioim_move_to_comp_q(__ioim) do { \ + list_del(&(__ioim)->qe); \ + list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \ +} while (0) + + +#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \ + if ((__fcpim)->profile_comp) \ + (__fcpim)->profile_comp(__ioim); \ +} while (0) + +#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \ + if ((__fcpim)->profile_start) \ + (__fcpim)->profile_start(__ioim); \ +} while (0) +/** + * hal_ioim_sm + */ + +/** + * IO state 
machine events + */ +enum bfa_ioim_event { + BFA_IOIM_SM_START = 1, /* io start request from host */ + BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */ + BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */ + BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */ + BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */ + BFA_IOIM_SM_FREE = 6, /* io resource is freed */ + BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */ + BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */ + BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */ + BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */ + BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */ + BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */ + BFA_IOIM_SM_HCB = 13, /* bfa callback complete */ + BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */ + BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */ + BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ + BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ + BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ +}; + + +/** + * BFA TSKIM related definitions + */ + +/** + * task management completion handling + */ +#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ + bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\ + bfa_tskim_notify_comp(__tskim); \ +} while (0) + +#define bfa_tskim_notify_comp(__tskim) do { \ + if ((__tskim)->notify) \ + bfa_itnim_tskdone((__tskim)->itnim); \ +} while (0) + + +enum bfa_tskim_event { + BFA_TSKIM_SM_START = 1, /* TM command start */ + BFA_TSKIM_SM_DONE = 2, /* TM completion */ + BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */ + BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */ + BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */ + BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ + BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ + BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ +}; + +/** + * forward declaration for BFA ITNIM functions + */ +static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); +static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); +static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); +static void bfa_itnim_cleanp_comp(void *itnim_cbarg); +static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); +static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); +static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov(void *itnim_arg); +static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); +static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); + +/** + * forward declaration of ITNIM state machine + */ +static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void 
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); +static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event); + +/** + * forward declaration for BFA IOIM functions + */ +static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); +static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim); +static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim); +static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); +static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); +static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); +static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); + + +/** + * forward declaration of BFA IO state machine + */ +static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); +static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, + enum bfa_ioim_event event); + +/** + * forward declaration for BFA TSKIM functions + */ +static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); +static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, + lun_t lun); +static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); +static void bfa_tskim_cleanp_comp(void *tskim_cbarg); +static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); +static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); +static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); +static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); + + +/** + * forward declaration of BFA TSKIM state machine + */ +static void 
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); +static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event); + /** * hal_fcpim_mod BFA FCP Initiator Mode module */ /** - * Compute and return memory needed by FCP(im) module. + * Compute and return memory needed by FCP(im) module. */ static void bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, @@ -58,7 +331,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, static void bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); @@ -67,12 +340,14 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs); bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs); - fcpim->bfa = bfa; - fcpim->num_itnims = cfg->fwcfg.num_rports; + fcpim->bfa = bfa; + fcpim->num_itnims = cfg->fwcfg.num_rports; fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs; - fcpim->path_tov = cfg->drvcfg.path_tov; - fcpim->delay_comp = cfg->drvcfg.delay_comp; + fcpim->path_tov = cfg->drvcfg.path_tov; + fcpim->delay_comp = cfg->drvcfg.delay_comp; + fcpim->profile_comp = NULL; + fcpim->profile_start = NULL; bfa_itnim_attach(fcpim, meminfo); bfa_tskim_attach(fcpim, meminfo); @@ -103,7 +378,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); struct bfa_itnim_s *itnim; - struct list_head *qe, *qen; + struct list_head *qe, *qen; list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; @@ -111,6 +386,56 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa) } } +void +bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, + struct bfa_itnim_iostats_s *rstats) +{ + bfa_fcpim_add_iostats(lstats, rstats, total_ios); + bfa_fcpim_add_iostats(lstats, rstats, qresumes); + bfa_fcpim_add_iostats(lstats, rstats, no_iotags); + bfa_fcpim_add_iostats(lstats, rstats, io_aborts); + bfa_fcpim_add_iostats(lstats, rstats, no_tskims); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted); + bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout); + bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort); + bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err); + bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err); + bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed); + bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free); + bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts); + bfa_fcpim_add_iostats(lstats, rstats, iocom_utags); + bfa_fcpim_add_iostats(lstats, rstats, io_cleanups); + bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts); + bfa_fcpim_add_iostats(lstats, rstats, onlines); + bfa_fcpim_add_iostats(lstats, rstats, offlines); + 
bfa_fcpim_add_iostats(lstats, rstats, creates); + bfa_fcpim_add_iostats(lstats, rstats, deletes); + bfa_fcpim_add_iostats(lstats, rstats, create_comps); + bfa_fcpim_add_iostats(lstats, rstats, delete_comps); + bfa_fcpim_add_iostats(lstats, rstats, sler_events); + bfa_fcpim_add_iostats(lstats, rstats, fw_create); + bfa_fcpim_add_iostats(lstats, rstats, fw_delete); + bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled); + bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps); + bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds); + bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps); + bfa_fcpim_add_iostats(lstats, rstats, tm_success); + bfa_fcpim_add_iostats(lstats, rstats, tm_failures); + bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps); + bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes); + bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns); + bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups); + bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps); + bfa_fcpim_add_iostats(lstats, rstats, io_comps); + bfa_fcpim_add_iostats(lstats, rstats, input_reqs); + bfa_fcpim_add_iostats(lstats, rstats, output_reqs); + bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); + bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); +} + void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) { @@ -130,21 +455,113 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa) } bfa_status_t -bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_fcpim_stats_s *modstats) +bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats, + u8 lp_tag) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + + /* accumulate IO stats from itnim */ + bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + if (itnim->rport->rport_info.lp_tag != lp_tag) + continue; + bfa_fcpim_add_stats(stats, &(itnim->stats)); + } + return BFA_STATUS_OK; +} +bfa_status_t +bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + + /* accumulate IO stats from itnim */ + bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_fcpim_add_stats(modstats, &(itnim->stats)); + } + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa, + struct bfa_fcpim_del_itn_stats_s *modstats) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + + *modstats = fcpim->del_itn_stats; + + return BFA_STATUS_OK; +} + + +bfa_status_t +bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time) +{ + struct bfa_itnim_s *itnim; + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct list_head *qe, *qen; + + /* accumulate IO stats from itnim */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_clear_stats(itnim); + } + fcpim->io_profile = BFA_TRUE; + fcpim->io_profile_start_time = time; + fcpim->profile_comp = bfa_ioim_profile_comp; + fcpim->profile_start = bfa_ioim_profile_start; + + return BFA_STATUS_OK; +} +bfa_status_t +bfa_fcpim_profile_off(struct bfa_s *bfa) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + fcpim->io_profile = BFA_FALSE; + fcpim->io_profile_start_time = 0; + fcpim->profile_comp = NULL; + fcpim->profile_start = NULL; + return BFA_STATUS_OK; +} - *modstats = fcpim->stats; 
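+/* Clear IO statistics for the active itnims bound to the given lp_tag. */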
+bfa_status_t +bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; + /* clear IO stats from all active itnims */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + if (itnim->rport->rport_info.lp_tag != lp_tag) + continue; + bfa_itnim_clear_stats(itnim); + } return BFA_STATUS_OK; + } bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa) { struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct list_head *qe, *qen; + struct bfa_itnim_s *itnim; - memset(&fcpim->stats, 0, sizeof(struct bfa_fcpim_stats_s)); + /* clear IO stats from all active itnims */ + list_for_each_safe(qe, qen, &fcpim->itnim_q) { + itnim = (struct bfa_itnim_s *) qe; + bfa_itnim_clear_stats(itnim); + } + bfa_os_memset(&fcpim->del_itn_stats, 0, + sizeof(struct bfa_fcpim_del_itn_stats_s)); return BFA_STATUS_OK; } @@ -176,14 +593,6 @@ bfa_fcpim_update_ioredirect(struct bfa_s *bfa) * IO redirection is turned off when QoS is enabled and vice versa */ ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE; - - /* - * Notify the bfad module of a possible state change in - * IO redirection capability, due to a QoS state change. bfad will - * check on the support for io redirection and update the - * fcpim's ioredirect state accordingly. - */ - bfa_cb_ioredirect_state_change((void *)(bfa->bfad), ioredirect); } void @@ -192,3 +601,3012 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state) struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); fcpim->ioredirect = state; } + + + +/** + * BFA ITNIM module state machine functions + */ + +/** + * Beginning/unallocated state - no events expected. + */ +static void +bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_CREATE: + bfa_sm_set_state(itnim, bfa_itnim_sm_created); + itnim->is_online = BFA_FALSE; + bfa_fcpim_additn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Beginning state, only online event expected. + */ +static void +bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_ONLINE: + if (bfa_itnim_send_fwcreate(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Waiting for itnim create response from firmware. 
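+ * A firmware response moves the itnim online, an offline event switches to
+ * firmware delete, a delete request is parked until the create completes,
+ * and an IOC hardware failure drops to the iocdisable state.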
+ */ +static void +bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + bfa_sm_set_state(itnim, bfa_itnim_sm_online); + itnim->is_online = BFA_TRUE; + bfa_itnim_iotov_online(itnim); + bfa_itnim_online_cb(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); + break; + + case BFA_ITNIM_SM_OFFLINE: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_QRESUME: + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + bfa_itnim_send_fwcreate(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_itnim_sm_offline); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_reqq_wcancel(&itnim->reqq_wait); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Waiting for itnim create response from firmware, a delete is pending. + */ +static void +bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_fcpim_delitn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Online state - normal parking state. + */ +static void +bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); + itnim->is_online = BFA_FALSE; + bfa_itnim_iotov_start(itnim); + bfa_itnim_cleanup(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); + itnim->is_online = BFA_FALSE; + bfa_itnim_cleanup(itnim); + break; + + case BFA_ITNIM_SM_SLER: + bfa_sm_set_state(itnim, bfa_itnim_sm_sler); + itnim->is_online = BFA_FALSE; + bfa_itnim_iotov_start(itnim); + bfa_itnim_sler_cb(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + itnim->is_online = BFA_FALSE; + bfa_itnim_iotov_start(itnim); + bfa_itnim_iocdisable_cleanup(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Second level error recovery need. 
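+ * Offline and delete events both start active IO cleanup before leaving this
+ * state; an IOC hardware failure cleans up immediately via the iocdisable
+ * path.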
+ */ +static void +bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); + bfa_itnim_cleanup(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); + bfa_itnim_cleanup(itnim); + bfa_itnim_iotov_delete(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_iocdisable_cleanup(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Going offline. Waiting for active IO cleanup. + */ +static void +bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_CLEANUP: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); + bfa_itnim_iotov_delete(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_iocdisable_cleanup(itnim); + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_SLER: + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Deleting itnim. Waiting for active IO cleanup. + */ +static void +bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_CLEANUP: + if (bfa_itnim_send_fwdelete(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_iocdisable_cleanup(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. + */ +static void +bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + bfa_sm_set_state(itnim, bfa_itnim_sm_offline); + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_itnim_offline_cb(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_QRESUME: + bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); + bfa_itnim_send_fwdelete(itnim); + break; + + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_itnim_offline_cb(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Offline state. 
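+ * A delete request releases the itnim, a new online event re-issues the
+ * firmware create, and an IOC hardware failure moves to the iocdisable state.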
+ */ +static void +bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_itnim_iotov_delete(itnim); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_ONLINE: + if (bfa_itnim_send_fwcreate(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * IOC h/w failed state. + */ +static void +bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_itnim_iotov_delete(itnim); + bfa_fcpim_delitn(itnim); + break; + + case BFA_ITNIM_SM_OFFLINE: + bfa_itnim_offline_cb(itnim); + break; + + case BFA_ITNIM_SM_ONLINE: + if (bfa_itnim_send_fwcreate(itnim)) + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); + else + bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); + break; + + case BFA_ITNIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Itnim is deleted, waiting for firmware response to delete. + */ +static void +bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_FWRSP: + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_fcpim_delitn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +static void +bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, + enum bfa_itnim_event event) +{ + bfa_trc(itnim->bfa, itnim->rport->rport_tag); + bfa_trc(itnim->bfa, event); + + switch (event) { + case BFA_ITNIM_SM_QRESUME: + bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); + bfa_itnim_send_fwdelete(itnim); + break; + + case BFA_ITNIM_SM_HWFAIL: + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + bfa_reqq_wcancel(&itnim->reqq_wait); + bfa_fcpim_delitn(itnim); + break; + + default: + bfa_sm_fault(itnim->bfa, event); + } +} + +/** + * Initiate cleanup of all IOs on an IOC failure. + */ +static void +bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) +{ + struct bfa_tskim_s *tskim; + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &itnim->tsk_q) { + tskim = (struct bfa_tskim_s *) qe; + bfa_tskim_iocdisable(tskim); + } + + list_for_each_safe(qe, qen, &itnim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_iocdisable(ioim); + } + + /** + * For IO request in pending queue, we pretend an early timeout. + */ + list_for_each_safe(qe, qen, &itnim->pending_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_tov(ioim); + } + + list_for_each_safe(qe, qen, &itnim->io_cleanup_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_iocdisable(ioim); + } +} + +/** + * IO cleanup completion + */ +static void +bfa_itnim_cleanp_comp(void *itnim_cbarg) +{ + struct bfa_itnim_s *itnim = itnim_cbarg; + + bfa_stats(itnim, cleanup_comps); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); +} + +/** + * Initiate cleanup of all IOs. 
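+ * Each active IO is moved to the cleanup queue and accounted on the itnim
+ * wait counter, so bfa_itnim_cleanp_comp() runs only after every outstanding
+ * IO and task management request has completed its cleanup.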
+ */ +static void +bfa_itnim_cleanup(struct bfa_itnim_s *itnim) +{ + struct bfa_ioim_s *ioim; + struct bfa_tskim_s *tskim; + struct list_head *qe, *qen; + + bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim); + + list_for_each_safe(qe, qen, &itnim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + + /** + * Move IO to a cleanup queue from active queue so that a later + * TM will not pickup this IO. + */ + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &itnim->io_cleanup_q); + + bfa_wc_up(&itnim->wc); + bfa_ioim_cleanup(ioim); + } + + list_for_each_safe(qe, qen, &itnim->tsk_q) { + tskim = (struct bfa_tskim_s *) qe; + bfa_wc_up(&itnim->wc); + bfa_tskim_cleanup(tskim); + } + + bfa_wc_wait(&itnim->wc); +} + +static void +__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_itnim_s *itnim = cbarg; + + if (complete) + bfa_cb_itnim_online(itnim->ditn); +} + +static void +__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_itnim_s *itnim = cbarg; + + if (complete) + bfa_cb_itnim_offline(itnim->ditn); +} + +static void +__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_itnim_s *itnim = cbarg; + + if (complete) + bfa_cb_itnim_sler(itnim->ditn); +} + +/** + * Call to resume any I/O requests waiting for room in request queue. + */ +static void +bfa_itnim_qresume(void *cbarg) +{ + struct bfa_itnim_s *itnim = cbarg; + + bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); +} + + + + +/** + * bfa_itnim_public + */ + +void +bfa_itnim_iodone(struct bfa_itnim_s *itnim) +{ + bfa_wc_down(&itnim->wc); +} + +void +bfa_itnim_tskdone(struct bfa_itnim_s *itnim) +{ + bfa_wc_down(&itnim->wc); +} + +void +bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, + u32 *dm_len) +{ + /** + * ITN memory + */ + *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); +} + +void +bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) +{ + struct bfa_s *bfa = fcpim->bfa; + struct bfa_itnim_s *itnim; + int i, j; + + INIT_LIST_HEAD(&fcpim->itnim_q); + + itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo); + fcpim->itnim_arr = itnim; + + for (i = 0; i < fcpim->num_itnims; i++, itnim++) { + bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s)); + itnim->bfa = bfa; + itnim->fcpim = fcpim; + itnim->reqq = BFA_REQQ_QOS_LO; + itnim->rport = BFA_RPORT_FROM_TAG(bfa, i); + itnim->iotov_active = BFA_FALSE; + bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim); + + INIT_LIST_HEAD(&itnim->io_q); + INIT_LIST_HEAD(&itnim->io_cleanup_q); + INIT_LIST_HEAD(&itnim->pending_q); + INIT_LIST_HEAD(&itnim->tsk_q); + INIT_LIST_HEAD(&itnim->delay_comp_q); + for (j = 0; j < BFA_IOBUCKET_MAX; j++) + itnim->ioprofile.io_latency.min[j] = ~0; + bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); + } + + bfa_meminfo_kva(minfo) = (u8 *) itnim; +} + +void +bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) +{ + bfa_stats(itnim, ioc_disabled); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL); +} + +static bfa_boolean_t +bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) +{ + struct bfi_itnim_create_req_s *m; + + itnim->msg_no++; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(itnim->bfa, itnim->reqq); + if (!m) { + bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ, + bfa_lpuid(itnim->bfa)); + m->fw_handle = itnim->rport->fw_handle; + m->class = FC_CLASS_3; + m->seq_rec = itnim->seq_rec; + m->msg_no = itnim->msg_no; + bfa_stats(itnim, 
fw_create); + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(itnim->bfa, itnim->reqq); + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) +{ + struct bfi_itnim_delete_req_s *m; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(itnim->bfa, itnim->reqq); + if (!m) { + bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ, + bfa_lpuid(itnim->bfa)); + m->fw_handle = itnim->rport->fw_handle; + bfa_stats(itnim, fw_delete); + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(itnim->bfa, itnim->reqq); + return BFA_TRUE; +} + +/** + * Cleanup all pending failed inflight requests. + */ +static void +bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) +{ + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &itnim->delay_comp_q) { + ioim = (struct bfa_ioim_s *)qe; + bfa_ioim_delayed_comp(ioim, iotov); + } +} + +/** + * Start all pending IO requests. + */ +static void +bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) +{ + struct bfa_ioim_s *ioim; + + bfa_itnim_iotov_stop(itnim); + + /** + * Abort all inflight IO requests in the queue + */ + bfa_itnim_delayed_comp(itnim, BFA_FALSE); + + /** + * Start all pending IO requests. + */ + while (!list_empty(&itnim->pending_q)) { + bfa_q_deq(&itnim->pending_q, &ioim); + list_add_tail(&ioim->qe, &itnim->io_q); + bfa_ioim_start(ioim); + } +} + +/** + * Fail all pending IO requests + */ +static void +bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) +{ + struct bfa_ioim_s *ioim; + + /** + * Fail all inflight IO requests in the queue + */ + bfa_itnim_delayed_comp(itnim, BFA_TRUE); + + /** + * Fail any pending IO requests. + */ + while (!list_empty(&itnim->pending_q)) { + bfa_q_deq(&itnim->pending_q, &ioim); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); + bfa_ioim_tov(ioim); + } +} + +/** + * IO TOV timer callback. Fail any pending IO requests. + */ +static void +bfa_itnim_iotov(void *itnim_arg) +{ + struct bfa_itnim_s *itnim = itnim_arg; + + itnim->iotov_active = BFA_FALSE; + + bfa_cb_itnim_tov_begin(itnim->ditn); + bfa_itnim_iotov_cleanup(itnim); + bfa_cb_itnim_tov(itnim->ditn); +} + +/** + * Start IO TOV timer for failing back pending IO requests in offline state. + */ +static void +bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) +{ + if (itnim->fcpim->path_tov > 0) { + + itnim->iotov_active = BFA_TRUE; + bfa_assert(bfa_itnim_hold_io(itnim)); + bfa_timer_start(itnim->bfa, &itnim->timer, + bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); + } +} + +/** + * Stop IO TOV timer. + */ +static void +bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) +{ + if (itnim->iotov_active) { + itnim->iotov_active = BFA_FALSE; + bfa_timer_stop(&itnim->timer); + } +} + +/** + * Stop IO TOV timer. 
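+ * Called on itnim delete: the timer is stopped, pending IO requests are
+ * failed back, and the path TOV begin/end callbacks are delivered if the
+ * timer was still active.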
+ */ +static void +bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) +{ + bfa_boolean_t pathtov_active = BFA_FALSE; + + if (itnim->iotov_active) + pathtov_active = BFA_TRUE; + + bfa_itnim_iotov_stop(itnim); + if (pathtov_active) + bfa_cb_itnim_tov_begin(itnim->ditn); + bfa_itnim_iotov_cleanup(itnim); + if (pathtov_active) + bfa_cb_itnim_tov(itnim->ditn); +} + +static void +bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa); + fcpim->del_itn_stats.del_itn_iocomp_aborted += + itnim->stats.iocomp_aborted; + fcpim->del_itn_stats.del_itn_iocomp_timedout += + itnim->stats.iocomp_timedout; + fcpim->del_itn_stats.del_itn_iocom_sqer_needed += + itnim->stats.iocom_sqer_needed; + fcpim->del_itn_stats.del_itn_iocom_res_free += + itnim->stats.iocom_res_free; + fcpim->del_itn_stats.del_itn_iocom_hostabrts += + itnim->stats.iocom_hostabrts; + fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios; + fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns; + fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; +} + + + +/** + * bfa_itnim_public + */ + +/** + * Itnim interrupt processing. + */ +void +bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + union bfi_itnim_i2h_msg_u msg; + struct bfa_itnim_s *itnim; + + bfa_trc(bfa, m->mhdr.msg_id); + + msg.msg = m; + + switch (m->mhdr.msg_id) { + case BFI_ITNIM_I2H_CREATE_RSP: + itnim = BFA_ITNIM_FROM_TAG(fcpim, + msg.create_rsp->bfa_handle); + bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); + bfa_stats(itnim, create_comps); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); + break; + + case BFI_ITNIM_I2H_DELETE_RSP: + itnim = BFA_ITNIM_FROM_TAG(fcpim, + msg.delete_rsp->bfa_handle); + bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); + bfa_stats(itnim, delete_comps); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); + break; + + case BFI_ITNIM_I2H_SLER_EVENT: + itnim = BFA_ITNIM_FROM_TAG(fcpim, + msg.sler_event->bfa_handle); + bfa_stats(itnim, sler_events); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER); + break; + + default: + bfa_trc(bfa, m->mhdr.msg_id); + bfa_assert(0); + } +} + + + +/** + * bfa_itnim_api + */ + +struct bfa_itnim_s * +bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_itnim_s *itnim; + + itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); + bfa_assert(itnim->rport == rport); + + itnim->ditn = ditn; + + bfa_stats(itnim, creates); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); + + return itnim; +} + +void +bfa_itnim_delete(struct bfa_itnim_s *itnim) +{ + bfa_stats(itnim, deletes); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE); +} + +void +bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec) +{ + itnim->seq_rec = seq_rec; + bfa_stats(itnim, onlines); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE); +} + +void +bfa_itnim_offline(struct bfa_itnim_s *itnim) +{ + bfa_stats(itnim, offlines); + bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); +} + +/** + * Return true if itnim is considered offline for holding off IO request. + * IO is not held if itnim is being deleted. 
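+ * IO is held only when a non-zero path TOV is configured, the IO TOV timer
+ * is active and the itnim is in one of the transitional offline states.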
+ */ +bfa_boolean_t +bfa_itnim_hold_io(struct bfa_itnim_s *itnim) +{ + return itnim->fcpim->path_tov && itnim->iotov_active && + (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || + bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); +} + +bfa_status_t +bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, + struct bfa_itnim_ioprofile_s *ioprofile) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa); + if (!fcpim->io_profile) + return BFA_STATUS_IOPROFILE_OFF; + + itnim->ioprofile.index = BFA_IOBUCKET_MAX; + itnim->ioprofile.io_profile_start_time = + bfa_io_profile_start_time(itnim->bfa); + itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; + itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div; + *ioprofile = itnim->ioprofile; + + return BFA_STATUS_OK; +} + +void +bfa_itnim_get_stats(struct bfa_itnim_s *itnim, + struct bfa_itnim_iostats_s *stats) +{ + *stats = itnim->stats; +} + +void +bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) +{ + int j; + bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats)); + bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); + for (j = 0; j < BFA_IOBUCKET_MAX; j++) + itnim->ioprofile.io_latency.min[j] = ~0; +} + +/** + * BFA IO module state machine functions + */ + +/** + * IO is not started (unallocated). + */ +static void +bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc_fp(ioim->bfa, ioim->iotag); + bfa_trc_fp(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_START: + if (!bfa_itnim_is_online(ioim->itnim)) { + if (!bfa_itnim_hold_io(ioim->itnim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + list_del(&ioim->qe); + list_add_tail(&ioim->qe, + &ioim->fcpim->ioim_comp_q); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_pathtov, ioim); + } else { + list_del(&ioim->qe); + list_add_tail(&ioim->qe, + &ioim->itnim->pending_q); + } + break; + } + + if (ioim->nsges > BFI_SGE_INLINE) { + if (!bfa_ioim_sge_setup(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); + return; + } + } + + if (!bfa_ioim_send_ioreq(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); + break; + } + + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + break; + + case BFA_IOIM_SM_IOTOV: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_pathtov, ioim); + break; + + case BFA_IOIM_SM_ABORT: + /** + * IO in pending queue can get abort requests. Complete abort + * requests immediately. + */ + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_abort, ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO is waiting for SG pages. 
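+ * Once SG pages are allocated the IO request is sent to firmware (or queued
+ * if the request queue is full); abort, cleanup and IOC failure events cancel
+ * the pending SG page wait and complete the IO back to the host.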
+ */ +static void +bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_SGALLOCED: + if (!bfa_ioim_send_ioreq(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); + break; + } + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_ABORT: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO is active. + */ +static void +bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc_fp(ioim->bfa, ioim->iotag); + bfa_trc_fp(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_COMP_GOOD: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_good_comp, ioim); + break; + + case BFA_IOIM_SM_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, + ioim); + break; + + case BFA_IOIM_SM_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, + ioim); + break; + + case BFA_IOIM_SM_ABORT: + ioim->iosp->abort_explicit = BFA_TRUE; + ioim->io_cbfn = __bfa_cb_ioim_abort; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_abort); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_CLEANUP: + ioim->iosp->abort_explicit = BFA_FALSE; + ioim->io_cbfn = __bfa_cb_ioim_failed; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + case BFA_IOIM_SM_SQRETRY: + if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) { + /* max retry completed free IO */ + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_failed, ioim); + break; + } + /* waiting for IO tag resource free */ + bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** +* IO is retried with new tag. +*/ +static void +bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc_fp(ioim->bfa, ioim->iotag); + bfa_trc_fp(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_FREE: + /* abts and rrq done. 
Now retry the IO with new tag */ + if (!bfa_ioim_send_ioreq(ioim)) { + bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); + break; + } + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + break; + + case BFA_IOIM_SM_CLEANUP: + ioim->iosp->abort_explicit = BFA_FALSE; + ioim->io_cbfn = __bfa_cb_ioim_failed; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, + __bfa_cb_ioim_failed, ioim); + break; + + case BFA_IOIM_SM_ABORT: + /** in this state IO abort is done. + * Waiting for IO tag resource free. + */ + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO is being aborted, waiting for completion from firmware. + */ +static void +bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + case BFA_IOIM_SM_DONE: + case BFA_IOIM_SM_FREE: + break; + + case BFA_IOIM_SM_ABORT_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_ABORT_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_COMP_UTAG: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); + ioim->iosp->abort_explicit = BFA_FALSE; + + if (bfa_ioim_send_abort(ioim)) + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + else { + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + } + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO is being cleaned up (implicit abort), waiting for completion from + * firmware. 
+ */ +static void +bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + case BFA_IOIM_SM_DONE: + case BFA_IOIM_SM_FREE: + break; + + case BFA_IOIM_SM_ABORT: + /** + * IO is already being aborted implicitly + */ + ioim->io_cbfn = __bfa_cb_ioim_abort; + break; + + case BFA_IOIM_SM_ABORT_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_ABORT_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_COMP_UTAG: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + /** + * IO can be in cleanup state already due to TM command. + * 2nd cleanup request comes from ITN offline event. + */ + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO is waiting for room in request CQ + */ +static void +bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_QRESUME: + bfa_sm_set_state(ioim, bfa_ioim_sm_active); + bfa_ioim_send_ioreq(ioim); + break; + + case BFA_IOIM_SM_ABORT: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * Active IO is being aborted, waiting for room in request CQ. 
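+ * The abort request is sent to firmware once queue space becomes available (QRESUME event).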
+ */ +static void +bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_QRESUME: + bfa_sm_set_state(ioim, bfa_ioim_sm_abort); + bfa_ioim_send_abort(ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); + ioim->iosp->abort_explicit = BFA_FALSE; + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); + break; + + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, + ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * Active IO is being cleaned up, waiting for room in request CQ. + */ +static void +bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_QRESUME: + bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); + bfa_ioim_send_abort(ioim); + break; + + case BFA_IOIM_SM_ABORT: + /** + * IO is alraedy being cleaned up implicitly + */ + ioim->io_cbfn = __bfa_cb_ioim_abort; + break; + + case BFA_IOIM_SM_COMP_GOOD: + case BFA_IOIM_SM_COMP: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_DONE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + bfa_reqq_wcancel(&ioim->iosp->reqq_wait); + bfa_ioim_move_to_comp_q(ioim); + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, + ioim); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO bfa callback is pending. + */ +static void +bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc_fp(ioim->bfa, ioim->iotag); + bfa_trc_fp(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_HCB: + bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); + bfa_ioim_free(ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO bfa callback is pending. IO resource cannot be freed. 
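+ * Once the callback runs, the IO is parked on ioim_resfree_q until firmware releases the IO tag.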
+ */ +static void +bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_HCB: + bfa_sm_set_state(ioim, bfa_ioim_sm_resfree); + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q); + break; + + case BFA_IOIM_SM_FREE: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + +/** + * IO is completed, waiting resource free from firmware. + */ +static void +bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, event); + + switch (event) { + case BFA_IOIM_SM_FREE: + bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); + bfa_ioim_free(ioim); + break; + + case BFA_IOIM_SM_CLEANUP: + bfa_ioim_notify_cleanup(ioim); + break; + + case BFA_IOIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(ioim->bfa, event); + } +} + + + +/** + * hal_ioim_private + */ + +static void +__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio); +} + +static void +__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + struct bfi_ioim_rsp_s *m; + u8 *snsinfo = NULL; + u8 sns_len = 0; + s32 residue = 0; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; + if (m->io_status == BFI_IOIM_STS_OK) { + /** + * setup sense information, if present + */ + if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && + m->sns_len) { + sns_len = m->sns_len; + snsinfo = ioim->iosp->snsinfo; + } + + /** + * setup residue value correctly for normal completions + */ + if (m->resid_flags == FCP_RESID_UNDER) { + residue = bfa_os_ntohl(m->residue); + bfa_stats(ioim->itnim, iocomp_underrun); + } + if (m->resid_flags == FCP_RESID_OVER) { + residue = bfa_os_ntohl(m->residue); + residue = -residue; + bfa_stats(ioim->itnim, iocomp_overrun); + } + } + + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status, + m->scsi_status, sns_len, snsinfo, residue); +} + +static void +__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, + 0, 0, NULL, 0); +} + +static void +__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + bfa_stats(ioim->itnim, path_tov_expired); + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, + 0, 0, NULL, 0); +} + +static void +__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_ioim_s *ioim = cbarg; + + if (!complete) { + bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); + return; + } + + bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); +} + +static void +bfa_ioim_sgpg_alloced(void *cbarg) +{ + struct bfa_ioim_s *ioim = cbarg; + + ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); + list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); + bfa_ioim_sgpg_setup(ioim); + 
bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); +} + +/** + * Send I/O request to firmware. + */ +static bfa_boolean_t +bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) +{ + struct bfa_itnim_s *itnim = ioim->itnim; + struct bfi_ioim_req_s *m; + static struct fcp_cmnd_s cmnd_z0 = { 0 }; + struct bfi_sge_s *sge; + u32 pgdlen = 0; + u32 fcp_dl; + u64 addr; + struct scatterlist *sg; + struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(ioim->bfa, ioim->reqq); + if (!m) { + bfa_stats(ioim->itnim, qwait); + bfa_reqq_wait(ioim->bfa, ioim->reqq, + &ioim->iosp->reqq_wait); + return BFA_FALSE; + } + + /** + * build i/o request message next + */ + m->io_tag = bfa_os_htons(ioim->iotag); + m->rport_hdl = ioim->itnim->rport->fw_handle; + m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); + + /** + * build inline IO SG element here + */ + sge = &m->sges[0]; + if (ioim->nsges) { + sg = (struct scatterlist *)scsi_sglist(cmnd); + addr = bfa_os_sgaddr(sg_dma_address(sg)); + sge->sga = *(union bfi_addr_u *) &addr; + pgdlen = sg_dma_len(sg); + sge->sg_len = pgdlen; + sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? + BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; + bfa_sge_to_be(sge); + sge++; + } + + if (ioim->nsges > BFI_SGE_INLINE) { + sge->sga = ioim->sgpg->sgpg_pa; + } else { + sge->sga.a32.addr_lo = 0; + sge->sga.a32.addr_hi = 0; + } + sge->sg_len = pgdlen; + sge->flags = BFI_SGE_PGDLEN; + bfa_sge_to_be(sge); + + /** + * set up I/O command parameters + */ + bfa_os_assign(m->cmnd, cmnd_z0); + m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); + m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); + bfa_os_assign(m->cmnd.cdb, + *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio)); + fcp_dl = bfa_cb_ioim_get_size(ioim->dio); + m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl); + + /** + * set up I/O message header + */ + switch (m->cmnd.iodir) { + case FCP_IODIR_READ: + bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa)); + bfa_stats(itnim, input_reqs); + ioim->itnim->stats.rd_throughput += fcp_dl; + break; + case FCP_IODIR_WRITE: + bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa)); + bfa_stats(itnim, output_reqs); + ioim->itnim->stats.wr_throughput += fcp_dl; + break; + case FCP_IODIR_RW: + bfa_stats(itnim, input_reqs); + bfa_stats(itnim, output_reqs); + default: + bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); + } + if (itnim->seq_rec || + (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1))) + bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); + +#ifdef IOIM_ADVANCED + m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio); + m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); + m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); + + /** + * Handle large CDB (>16 bytes). + */ + m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - + FCP_CMND_CDB_LEN) / sizeof(u32); + if (m->cmnd.addl_cdb_len) { + bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) + bfa_cb_ioim_get_cdb(ioim->dio) + 1, + m->cmnd.addl_cdb_len * sizeof(u32)); + fcp_cmnd_fcpdl(&m->cmnd) = + bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio)); + } +#endif + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(ioim->bfa, ioim->reqq); + return BFA_TRUE; +} + +/** + * Setup any additional SG pages needed.Inline SG element is setup + * at queuing time. 
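+ * If no SG pages are free, the IO waits on the SG page queue and resumes via bfa_ioim_sgpg_alloced().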
+ */ +static bfa_boolean_t +bfa_ioim_sge_setup(struct bfa_ioim_s *ioim) +{ + u16 nsgpgs; + + bfa_assert(ioim->nsges > BFI_SGE_INLINE); + + /** + * allocate SG pages needed + */ + nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); + if (!nsgpgs) + return BFA_TRUE; + + if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs) + != BFA_STATUS_OK) { + bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs); + return BFA_FALSE; + } + + ioim->nsgpgs = nsgpgs; + bfa_ioim_sgpg_setup(ioim); + + return BFA_TRUE; +} + +static void +bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim) +{ + int sgeid, nsges, i; + struct bfi_sge_s *sge; + struct bfa_sgpg_s *sgpg; + u32 pgcumsz; + u64 addr; + struct scatterlist *sg; + struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; + + sgeid = BFI_SGE_INLINE; + ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q); + + sg = scsi_sglist(cmnd); + sg = sg_next(sg); + + do { + sge = sgpg->sgpg->sges; + nsges = ioim->nsges - sgeid; + if (nsges > BFI_SGPG_DATA_SGES) + nsges = BFI_SGPG_DATA_SGES; + + pgcumsz = 0; + for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) { + addr = bfa_os_sgaddr(sg_dma_address(sg)); + sge->sga = *(union bfi_addr_u *) &addr; + sge->sg_len = sg_dma_len(sg); + pgcumsz += sge->sg_len; + + /** + * set flags + */ + if (i < (nsges - 1)) + sge->flags = BFI_SGE_DATA; + else if (sgeid < (ioim->nsges - 1)) + sge->flags = BFI_SGE_DATA_CPL; + else + sge->flags = BFI_SGE_DATA_LAST; + + bfa_sge_to_le(sge); + } + + sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); + + /** + * set the link element of each page + */ + if (sgeid == ioim->nsges) { + sge->flags = BFI_SGE_PGDLEN; + sge->sga.a32.addr_lo = 0; + sge->sga.a32.addr_hi = 0; + } else { + sge->flags = BFI_SGE_LINK; + sge->sga = sgpg->sgpg_pa; + } + sge->sg_len = pgcumsz; + + bfa_sge_to_le(sge); + } while (sgeid < ioim->nsges); +} + +/** + * Send I/O abort request to firmware. + */ +static bfa_boolean_t +bfa_ioim_send_abort(struct bfa_ioim_s *ioim) +{ + struct bfi_ioim_abort_req_s *m; + enum bfi_ioim_h2i msgop; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(ioim->bfa, ioim->reqq); + if (!m) + return BFA_FALSE; + + /** + * build i/o request message next + */ + if (ioim->iosp->abort_explicit) + msgop = BFI_IOIM_H2I_IOABORT_REQ; + else + msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; + + bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); + m->io_tag = bfa_os_htons(ioim->iotag); + m->abort_tag = ++ioim->abort_tag; + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(ioim->bfa, ioim->reqq); + return BFA_TRUE; +} + +/** + * Call to resume any I/O requests waiting for room in request queue. + */ +static void +bfa_ioim_qresume(void *cbarg) +{ + struct bfa_ioim_s *ioim = cbarg; + + bfa_stats(ioim->itnim, qresumes); + bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME); +} + + +static void +bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) +{ + /** + * Move IO from itnim queue to fcpim global queue since itnim will be + * freed. 
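+ * The completion is then accounted to the owning tskim, if any, otherwise to the itnim.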
+ */ + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); + + if (!ioim->iosp->tskim) { + if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) { + bfa_cb_dequeue(&ioim->hcb_qe); + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q); + } + bfa_itnim_iodone(ioim->itnim); + } else + bfa_tskim_iodone(ioim->iosp->tskim); +} + +static bfa_boolean_t +bfa_ioim_is_abortable(struct bfa_ioim_s *ioim) +{ + if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) && + (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) || + (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree))) + return BFA_FALSE; + + return BFA_TRUE; +} + +/** + * or after the link comes back. + */ +void +bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) +{ + /** + * If path tov timer expired, failback with PATHTOV status - these + * IO requests are not normally retried by IO stack. + * + * Otherwise device cameback online and fail it with normal failed + * status so that IO stack retries these failed IO requests. + */ + if (iotov) + ioim->io_cbfn = __bfa_cb_ioim_pathtov; + else { + ioim->io_cbfn = __bfa_cb_ioim_failed; + bfa_stats(ioim->itnim, iocom_nexus_abort); + } + bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); + + /** + * Move IO to fcpim global queue since itnim will be + * freed. + */ + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); +} + + + +/** + * hal_ioim_friend + */ + +/** + * Memory allocation and initialization. + */ +void +bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) +{ + struct bfa_ioim_s *ioim; + struct bfa_ioim_sp_s *iosp; + u16 i; + u8 *snsinfo; + u32 snsbufsz; + + /** + * claim memory first + */ + ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); + fcpim->ioim_arr = ioim; + bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs); + + iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo); + fcpim->ioim_sp_arr = iosp; + bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); + + /** + * Claim DMA memory for per IO sense data. + */ + snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; + fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo); + bfa_meminfo_dma_phys(minfo) += snsbufsz; + + fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo); + bfa_meminfo_dma_virt(minfo) += snsbufsz; + snsinfo = fcpim->snsbase.kva; + bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); + + /** + * Initialize ioim free queues + */ + INIT_LIST_HEAD(&fcpim->ioim_free_q); + INIT_LIST_HEAD(&fcpim->ioim_resfree_q); + INIT_LIST_HEAD(&fcpim->ioim_comp_q); + + for (i = 0; i < fcpim->num_ioim_reqs; + i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) { + /* + * initialize IOIM + */ + bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s)); + ioim->iotag = i; + ioim->bfa = fcpim->bfa; + ioim->fcpim = fcpim; + ioim->iosp = iosp; + iosp->snsinfo = snsinfo; + INIT_LIST_HEAD(&ioim->sgpg_q); + bfa_reqq_winit(&ioim->iosp->reqq_wait, + bfa_ioim_qresume, ioim); + bfa_sgpg_winit(&ioim->iosp->sgpg_wqe, + bfa_ioim_sgpg_alloced, ioim); + bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); + + list_add_tail(&ioim->qe, &fcpim->ioim_free_q); + } +} + +/** + * Driver detach time call. 
+ */ +void +bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim) +{ +} + +void +bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; + struct bfa_ioim_s *ioim; + u16 iotag; + enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; + + iotag = bfa_os_ntohs(rsp->io_tag); + + ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); + bfa_assert(ioim->iotag == iotag); + + bfa_trc(ioim->bfa, ioim->iotag); + bfa_trc(ioim->bfa, rsp->io_status); + bfa_trc(ioim->bfa, rsp->reuse_io_tag); + + if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) + bfa_os_assign(ioim->iosp->comp_rspmsg, *m); + + switch (rsp->io_status) { + case BFI_IOIM_STS_OK: + bfa_stats(ioim->itnim, iocomp_ok); + if (rsp->reuse_io_tag == 0) + evt = BFA_IOIM_SM_DONE; + else + evt = BFA_IOIM_SM_COMP; + break; + + case BFI_IOIM_STS_TIMEDOUT: + bfa_stats(ioim->itnim, iocomp_timedout); + case BFI_IOIM_STS_ABORTED: + rsp->io_status = BFI_IOIM_STS_ABORTED; + bfa_stats(ioim->itnim, iocomp_aborted); + if (rsp->reuse_io_tag == 0) + evt = BFA_IOIM_SM_DONE; + else + evt = BFA_IOIM_SM_COMP; + break; + + case BFI_IOIM_STS_PROTO_ERR: + bfa_stats(ioim->itnim, iocom_proto_err); + bfa_assert(rsp->reuse_io_tag); + evt = BFA_IOIM_SM_COMP; + break; + + case BFI_IOIM_STS_SQER_NEEDED: + bfa_stats(ioim->itnim, iocom_sqer_needed); + bfa_assert(rsp->reuse_io_tag == 0); + evt = BFA_IOIM_SM_SQRETRY; + break; + + case BFI_IOIM_STS_RES_FREE: + bfa_stats(ioim->itnim, iocom_res_free); + evt = BFA_IOIM_SM_FREE; + break; + + case BFI_IOIM_STS_HOST_ABORTED: + bfa_stats(ioim->itnim, iocom_hostabrts); + if (rsp->abort_tag != ioim->abort_tag) { + bfa_trc(ioim->bfa, rsp->abort_tag); + bfa_trc(ioim->bfa, ioim->abort_tag); + return; + } + + if (rsp->reuse_io_tag) + evt = BFA_IOIM_SM_ABORT_COMP; + else + evt = BFA_IOIM_SM_ABORT_DONE; + break; + + case BFI_IOIM_STS_UTAG: + bfa_stats(ioim->itnim, iocom_utags); + evt = BFA_IOIM_SM_COMP_UTAG; + break; + + default: + bfa_assert(0); + } + + bfa_sm_send_event(ioim, evt); +} + +void +bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; + struct bfa_ioim_s *ioim; + u16 iotag; + + iotag = bfa_os_ntohs(rsp->io_tag); + + ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); + bfa_assert(ioim->iotag == iotag); + + bfa_trc_fp(ioim->bfa, ioim->iotag); + bfa_ioim_cb_profile_comp(fcpim, ioim); + + bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); +} + +void +bfa_ioim_profile_start(struct bfa_ioim_s *ioim) +{ + ioim->start_time = bfa_os_get_clock(); +} + +void +bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) +{ + u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); + u32 index = bfa_ioim_get_index(fcp_dl); + u64 end_time = bfa_os_get_clock(); + struct bfa_itnim_latency_s *io_lat = + &(ioim->itnim->ioprofile.io_latency); + u32 val = (u32)(end_time - ioim->start_time); + + bfa_itnim_ioprofile_update(ioim->itnim, index); + + io_lat->count[index]++; + io_lat->min[index] = (io_lat->min[index] < val) ? + io_lat->min[index] : val; + io_lat->max[index] = (io_lat->max[index] > val) ? + io_lat->max[index] : val; + io_lat->avg[index] += val; +} +/** + * Called by itnim to clean up IO while going offline. 
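+ * No TM command is associated with this cleanup (iosp->tskim is cleared).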
+ */ +void +bfa_ioim_cleanup(struct bfa_ioim_s *ioim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_stats(ioim->itnim, io_cleanups); + + ioim->iosp->tskim = NULL; + bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); +} + +void +bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_stats(ioim->itnim, io_tmaborts); + + ioim->iosp->tskim = tskim; + bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); +} + +/** + * IOC failure handling. + */ +void +bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_stats(ioim->itnim, io_iocdowns); + bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); +} + +/** + * IO offline TOV popped. Fail the pending IO. + */ +void +bfa_ioim_tov(struct bfa_ioim_s *ioim) +{ + bfa_trc(ioim->bfa, ioim->iotag); + bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV); +} + + + +/** + * hal_ioim_api + */ + +/** + * Allocate IOIM resource for initiator mode I/O request. + */ +struct bfa_ioim_s * +bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, + struct bfa_itnim_s *itnim, u16 nsges) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_ioim_s *ioim; + + /** + * alocate IOIM resource + */ + bfa_q_deq(&fcpim->ioim_free_q, &ioim); + if (!ioim) { + bfa_stats(itnim, no_iotags); + return NULL; + } + + ioim->dio = dio; + ioim->itnim = itnim; + ioim->nsges = nsges; + ioim->nsgpgs = 0; + + bfa_stats(itnim, total_ios); + fcpim->ios_active++; + + list_add_tail(&ioim->qe, &itnim->io_q); + bfa_trc_fp(ioim->bfa, ioim->iotag); + + return ioim; +} + +void +bfa_ioim_free(struct bfa_ioim_s *ioim) +{ + struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; + + bfa_trc_fp(ioim->bfa, ioim->iotag); + bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit)); + + bfa_assert_fp(list_empty(&ioim->sgpg_q) || + (ioim->nsges > BFI_SGE_INLINE)); + + if (ioim->nsgpgs > 0) + bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); + + bfa_stats(ioim->itnim, io_comps); + fcpim->ios_active--; + + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &fcpim->ioim_free_q); +} + +void +bfa_ioim_start(struct bfa_ioim_s *ioim) +{ + bfa_trc_fp(ioim->bfa, ioim->iotag); + + bfa_ioim_cb_profile_start(ioim->fcpim, ioim); + + /** + * Obtain the queue over which this request has to be issued + */ + ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? + bfa_cb_ioim_get_reqq(ioim->dio) : + bfa_itnim_get_reqq(ioim); + + bfa_sm_send_event(ioim, BFA_IOIM_SM_START); +} + +/** + * Driver I/O abort request. + */ +bfa_status_t +bfa_ioim_abort(struct bfa_ioim_s *ioim) +{ + + bfa_trc(ioim->bfa, ioim->iotag); + + if (!bfa_ioim_is_abortable(ioim)) + return BFA_STATUS_FAILED; + + bfa_stats(ioim->itnim, io_aborts); + bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT); + + return BFA_STATUS_OK; +} + + +/** + * BFA TSKIM state machine functions + */ + +/** + * Task management command beginning state. + */ +static void +bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_START: + bfa_sm_set_state(tskim, bfa_tskim_sm_active); + bfa_tskim_gather_ios(tskim); + + /** + * If device is offline, do not send TM on wire. Just cleanup + * any pending IO requests and complete TM request. 
+ */ + if (!bfa_itnim_is_online(tskim->itnim)) { + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + tskim->tsk_status = BFI_TSKIM_STS_OK; + bfa_tskim_cleanup_ios(tskim); + return; + } + + if (!bfa_tskim_send(tskim)) { + bfa_sm_set_state(tskim, bfa_tskim_sm_qfull); + bfa_stats(tskim->itnim, tm_qwait); + bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, + &tskim->reqq_wait); + } + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/** + * brief + * TM command is active, awaiting completion from firmware to + * cleanup IO requests in TM scope. + */ +static void +bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_DONE: + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + bfa_tskim_cleanup_ios(tskim); + break; + + case BFA_TSKIM_SM_CLEANUP: + bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); + if (!bfa_tskim_send_abort(tskim)) { + bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull); + bfa_stats(tskim->itnim, tm_qwait); + bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, + &tskim->reqq_wait); + } + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/** + * An active TM is being cleaned up since ITN is offline. Awaiting cleanup + * completion event from firmware. + */ +static void +bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_DONE: + /** + * Ignore and wait for ABORT completion from firmware. + */ + break; + + case BFA_TSKIM_SM_CLEANUP_DONE: + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + bfa_tskim_cleanup_ios(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +static void +bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_IOS_DONE: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done); + break; + + case BFA_TSKIM_SM_CLEANUP: + /** + * Ignore, TM command completed on wire. + * Notify TM conmpletion on IO cleanup completion. + */ + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/** + * Task management command is waiting for room in request CQ + */ +static void +bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_QRESUME: + bfa_sm_set_state(tskim, bfa_tskim_sm_active); + bfa_tskim_send(tskim); + break; + + case BFA_TSKIM_SM_CLEANUP: + /** + * No need to send TM on wire since ITN is offline. 
+ */ + bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); + bfa_reqq_wcancel(&tskim->reqq_wait); + bfa_tskim_cleanup_ios(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_reqq_wcancel(&tskim->reqq_wait); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/** + * Task management command is active, awaiting for room in request CQ + * to send clean up request. + */ +static void +bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, + enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_DONE: + bfa_reqq_wcancel(&tskim->reqq_wait); + /** + * + * Fall through !!! + */ + + case BFA_TSKIM_SM_QRESUME: + bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); + bfa_tskim_send_abort(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); + bfa_reqq_wcancel(&tskim->reqq_wait); + bfa_tskim_iocdisable_ios(tskim); + bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + +/** + * BFA callback is pending + */ +static void +bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) +{ + bfa_trc(tskim->bfa, event); + + switch (event) { + case BFA_TSKIM_SM_HCB: + bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); + bfa_tskim_free(tskim); + break; + + case BFA_TSKIM_SM_CLEANUP: + bfa_tskim_notify_comp(tskim); + break; + + case BFA_TSKIM_SM_HWFAIL: + break; + + default: + bfa_sm_fault(tskim->bfa, event); + } +} + + + +/** + * hal_tskim_private + */ + +static void +__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_tskim_s *tskim = cbarg; + + if (!complete) { + bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); + return; + } + + bfa_stats(tskim->itnim, tm_success); + bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status); +} + +static void +__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_tskim_s *tskim = cbarg; + + if (!complete) { + bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); + return; + } + + bfa_stats(tskim->itnim, tm_failures); + bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, + BFI_TSKIM_STS_FAILED); +} + +static bfa_boolean_t +bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun) +{ + switch (tskim->tm_cmnd) { + case FCP_TM_TARGET_RESET: + return BFA_TRUE; + + case FCP_TM_ABORT_TASK_SET: + case FCP_TM_CLEAR_TASK_SET: + case FCP_TM_LUN_RESET: + case FCP_TM_CLEAR_ACA: + return (tskim->lun == lun); + + default: + bfa_assert(0); + } + + return BFA_FALSE; +} + +/** + * Gather affected IO requests and task management commands. + */ +static void +bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) +{ + struct bfa_itnim_s *itnim = tskim->itnim; + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + INIT_LIST_HEAD(&tskim->io_q); + + /** + * Gather any active IO requests first. + */ + list_for_each_safe(qe, qen, &itnim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + if (bfa_tskim_match_scope + (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &tskim->io_q); + } + } + + /** + * Failback any pending IO requests immediately. 
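+ * Pending IOs never reached firmware, so they are completed with path TOV status.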
+ */ + list_for_each_safe(qe, qen, &itnim->pending_q) { + ioim = (struct bfa_ioim_s *) qe; + if (bfa_tskim_match_scope + (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { + list_del(&ioim->qe); + list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); + bfa_ioim_tov(ioim); + } + } +} + +/** + * IO cleanup completion + */ +static void +bfa_tskim_cleanp_comp(void *tskim_cbarg) +{ + struct bfa_tskim_s *tskim = tskim_cbarg; + + bfa_stats(tskim->itnim, tm_io_comps); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); +} + +/** + * Gather affected IO requests and task management commands. + */ +static void +bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) +{ + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim); + + list_for_each_safe(qe, qen, &tskim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_wc_up(&tskim->wc); + bfa_ioim_cleanup_tm(ioim, tskim); + } + + bfa_wc_wait(&tskim->wc); +} + +/** + * Send task management request to firmware. + */ +static bfa_boolean_t +bfa_tskim_send(struct bfa_tskim_s *tskim) +{ + struct bfa_itnim_s *itnim = tskim->itnim; + struct bfi_tskim_req_s *m; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(tskim->bfa, itnim->reqq); + if (!m) + return BFA_FALSE; + + /** + * build i/o request message next + */ + bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, + bfa_lpuid(tskim->bfa)); + + m->tsk_tag = bfa_os_htons(tskim->tsk_tag); + m->itn_fhdl = tskim->itnim->rport->fw_handle; + m->t_secs = tskim->tsecs; + m->lun = tskim->lun; + m->tm_flags = tskim->tm_cmnd; + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(tskim->bfa, itnim->reqq); + return BFA_TRUE; +} + +/** + * Send abort request to cleanup an active TM to firmware. + */ +static bfa_boolean_t +bfa_tskim_send_abort(struct bfa_tskim_s *tskim) +{ + struct bfa_itnim_s *itnim = tskim->itnim; + struct bfi_tskim_abortreq_s *m; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(tskim->bfa, itnim->reqq); + if (!m) + return BFA_FALSE; + + /** + * build i/o request message next + */ + bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, + bfa_lpuid(tskim->bfa)); + + m->tsk_tag = bfa_os_htons(tskim->tsk_tag); + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(tskim->bfa, itnim->reqq); + return BFA_TRUE; +} + +/** + * Call to resume task management cmnd waiting for room in request queue. + */ +static void +bfa_tskim_qresume(void *cbarg) +{ + struct bfa_tskim_s *tskim = cbarg; + + bfa_stats(tskim->itnim, tm_qresumes); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); +} + +/** + * Cleanup IOs associated with a task mangement command on IOC failures. + */ +static void +bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) +{ + struct bfa_ioim_s *ioim; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &tskim->io_q) { + ioim = (struct bfa_ioim_s *) qe; + bfa_ioim_iocdisable(ioim); + } +} + + + +/** + * hal_tskim_friend + */ + +/** + * Notification on completions from related ioim. + */ +void +bfa_tskim_iodone(struct bfa_tskim_s *tskim) +{ + bfa_wc_down(&tskim->wc); +} + +/** + * Handle IOC h/w failure notification from itnim. + */ +void +bfa_tskim_iocdisable(struct bfa_tskim_s *tskim) +{ + tskim->notify = BFA_FALSE; + bfa_stats(tskim->itnim, tm_iocdowns); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); +} + +/** + * Cleanup TM command and associated IOs as part of ITNIM offline. 
+ */ +void +bfa_tskim_cleanup(struct bfa_tskim_s *tskim) +{ + tskim->notify = BFA_TRUE; + bfa_stats(tskim->itnim, tm_cleanups); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); +} + +/** + * Memory allocation and initialization. + */ +void +bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) +{ + struct bfa_tskim_s *tskim; + u16 i; + + INIT_LIST_HEAD(&fcpim->tskim_free_q); + + tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo); + fcpim->tskim_arr = tskim; + + for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { + /* + * initialize TSKIM + */ + bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s)); + tskim->tsk_tag = i; + tskim->bfa = fcpim->bfa; + tskim->fcpim = fcpim; + tskim->notify = BFA_FALSE; + bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume, + tskim); + bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); + + list_add_tail(&tskim->qe, &fcpim->tskim_free_q); + } + + bfa_meminfo_kva(minfo) = (u8 *) tskim; +} + +void +bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) +{ + /** + * @todo + */ +} + +void +bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; + struct bfa_tskim_s *tskim; + u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag); + + tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); + bfa_assert(tskim->tsk_tag == tsk_tag); + + tskim->tsk_status = rsp->tsk_status; + + /** + * Firmware sends BFI_TSKIM_STS_ABORTED status for abort + * requests. All other statuses are for normal completions. + */ + if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { + bfa_stats(tskim->itnim, tm_cleanup_comps); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); + } else { + bfa_stats(tskim->itnim, tm_fw_rsps); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); + } +} + + + +/** + * hal_tskim_api + */ + + +struct bfa_tskim_s * +bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) +{ + struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); + struct bfa_tskim_s *tskim; + + bfa_q_deq(&fcpim->tskim_free_q, &tskim); + + if (tskim) + tskim->dtsk = dtsk; + + return tskim; +} + +void +bfa_tskim_free(struct bfa_tskim_s *tskim) +{ + bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); + list_del(&tskim->qe); + list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); +} + +/** + * Start a task management command. + * + * @param[in] tskim BFA task management command instance + * @param[in] itnim i-t nexus for the task management command + * @param[in] lun lun, if applicable + * @param[in] tm_cmnd Task management command code. + * @param[in] t_secs Timeout in seconds + * + * @return None. + */ +void +bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun, + enum fcp_tm_cmnd tm_cmnd, u8 tsecs) +{ + tskim->itnim = itnim; + tskim->lun = lun; + tskim->tm_cmnd = tm_cmnd; + tskim->tsecs = tsecs; + tskim->notify = BFA_FALSE; + bfa_stats(itnim, tm_cmnds); + + list_add_tail(&tskim->qe, &itnim->tsk_q); + bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); +} diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h new file mode 100644 index 000000000000..3bf343160aac --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __BFA_FCPIM_H__ +#define __BFA_FCPIM_H__ + +#include "bfa.h" +#include "bfa_svc.h" +#include "bfi_ms.h" +#include "bfa_defs_svc.h" +#include "bfa_cs.h" + + +#define BFA_ITNIM_MIN 32 +#define BFA_ITNIM_MAX 1024 + +#define BFA_IOIM_MIN 8 +#define BFA_IOIM_MAX 2000 + +#define BFA_TSKIM_MIN 4 +#define BFA_TSKIM_MAX 512 +#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */ +#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ + + +#define bfa_itnim_ioprofile_update(__itnim, __index) \ + (__itnim->ioprofile.iocomps[__index]++) + +#define BFA_IOIM_RETRY_TAG_OFFSET 11 +#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */ +#define BFA_IOIM_RETRY_MAX 7 + +/* Buckets are are 512 bytes to 2MB */ +static inline u32 +bfa_ioim_get_index(u32 n) { + int pos = 0; + if (n >= (1UL)<<22) + return BFA_IOBUCKET_MAX - 1; + n >>= 8; + if (n >= (1UL)<<16) + n >>= 16; pos += 16; + if (n >= 1 << 8) + n >>= 8; pos += 8; + if (n >= 1 << 4) + n >>= 4; pos += 4; + if (n >= 1 << 2) + n >>= 2; pos += 2; + if (n >= 1 << 1) + pos += 1; + + return (n == 0) ? (0) : pos; +} + +/* + * forward declarations + */ +struct bfa_ioim_s; +struct bfa_tskim_s; +struct bfad_ioim_s; +struct bfad_tskim_s; + +typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim); + +struct bfa_fcpim_mod_s { + struct bfa_s *bfa; + struct bfa_itnim_s *itnim_arr; + struct bfa_ioim_s *ioim_arr; + struct bfa_ioim_sp_s *ioim_sp_arr; + struct bfa_tskim_s *tskim_arr; + struct bfa_dma_s snsbase; + int num_itnims; + int num_ioim_reqs; + int num_tskim_reqs; + u32 path_tov; + u16 q_depth; + u8 reqq; /* Request queue to be used */ + u8 rsvd; + struct list_head itnim_q; /* queue of active itnim */ + struct list_head ioim_free_q; /* free IO resources */ + struct list_head ioim_resfree_q; /* IOs waiting for f/w */ + struct list_head ioim_comp_q; /* IO global comp Q */ + struct list_head tskim_free_q; + u32 ios_active; /* current active IOs */ + u32 delay_comp; + struct bfa_fcpim_del_itn_stats_s del_itn_stats; + bfa_boolean_t ioredirect; + bfa_boolean_t io_profile; + u32 io_profile_start_time; + bfa_fcpim_profile_t profile_comp; + bfa_fcpim_profile_t profile_start; +}; + +/** + * BFA IO (initiator mode) + */ +struct bfa_ioim_s { + struct list_head qe; /* queue elememt */ + bfa_sm_t sm; /* BFA ioim state machine */ + struct bfa_s *bfa; /* BFA module */ + struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ + struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ + struct bfad_ioim_s *dio; /* driver IO handle */ + u16 iotag; /* FWI IO tag */ + u16 abort_tag; /* unqiue abort request tag */ + u16 nsges; /* number of SG elements */ + u16 nsgpgs; /* number of SG pages */ + struct bfa_sgpg_s *sgpg; /* first SG page */ + struct list_head sgpg_q; /* allocated SG pages */ + struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ + bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ + struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ + u8 reqq; /* Request queue for I/O */ + u64 start_time; /* IO's Profile start val */ +}; + + +struct bfa_ioim_sp_s { + struct bfi_msg_s comp_rspmsg; /* IO comp f/w 
response */ + u8 *snsinfo; /* sense info for this IO */ + struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + bfa_boolean_t abort_explicit; /* aborted by OS */ + struct bfa_tskim_s *tskim; /* Relevant TM cmd */ +}; + +/** + * BFA Task management command (initiator mode) + */ +struct bfa_tskim_s { + struct list_head qe; + bfa_sm_t sm; + struct bfa_s *bfa; /* BFA module */ + struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ + struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ + struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ + bfa_boolean_t notify; /* notify itnim on TM comp */ + lun_t lun; /* lun if applicable */ + enum fcp_tm_cmnd tm_cmnd; /* task management command */ + u16 tsk_tag; /* FWI IO tag */ + u8 tsecs; /* timeout in seconds */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + struct list_head io_q; /* queue of affected IOs */ + struct bfa_wc_s wc; /* waiting counter */ + struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ + enum bfi_tskim_status tsk_status; /* TM status */ +}; + + +/** + * BFA i-t-n (initiator mode) + */ +struct bfa_itnim_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* i-t-n im BFA state machine */ + struct bfa_s *bfa; /* bfa instance */ + struct bfa_rport_s *rport; /* bfa rport */ + void *ditn; /* driver i-t-n structure */ + struct bfi_mhdr_s mhdr; /* pre-built mhdr */ + u8 msg_no; /* itnim/rport firmware handle */ + u8 reqq; /* CQ for requests */ + struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ + struct list_head pending_q; /* queue of pending IO requests */ + struct list_head io_q; /* queue of active IO requests */ + struct list_head io_cleanup_q; /* IO being cleaned up */ + struct list_head tsk_q; /* queue of active TM commands */ + struct list_head delay_comp_q; /* queue of failed inflight cmds */ + bfa_boolean_t seq_rec; /* SQER supported */ + bfa_boolean_t is_online; /* itnim is ONLINE for IO */ + bfa_boolean_t iotov_active; /* IO TOV timer is active */ + struct bfa_wc_s wc; /* waiting counter */ + struct bfa_timer_s timer; /* pending IO TOV */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ + struct bfa_itnim_iostats_s stats; + struct bfa_itnim_ioprofile_s ioprofile; +}; + + +#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) +#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) +#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ + (&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)]) +#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \ + (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) + +#define bfa_io_profile_start_time(_bfa) \ + (_bfa->modules.fcpim_mod.io_profile_start_time) +#define bfa_fcpim_get_io_profile(_bfa) \ + (_bfa->modules.fcpim_mod.io_profile) + +static inline bfa_boolean_t +bfa_ioim_get_iotag(struct bfa_ioim_s *ioim) +{ + u16 k = ioim->iotag; + + k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++; + + if (k > BFA_IOIM_RETRY_MAX) + return BFA_FALSE; + ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK; + ioim->iotag |= k<ioredirect) + +#define bfa_fcpim_get_next_reqq(__bfa, __qid) \ +{ \ + struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \ + __fcpim->reqq++; \ + __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \ + *(__qid) = __fcpim->reqq; \ +} + +#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \ + *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1)); +/* + * bfa itnim API functions + */ +struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa, + struct 
bfa_rport_s *rport, void *itnim); +void bfa_itnim_delete(struct bfa_itnim_s *itnim); +void bfa_itnim_online(struct bfa_itnim_s *itnim, + bfa_boolean_t seq_rec); +void bfa_itnim_offline(struct bfa_itnim_s *itnim); +void bfa_itnim_get_stats(struct bfa_itnim_s *itnim, + struct bfa_itnim_iostats_s *stats); +void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); +bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, + struct bfa_itnim_ioprofile_s *ioprofile); +#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) + +/** + * BFA completion callback for bfa_itnim_online(). + * + * @param[in] itnim FCS or driver itnim instance + * + * return None + */ +void bfa_cb_itnim_online(void *itnim); + +/** + * BFA completion callback for bfa_itnim_offline(). + * + * @param[in] itnim FCS or driver itnim instance + * + * return None + */ +void bfa_cb_itnim_offline(void *itnim); +void bfa_cb_itnim_tov_begin(void *itnim); +void bfa_cb_itnim_tov(void *itnim); + +/** + * BFA notification to FCS/driver for second level error recovery. + * + * Atleast one I/O request has timedout and target is unresponsive to + * repeated abort requests. Second level error recovery should be initiated + * by starting implicit logout and recovery procedures. + * + * @param[in] itnim FCS or driver itnim instance + * + * return None + */ +void bfa_cb_itnim_sler(void *itnim); + +/* + * bfa ioim API functions + */ +struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa, + struct bfad_ioim_s *dio, + struct bfa_itnim_s *itnim, + u16 nsgles); + +void bfa_ioim_free(struct bfa_ioim_s *ioim); +void bfa_ioim_start(struct bfa_ioim_s *ioim); +bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim); +void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, + bfa_boolean_t iotov); + + +/** + * I/O completion notification. + * + * @param[in] dio driver IO structure + * @param[in] io_status IO completion status + * @param[in] scsi_status SCSI status returned by target + * @param[in] sns_len SCSI sense length, 0 if none + * @param[in] sns_info SCSI sense data, if any + * @param[in] residue Residual length + * + * @return None + */ +void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, + enum bfi_ioim_status io_status, + u8 scsi_status, int sns_len, + u8 *sns_info, s32 residue); + +/** + * I/O good completion notification. + * + * @param[in] dio driver IO structure + * + * @return None + */ +void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); + +/** + * I/O abort completion notification + * + * @param[in] dio driver IO that was aborted + * + * @return None + */ +void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio); + +/* + * bfa tskim API functions + */ +struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa, + struct bfad_tskim_s *dtsk); +void bfa_tskim_free(struct bfa_tskim_s *tskim); +void bfa_tskim_start(struct bfa_tskim_s *tskim, + struct bfa_itnim_s *itnim, lun_t lun, + enum fcp_tm_cmnd tm, u8 t_secs); +void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, + enum bfi_tskim_status tsk_status); + +#endif /* __BFA_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcpim_priv.h b/drivers/scsi/bfa/bfa_fcpim_priv.h deleted file mode 100644 index 762516cb5cb2..000000000000 --- a/drivers/scsi/bfa/bfa_fcpim_priv.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FCPIM_PRIV_H__ -#define __BFA_FCPIM_PRIV_H__ - -#include -#include -#include -#include "bfa_sgpg_priv.h" - -#define BFA_ITNIM_MIN 32 -#define BFA_ITNIM_MAX 1024 - -#define BFA_IOIM_MIN 8 -#define BFA_IOIM_MAX 2000 - -#define BFA_TSKIM_MIN 4 -#define BFA_TSKIM_MAX 512 -#define BFA_FCPIM_PATHTOV_DEF (30 * 1000) /* in millisecs */ -#define BFA_FCPIM_PATHTOV_MAX (90 * 1000) /* in millisecs */ - -#define bfa_fcpim_stats(__fcpim, __stats) \ - ((__fcpim)->stats.__stats++) - -struct bfa_fcpim_mod_s { - struct bfa_s *bfa; - struct bfa_itnim_s *itnim_arr; - struct bfa_ioim_s *ioim_arr; - struct bfa_ioim_sp_s *ioim_sp_arr; - struct bfa_tskim_s *tskim_arr; - struct bfa_dma_s snsbase; - int num_itnims; - int num_ioim_reqs; - int num_tskim_reqs; - u32 path_tov; - u16 q_depth; - u8 reqq; /* Request queue to be used */ - u8 rsvd; - struct list_head itnim_q; /* queue of active itnim */ - struct list_head ioim_free_q; /* free IO resources */ - struct list_head ioim_resfree_q; /* IOs waiting for f/w */ - struct list_head ioim_comp_q; /* IO global comp Q */ - struct list_head tskim_free_q; - u32 ios_active; /* current active IOs */ - u32 delay_comp; - struct bfa_fcpim_stats_s stats; - bfa_boolean_t ioredirect; -}; - -struct bfa_ioim_s; -struct bfa_tskim_s; - -/** - * BFA IO (initiator mode) - */ -struct bfa_ioim_s { - struct list_head qe; /* queue elememt */ - bfa_sm_t sm; /* BFA ioim state machine */ - struct bfa_s *bfa; /* BFA module */ - struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ - struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ - struct bfad_ioim_s *dio; /* driver IO handle */ - u16 iotag; /* FWI IO tag */ - u16 abort_tag; /* unqiue abort request tag */ - u16 nsges; /* number of SG elements */ - u16 nsgpgs; /* number of SG pages */ - struct bfa_sgpg_s *sgpg; /* first SG page */ - struct list_head sgpg_q; /* allocated SG pages */ - struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ - bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ - struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ - u8 reqq; /* Request queue for I/O */ -}; - -struct bfa_ioim_sp_s { - struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ - u8 *snsinfo; /* sense info for this IO */ - struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ - struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ - bfa_boolean_t abort_explicit; /* aborted by OS */ - struct bfa_tskim_s *tskim; /* Relevant TM cmd */ -}; - -/** - * BFA Task management command (initiator mode) - */ -struct bfa_tskim_s { - struct list_head qe; - bfa_sm_t sm; - struct bfa_s *bfa; /* BFA module */ - struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ - struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ - struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ - bfa_boolean_t notify; /* notify itnim on TM comp */ - lun_t lun; /* lun if applicable */ - enum fcp_tm_cmnd tm_cmnd; /* task management command */ - u16 tsk_tag; /* FWI IO tag */ - u8 tsecs; /* timeout in seconds */ - struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ - struct list_head 
io_q; /* queue of affected IOs */ - struct bfa_wc_s wc; /* waiting counter */ - struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ - enum bfi_tskim_status tsk_status; /* TM status */ -}; - -/** - * BFA i-t-n (initiator mode) - */ -struct bfa_itnim_s { - struct list_head qe; /* queue element */ - bfa_sm_t sm; /* i-t-n im BFA state machine */ - struct bfa_s *bfa; /* bfa instance */ - struct bfa_rport_s *rport; /* bfa rport */ - void *ditn; /* driver i-t-n structure */ - struct bfi_mhdr_s mhdr; /* pre-built mhdr */ - u8 msg_no; /* itnim/rport firmware handle */ - u8 reqq; /* CQ for requests */ - struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ - struct list_head pending_q; /* queue of pending IO requests*/ - struct list_head io_q; /* queue of active IO requests */ - struct list_head io_cleanup_q; /* IO being cleaned up */ - struct list_head tsk_q; /* queue of active TM commands */ - struct list_head delay_comp_q;/* queue of failed inflight cmds */ - bfa_boolean_t seq_rec; /* SQER supported */ - bfa_boolean_t is_online; /* itnim is ONLINE for IO */ - bfa_boolean_t iotov_active; /* IO TOV timer is active */ - struct bfa_wc_s wc; /* waiting counter */ - struct bfa_timer_s timer; /* pending IO TOV */ - struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ - struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ - struct bfa_itnim_hal_stats_s stats; - struct bfa_itnim_latency_s io_latency; -}; - -#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) -#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) -#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ - (&fcpim->ioim_arr[_iotag]) -#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \ - (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) - -/* - * function prototypes - */ -void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, - struct bfa_meminfo_s *minfo); -void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim); -void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_ioim_good_comp_isr(struct bfa_s *bfa, - struct bfi_msg_s *msg); -void bfa_ioim_cleanup(struct bfa_ioim_s *ioim); -void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, - struct bfa_tskim_s *tskim); -void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim); -void bfa_ioim_tov(struct bfa_ioim_s *ioim); - -void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, - struct bfa_meminfo_s *minfo); -void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim); -void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_tskim_iodone(struct bfa_tskim_s *tskim); -void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); -void bfa_tskim_cleanup(struct bfa_tskim_s *tskim); - -void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len); -void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, - struct bfa_meminfo_s *minfo); -void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim); -void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim); -void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -void bfa_itnim_iodone(struct bfa_itnim_s *itnim); -void bfa_itnim_tskdone(struct bfa_itnim_s *itnim); -bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim); - -#endif /* __BFA_FCPIM_PRIV_H__ */ - diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c deleted file mode 100644 index 76867b5577fa..000000000000 --- a/drivers/scsi/bfa/bfa_fcport.c +++ /dev/null @@ -1,1962 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -BFA_TRC_FILE(HAL, FCPORT); -BFA_MODULE(fcport); - -/* - * The port is considered disabled if corresponding physical port or IOC are - * disabled explicitly - */ -#define BFA_PORT_IS_DISABLED(bfa) \ - ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ - (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) - -/* - * forward declarations - */ -static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); -static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); -static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); -static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); -static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); -static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); -static void bfa_fcport_callback(struct bfa_fcport_s *fcport, - enum bfa_pport_linkstate event); -static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, - enum bfa_pport_linkstate event); -static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); -static void bfa_fcport_stats_get_timeout(void *cbarg); -static void bfa_fcport_stats_clr_timeout(void *cbarg); - -/** - * bfa_pport_private - */ - -/** - * BFA port state machine events - */ -enum bfa_fcport_sm_event { - BFA_FCPORT_SM_START = 1, /* start port state machine */ - BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ - BFA_FCPORT_SM_ENABLE = 3, /* enable port */ - BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ - BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ - BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ - BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ - BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ - BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ -}; - -/** - * BFA port link notification state machine events - */ - -enum bfa_fcport_ln_sm_event { - BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ - BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ - BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ -}; - -static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); -static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, - enum 
bfa_fcport_sm_event event); -static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event); - -static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); -static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); -static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); -static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); -static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); -static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); -static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event); - -static struct bfa_sm_table_s hal_pport_sm_table[] = { - {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT}, - {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, - {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING}, - {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, - {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP}, - {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT}, - {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING}, - {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED}, - {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED}, - {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, - {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN}, -}; - -static void -bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) -{ - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = fcport->bfa->logm; - wwn_t pwwn = fcport->pwwn; - char pwwn_ptr[BFA_STRING_32]; - - memset(&aen_data, 0, sizeof(aen_data)); - wwn2str(pwwn_ptr, pwwn); - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr); - - aen_data.port.ioc_type = bfa_get_type(fcport->bfa); - aen_data.port.pwwn = pwwn; -} - -static void -bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_START: - /** - * Start event after IOC is configured and BFA is started. - */ - if (bfa_fcport_send_enable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - break; - - case BFA_FCPORT_SM_ENABLE: - /** - * Port is persistently configured to be in enabled state. Do - * not change state. Port enabling is done when START event is - * received. - */ - break; - - case BFA_FCPORT_SM_DISABLE: - /** - * If a port is persistently configured to be disabled, the - * first event will a port disable request. - */ - bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_QRESUME: - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); - bfa_fcport_send_enable(fcport); - break; - - case BFA_FCPORT_SM_STOP: - bfa_reqq_wcancel(&fcport->reqq_wait); - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - break; - - case BFA_FCPORT_SM_ENABLE: - /** - * Already enable is in progress. 
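Editor's sketch: hal_pport_sm_table[] and the bfa_fcport_sm_* handlers above implement a table-driven state machine: the current state is a pointer to the handler function, events are delivered by calling it, and the table maps a handler back to a reportable state id. A minimal, self-contained rendering of that pattern follows; all identifiers are illustrative and deliberately not the driver's bfa_sm_* symbols.

#include <stdio.h>

enum demo_event { EV_START, EV_ENABLE, EV_HWFAIL };
enum demo_state_id { ST_UNINIT, ST_ENABLING, ST_IOCDOWN };

struct demo_port;
typedef void (*demo_sm_t)(struct demo_port *p, enum demo_event ev);

struct demo_port { demo_sm_t sm; };

static void sm_uninit(struct demo_port *p, enum demo_event ev);
static void sm_enabling(struct demo_port *p, enum demo_event ev);
static void sm_iocdown(struct demo_port *p, enum demo_event ev);

static void sm_uninit(struct demo_port *p, enum demo_event ev)
{
	if (ev == EV_START)
		p->sm = sm_enabling;       /* like bfa_sm_set_state() */
	else if (ev == EV_HWFAIL)
		p->sm = sm_iocdown;
}

static void sm_enabling(struct demo_port *p, enum demo_event ev)
{
	if (ev == EV_HWFAIL)
		p->sm = sm_iocdown;
	/* EV_ENABLE while already enabling is ignored, as in the handlers above */
}

static void sm_iocdown(struct demo_port *p, enum demo_event ev)
{
	(void)p; (void)ev;                 /* terminal state for this sketch */
}

static const struct { demo_sm_t fn; enum demo_state_id id; } sm_table[] = {
	{ sm_uninit, ST_UNINIT },
	{ sm_enabling, ST_ENABLING },
	{ sm_iocdown, ST_IOCDOWN },
};

static enum demo_state_id sm_to_state(demo_sm_t fn)
{
	unsigned int i;

	for (i = 0; i < sizeof(sm_table) / sizeof(sm_table[0]); i++)
		if (sm_table[i].fn == fn)
			return sm_table[i].id;
	return ST_UNINIT;
}

int main(void)
{
	struct demo_port port = { .sm = sm_uninit };

	port.sm(&port, EV_START);          /* like bfa_sm_send_event() */
	printf("state id = %d\n", sm_to_state(port.sm));
	return 0;
}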
- */ - break; - - case BFA_FCPORT_SM_DISABLE: - /** - * Just send disable request to firmware when room becomes - * available in request queue. - */ - bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); - bfa_reqq_wcancel(&fcport->reqq_wait); - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); - break; - - case BFA_FCPORT_SM_LINKUP: - case BFA_FCPORT_SM_LINKDOWN: - /** - * Possible to get link events when doing back-to-back - * enable/disables. - */ - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_reqq_wcancel(&fcport->reqq_wait); - bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_FWRSP: - case BFA_FCPORT_SM_LINKDOWN: - bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); - break; - - case BFA_FCPORT_SM_LINKUP: - bfa_fcport_update_linkinfo(fcport); - bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); - - bfa_assert(fcport->event_cbfn); - bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); - break; - - case BFA_FCPORT_SM_ENABLE: - /** - * Already being enabled. - */ - break; - - case BFA_FCPORT_SM_DISABLE: - if (bfa_fcport_send_disable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); - - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); - break; - - case BFA_FCPORT_SM_STOP: - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_LINKUP: - bfa_fcport_update_linkinfo(fcport); - bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); - bfa_assert(fcport->event_cbfn); - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); - - if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { - - bfa_trc(fcport->bfa, - pevent->link_state.vc_fcf.fcf.fipenabled); - bfa_trc(fcport->bfa, - pevent->link_state.vc_fcf.fcf.fipfailed); - - if (pevent->link_state.vc_fcf.fcf.fipfailed) - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_FIP_FCF_DISC, 0, - "FIP FCF Discovery Failed"); - else - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_FIP_FCF_DISC, 0, - "FIP FCF Discovered"); - } - - bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); - /** - * If QoS is enabled and it is not online, - * Send a separate event. - */ - if ((fcport->cfg.qos_enabled) - && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE)) - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); - - break; - - case BFA_FCPORT_SM_LINKDOWN: - /** - * Possible to get link down event. - */ - break; - - case BFA_FCPORT_SM_ENABLE: - /** - * Already enabled. 
- */ - break; - - case BFA_FCPORT_SM_DISABLE: - if (bfa_fcport_send_disable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); - - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); - break; - - case BFA_FCPORT_SM_STOP: - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_ENABLE: - /** - * Already enabled. - */ - break; - - case BFA_FCPORT_SM_DISABLE: - if (bfa_fcport_send_disable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); - - bfa_fcport_reset_linkinfo(fcport); - bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); - break; - - case BFA_FCPORT_SM_LINKDOWN: - bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); - bfa_fcport_reset_linkinfo(fcport); - bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); - if (BFA_PORT_IS_DISABLED(fcport->bfa)) - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); - else - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); - break; - - case BFA_FCPORT_SM_STOP: - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - bfa_fcport_reset_linkinfo(fcport); - if (BFA_PORT_IS_DISABLED(fcport->bfa)) - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); - else - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); - bfa_fcport_reset_linkinfo(fcport); - bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); - if (BFA_PORT_IS_DISABLED(fcport->bfa)) - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); - else - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_QRESUME: - bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); - bfa_fcport_send_disable(fcport); - break; - - case BFA_FCPORT_SM_STOP: - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - bfa_reqq_wcancel(&fcport->reqq_wait); - break; - - case BFA_FCPORT_SM_DISABLE: - /** - * Already being disabled. - */ - break; - - case BFA_FCPORT_SM_LINKUP: - case BFA_FCPORT_SM_LINKDOWN: - /** - * Possible to get link events when doing back-to-back - * enable/disables. 
- */ - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); - bfa_reqq_wcancel(&fcport->reqq_wait); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_FWRSP: - bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); - break; - - case BFA_FCPORT_SM_DISABLE: - /** - * Already being disabled. - */ - break; - - case BFA_FCPORT_SM_ENABLE: - if (bfa_fcport_send_enable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); - break; - - case BFA_FCPORT_SM_STOP: - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - break; - - case BFA_FCPORT_SM_LINKUP: - case BFA_FCPORT_SM_LINKDOWN: - /** - * Possible to get link events when doing back-to-back - * enable/disables. - */ - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_START: - /** - * Ignore start event for a port that is disabled. - */ - break; - - case BFA_FCPORT_SM_STOP: - bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); - break; - - case BFA_FCPORT_SM_ENABLE: - if (bfa_fcport_send_enable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - - bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, - BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); - bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); - break; - - case BFA_FCPORT_SM_DISABLE: - /** - * Already disabled. - */ - break; - - case BFA_FCPORT_SM_HWFAIL: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); - break; - - default: - bfa_sm_fault(fcport->bfa, event); - } -} - -static void -bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_START: - if (bfa_fcport_send_enable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - break; - - default: - /** - * Ignore all other events. - */ - ; - } -} - -/** - * Port is enabled. IOC is down/failed. - */ -static void -bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_START: - if (bfa_fcport_send_enable(fcport)) - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); - else - bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - break; - - default: - /** - * Ignore all events. - */ - ; - } -} - -/** - * Port is disabled. IOC is down/failed. - */ -static void -bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, - enum bfa_fcport_sm_event event) -{ - bfa_trc(fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_SM_START: - bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); - break; - - case BFA_FCPORT_SM_ENABLE: - bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); - break; - - default: - /** - * Ignore all events. 
- */ - ; - } -} - -/** - * Link state is down - */ -static void -bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKUP: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); - bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * Link state is waiting for down notification - */ -static void -bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKUP: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); - break; - - case BFA_FCPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * Link state is waiting for down notification and there is a pending up - */ -static void -bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); - break; - - case BFA_FCPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); - bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * Link state is up - */ -static void -bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); - bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * Link state is waiting for up notification - */ -static void -bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); - break; - - case BFA_FCPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * Link state is waiting for up notification and there is a pending down - */ -static void -bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKUP: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); - break; - - case BFA_FCPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); - bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * Link state is waiting for up notification and there are pending down and up - */ -static void -bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, - enum bfa_fcport_ln_sm_event event) -{ - bfa_trc(ln->fcport->bfa, event); - - switch (event) { - case BFA_FCPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); - break; - - case BFA_FCPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); - bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); - break; - - default: - bfa_sm_fault(ln->fcport->bfa, event); - } -} - -/** - * bfa_pport_private - */ - -static void -__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_ln_s *ln = cbarg; - - if (complete) - 
ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); - else - bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); -} - -static void -bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event) -{ - if (fcport->bfa->fcs) { - fcport->event_cbfn(fcport->event_cbarg, event); - return; - } - - switch (event) { - case BFA_PPORT_LINKUP: - bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); - break; - case BFA_PPORT_LINKDOWN: - bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); - break; - default: - bfa_assert(0); - } -} - -static void -bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event) -{ - ln->ln_event = event; - bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); -} - -#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ - BFA_CACHELINE_SZ)) - -static void -bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len) -{ - *dm_len += FCPORT_STATS_DMA_SZ; -} - -static void -bfa_fcport_qresume(void *cbarg) -{ - struct bfa_fcport_s *fcport = cbarg; - - bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); -} - -static void -bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) -{ - u8 *dm_kva; - u64 dm_pa; - - dm_kva = bfa_meminfo_dma_virt(meminfo); - dm_pa = bfa_meminfo_dma_phys(meminfo); - - fcport->stats_kva = dm_kva; - fcport->stats_pa = dm_pa; - fcport->stats = (union bfa_fcport_stats_u *)dm_kva; - - dm_kva += FCPORT_STATS_DMA_SZ; - dm_pa += FCPORT_STATS_DMA_SZ; - - bfa_meminfo_dma_virt(meminfo) = dm_kva; - bfa_meminfo_dma_phys(meminfo) = dm_pa; -} - -/** - * Memory initialization. - */ -static void -bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - struct bfa_pport_cfg_s *port_cfg = &fcport->cfg; - struct bfa_fcport_ln_s *ln = &fcport->ln; - struct bfa_timeval_s tv; - - bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); - fcport->bfa = bfa; - ln->fcport = fcport; - - bfa_fcport_mem_claim(fcport, meminfo); - - bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); - bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); - - /** - * initialize time stamp for stats reset - */ - bfa_os_gettimeofday(&tv); - fcport->stats_reset_time = tv.tv_sec; - - /** - * initialize and set default configuration - */ - port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P; - port_cfg->speed = BFA_PPORT_SPEED_AUTO; - port_cfg->trunked = BFA_FALSE; - port_cfg->maxfrsize = 0; - - port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS; - - bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); -} - -static void -bfa_fcport_detach(struct bfa_s *bfa) -{ -} - -/** - * Called when IOC is ready. - */ -static void -bfa_fcport_start(struct bfa_s *bfa) -{ - bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); -} - -/** - * Called before IOC is stopped. - */ -static void -bfa_fcport_stop(struct bfa_s *bfa) -{ - bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP); -} - -/** - * Called when IOC failure is detected. 
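Editor's sketch: bfa_fcport_mem_claim() above carves the port's statistics area out of a DMA region that was sized earlier by bfa_fcport_meminfo(), advancing a virtual-address cursor and a bus-address cursor in lockstep. Below is a host-only illustration of that carving idiom under the assumption of a simple linear pool; there is no real DMA here and the names are invented.

#include <stdio.h>
#include <stdint.h>

struct demo_meminfo {
	uint8_t  *kva;          /* next free virtual address */
	uint64_t  pa;           /* matching "bus address" cursor */
};

#define DEMO_STATS_SZ 256u      /* a rounded-up block size, like FCPORT_STATS_DMA_SZ */

static void *demo_claim(struct demo_meminfo *mi, size_t sz, uint64_t *pa_out)
{
	void *kva = mi->kva;

	*pa_out = mi->pa;
	mi->kva += sz;          /* both cursors advance together */
	mi->pa  += sz;
	return kva;
}

int main(void)
{
	uint8_t pool[4 * DEMO_STATS_SZ];   /* a stack array stands in for the DMA pool */
	struct demo_meminfo mi = { .kva = pool, .pa = 0x1000 };
	uint64_t stats_pa;
	void *stats_kva = demo_claim(&mi, DEMO_STATS_SZ, &stats_pa);

	printf("stats block at %p, bus addr 0x%llx\n",
	       stats_kva, (unsigned long long)stats_pa);
	return 0;
}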
- */ -static void -bfa_fcport_iocdisable(struct bfa_s *bfa) -{ - bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL); -} - -static void -bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) -{ - struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; - - fcport->speed = pevent->link_state.speed; - fcport->topology = pevent->link_state.topology; - - if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP) - fcport->myalpa = 0; - - /* - * QoS Details - */ - bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); - bfa_os_assign(fcport->qos_vc_attr, - pevent->link_state.vc_fcf.qos_vc_attr); - - - bfa_trc(fcport->bfa, fcport->speed); - bfa_trc(fcport->bfa, fcport->topology); -} - -static void -bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) -{ - fcport->speed = BFA_PPORT_SPEED_UNKNOWN; - fcport->topology = BFA_PPORT_TOPOLOGY_NONE; -} - -/** - * Send port enable message to firmware. - */ -static bfa_boolean_t -bfa_fcport_send_enable(struct bfa_fcport_s *fcport) -{ - struct bfi_fcport_enable_req_s *m; - - /** - * Increment message tag before queue check, so that responses to old - * requests are discarded. - */ - fcport->msgtag++; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - if (!m) { - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->reqq_wait); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, - bfa_lpuid(fcport->bfa)); - m->nwwn = fcport->nwwn; - m->pwwn = fcport->pwwn; - m->port_cfg = fcport->cfg; - m->msgtag = fcport->msgtag; - m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); - bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); - bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); - bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); - return BFA_TRUE; -} - -/** - * Send port disable message to firmware. - */ -static bfa_boolean_t -bfa_fcport_send_disable(struct bfa_fcport_s *fcport) -{ - struct bfi_fcport_req_s *m; - - /** - * Increment message tag before queue check, so that responses to old - * requests are discarded. 
- */ - fcport->msgtag++; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - if (!m) { - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->reqq_wait); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, - bfa_lpuid(fcport->bfa)); - m->msgtag = fcport->msgtag; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); - - return BFA_TRUE; -} - -static void -bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) -{ - fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc); - fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc); - - bfa_trc(fcport->bfa, fcport->pwwn); - bfa_trc(fcport->bfa, fcport->nwwn); -} - -static void -bfa_fcport_send_txcredit(void *port_cbarg) -{ - - struct bfa_fcport_s *fcport = port_cbarg; - struct bfi_fcport_set_svc_params_req_s *m; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - if (!m) { - bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit); - return; - } - - bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, - bfa_lpuid(fcport->bfa)); - m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit); - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, - struct bfa_qos_stats_s *s) -{ - u32 *dip = (u32 *) d; - u32 *sip = (u32 *) s; - int i; - - /* Now swap the 32 bit fields */ - for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) - dip[i] = bfa_os_ntohl(sip[i]); -} - -static void -bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, - struct bfa_fcoe_stats_s *s) -{ - u32 *dip = (u32 *) d; - u32 *sip = (u32 *) s; - int i; - - for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); - i = i + 2) { -#ifdef __BIGENDIAN - dip[i] = bfa_os_ntohl(sip[i]); - dip[i + 1] = bfa_os_ntohl(sip[i + 1]); -#else - dip[i] = bfa_os_ntohl(sip[i + 1]); - dip[i + 1] = bfa_os_ntohl(sip[i]); -#endif - } -} - -static void -__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_s *fcport = cbarg; - - if (complete) { - if (fcport->stats_status == BFA_STATUS_OK) { - struct bfa_timeval_s tv; - - /* Swap FC QoS or FCoE stats */ - if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { - bfa_fcport_qos_stats_swap( - &fcport->stats_ret->fcqos, - &fcport->stats->fcqos); - } else { - bfa_fcport_fcoe_stats_swap( - &fcport->stats_ret->fcoe, - &fcport->stats->fcoe); - - bfa_os_gettimeofday(&tv); - fcport->stats_ret->fcoe.secs_reset = - tv.tv_sec - fcport->stats_reset_time; - } - } - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); - } else { - fcport->stats_busy = BFA_FALSE; - fcport->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_fcport_stats_get_timeout(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - - bfa_trc(fcport->bfa, fcport->stats_qfull); - - if (fcport->stats_qfull) { - bfa_reqq_wcancel(&fcport->stats_reqq_wait); - fcport->stats_qfull = BFA_FALSE; - } - - fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, - fcport); -} - -static void -bfa_fcport_send_stats_get(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - struct bfi_fcport_req_s *msg; - - msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, - 
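Editor's sketch: bfa_fcport_send_enable() and bfa_fcport_send_disable() above follow the same request-queue discipline: bump the message tag, try to claim a queue element, and if the queue is full park a wait element whose callback re-drives the send once space frees up (the state machines see this as a QRESUME event). The toy, single-threaded sketch below shows only that produce-or-wait shape; it is not the bfa_reqq_* implementation.

#include <stdbool.h>
#include <stdio.h>

struct demo_reqq {
	int   free_slots;
	void (*resume)(void *arg);   /* one pending waiter is enough for the sketch */
	void *resume_arg;
};

static bool demo_try_send(struct demo_reqq *q, const char *msg,
			  void (*resume)(void *), void *arg)
{
	if (q->free_slots == 0) {
		q->resume = resume;          /* like bfa_reqq_wait() */
		q->resume_arg = arg;
		return false;
	}
	q->free_slots--;                     /* like bfa_reqq_produce() */
	printf("queued: %s\n", msg);
	return true;
}

static void demo_release(struct demo_reqq *q)
{
	q->free_slots++;
	if (q->resume) {                     /* like the QRESUME path */
		void (*fn)(void *) = q->resume;

		q->resume = NULL;
		fn(q->resume_arg);
	}
}

static void demo_resume_enable(void *arg)
{
	demo_try_send(arg, "port enable (resumed)", NULL, NULL);
}

int main(void)
{
	struct demo_reqq q = { .free_slots = 0 };

	if (!demo_try_send(&q, "port enable", demo_resume_enable, &q))
		printf("queue full, waiting\n");
	demo_release(&q);                    /* space frees up -> resume fires */
	return 0;
}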
bfa_fcport_send_stats_get, fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); - bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_s *fcport = cbarg; - - if (complete) { - struct bfa_timeval_s tv; - - /** - * re-initialize time stamp for stats reset - */ - bfa_os_gettimeofday(&tv); - fcport->stats_reset_time = tv.tv_sec; - - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); - } else { - fcport->stats_busy = BFA_FALSE; - fcport->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_fcport_stats_clr_timeout(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - - bfa_trc(fcport->bfa, fcport->stats_qfull); - - if (fcport->stats_qfull) { - bfa_reqq_wcancel(&fcport->stats_reqq_wait); - fcport->stats_qfull = BFA_FALSE; - } - - fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats_clr, fcport); -} - -static void -bfa_fcport_send_stats_clear(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - struct bfi_fcport_req_s *msg; - - msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, - bfa_fcport_send_stats_clear, fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); - bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -/** - * bfa_pport_public - */ - -/** - * Called to initialize port attributes - */ -void -bfa_fcport_init(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - /** - * Initialize port attributes from IOC hardware data. - */ - bfa_fcport_set_wwns(fcport); - if (fcport->cfg.maxfrsize == 0) - fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); - fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); - fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); - - bfa_assert(fcport->cfg.maxfrsize); - bfa_assert(fcport->cfg.rx_bbcredit); - bfa_assert(fcport->speed_sup); -} - - -/** - * Firmware message handler. 
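Editor's sketch: bfa_fcport_qos_stats_swap() and bfa_fcport_fcoe_stats_swap() earlier in this file convert firmware statistics from big-endian wire order to host order by walking the structure as an array of 32-bit words; for the 64-bit FCoE counters, the two halves of each counter also trade places on little-endian hosts. A small self-contained version of that conversion, with an invented two-field structure standing in for the real stats layout (the byte-order macros are GCC/Clang predefines):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl()/htonl(), POSIX */

struct demo_stats64 {            /* imagine every field is a big-endian 64-bit counter */
	uint64_t tx_frames;
	uint64_t rx_frames;
};

static void demo_stats64_swap(struct demo_stats64 *dst,
			      const struct demo_stats64 *src)
{
	uint32_t *d = (uint32_t *)dst;
	const uint32_t *s = (const uint32_t *)src;
	size_t i;

	for (i = 0; i < sizeof(*src) / sizeof(uint32_t); i += 2) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		d[i]     = ntohl(s[i]);
		d[i + 1] = ntohl(s[i + 1]);
#else
		d[i]     = ntohl(s[i + 1]);  /* the 32-bit halves swap too */
		d[i + 1] = ntohl(s[i]);
#endif
	}
}

int main(void)
{
	struct demo_stats64 wire = { 0 }, host;

	/* pretend the firmware reported tx_frames == 1 in big-endian */
	((uint32_t *)&wire)[0] = htonl(0);
	((uint32_t *)&wire)[1] = htonl(1);
	demo_stats64_swap(&host, &wire);
	printf("tx_frames = %llu\n", (unsigned long long)host.tx_frames);
	return 0;
}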
- */ -void -bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - union bfi_fcport_i2h_msg_u i2hmsg; - - i2hmsg.msg = msg; - fcport->event_arg.i2hmsg = i2hmsg; - - switch (msg->mhdr.msg_id) { - case BFI_FCPORT_I2H_ENABLE_RSP: - if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) - bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); - break; - - case BFI_FCPORT_I2H_DISABLE_RSP: - if (fcport->msgtag == i2hmsg.pdisable_rsp->msgtag) - bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); - break; - - case BFI_FCPORT_I2H_EVENT: - switch (i2hmsg.event->link_state.linkstate) { - case BFA_PPORT_LINKUP: - bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); - break; - case BFA_PPORT_LINKDOWN: - bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); - break; - case BFA_PPORT_TRUNK_LINKDOWN: - /** todo: event notification */ - break; - } - break; - - case BFI_FCPORT_I2H_STATS_GET_RSP: - /* - * check for timer pop before processing the rsp - */ - if (fcport->stats_busy == BFA_FALSE || - fcport->stats_status == BFA_STATUS_ETIMER) - break; - - bfa_timer_stop(&fcport->timer); - fcport->stats_status = i2hmsg.pstatsget_rsp->status; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats_get, fcport); - break; - - case BFI_FCPORT_I2H_STATS_CLEAR_RSP: - /* - * check for timer pop before processing the rsp - */ - if (fcport->stats_busy == BFA_FALSE || - fcport->stats_status == BFA_STATUS_ETIMER) - break; - - bfa_timer_stop(&fcport->timer); - fcport->stats_status = BFA_STATUS_OK; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats_clr, fcport); - break; - - default: - bfa_assert(0); - break; - } -} - -/** - * bfa_pport_api - */ - -/** - * Registered callback for port events. - */ -void -bfa_fcport_event_register(struct bfa_s *bfa, - void (*cbfn) (void *cbarg, bfa_pport_event_t event), - void *cbarg) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - fcport->event_cbfn = cbfn; - fcport->event_cbarg = cbarg; -} - -bfa_status_t -bfa_fcport_enable(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - - /* if port is PBC disabled, return error */ - if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { - bfa_trc(bfa, fcport->pwwn); - return BFA_STATUS_PBC; - } - - if (bfa_ioc_is_disabled(&bfa->ioc)) - return BFA_STATUS_IOC_DISABLED; - - if (fcport->diag_busy) - return BFA_STATUS_DIAG_BUSY; - else if (bfa_sm_cmp_state - (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait)) - return BFA_STATUS_DEVBUSY; - - bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); - return BFA_STATUS_OK; -} - -bfa_status_t -bfa_fcport_disable(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - - /* if port is PBC disabled, return error */ - if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { - bfa_trc(bfa, fcport->pwwn); - return BFA_STATUS_PBC; - } - - bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); - return BFA_STATUS_OK; -} - -/** - * Configure port speed. 
- */ -bfa_status_t -bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, speed); - - if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { - bfa_trc(bfa, fcport->speed_sup); - return BFA_STATUS_UNSUPP_SPEED; - } - - fcport->cfg.speed = speed; - - return BFA_STATUS_OK; -} - -/** - * Get current speed. - */ -enum bfa_pport_speed -bfa_fcport_get_speed(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->speed; -} - -/** - * Configure port topology. - */ -bfa_status_t -bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, topology); - bfa_trc(bfa, fcport->cfg.topology); - - switch (topology) { - case BFA_PPORT_TOPOLOGY_P2P: - case BFA_PPORT_TOPOLOGY_LOOP: - case BFA_PPORT_TOPOLOGY_AUTO: - break; - - default: - return BFA_STATUS_EINVAL; - } - - fcport->cfg.topology = topology; - return BFA_STATUS_OK; -} - -/** - * Get current topology. - */ -enum bfa_pport_topology -bfa_fcport_get_topology(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->topology; -} - -bfa_status_t -bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, alpa); - bfa_trc(bfa, fcport->cfg.cfg_hardalpa); - bfa_trc(bfa, fcport->cfg.hardalpa); - - fcport->cfg.cfg_hardalpa = BFA_TRUE; - fcport->cfg.hardalpa = alpa; - - return BFA_STATUS_OK; -} - -bfa_status_t -bfa_fcport_clr_hardalpa(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, fcport->cfg.cfg_hardalpa); - bfa_trc(bfa, fcport->cfg.hardalpa); - - fcport->cfg.cfg_hardalpa = BFA_FALSE; - return BFA_STATUS_OK; -} - -bfa_boolean_t -bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - *alpa = fcport->cfg.hardalpa; - return fcport->cfg.cfg_hardalpa; -} - -u8 -bfa_fcport_get_myalpa(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->myalpa; -} - -bfa_status_t -bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, maxfrsize); - bfa_trc(bfa, fcport->cfg.maxfrsize); - - /* - * with in range - */ - if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) - return BFA_STATUS_INVLD_DFSZ; - - /* - * power of 2, if not the max frame size of 2112 - */ - if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) - return BFA_STATUS_INVLD_DFSZ; - - fcport->cfg.maxfrsize = maxfrsize; - return BFA_STATUS_OK; -} - -u16 -bfa_fcport_get_maxfrsize(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->cfg.maxfrsize; -} - -u32 -bfa_fcport_mypid(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->mypid; -} - -u8 -bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->cfg.rx_bbcredit; -} - -void -bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - fcport->cfg.tx_bbcredit = (u8) tx_bbcredit; - bfa_fcport_send_txcredit(fcport); -} - -/** - * Get port attributes. 
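Editor's sketch: bfa_fcport_cfg_maxfrsize() above accepts either the maximum FC payload size or any power of two inside the legal range, using the usual x & (x - 1) test. The check in isolation, with placeholder limits rather than the real FC_MIN_PDUSZ/FC_MAX_PDUSZ constants:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define DEMO_MIN_FRSIZE  256u
#define DEMO_MAX_FRSIZE 2112u    /* FC max payload, matching the "2112" in the comment above */

static bool demo_frsize_valid(uint16_t frsize)
{
	if (frsize > DEMO_MAX_FRSIZE || frsize < DEMO_MIN_FRSIZE)
		return false;
	/* anything other than the maximum must be a power of two */
	if (frsize != DEMO_MAX_FRSIZE && (frsize & (frsize - 1)))
		return false;
	return true;
}

int main(void)
{
	uint16_t tests[] = { 512, 2000, 2048, 2112, 4096 };
	size_t i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%u -> %s\n", tests[i],
		       demo_frsize_valid(tests[i]) ? "ok" : "rejected");
	return 0;
}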
- */ - -wwn_t -bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (node) - return fcport->nwwn; - else - return fcport->pwwn; -} - -void -bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - - bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s)); - - attr->nwwn = fcport->nwwn; - attr->pwwn = fcport->pwwn; - - attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); - attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); - - bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, - sizeof(struct bfa_pport_cfg_s)); - /* - * speed attributes - */ - attr->pport_cfg.speed = fcport->cfg.speed; - attr->speed_supported = fcport->speed_sup; - attr->speed = fcport->speed; - attr->cos_supported = FC_CLASS_3; - - /* - * topology attributes - */ - attr->pport_cfg.topology = fcport->cfg.topology; - attr->topology = fcport->topology; - - /* - * beacon attributes - */ - attr->beacon = fcport->beacon; - attr->link_e2e_beacon = fcport->link_e2e_beacon; - attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog); - - attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); - attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); - - /* PBC Disabled State */ - if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) - attr->port_state = BFA_PPORT_ST_PREBOOT_DISABLED; - else { - attr->port_state = bfa_sm_to_state( - hal_pport_sm_table, fcport->sm); - if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) - attr->port_state = BFA_PPORT_ST_IOCDIS; - else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) - attr->port_state = BFA_PPORT_ST_FWMISMATCH; - } -} - -#define BFA_FCPORT_STATS_TOV 1000 - -/** - * Fetch port attributes (FCQoS or FCoE). - */ -bfa_status_t -bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); - return BFA_STATUS_DEVBUSY; - } - - fcport->stats_busy = BFA_TRUE; - fcport->stats_ret = stats; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; - - bfa_fcport_send_stats_get(fcport); - - bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, - fcport, BFA_FCPORT_STATS_TOV); - return BFA_STATUS_OK; -} - -/** - * Reset port statistics (FCQoS or FCoE). 
- */ -bfa_status_t -bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); - return BFA_STATUS_DEVBUSY; - } - - fcport->stats_busy = BFA_TRUE; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; - - bfa_fcport_send_stats_clear(fcport); - - bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, - fcport, BFA_FCPORT_STATS_TOV); - return BFA_STATUS_OK; -} - -/** - * Fetch FCQoS port statistics - */ -bfa_status_t -bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg) -{ - /* Meaningful only for FC mode */ - bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); - - return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); -} - -/** - * Reset FCoE port statistics - */ -bfa_status_t -bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) -{ - /* Meaningful only for FC mode */ - bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); - - return bfa_fcport_clear_stats(bfa, cbfn, cbarg); -} - -/** - * Fetch FCQoS port statistics - */ -bfa_status_t -bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg) -{ - /* Meaningful only for FCoE mode */ - bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); - - return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); -} - -/** - * Reset FCoE port statistics - */ -bfa_status_t -bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) -{ - /* Meaningful only for FCoE mode */ - bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); - - return bfa_fcport_clear_stats(bfa, cbfn, cbarg); -} - -bfa_status_t -bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, bitmap); - bfa_trc(bfa, fcport->cfg.trunked); - bfa_trc(bfa, fcport->cfg.trunk_ports); - - if (!bitmap || (bitmap & (bitmap - 1))) - return BFA_STATUS_EINVAL; - - fcport->cfg.trunked = BFA_TRUE; - fcport->cfg.trunk_ports = bitmap; - - return BFA_STATUS_OK; -} - -void -bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state); - qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr); -} - -void -bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, - struct bfa_qos_vc_attr_s *qos_vc_attr) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; - u32 i = 0; - - qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); - qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit); - qos_vc_attr->elp_opmode_flags = - bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags); - - /* - * Individual VC info - */ - while (i < qos_vc_attr->total_vc_count) { - qos_vc_attr->vc_info[i].vc_credit = - bfa_vc_attr->vc_info[i].vc_credit; - qos_vc_attr->vc_info[i].borrow_credit = - bfa_vc_attr->vc_info[i].borrow_credit; - qos_vc_attr->vc_info[i].priority = - bfa_vc_attr->vc_info[i].priority; - ++i; - } -} - -/** - * Fetch port attributes. 
- */ -bfa_status_t -bfa_fcport_trunk_disable(struct bfa_s *bfa) -{ - return BFA_STATUS_OK; -} - -bfa_boolean_t -bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - *bitmap = fcport->cfg.trunk_ports; - return fcport->cfg.trunked; -} - -bfa_boolean_t -bfa_fcport_is_disabled(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) == - BFA_PPORT_ST_DISABLED; - -} - -bfa_boolean_t -bfa_fcport_is_ratelim(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; - -} - -void -bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); - - bfa_trc(bfa, on_off); - bfa_trc(bfa, fcport->cfg.qos_enabled); - - bfa_trc(bfa, ioc_type); - - if (ioc_type == BFA_IOC_TYPE_FC) { - fcport->cfg.qos_enabled = on_off; - /** - * Notify fcpim of the change in QoS state - */ - bfa_fcpim_update_ioredirect(bfa); - } -} - -void -bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, on_off); - bfa_trc(bfa, fcport->cfg.ratelimit); - - fcport->cfg.ratelimit = on_off; - if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) - fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; -} - -/** - * Configure default minimum ratelim speed - */ -bfa_status_t -bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, speed); - - /* - * Auto and speeds greater than the supported speed, are invalid - */ - if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) { - bfa_trc(bfa, fcport->speed_sup); - return BFA_STATUS_UNSUPP_SPEED; - } - - fcport->cfg.trl_def_speed = speed; - - return BFA_STATUS_OK; -} - -/** - * Get default minimum ratelim speed - */ -enum bfa_pport_speed -bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, fcport->cfg.trl_def_speed); - return fcport->cfg.trl_def_speed; - -} - -void -bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, status); - bfa_trc(bfa, fcport->diag_busy); - - fcport->diag_busy = status; -} - -void -bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, - bfa_boolean_t link_e2e_beacon) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - bfa_trc(bfa, beacon); - bfa_trc(bfa, link_e2e_beacon); - bfa_trc(bfa, fcport->beacon); - bfa_trc(bfa, fcport->link_e2e_beacon); - - fcport->beacon = beacon; - fcport->link_e2e_beacon = link_e2e_beacon; -} - -bfa_boolean_t -bfa_fcport_is_linkup(struct bfa_s *bfa) -{ - return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup); -} - -bfa_boolean_t -bfa_fcport_is_qos_enabled(struct bfa_s *bfa) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - return fcport->cfg.qos_enabled; -} diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index d1a99209bf5f..9cebbe30a678 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
* All rights reserved * www.brocade.com * @@ -19,35 +19,28 @@ * bfa_fcs.c BFA FCS main */ -#include -#include "fcs_port.h" -#include "fcs_uf.h" -#include "fcs_vport.h" -#include "fcs_rport.h" -#include "fcs_fabric.h" -#include "fcs_fcpim.h" -#include "fcs_fcptm.h" -#include "fcbuild.h" -#include "fcs.h" +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" #include "bfad_drv.h" -#include + +BFA_TRC_FILE(FCS, FCS); /** * FCS sub-modules */ struct bfa_fcs_mod_s { void (*attach) (struct bfa_fcs_s *fcs); - void (*modinit) (struct bfa_fcs_s *fcs); - void (*modexit) (struct bfa_fcs_s *fcs); + void (*modinit) (struct bfa_fcs_s *fcs); + void (*modexit) (struct bfa_fcs_s *fcs); }; #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } static struct bfa_fcs_mod_s fcs_modules[] = { - { bfa_fcs_pport_attach, NULL, NULL }, + { bfa_fcs_port_attach, NULL, NULL }, { bfa_fcs_uf_attach, NULL, NULL }, { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, - bfa_fcs_fabric_modexit }, + bfa_fcs_fabric_modexit }, }; /** @@ -57,8 +50,8 @@ static struct bfa_fcs_mod_s fcs_modules[] = { static void bfa_fcs_exit_comp(void *fcs_cbarg) { - struct bfa_fcs_s *fcs = fcs_cbarg; - struct bfad_s *bfad = fcs->bfad; + struct bfa_fcs_s *fcs = fcs_cbarg; + struct bfad_s *bfad = fcs->bfad; complete(&bfad->comp); } @@ -74,9 +67,9 @@ bfa_fcs_exit_comp(void *fcs_cbarg) */ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, - bfa_boolean_t min_cfg) + bfa_boolean_t min_cfg) { - int i; + int i; struct bfa_fcs_mod_s *mod; fcs->bfa = bfa; @@ -86,7 +79,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_attach_fcs(bfa); fcbuild_init(); - for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { + for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->attach) mod->attach(fcs); @@ -99,11 +92,11 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, void bfa_fcs_init(struct bfa_fcs_s *fcs) { - int i, npbc_vports; + int i, npbc_vports; struct bfa_fcs_mod_s *mod; struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS]; - for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { + for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; if (mod->modinit) mod->modinit(fcs); @@ -111,7 +104,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs) /* Initialize pbc vports */ if (!fcs->min_cfg) { npbc_vports = - bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); + bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports); for (i = 0; i < npbc_vports; i++) bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]); } @@ -127,12 +120,13 @@ bfa_fcs_start(struct bfa_fcs_s *fcs) } /** - * FCS driver details initialization. + * brief + * FCS driver details initialization. 
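Editor's sketch: bfa_fcs_attach() and bfa_fcs_init() above walk a static table of FCS sub-modules and invoke whichever attach/modinit hooks each entry provides. A compact standalone rendering of that dispatch-table idea; module names and hook bodies are invented:

#include <stdio.h>

struct demo_fcs;                       /* opaque context, enough for the sketch */

struct demo_mod {
	const char *name;
	void (*attach)(struct demo_fcs *fcs);
	void (*modinit)(struct demo_fcs *fcs);
};

static void demo_uf_attach(struct demo_fcs *fcs)
{ (void)fcs; printf("uf attach\n"); }
static void demo_fabric_attach(struct demo_fcs *fcs)
{ (void)fcs; printf("fabric attach\n"); }
static void demo_fabric_init(struct demo_fcs *fcs)
{ (void)fcs; printf("fabric init\n"); }

static const struct demo_mod demo_mods[] = {
	{ "uf",     demo_uf_attach,     NULL },              /* no init hook */
	{ "fabric", demo_fabric_attach, demo_fabric_init },
};

int main(void)
{
	size_t i, n = sizeof(demo_mods) / sizeof(demo_mods[0]);

	for (i = 0; i < n; i++)            /* attach phase */
		if (demo_mods[i].attach)
			demo_mods[i].attach(NULL);
	for (i = 0; i < n; i++)            /* init phase */
		if (demo_mods[i].modinit)
			demo_mods[i].modinit(NULL);
	return 0;
}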
* - * param[in] fcs FCS instance - * param[in] driver_info Driver Details + * param[in] fcs FCS instance + * param[in] driver_info Driver Details * - * return None + * return None */ void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, @@ -145,13 +139,13 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, } /** - * @brief - * FCS FDMI Driver Parameter Initialization + * brief + * FCS FDMI Driver Parameter Initialization * - * @param[in] fcs FCS instance - * @param[in] fdmi_enable TRUE/FALSE + * param[in] fcs FCS instance + * param[in] fdmi_enable TRUE/FALSE * - * @return None + * return None */ void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) @@ -160,22 +154,24 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) fcs->fdmi_enabled = fdmi_enable; } - /** - * FCS instance cleanup and exit. + * brief + * FCS instance cleanup and exit. * - * param[in] fcs FCS instance - * return None + * param[in] fcs FCS instance + * return None */ void bfa_fcs_exit(struct bfa_fcs_s *fcs) { struct bfa_fcs_mod_s *mod; - int i; + int nmods, i; bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs); - for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) { + nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); + + for (i = 0; i < nmods; i++) { mod = &fcs_modules[i]; if (mod->modexit) { @@ -194,24 +190,1547 @@ bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod) fcs->trcmod = trcmod; } - void -bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod) +bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs) +{ + bfa_wc_down(&fcs->wc); +} + +/** + * Fabric module implementation. + */ + +#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ +#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ + +#define bfa_fcs_fabric_set_opertype(__fabric) do { \ + if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ + == BFA_PORT_TOPOLOGY_P2P) \ + (__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \ + else \ + (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \ +} while (0) + +/* + * forward declarations + */ +static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_delay(void *cbarg); +static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_delete_comp(void *cbarg); +static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len); +static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len); +static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); +static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rspfchs); +/** + * fcs_fabric_sm fabric state machine functions + */ + +/** + * Fabric state machine events + */ +enum bfa_fcs_fabric_event { + BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */ + BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */ + BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */ + BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */ + BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */ + BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */ + BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */ + 
BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */ + BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */ + BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */ + BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */ + BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */ + BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */ + BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */ + BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */ + BFA_FCS_FABRIC_SM_START = 16, /* from driver */ +}; + +static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event); +/** + * Beginning state before fabric creation. + */ +static void +bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_CREATE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); + bfa_fcs_fabric_init(fabric); + bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + case BFA_FCS_FABRIC_SM_LINK_DOWN: + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Beginning state before fabric creation. + */ +static void +bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_START: + if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + } else + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + case BFA_FCS_FABRIC_SM_LINK_DOWN: + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_fcs_modexit_comp(fabric->fcs); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Link is down, awaiting LINK UP event from port. This is also the + * first state at fabric creation. 
+ */ +static void +bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_UP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + break; + + case BFA_FCS_FABRIC_SM_RETRY_OP: + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * FLOGI is in progress, awaiting FLOGI reply. + */ +static void +bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_CONT_OP: + + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, + fabric->bb_credit); + fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; + + if (fabric->auth_reqd && fabric->is_auth) { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); + bfa_trc(fabric->fcs, event); + } else { + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_notify_online(fabric); + } + break; + + case BFA_FCS_FABRIC_SM_RETRY_OP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); + bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, + bfa_fcs_fabric_delay, fabric, + BFA_FCS_FABRIC_RETRY_DELAY); + break; + + case BFA_FCS_FABRIC_SM_LOOPBACK: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); + bfa_lps_discard(fabric->lps); + bfa_fcs_fabric_set_opertype(fabric); + break; + + case BFA_FCS_FABRIC_SM_NO_FABRIC: + fabric->fab_type = BFA_FCS_FABRIC_N2N; + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, + fabric->bb_credit); + bfa_fcs_fabric_notify_online(fabric); + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_lps_discard(fabric->lps); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_lps_discard(fabric->lps); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + + +static void +bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_DELAYED: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); + bfa_fcs_fabric_login(fabric); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_timer_stop(&fabric->delay_timer); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_timer_stop(&fabric->delay_timer); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Authentication is in progress, awaiting authentication results. 
+ */ +static void +bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_AUTH_FAILED: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); + bfa_lps_discard(fabric->lps); + break; + + case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); + bfa_fcs_fabric_notify_online(fabric); + break; + + case BFA_FCS_FABRIC_SM_PERF_EVFP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_lps_discard(fabric->lps); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Authentication failed + */ +static void +bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Port is in loopback mode. + */ +static void +bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * There is no attached fabric - private loop or NPort-to-NPort topology. + */ +static void +bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_lps_discard(fabric->lps); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + case BFA_FCS_FABRIC_SM_NO_FABRIC: + bfa_trc(fabric->fcs, fabric->bb_credit); + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, + fabric->bb_credit); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Fabric is online - normal operating state. 
+ */ +static void +bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); + bfa_lps_discard(fabric->lps); + bfa_fcs_fabric_notify_offline(fabric); + break; + + case BFA_FCS_FABRIC_SM_DELETE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); + bfa_fcs_fabric_delete(fabric); + break; + + case BFA_FCS_FABRIC_SM_AUTH_FAILED: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); + bfa_lps_discard(fabric->lps); + break; + + case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * Exchanging virtual fabric parameters. + */ +static void +bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_CONT_OP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); + break; + + case BFA_FCS_FABRIC_SM_ISOLATE: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } +} + +/** + * EVFP exchange complete and VFT tagging is enabled. + */ +static void +bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); +} + +/** + * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). + */ +static void +bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; + char pwwn_ptr[BFA_STRING_32]; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn); + + BFA_LOG(KERN_INFO, bfad, log_level, + "Port is isolated due to VF_ID mismatch. " + "PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.", + pwwn_ptr, fabric->fcs->port_vfid, + fabric->event_arg.swp_vfid); +} + +/** + * Fabric is being deleted, awaiting vport delete completions. + */ +static void +bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, + enum bfa_fcs_fabric_event event) { - fcs->logm = logmod; + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, event); + + switch (event) { + case BFA_FCS_FABRIC_SM_DELCOMP: + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_fcs_modexit_comp(fabric->fcs); + break; + + case BFA_FCS_FABRIC_SM_LINK_UP: + break; + + case BFA_FCS_FABRIC_SM_LINK_DOWN: + bfa_fcs_fabric_notify_offline(fabric); + break; + + default: + bfa_sm_fault(fabric->fcs, event); + } } + +/** + * fcs_fabric_private fabric private functions + */ + +static void +bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + + port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; + port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc); + port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); +} + +/** + * Port Symbolic Name Creation for base port. 
+ */ void -bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen) +bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) { - fcs->aen = aen; + struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; + char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; + struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; + + bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); + + /* Model name/number */ + strncpy((char *)&port_cfg->sym_name, model, + BFA_FCS_PORT_SYMBNAME_MODEL_SZ); + strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); + + /* Driver Version */ + strncat((char *)&port_cfg->sym_name, (char *)driver_info->version, + BFA_FCS_PORT_SYMBNAME_VERSION_SZ); + strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); + + /* Host machine name */ + strncat((char *)&port_cfg->sym_name, + (char *)driver_info->host_machine_name, + BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); + strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); + + /* + * Host OS Info : + * If OS Patch Info is not there, do not truncate any bytes from the + * OS name string and instead copy the entire OS info string (64 bytes). + */ + if (driver_info->host_os_patch[0] == '\0') { + strncat((char *)&port_cfg->sym_name, + (char *)driver_info->host_os_name, + BFA_FCS_OS_STR_LEN); + strncat((char *)&port_cfg->sym_name, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); + } else { + strncat((char *)&port_cfg->sym_name, + (char *)driver_info->host_os_name, + BFA_FCS_PORT_SYMBNAME_OSINFO_SZ); + strncat((char *)&port_cfg->sym_name, + BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); + + /* Append host OS Patch Info */ + strncat((char *)&port_cfg->sym_name, + (char *)driver_info->host_os_patch, + BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ); + } + + /* null terminate */ + port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } +/** + * bfa lps login completion callback + */ void -bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs) +bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) { - bfa_wc_down(&fcs->wc); + struct bfa_fcs_fabric_s *fabric = uarg; + + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_trc(fabric->fcs, status); + + switch (status) { + case BFA_STATUS_OK: + fabric->stats.flogi_accepts++; + break; + + case BFA_STATUS_INVALID_MAC: + /* Only for CNA */ + fabric->stats.flogi_acc_err++; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + + return; + + case BFA_STATUS_EPROTOCOL: + switch (bfa_lps_get_extstatus(fabric->lps)) { + case BFA_EPROTO_BAD_ACCEPT: + fabric->stats.flogi_acc_err++; + break; + + case BFA_EPROTO_UNKNOWN_RSP: + fabric->stats.flogi_unknown_rsp++; + break; + + default: + break; + } + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + + return; + + case BFA_STATUS_FABRIC_RJT: + fabric->stats.flogi_rejects++; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + return; + + default: + fabric->stats.flogi_rsp_err++; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); + return; + } + + fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps); + bfa_trc(fabric->fcs, fabric->bb_credit); + + if (!bfa_lps_is_brcd_fabric(fabric->lps)) + fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps); + + /* + * Check port type. It should be 1 = F-port. 
+ */ + if (bfa_lps_is_fport(fabric->lps)) { + fabric->bport.pid = bfa_lps_get_pid(fabric->lps); + fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps); + fabric->is_auth = bfa_lps_is_authreq(fabric->lps); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); + } else { + /* + * Nport-2-Nport direct attached + */ + fabric->bport.port_topo.pn2n.rem_port_wwn = + bfa_lps_get_peer_pwwn(fabric->lps); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); + } + + bfa_trc(fabric->fcs, fabric->bport.pid); + bfa_trc(fabric->fcs, fabric->is_npiv); + bfa_trc(fabric->fcs, fabric->is_auth); +} +/** + * Allocate and send FLOGI. + */ +static void +bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_s *bfa = fabric->fcs->bfa; + struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; + u8 alpa = 0; + + if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) + alpa = bfa_fcport_get_myalpa(bfa); + + bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), + pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); + + fabric->stats.flogi_sent++; +} + +static void +bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + bfa_trc(fabric->fcs, fabric->fabric_name); + + bfa_fcs_fabric_set_opertype(fabric); + fabric->stats.fabric_onlines++; + + /** + * notify online event to base and then virtual ports + */ + bfa_fcs_lport_online(&fabric->bport); + + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_fcs_vport_online(vport); + } +} + +static void +bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + bfa_trc(fabric->fcs, fabric->fabric_name); + fabric->stats.fabric_offlines++; + + /** + * notify offline event first to vports and then base port. + */ + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_fcs_vport_offline(vport); + } + + bfa_fcs_lport_offline(&fabric->bport); + + fabric->fabric_name = 0; + fabric->fabric_ip_addr[0] = 0; +} + +static void +bfa_fcs_fabric_delay(void *cbarg) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); +} + +/** + * Delete all vports and wait for vport delete completions. + */ +static void +bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + bfa_fcs_vport_fcs_delete(vport); + } + + bfa_fcs_lport_delete(&fabric->bport); + bfa_wc_wait(&fabric->wc); } +static void +bfa_fcs_fabric_delete_comp(void *cbarg) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); +} +/** + * fcs_fabric_public fabric public functions + */ + +/** + * Attach time initialization. + */ +void +bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric; + + fabric = &fcs->fabric; + bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); + + /** + * Initialize base fabric. + */ + fabric->fcs = fcs; + INIT_LIST_HEAD(&fabric->vport_q); + INIT_LIST_HEAD(&fabric->vf_q); + fabric->lps = bfa_lps_alloc(fcs->bfa); + bfa_assert(fabric->lps); + + /** + * Initialize fabric delete completion handler. Fabric deletion is + * complete when the last vport delete is complete. 
+ */ + bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); + bfa_wc_up(&fabric->wc); /* For the base port */ + + bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); +} + +void +bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) +{ + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); + bfa_trc(fcs, 0); +} + +/** + * Module cleanup + */ +void +bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric; + + bfa_trc(fcs, 0); + + /** + * Cleanup base fabric. + */ + fabric = &fcs->fabric; + bfa_lps_delete(fabric->lps); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); +} + +/** + * Fabric module start -- kick starts FCS actions + */ +void +bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) +{ + struct bfa_fcs_fabric_s *fabric; + + bfa_trc(fcs, 0); + fabric = &fcs->fabric; + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); +} + +/** + * Suspend fabric activity as part of driver suspend. + */ +void +bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs) +{ +} + +bfa_boolean_t +bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) +{ + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); +} + +bfa_boolean_t +bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric) +{ + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed); +} + +enum bfa_port_type +bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric) +{ + return fabric->oper_type; +} + +/** + * Link up notification from BFA physical port module. + */ +void +bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); +} + +/** + * Link down notification from BFA physical port module. + */ +void +bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) +{ + bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); +} + +/** + * A child vport is being created in the fabric. + * + * Call from vport module at vport creation. A list of base port and vports + * belonging to a fabric is maintained to propagate link events. + * + * param[in] fabric - Fabric instance. This can be a base fabric or vf. + * param[in] vport - Vport being created. + * + * @return None (always succeeds) + */ +void +bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport) +{ + /** + * - add vport to fabric's vport_q + */ + bfa_trc(fabric->fcs, fabric->vf_id); + + list_add_tail(&vport->qe, &fabric->vport_q); + fabric->num_vports++; + bfa_wc_up(&fabric->wc); +} + +/** + * A child vport is being deleted from fabric. + * + * Vport is being deleted. + */ +void +bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport) +{ + list_del(&vport->qe); + fabric->num_vports--; + bfa_wc_down(&fabric->wc); +} + +/** + * Base port is deleted. + */ +void +bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric) +{ + bfa_wc_down(&fabric->wc); +} + + +/** + * Check if fabric is online. + * + * param[in] fabric - Fabric instance. This can be a base fabric or vf. 
+ * + * @return TRUE/FALSE + */ +int +bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) +{ + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); +} + +/** + * brief + * + */ +bfa_status_t +bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs, + struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv) +{ + bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit); + return BFA_STATUS_OK; +} + +/** + * Lookup for a vport withing a fabric given its pwwn + */ +struct bfa_fcs_vport_s * +bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) +{ + struct bfa_fcs_vport_s *vport; + struct list_head *qe; + + list_for_each(qe, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn) + return vport; + } + + return NULL; +} + +/** + * In a given fabric, return the number of lports. + * + * param[in] fabric - Fabric instance. This can be a base fabric or vf. + * + * @return : 1 or more. + */ +u16 +bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric) +{ + return fabric->num_vports; +} + +/* + * Get OUI of the attached switch. + * + * Note : Use of this function should be avoided as much as possible. + * This function should be used only if there is any requirement +* to check for FOS version below 6.3. + * To check if the attached fabric is a brocade fabric, use + * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 + * or above only. + */ + +u16 +bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) +{ + wwn_t fab_nwwn; + u8 *tmp; + u16 oui; + + fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps); + + tmp = (u8 *)&fab_nwwn; + oui = (tmp[3] << 8) | tmp[4]; + + return oui; +} +/** + * Unsolicited frame receive handling. + */ +void +bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, + u16 len) +{ + u32 pid = fchs->d_id; + struct bfa_fcs_vport_s *vport; + struct list_head *qe; + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; + + bfa_trc(fabric->fcs, len); + bfa_trc(fabric->fcs, pid); + + /** + * Look for our own FLOGI frames being looped back. This means an + * external loopback cable is in place. Our own FLOGI frames are + * sometimes looped back when switch port gets temporarily bypassed. + */ + if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) && + (els_cmd->els_code == FC_ELS_FLOGI) && + (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); + return; + } + + /** + * FLOGI/EVFP exchanges should be consumed by base fabric. + */ + if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { + bfa_trc(fabric->fcs, pid); + bfa_fcs_fabric_process_uf(fabric, fchs, len); + return; + } + + if (fabric->bport.pid == pid) { + /** + * All authentication frames should be routed to auth + */ + bfa_trc(fabric->fcs, els_cmd->els_code); + if (els_cmd->els_code == FC_ELS_AUTH) { + bfa_trc(fabric->fcs, els_cmd->els_code); + return; + } + + bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); + bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); + return; + } + + /** + * look for a matching local port ID + */ + list_for_each(qe, &fabric->vport_q) { + vport = (struct bfa_fcs_vport_s *) qe; + if (vport->lport.pid == pid) { + bfa_fcs_lport_uf_recv(&vport->lport, fchs, len); + return; + } + } + bfa_trc(fabric->fcs, els_cmd->els_code); + bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); +} + +/** + * Unsolicited frames to be processed by fabric. 
+ */ +static void +bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, + u16 len) +{ + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_trc(fabric->fcs, els_cmd->els_code); + + switch (els_cmd->els_code) { + case FC_ELS_FLOGI: + bfa_fcs_fabric_process_flogi(fabric, fchs, len); + break; + + default: + /* + * need to generate a LS_RJT + */ + break; + } +} + +/** + * Process incoming FLOGI + */ +static void +bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len) +{ + struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); + struct bfa_fcs_lport_s *bport = &fabric->bport; + + bfa_trc(fabric->fcs, fchs->s_id); + + fabric->stats.flogi_rcvd++; + /* + * Check port type. It should be 0 = n-port. + */ + if (flogi->csp.port_type) { + /* + * @todo: may need to send a LS_RJT + */ + bfa_trc(fabric->fcs, flogi->port_name); + fabric->stats.flogi_rejected++; + return; + } + + fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred); + bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; + bport->port_topo.pn2n.reply_oxid = fchs->ox_id; + + /* + * Send a Flogi Acc + */ + bfa_fcs_fabric_send_flogi_acc(fabric); + bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); +} + +static void +bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) +{ + struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; + struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; + struct bfa_s *bfa = fabric->fcs->bfa; + struct bfa_fcxp_s *fcxp; + u16 reqlen; + struct fchs_s fchs; + + fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); + /** + * Do not expect this failure -- expect remote node to retry + */ + if (!fcxp) + return; + + reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_os_hton3b(FC_FABRIC_PORT), + n2n_port->reply_oxid, pcfg->pwwn, + pcfg->nwwn, + bfa_fcport_get_maxfrsize(bfa), + bfa_fcport_get_rx_bbcredit(bfa)); + + bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), + BFA_FALSE, FC_CLASS_3, + reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, + FC_MAX_PDUSZ, 0); +} + +/** + * Flogi Acc completion callback. + */ +static void +bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t status, u32 rsp_len, + u32 resid_len, struct fchs_s *rspfchs) +{ + struct bfa_fcs_fabric_s *fabric = cbarg; + + bfa_trc(fabric->fcs, status); +} + +/* + * + * @param[in] fabric - fabric + * @param[in] wwn_t - new fabric name + * + * @return - none + */ +void +bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, + wwn_t fabric_name) +{ + struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad; + char pwwn_ptr[BFA_STRING_32]; + char fwwn_ptr[BFA_STRING_32]; + + bfa_trc(fabric->fcs, fabric_name); + + if (fabric->fabric_name == 0) { + /* + * With BRCD switches, we don't get Fabric Name in FLOGI. + * Don't generate a fabric name change event in this case. + */ + fabric->fabric_name = fabric_name; + } else { + fabric->fabric_name = fabric_name; + wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport)); + wwn2str(fwwn_ptr, + bfa_fcs_lport_get_fabric_name(&fabric->bport)); + BFA_LOG(KERN_WARNING, bfad, log_level, + "Base port WWN = %s Fabric WWN = %s\n", + pwwn_ptr, fwwn_ptr); + } +} + +/** + * fcs_vf_api virtual fabrics API + */ + +/** + * Enable VF mode. + * + * @param[in] fcs fcs module instance + * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL + * to use standard default vf_id of 1. 
+ * + * @retval BFA_STATUS_OK vf mode is enabled + * @retval BFA_STATUS_BUSY Port is active. Port must be disabled + * before VF mode can be enabled. + */ +bfa_status_t +bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id) +{ + return BFA_STATUS_OK; +} + +/** + * Disable VF mode. + * + * @param[in] fcs fcs module instance + * + * @retval BFA_STATUS_OK vf mode is disabled + * @retval BFA_STATUS_BUSY VFs are present and being used. All + * VFs must be deleted before disabling + * VF mode. + */ +bfa_status_t +bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs) +{ + return BFA_STATUS_OK; +} + +/** + * Create a new VF instance. + * + * A new VF is created using the given VF configuration. A VF is identified + * by VF id. No duplicate VF creation is allowed with the same VF id. Once + * a VF is created, VF is automatically started after link initialization + * and EVFP exchange is completed. + * + * param[in] vf - FCS vf data structure. Memory is + * allocated by caller (driver) + * param[in] fcs - FCS module + * param[in] vf_cfg - VF configuration + * param[in] vf_drv - Opaque handle back to the driver's + * virtual vf structure + * + * retval BFA_STATUS_OK VF creation is successful + * retval BFA_STATUS_FAILED VF creation failed + * retval BFA_STATUS_EEXIST A VF exists with the given vf_id + */ +bfa_status_t +bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv) +{ + bfa_trc(fcs, vf_id); + return BFA_STATUS_OK; +} + +/** + * Use this function to delete a BFA VF object. VF object should + * be stopped before this function call. + * + * param[in] vf - pointer to bfa_vf_t. + * + * retval BFA_STATUS_OK On vf deletion success + * retval BFA_STATUS_BUSY VF is not in a stopped state + * retval BFA_STATUS_INPROGRESS VF deletion in in progress + */ +bfa_status_t +bfa_fcs_vf_delete(bfa_fcs_vf_t *vf) +{ + bfa_trc(vf->fcs, vf->vf_id); + return BFA_STATUS_OK; +} + + +/** + * Returns attributes of the given VF. + * + * param[in] vf pointer to bfa_vf_t. + * param[out] vf_attr vf attributes returned + * + * return None + */ +void +bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr) +{ + bfa_trc(vf->fcs, vf->vf_id); +} + +/** + * Return statistics associated with the given vf. + * + * param[in] vf pointer to bfa_vf_t. + * param[out] vf_stats vf statistics returned + * + * @return None + */ +void +bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats) +{ + bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s)); +} + +/** + * clear statistics associated with the given vf. + * + * param[in] vf pointer to bfa_vf_t. + * + * @return None + */ +void +bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf) +{ + bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s)); +} + +/** + * Returns FCS vf structure for a given vf_id. + * + * param[in] vf_id - VF_ID + * + * return + * If lookup succeeds, retuns fcs vf object, otherwise returns NULL + */ +bfa_fcs_vf_t * +bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) +{ + bfa_trc(fcs, vf_id); + if (vf_id == FC_VF_ID_NULL) + return &fcs->fabric; + + return NULL; +} + +/** + * Return the list of VFs configured. 
+ * + * param[in] fcs fcs module instance + * param[out] vf_ids returned list of vf_ids + * param[in,out] nvfs in:size of vf_ids array, + * out:total elements present, + * actual elements returned is limited by the size + * + * return Driver VF structure + */ +void +bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) +{ + bfa_trc(fcs, *nvfs); +} + +/** + * Return the list of all VFs visible from fabric. + * + * param[in] fcs fcs module instance + * param[out] vf_ids returned list of vf_ids + * param[in,out] nvfs in:size of vf_ids array, + * out:total elements present, + * actual elements returned is limited by the size + * + * return Driver VF structure + */ +void +bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) +{ + bfa_trc(fcs, *nvfs); +} + +/** + * Return the list of local logical ports present in the given VF. + * + * param[in] vf vf for which logical ports are returned + * param[out] lpwwn returned logical port wwn list + * param[in,out] nlports in:size of lpwwn list; + * out:total elements present, + * actual elements returned is limited by the size + */ +void +bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) +{ + struct list_head *qe; + struct bfa_fcs_vport_s *vport; + int i; + struct bfa_fcs_s *fcs; + + if (vf == NULL || lpwwn == NULL || *nlports == 0) + return; + + fcs = vf->fcs; + + bfa_trc(fcs, vf->vf_id); + bfa_trc(fcs, (u32) *nlports); + + i = 0; + lpwwn[i++] = vf->bport.port_cfg.pwwn; + + list_for_each(qe, &vf->vport_q) { + if (i >= *nlports) + break; + + vport = (struct bfa_fcs_vport_s *) qe; + lpwwn[i++] = vport->lport.port_cfg.pwwn; + } + + bfa_trc(fcs, i); + *nlports = i; +} + +/** + * BFA FCS PPORT ( physical port) + */ +static void +bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) +{ + struct bfa_fcs_s *fcs = cbarg; + + bfa_trc(fcs, event); + + switch (event) { + case BFA_PORT_LINKUP: + bfa_fcs_fabric_link_up(&fcs->fabric); + break; + + case BFA_PORT_LINKDOWN: + bfa_fcs_fabric_link_down(&fcs->fabric); + break; + + default: + bfa_assert(0); + } +} + +void +bfa_fcs_port_attach(struct bfa_fcs_s *fcs) +{ + bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); +} + +/** + * BFA FCS UF ( Unsolicited Frames) + */ + +/** + * BFA callback for unsolicited frame receive handler. 
+ * + * @param[in] cbarg callback arg for receive handler + * @param[in] uf unsolicited frame descriptor + * + * @return None + */ +static void +bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) +{ + struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; + struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); + u16 len = bfa_uf_get_frmlen(uf); + struct fc_vft_s *vft; + struct bfa_fcs_fabric_s *fabric; + + /** + * check for VFT header + */ + if (fchs->routing == FC_RTG_EXT_HDR && + fchs->cat_info == FC_CAT_VFT_HDR) { + bfa_stats(fcs, uf.tagged); + vft = bfa_uf_get_frmbuf(uf); + if (fcs->port_vfid == vft->vf_id) + fabric = &fcs->fabric; + else + fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); + + /** + * drop frame if vfid is unknown + */ + if (!fabric) { + bfa_assert(0); + bfa_stats(fcs, uf.vfid_unknown); + bfa_uf_free(uf); + return; + } + + /** + * skip vft header + */ + fchs = (struct fchs_s *) (vft + 1); + len -= sizeof(struct fc_vft_s); + + bfa_trc(fcs, vft->vf_id); + } else { + bfa_stats(fcs, uf.untagged); + fabric = &fcs->fabric; + } + + bfa_trc(fcs, ((u32 *) fchs)[0]); + bfa_trc(fcs, ((u32 *) fchs)[1]); + bfa_trc(fcs, ((u32 *) fchs)[2]); + bfa_trc(fcs, ((u32 *) fchs)[3]); + bfa_trc(fcs, ((u32 *) fchs)[4]); + bfa_trc(fcs, ((u32 *) fchs)[5]); + bfa_trc(fcs, len); + + bfa_fcs_fabric_uf_recv(fabric, fchs, len); + bfa_uf_free(uf); +} + +void +bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) +{ + bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); +} diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h new file mode 100644 index 000000000000..d75045df1e7e --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -0,0 +1,779 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __BFA_FCS_H__ +#define __BFA_FCS_H__ + +#include "bfa_cs.h" +#include "bfa_defs.h" +#include "bfa_defs_fcs.h" +#include "bfa_modules.h" +#include "bfa_fc.h" + +#define BFA_FCS_OS_STR_LEN 64 + +/* + * !!! Only append to the enums defined here to avoid any versioning + * !!! needed between trace utility and driver version + */ +enum { + BFA_TRC_FCS_FCS = 1, + BFA_TRC_FCS_PORT = 2, + BFA_TRC_FCS_RPORT = 3, + BFA_TRC_FCS_FCPIM = 4, +}; + + +struct bfa_fcs_s; + +#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) +void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs); + +#define BFA_FCS_BRCD_SWITCH_OUI 0x051e +#define N2N_LOCAL_PID 0x010000 +#define N2N_REMOTE_PID 0x020000 +#define BFA_FCS_RETRY_TIMEOUT 2000 +#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 
1 : 0) + + + +struct bfa_fcs_lport_ns_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_s *port; /* parent port */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; +}; + + +struct bfa_fcs_lport_scn_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_s *port; /* parent port */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; +}; + + +struct bfa_fcs_lport_fdmi_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_ms_s *ms; /* parent ms */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + u8 retry_cnt; /* retry count */ + u8 rsvd[3]; +}; + + +struct bfa_fcs_lport_ms_s { + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; + struct bfa_fcs_lport_s *port; /* parent port */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + struct bfa_fcs_lport_fdmi_s fdmi; /* FDMI component of MS */ + u8 retry_cnt; /* retry count */ + u8 rsvd[3]; +}; + + +struct bfa_fcs_lport_fab_s { + struct bfa_fcs_lport_ns_s ns; /* NS component of port */ + struct bfa_fcs_lport_scn_s scn; /* scn component of port */ + struct bfa_fcs_lport_ms_s ms; /* MS component of port */ +}; + +#define MAX_ALPA_COUNT 127 + +struct bfa_fcs_lport_loop_s { + u8 num_alpa; /* Num of ALPA entries in the map */ + u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional + *Map */ + struct bfa_fcs_lport_s *port; /* parent port */ +}; + +struct bfa_fcs_lport_n2n_s { + u32 rsvd; + u16 reply_oxid; /* ox_id from the req flogi to be + *used in flogi acc */ + wwn_t rem_port_wwn; /* Attached port's wwn */ +}; + + +union bfa_fcs_lport_topo_u { + struct bfa_fcs_lport_fab_s pfab; + struct bfa_fcs_lport_loop_s ploop; + struct bfa_fcs_lport_n2n_s pn2n; +}; + + +struct bfa_fcs_lport_s { + struct list_head qe; /* used by port/vport */ + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_fabric_s *fabric; /* parent fabric */ + struct bfa_lport_cfg_s port_cfg; /* port configuration */ + struct bfa_timer_s link_timer; /* timer for link offline */ + u32 pid:24; /* FC address */ + u8 lp_tag; /* lport tag */ + u16 num_rports; /* Num of r-ports */ + struct list_head rport_q; /* queue of discovered r-ports */ + struct bfa_fcs_s *fcs; /* FCS instance */ + union bfa_fcs_lport_topo_u port_topo; /* fabric/loop/n2n details */ + struct bfad_port_s *bfad_port; /* driver peer instance */ + struct bfa_fcs_vport_s *vport; /* NULL for base ports */ + struct bfa_fcxp_s *fcxp; + struct bfa_fcxp_wqe_s fcxp_wqe; + struct bfa_lport_stats_s stats; + struct bfa_wc_s wc; /* waiting counter for events */ +}; +#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa) +#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns) +#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn) +#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms) +#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi) +#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \ + (port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM) + +/* + * forward declaration + */ +struct bfad_vf_s; + +enum bfa_fcs_fabric_type { + BFA_FCS_FABRIC_UNKNOWN = 0, + BFA_FCS_FABRIC_SWITCHED = 1, + BFA_FCS_FABRIC_N2N = 2, +}; + + +struct bfa_fcs_fabric_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_s *fcs; /* FCS instance */ + struct bfa_fcs_lport_s bport; /* base logical port */ + enum bfa_fcs_fabric_type fab_type; /* fabric type */ + enum bfa_port_type oper_type; /* current link topology */ + 
u8 is_vf; /* is virtual fabric? */ + u8 is_npiv; /* is NPIV supported ? */ + u8 is_auth; /* is Security/Auth supported ? */ + u16 bb_credit; /* BB credit from fabric */ + u16 vf_id; /* virtual fabric ID */ + u16 num_vports; /* num vports */ + u16 rsvd; + struct list_head vport_q; /* queue of virtual ports */ + struct list_head vf_q; /* queue of virtual fabrics */ + struct bfad_vf_s *vf_drv; /* driver vf structure */ + struct bfa_timer_s link_timer; /* Link Failure timer. Vport */ + wwn_t fabric_name; /* attached fabric name */ + bfa_boolean_t auth_reqd; /* authentication required */ + struct bfa_timer_s delay_timer; /* delay timer */ + union { + u16 swp_vfid;/* switch port VF id */ + } event_arg; + struct bfa_wc_s wc; /* wait counter for delete */ + struct bfa_vf_stats_s stats; /* fabric/vf stats */ + struct bfa_lps_s *lps; /* lport login services */ + u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; + /* attached fabric's ip addr */ +}; + +#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv) +#define bfa_fcs_fabric_is_switched(__f) \ + ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) + +/** + * The design calls for a single implementation of base fabric and vf. + */ +#define bfa_fcs_vf_t struct bfa_fcs_fabric_s + +struct bfa_vf_event_s { + u32 undefined; +}; + +struct bfa_fcs_s; +struct bfa_fcs_fabric_s; + +/* + * @todo : need to move to a global config file. + * Maximum Rports supported per port (physical/logical). + */ +#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */ + +#define bfa_fcs_lport_t struct bfa_fcs_lport_s + +/** + * Symbolic Name related defines + * Total bytes 255. + * Physical Port's symbolic name 128 bytes. + * For Vports, Vport's symbolic name is appended to the Physical port's + * Symbolic Name. + * + * Physical Port's symbolic name Format : (Total 128 bytes) + * Adapter Model number/name : 12 bytes + * Driver Version : 10 bytes + * Host Machine Name : 30 bytes + * Host OS Info : 48 bytes + * Host OS PATCH Info : 16 bytes + * ( remaining 12 bytes reserved to be used for separator) + */ +#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | " + +#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12 +#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10 +#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30 +#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 +#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 + +/** + * Get FC port ID for a logical port. + */ +#define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid) +#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn) +#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn) +#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name) +#define bfa_fcs_lport_is_initiator(_lport) \ + ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM) +#define bfa_fcs_lport_get_nrports(_lport) \ + ((_lport) ? 
(_lport)->num_rports : 0) + +static inline struct bfad_port_s * +bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port) +{ + return port->bfad_port; +} + +#define bfa_fcs_lport_get_opertype(_lport) ((_lport)->fabric->oper_type) +#define bfa_fcs_lport_get_fabric_name(_lport) ((_lport)->fabric->fabric_name) +#define bfa_fcs_lport_get_fabric_ipaddr(_lport) \ + ((_lport)->fabric->fabric_ip_addr) + +/** + * bfa fcs port public functions + */ + +bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); +struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); +void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port, + wwn_t rport_wwns[], int *nrports); + +wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, + int index, int nrports, bfa_boolean_t bwwn); + +struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, + u16 vf_id, wwn_t lpwwn); + +void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, + struct bfa_lport_info_s *port_info); +void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, + struct bfa_lport_attr_s *port_attr); +void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, + struct bfa_lport_stats_s *port_stats); +void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port); +enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed( + struct bfa_fcs_lport_s *port); + +/* MS FCS routines */ +void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port); + +/* FDMI FCS routines */ +void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms); +void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms); +void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms); +void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs, + u16 len); +void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_fcs_vport_s *vport); +void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, + struct bfa_lport_cfg_s *port_cfg); +void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid( + struct bfa_fcs_lport_s *port, u32 pid); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( + struct bfa_fcs_lport_s *port, wwn_t pwwn); +struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( + struct bfa_fcs_lport_s *port, wwn_t nwwn); +void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport); +void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport); +void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs); +void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs); +void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port); +void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport); +void bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_frame, u32 len); + +struct bfa_fcs_vport_s { + struct list_head qe; /* queue elem */ + bfa_sm_t sm; 
/* state machine */ + bfa_fcs_lport_t lport; /* logical port */ + struct bfa_timer_s timer; + struct bfad_vport_s *vport_drv; /* Driver private */ + struct bfa_vport_stats_s vport_stats; /* vport statistics */ + struct bfa_lps_s *lps; /* Lport login service*/ + int fdisc_retries; +}; + +#define bfa_fcs_vport_get_port(vport) \ + ((struct bfa_fcs_lport_s *)(&vport->port)) + +/** + * bfa fcs vport public functions + */ +bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, + struct bfa_fcs_s *fcs, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, + struct bfad_vport_s *vport_drv); +bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, + struct bfa_fcs_s *fcs, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, + struct bfad_vport_s *vport_drv); +bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport); +bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); +bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); +bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, + struct bfa_vport_attr_s *vport_attr); +void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, + struct bfa_vport_stats_s *vport_stats); +void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport); +struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, + u16 vf_id, wwn_t vpwwn); +void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); +void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport); + +#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */ +#define BFA_FCS_RPORT_MAX_RETRIES (5) + +/* + * forward declarations + */ +struct bfad_rport_s; + +struct bfa_fcs_itnim_s; +struct bfa_fcs_tin_s; +struct bfa_fcs_iprp_s; + +/* Rport Features (RPF) */ +struct bfa_fcs_rpf_s { + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_rport_s *rport; /* parent rport */ + struct bfa_timer_s timer; /* general purpose timer */ + struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ + struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */ + int rpsc_retries; /* max RPSC retry attempts */ + enum bfa_port_speed rpsc_speed; + /* Current Speed from RPSC. O if RPSC fails */ + enum bfa_port_speed assigned_speed; + /** + * Speed assigned by the user. will be used if RPSC is + * not supported by the rport. 
+ */ +}; + +struct bfa_fcs_rport_s { + struct list_head qe; /* used by port/vport */ + struct bfa_fcs_lport_s *port; /* parent FCS port */ + struct bfa_fcs_s *fcs; /* fcs instance */ + struct bfad_rport_s *rp_drv; /* driver peer instance */ + u32 pid; /* port ID of rport */ + u16 maxfrsize; /* maximum frame size */ + u16 reply_oxid; /* OX_ID of inbound requests */ + enum fc_cos fc_cos; /* FC classes of service supp */ + bfa_boolean_t cisc; /* CISC capable device */ + bfa_boolean_t prlo; /* processing prlo or LOGO */ + wwn_t pwwn; /* port wwn of rport */ + wwn_t nwwn; /* node wwn of rport */ + struct bfa_rport_symname_s psym_name; /* port symbolic name */ + bfa_sm_t sm; /* state machine */ + struct bfa_timer_s timer; /* general purpose timer */ + struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */ + struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */ + struct bfa_fcs_iprp_s *iprp; /* IP/FC role */ + struct bfa_rport_s *bfa_rport; /* BFA Rport */ + struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ + int plogi_retries; /* max plogi retry attempts */ + int ns_retries; /* max NS query retry attempts */ + struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */ + struct bfa_rport_stats_s stats; /* rport stats */ + enum bfa_rport_function scsi_function; /* Initiator/Target */ + struct bfa_fcs_rpf_s rpf; /* Rport features module */ +}; + +static inline struct bfa_rport_s * +bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport) +{ + return rport->bfa_rport; +} + +/** + * bfa fcs rport API functions + */ +bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, + struct bfa_fcs_rport_s *rport, + struct bfad_rport_s *rport_drv); +bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, + struct bfa_rport_attr_s *attr); +void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, + struct bfa_rport_stats_s *stats); +void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport); +struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, + wwn_t rpwwn); +struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( + struct bfa_fcs_lport_s *port, wwn_t rnwwn); +void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); + +void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, + enum bfa_port_speed speed); +void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, + struct fchs_s *fchs, u16 len); +void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); + +struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, + u32 pid); +void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + struct fc_logi_s *plogi_rsp); +void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, + struct fc_logi_s *plogi); +void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, + struct fc_logi_s *plogi); +void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id); + +void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport); +int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport); +struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn( + struct 
bfa_fcs_lport_s *port, wwn_t wwn); +void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport); +void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport); + +/* + * forward declarations + */ +struct bfad_itnim_s; + +struct bfa_fcs_itnim_s { + bfa_sm_t sm; /* state machine */ + struct bfa_fcs_rport_s *rport; /* parent remote rport */ + struct bfad_itnim_s *itnim_drv; /* driver peer instance */ + struct bfa_fcs_s *fcs; /* fcs instance */ + struct bfa_timer_s timer; /* timer functions */ + struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */ + u32 prli_retries; /* max prli retry attempts */ + bfa_boolean_t seq_rec; /* seq recovery support */ + bfa_boolean_t rec_support; /* REC supported */ + bfa_boolean_t conf_comp; /* FCP_CONF support */ + bfa_boolean_t task_retry_id; /* task retry id supp */ + struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */ + struct bfa_fcxp_s *fcxp; /* FCXP in use */ + struct bfa_itnim_stats_s stats; /* itn statistics */ +}; +#define bfa_fcs_fcxp_alloc(__fcs) \ + bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL) + +#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \ + bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \ + NULL, 0, 0, NULL, NULL, NULL, NULL) + +static inline struct bfad_port_s * +bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->port->bfad_port; +} + + +static inline struct bfa_fcs_lport_s * +bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->port; +} + + +static inline wwn_t +bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->nwwn; +} + + +static inline wwn_t +bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->pwwn; +} + + +static inline u32 +bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->pid; +} + + +static inline u32 +bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->maxfrsize; +} + + +static inline enum fc_cos +bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->rport->fc_cos; +} + + +static inline struct bfad_itnim_s * +bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->itnim_drv; +} + + +static inline struct bfa_itnim_s * +bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim) +{ + return itnim->bfa_itnim; +} + +/** + * bfa fcs FCP Initiator mode API functions + */ +void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim, + struct bfa_itnim_attr_s *attr); +void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim, + struct bfa_itnim_stats_s *stats); +struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, + wwn_t rpwwn); +bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_attr_s *attr); +bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_stats_s *stats); +bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, + wwn_t rpwwn); +struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport); +void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim); +bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim); +void bfa_fcs_fcpim_uf_recv(struct 
bfa_fcs_itnim_s *itnim, + struct fchs_s *fchs, u16 len); + +#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \ + FDMI_TRANS_SPEED_2G | \ + FDMI_TRANS_SPEED_4G | \ + FDMI_TRANS_SPEED_8G) + +/* + * HBA Attribute Block : BFA internal representation. Note : Some variable + * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based + * on this the size has been reduced to 16 bytes from the standard's 64 bytes. + */ +struct bfa_fcs_fdmi_hba_attr_s { + wwn_t node_name; + u8 manufacturer[64]; + u8 serial_num[64]; + u8 model[16]; + u8 model_desc[256]; + u8 hw_version[8]; + u8 driver_version[8]; + u8 option_rom_ver[BFA_VERSION_LEN]; + u8 fw_version[8]; + u8 os_name[256]; + u32 max_ct_pyld; +}; + +/* + * Port Attribute Block + */ +struct bfa_fcs_fdmi_port_attr_s { + u8 supp_fc4_types[32]; /* supported FC4 types */ + u32 supp_speed; /* supported speed */ + u32 curr_speed; /* current Speed */ + u32 max_frm_size; /* max frame size */ + u8 os_device_name[256]; /* OS device Name */ + u8 host_name[256]; /* host name */ +}; + +struct bfa_fcs_stats_s { + struct { + u32 untagged; /* untagged receive frames */ + u32 tagged; /* tagged receive frames */ + u32 vfid_unknown; /* VF id is unknown */ + } uf; +}; + +struct bfa_fcs_driver_info_s { + u8 version[BFA_VERSION_LEN]; /* Driver Version */ + u8 host_machine_name[BFA_FCS_OS_STR_LEN]; + u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */ + u8 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */ + u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */ +}; + +struct bfa_fcs_s { + struct bfa_s *bfa; /* corresponding BFA bfa instance */ + struct bfad_s *bfad; /* corresponding BDA driver instance */ + struct bfa_trc_mod_s *trcmod; /* tracing module */ + bfa_boolean_t vf_enabled; /* VF mode is enabled */ + bfa_boolean_t fdmi_enabled; /* FDMI is enabled */ + bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ + u16 port_vfid; /* port default VF ID */ + struct bfa_fcs_driver_info_s driver_info; + struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ + struct bfa_fcs_stats_s stats; /* FCS statistics */ + struct bfa_wc_s wc; /* waiting counter */ +}; + +/* + * bfa fcs API functions + */ +void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, + struct bfad_s *bfad, + bfa_boolean_t min_cfg); +void bfa_fcs_init(struct bfa_fcs_s *fcs); +void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, + struct bfa_fcs_driver_info_s *driver_info); +void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable); +void bfa_fcs_exit(struct bfa_fcs_s *fcs); +void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); +void bfa_fcs_start(struct bfa_fcs_s *fcs); + +/** + * bfa fcs vf public functions + */ +bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id); +bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs); +bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_lport_cfg_s *port_cfg, + struct bfad_vf_s *vf_drv); +bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf); +void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); +void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); +void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr); +void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, + struct bfa_vf_stats_s *vf_stats); +void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf); +void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); +bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct 
bfa_fcs_s *fcs, u16 vf_id); +u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); + +/* + * fabric protected interface functions + */ +void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport); +void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, + struct bfa_fcs_vport_s *vport); +int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric); +struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup( + struct bfa_fcs_fabric_s *fabric, wwn_t pwwn); +void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs); +void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, + struct fchs_s *fchs, u16 len); +bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric); +bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric); +enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric); +bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, + struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg, + struct bfad_vf_s *vf_drv); +void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, + wwn_t fabric_name); +u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); +void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); +void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); + +/** + * BFA FCS callback interfaces + */ + +/** + * fcb Main fcs callbacks + */ + +struct bfad_port_s; +struct bfad_vf_s; +struct bfad_vport_s; +struct bfad_rport_s; + +/** + * lport callbacks + */ +struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad, + struct bfa_fcs_lport_s *port, + enum bfa_lport_role roles, + struct bfad_vf_s *vf_drv, + struct bfad_vport_s *vp_drv); +void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, + struct bfad_vf_s *vf_drv, + struct bfad_vport_s *vp_drv); + +/** + * vport callbacks + */ +void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s); + +/** + * rport callbacks + */ +bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, + struct bfa_fcs_rport_s **rport, + struct bfad_rport_s **rport_drv); + +/** + * itnim callbacks + */ +void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, + struct bfad_itnim_s **itnim_drv); +void bfa_fcb_itnim_free(struct bfad_s *bfad, + struct bfad_itnim_s *itnim_drv); +void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); +void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); + +#endif /* __BFA_FCS_H__ */ diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c new file mode 100644 index 000000000000..569dfefab70d --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -0,0 +1,783 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/** + * fcpim.c - FCP initiator mode i-t nexus state machine + */ + +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" +#include "bfad_drv.h" +#include "bfad_im.h" + +BFA_TRC_FILE(FCS, FCPIM); + +/* + * forward declarations + */ +static void bfa_fcs_itnim_timeout(void *arg); +static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); +static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_itnim_prli_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); + +/** + * fcs_itnim_sm FCS itnim state machine events + */ + +enum bfa_fcs_itnim_event { + BFA_FCS_ITNIM_SM_ONLINE = 1, /* rport online event */ + BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */ + BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */ + BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */ + BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */ + BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */ + BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */ + BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */ + BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ + BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ + BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ +}; + +static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); +static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event); + +static struct bfa_sm_table_s itnim_sm_table[] = { + {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE}, + {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND}, + {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT}, + {BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY}, + {BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE}, + {BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE}, + {BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE}, + {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, +}; + +/** + * fcs_itnim_sm FCS itnim state machine + */ + +static void +bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_ONLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); + itnim->prli_retries = 0; + 
bfa_fcs_itnim_send_prli(itnim, NULL); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } + +} + +static void +bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_FRMSENT: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_RSP_OK: + if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) { + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + } else { + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); + bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); + } + break; + + case BFA_FCS_ITNIM_SM_RSP_ERROR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry); + bfa_timer_start(itnim->fcs->bfa, &itnim->timer, + bfa_fcs_itnim_timeout, itnim, + BFA_FCS_RETRY_TIMEOUT); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_discard(itnim->fcxp); + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + bfa_fcxp_discard(itnim->fcxp); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcxp_discard(itnim->fcxp); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_TIMEOUT: + if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) { + itnim->prli_retries++; + bfa_trc(itnim->fcs, itnim->prli_retries); + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); + bfa_fcs_itnim_send_prli(itnim, NULL); + } else { + /* invoke target offline */ + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_rport_logo_imp(itnim->rport); + } + break; + + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_timer_stop(&itnim->timer); + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_INITIATOR: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); + bfa_timer_stop(&itnim->timer); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_timer_stop(&itnim->timer); + bfa_fcs_itnim_free(itnim); + break; + + default: 
+ bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_HCB_ONLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); + bfa_fcb_itnim_online(itnim->itnim_drv); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); + wwn2str(rpwwn_buf, itnim->rport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Target (WWN = %s) is online for initiator (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + break; + + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_itnim_offline(itnim->bfa_itnim); + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); + bfa_fcb_itnim_offline(itnim->itnim_drv); + bfa_itnim_offline(itnim->bfa_itnim); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); + wwn2str(rpwwn_buf, itnim->rport->pwwn); + if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) + BFA_LOG(KERN_ERR, bfad, log_level, + "Target (WWN = %s) connectivity lost for " + "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); + else + BFA_LOG(KERN_INFO, bfad, log_level, + "Target (WWN = %s) offlined by initiator (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_HCB_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +/* + * This state is set when a discovered rport is also in intiator mode. + * This ITN is marked as no_op and is not active and will not be truned into + * online state. 
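+ * Only the OFFLINE and DELETE events move an ITN out of this state;
+ * ONLINE, RSP_ERROR and INITIATOR events are ignored here.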
+ */ +static void +bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, + enum bfa_fcs_itnim_event event) +{ + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_trc(itnim->fcs, event); + + switch (event) { + case BFA_FCS_ITNIM_SM_OFFLINE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_rport_itnim_ack(itnim->rport); + break; + + case BFA_FCS_ITNIM_SM_RSP_ERROR: + case BFA_FCS_ITNIM_SM_ONLINE: + case BFA_FCS_ITNIM_SM_INITIATOR: + break; + + case BFA_FCS_ITNIM_SM_DELETE: + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + bfa_fcs_itnim_free(itnim); + break; + + default: + bfa_sm_fault(itnim->fcs, event); + } +} + +static void +bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_itnim_s *itnim = itnim_cbarg; + struct bfa_fcs_rport_s *rport = itnim->rport; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + itnim->stats.fcxp_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, + bfa_fcs_itnim_send_prli, itnim); + return; + } + itnim->fcxp = fcxp; + + len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0); + + bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, + bfa_fcs_itnim_prli_response, (void *)itnim, + FC_MAX_PDUSZ, FC_ELS_TOV); + + itnim->stats.prli_sent++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); +} + +static void +bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; + struct fc_els_cmd_s *els_cmd; + struct fc_prli_s *prli_resp; + struct fc_ls_rjt_s *ls_rjt; + struct fc_prli_params_s *sparams; + + bfa_trc(itnim->fcs, req_status); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + itnim->stats.prli_rsp_err++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + if (els_cmd->els_code == FC_ELS_ACC) { + prli_resp = (struct fc_prli_s *) els_cmd; + + if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) { + bfa_trc(itnim->fcs, rsp_len); + /* + * Check if this r-port is also in Initiator mode. + * If so, we need to set this ITN as a no-op. 
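+ * A no-op ITN is moved to the initiator state and is never
+ * reported online to the driver.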
+ */ + if (prli_resp->parampage.servparams.initiator) { + bfa_trc(itnim->fcs, prli_resp->parampage.type); + itnim->rport->scsi_function = + BFA_RPORT_INITIATOR; + itnim->stats.prli_rsp_acc++; + bfa_sm_send_event(itnim, + BFA_FCS_ITNIM_SM_RSP_OK); + return; + } + + itnim->stats.prli_rsp_parse_err++; + return; + } + itnim->rport->scsi_function = BFA_RPORT_TARGET; + + sparams = &prli_resp->parampage.servparams; + itnim->seq_rec = sparams->retry; + itnim->rec_support = sparams->rec_support; + itnim->task_retry_id = sparams->task_retry_id; + itnim->conf_comp = sparams->confirm; + + itnim->stats.prli_rsp_acc++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); + } else { + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(itnim->fcs, ls_rjt->reason_code); + bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); + + itnim->stats.prli_rsp_rjt++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); + } +} + +static void +bfa_fcs_itnim_timeout(void *arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg; + + itnim->stats.timeout++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); +} + +static void +bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) +{ + bfa_itnim_delete(itnim->bfa_itnim); + bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv); +} + + + +/** + * itnim_public FCS ITNIM public interfaces + */ + +/** + * Called by rport when a new rport is created. + * + * @param[in] rport - remote port. + */ +struct bfa_fcs_itnim_s * +bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfa_fcs_itnim_s *itnim; + struct bfad_itnim_s *itnim_drv; + struct bfa_itnim_s *bfa_itnim; + + /* + * call bfad to allocate the itnim + */ + bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); + if (itnim == NULL) { + bfa_trc(port->fcs, rport->pwwn); + return NULL; + } + + /* + * Initialize itnim + */ + itnim->rport = rport; + itnim->fcs = rport->fcs; + itnim->itnim_drv = itnim_drv; + + /* + * call BFA to create the itnim + */ + bfa_itnim = + bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim); + + if (bfa_itnim == NULL) { + bfa_trc(port->fcs, rport->pwwn); + bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv); + bfa_assert(0); + return NULL; + } + + itnim->bfa_itnim = bfa_itnim; + itnim->seq_rec = BFA_FALSE; + itnim->rec_support = BFA_FALSE; + itnim->conf_comp = BFA_FALSE; + itnim->task_retry_id = BFA_FALSE; + + /* + * Set State machine + */ + bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); + + return itnim; +} + +/** + * Called by rport to delete the instance of FCPIM. + * + * @param[in] rport - remote port. + */ +void +bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) +{ + bfa_trc(itnim->fcs, itnim->rport->pid); + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); +} + +/** + * Notification from rport that PLOGI is complete to initiate FC-4 session. + */ +void +bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim) +{ + itnim->stats.onlines++; + + if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) { + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE); + } else { + /* + * For well known addresses, we set the itnim to initiator + * state + */ + itnim->stats.initiator++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); + } +} + +/** + * Called by rport to handle a remote device offline. 
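+ * Posts the OFFLINE event to the itnim state machine.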
+ */ +void +bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) +{ + itnim->stats.offlines++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); +} + +/** + * Called by rport when remote port is known to be an initiator from + * PRLI received. + */ +void +bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) +{ + bfa_trc(itnim->fcs, itnim->rport->pid); + itnim->stats.initiator++; + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); +} + +/** + * Called by rport to check if the itnim is online. + */ +bfa_status_t +bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) +{ + bfa_trc(itnim->fcs, itnim->rport->pid); + switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { + case BFA_ITNIM_ONLINE: + case BFA_ITNIM_INITIATIOR: + return BFA_STATUS_OK; + + default: + return BFA_STATUS_NO_FCPIM_NEXUS; + } +} + +/** + * BFA completion callback for bfa_itnim_online(). + */ +void +bfa_cb_itnim_online(void *cbarg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); +} + +/** + * BFA completion callback for bfa_itnim_offline(). + */ +void +bfa_cb_itnim_offline(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); +} + +/** + * Mark the beginning of PATH TOV handling. IO completion callbacks + * are still pending. + */ +void +bfa_cb_itnim_tov_begin(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); +} + +/** + * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. + */ +void +bfa_cb_itnim_tov(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + struct bfad_itnim_s *itnim_drv = itnim->itnim_drv; + + bfa_trc(itnim->fcs, itnim->rport->pwwn); + itnim_drv->state = ITNIM_STATE_TIMEOUT; +} + +/** + * BFA notification to FCS/driver for second level error recovery. + * + * Atleast one I/O request has timedout and target is unresponsive to + * repeated abort requests. Second level error recovery should be initiated + * by starting implicit logout and recovery procedures. 
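+ * This handler starts that recovery by calling bfa_fcs_rport_logo_imp().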
+ */ +void +bfa_cb_itnim_sler(void *cb_arg) +{ + struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; + + itnim->stats.sler++; + bfa_trc(itnim->fcs, itnim->rport->pwwn); + bfa_fcs_rport_logo_imp(itnim->rport); +} + +struct bfa_fcs_itnim_s * +bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_rport_s *rport; + rport = bfa_fcs_rport_lookup(port, rpwwn); + + if (!rport) + return NULL; + + bfa_assert(rport->itnim != NULL); + return rport->itnim; +} + +bfa_status_t +bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_attr_s *attr) +{ + struct bfa_fcs_itnim_s *itnim = NULL; + + itnim = bfa_fcs_itnim_lookup(port, rpwwn); + + if (itnim == NULL) + return BFA_STATUS_NO_FCPIM_NEXUS; + + attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); + attr->retry = itnim->seq_rec; + attr->rec_support = itnim->rec_support; + attr->conf_comp = itnim->conf_comp; + attr->task_retry_id = itnim->task_retry_id; + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, + struct bfa_itnim_stats_s *stats) +{ + struct bfa_fcs_itnim_s *itnim = NULL; + + bfa_assert(port != NULL); + + itnim = bfa_fcs_itnim_lookup(port, rpwwn); + + if (itnim == NULL) + return BFA_STATUS_NO_FCPIM_NEXUS; + + bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_itnim_s *itnim = NULL; + + bfa_assert(port != NULL); + + itnim = bfa_fcs_itnim_lookup(port, rpwwn); + + if (itnim == NULL) + return BFA_STATUS_NO_FCPIM_NEXUS; + + bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); + return BFA_STATUS_OK; +} + +void +bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, + struct fchs_s *fchs, u16 len) +{ + struct fc_els_cmd_s *els_cmd; + + bfa_trc(itnim->fcs, fchs->type); + + if (fchs->type != FC_TYPE_ELS) + return; + + els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_trc(itnim->fcs, els_cmd->els_code); + + switch (els_cmd->els_code) { + case FC_ELS_PRLO: + bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id); + break; + + default: + bfa_assert(0); + } +} diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 35df20e68a52..b522bf30247a 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
* All rights reserved * www.brocade.com * @@ -16,23 +16,13 @@ */ /** - * bfa_fcs_port.c BFA FCS port + * bfa_fcs_lport.c BFA FCS port */ -#include -#include -#include -#include -#include -#include -#include "fcs.h" -#include "fcs_lport.h" -#include "fcs_vport.h" -#include "fcs_rport.h" -#include "fcs_fcxp.h" -#include "fcs_trcmod.h" -#include "lport_priv.h" -#include +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" +#include "bfa_fc.h" +#include "bfad_drv.h" BFA_TRC_FILE(FCS, PORT); @@ -40,49 +30,53 @@ BFA_TRC_FILE(FCS, PORT); * Forward declarations */ -static void bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port, - enum bfa_lport_aen_event event); -static void bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, - struct fchs_s *rx_fchs, u8 reason_code, - u8 reason_code_expl); -static void bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, - struct fchs_s *rx_fchs, - struct fc_logi_s *plogi); -static void bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port); -static void bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port); -static void bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port); -static void bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port); -static void bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port); -static void bfa_fcs_port_deleted(struct bfa_fcs_port_s *port); -static void bfa_fcs_port_echo(struct bfa_fcs_port_s *port, +static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, u8 reason_code, + u8 reason_code_expl); +static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, struct fc_logi_s *plogi); +static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_echo_s *echo, u16 len); -static void bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, +static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, struct fc_rnid_cmd_s *rnid, u16 len); -static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, +static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, struct fc_rnid_general_topology_data_s *gen_topo_data); +static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port); + +static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port); +static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port); + static struct { - void (*init) (struct bfa_fcs_port_s *port); - void (*online) (struct bfa_fcs_port_s *port); - void (*offline) (struct bfa_fcs_port_s *port); + void (*init) (struct bfa_fcs_lport_s *port); + void (*online) (struct bfa_fcs_lport_s *port); + void (*offline) (struct bfa_fcs_lport_s *port); } __port_action[] = { { - bfa_fcs_port_unknown_init, bfa_fcs_port_unknown_online, - bfa_fcs_port_unknown_offline}, { - bfa_fcs_port_fab_init, bfa_fcs_port_fab_online, - bfa_fcs_port_fab_offline}, { - bfa_fcs_port_loop_init, bfa_fcs_port_loop_online, - 
bfa_fcs_port_loop_offline}, { -bfa_fcs_port_n2n_init, bfa_fcs_port_n2n_online, - bfa_fcs_port_n2n_offline},}; + bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online, + bfa_fcs_lport_unknown_offline}, { + bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, + bfa_fcs_lport_fab_offline}, { + bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, + bfa_fcs_lport_n2n_offline}, + }; /** * fcs_port_sm FCS logical port state machine */ -enum bfa_fcs_port_event { +enum bfa_fcs_lport_event { BFA_FCS_PORT_SM_CREATE = 1, BFA_FCS_PORT_SM_ONLINE = 2, BFA_FCS_PORT_SM_OFFLINE = 3, @@ -90,27 +84,28 @@ enum bfa_fcs_port_event { BFA_FCS_PORT_SM_DELRPORT = 5, }; -static void bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event); -static void bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event); -static void bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event); -static void bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event); -static void bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event); +static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); +static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event); static void -bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event) +bfa_fcs_lport_sm_uninit( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_CREATE: - bfa_sm_set_state(port, bfa_fcs_port_sm_init); + bfa_sm_set_state(port, bfa_fcs_lport_sm_init); break; default: @@ -119,20 +114,21 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, } static void -bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event) +bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_ONLINE: - bfa_sm_set_state(port, bfa_fcs_port_sm_online); - bfa_fcs_port_online_actions(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_online); + bfa_fcs_lport_online_actions(port); break; case BFA_FCS_PORT_SM_DELETE: - bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); - bfa_fcs_port_deleted(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); break; case BFA_FCS_PORT_SM_OFFLINE: @@ -144,19 +140,20 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event) } static void -bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event) +bfa_fcs_lport_sm_online( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) { struct bfa_fcs_rport_s *rport; - struct list_head *qe, *qen; + struct list_head *qe, *qen; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_OFFLINE: - bfa_sm_set_state(port, bfa_fcs_port_sm_offline); - bfa_fcs_port_offline_actions(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_offline); + bfa_fcs_lport_offline_actions(port); break; case 
BFA_FCS_PORT_SM_DELETE: @@ -164,12 +161,12 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, __port_action[port->fabric->fab_type].offline(port); if (port->num_rports == 0) { - bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); - bfa_fcs_port_deleted(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); } else { - bfa_sm_set_state(port, bfa_fcs_port_sm_deleting); + bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); list_for_each_safe(qe, qen, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; + rport = (struct bfa_fcs_rport_s *) qe; bfa_fcs_rport_delete(rport); } } @@ -184,29 +181,30 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, } static void -bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event) +bfa_fcs_lport_sm_offline( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) { struct bfa_fcs_rport_s *rport; - struct list_head *qe, *qen; + struct list_head *qe, *qen; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case BFA_FCS_PORT_SM_ONLINE: - bfa_sm_set_state(port, bfa_fcs_port_sm_online); - bfa_fcs_port_online_actions(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_online); + bfa_fcs_lport_online_actions(port); break; case BFA_FCS_PORT_SM_DELETE: if (port->num_rports == 0) { - bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); - bfa_fcs_port_deleted(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); } else { - bfa_sm_set_state(port, bfa_fcs_port_sm_deleting); + bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); list_for_each_safe(qe, qen, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; + rport = (struct bfa_fcs_rport_s *) qe; bfa_fcs_rport_delete(rport); } } @@ -222,8 +220,9 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, } static void -bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, - enum bfa_fcs_port_event event) +bfa_fcs_lport_sm_deleting( + struct bfa_fcs_lport_s *port, + enum bfa_fcs_lport_event event) { bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); @@ -231,8 +230,8 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, switch (event) { case BFA_FCS_PORT_SM_DELRPORT: if (port->num_rports == 0) { - bfa_sm_set_state(port, bfa_fcs_port_sm_uninit); - bfa_fcs_port_deleted(port); + bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit); + bfa_fcs_lport_deleted(port); } break; @@ -241,74 +240,44 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, } } - - /** * fcs_port_pvt */ -/** - * Send AEN notification - */ -static void -bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port, - enum bfa_lport_aen_event event) -{ - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = port->fcs->logm; - enum bfa_port_role role = port->port_cfg.roles; - wwn_t lpwwn = bfa_fcs_port_get_pwwn(port); - char lpwwn_ptr[BFA_STRING_32]; - char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] = - { "Initiator", "Target", "IPFC" }; - - wwn2str(lpwwn_ptr, lpwwn); - - bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); - - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr, - role_str[role/2]); - - aen_data.lport.vf_id = port->fabric->vf_id; - aen_data.lport.roles = role; - aen_data.lport.ppwwn = - bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs)); - aen_data.lport.lpwwn = lpwwn; -} - /* * Send a LS reject */ static void -bfa_fcs_port_send_ls_rjt(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, +bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, u8 
reason_code, u8 reason_code_expl) { - struct fchs_s fchs; + struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; - int len; + int len; + bfa_trc(port->fcs, rx_fchs->d_id); bfa_trc(port->fcs, rx_fchs->s_id); fcxp = bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) return; - len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, - reason_code, reason_code_expl); + len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, reason_code, reason_code_expl); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, - BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, - FC_MAX_PDUSZ, 0); + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); } /** * Process incoming plogi from a remote port. */ static void -bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi) +bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs, struct fc_logi_s *plogi) { struct bfa_fcs_rport_s *rport; @@ -328,46 +297,40 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, /* * send a LS reject */ - bfa_fcs_port_send_ls_rjt(port, rx_fchs, - FC_LS_RJT_RSN_PROTOCOL_ERROR, - FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS); + bfa_fcs_lport_send_ls_rjt(port, rx_fchs, + FC_LS_RJT_RSN_PROTOCOL_ERROR, + FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS); return; } /** -* Direct Attach P2P mode : verify address assigned by the r-port. + * Direct Attach P2P mode : verify address assigned by the r-port. */ - if ((!bfa_fcs_fabric_is_switched(port->fabric)) - && - (memcmp - ((void *)&bfa_fcs_port_get_pwwn(port), (void *)&plogi->port_name, - sizeof(wwn_t)) < 0)) { + if ((!bfa_fcs_fabric_is_switched(port->fabric)) && + (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), + (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) { - /* - * Address assigned to us cannot be a WKA - */ - bfa_fcs_port_send_ls_rjt(port, rx_fchs, + /* Address assigned to us cannot be a WKA */ + bfa_fcs_lport_send_ls_rjt(port, rx_fchs, FC_LS_RJT_RSN_PROTOCOL_ERROR, FC_LS_RJT_EXP_INVALID_NPORT_ID); return; } - port->pid = rx_fchs->d_id; + port->pid = rx_fchs->d_id; } /** * First, check if we know the device by pwwn. */ - rport = bfa_fcs_port_get_rport_by_pwwn(port, plogi->port_name); + rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); if (rport) { /** - * Direct Attach P2P mode: handle address assigned by the rport. - */ - if ((!bfa_fcs_fabric_is_switched(port->fabric)) - && - (memcmp - ((void *)&bfa_fcs_port_get_pwwn(port), - (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { - port->pid = rx_fchs->d_id; + * Direct Attach P2P mode : handle address assigned by r-port. + */ + if ((!bfa_fcs_fabric_is_switched(port->fabric)) && + (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), + (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { + port->pid = rx_fchs->d_id; rport->pid = rx_fchs->s_id; } bfa_fcs_rport_plogi(rport, rx_fchs, plogi); @@ -377,7 +340,7 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, /** * Next, lookup rport by PID. */ - rport = bfa_fcs_port_get_rport_by_pid(port, rx_fchs->s_id); + rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); if (!rport) { /** * Inbound PLOGI from a new device. @@ -416,39 +379,40 @@ bfa_fcs_port_plogi(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, * Since it does not require a login, it is processed here. 
*/ static void -bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, - struct fc_echo_s *echo, u16 rx_len) +bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + struct fc_echo_s *echo, u16 rx_len) { - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - struct bfa_rport_s *bfa_rport = NULL; - int len, pyld_len; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len, pyld_len; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); - bfa_trc(port->fcs, rx_len); fcxp = bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) return; - len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id); + len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id); /* * Copy the payload (if any) from the echo frame */ pyld_len = rx_len - sizeof(struct fchs_s); + bfa_trc(port->fcs, rx_len); bfa_trc(port->fcs, pyld_len); if (pyld_len > len) memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) + - sizeof(struct fc_echo_s), (echo + 1), - (pyld_len - sizeof(struct fc_echo_s))); + sizeof(struct fc_echo_s), (echo + 1), + (pyld_len - sizeof(struct fc_echo_s))); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, - BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL, - FC_MAX_PDUSZ, 0); + BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); } /* @@ -456,16 +420,16 @@ bfa_fcs_port_echo(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, * Since it does not require a login, it is processed here. */ static void -bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, - struct fc_rnid_cmd_s *rnid, u16 rx_len) +bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, + struct fc_rnid_cmd_s *rnid, u16 rx_len) { struct fc_rnid_common_id_data_s common_id_data; struct fc_rnid_general_topology_data_s gen_topo_data; - struct fchs_s fchs; + struct fchs_s fchs; struct bfa_fcxp_s *fcxp; struct bfa_rport_s *bfa_rport = NULL; - u16 len; - u32 data_format; + u16 len; + u32 data_format; bfa_trc(port->fcs, rx_fchs->s_id); bfa_trc(port->fcs, rx_fchs->d_id); @@ -495,28 +459,26 @@ bfa_fcs_port_rnid(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, /* * Copy the Node Id Info */ - common_id_data.port_name = bfa_fcs_port_get_pwwn(port); - common_id_data.node_name = bfa_fcs_port_get_nwwn(port); + common_id_data.port_name = bfa_fcs_lport_get_pwwn(port); + common_id_data.node_name = bfa_fcs_lport_get_nwwn(port); - len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, - data_format, &common_id_data, &gen_topo_data); + len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, data_format, &common_id_data, + &gen_topo_data); bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, - BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, - FC_MAX_PDUSZ, 0); - - return; + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); } /* * Fill out General Topolpgy Discovery Data for RNID ELS. 
*/ static void -bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, +bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, struct fc_rnid_general_topology_data_s *gen_topo_data) { - bfa_os_memset(gen_topo_data, 0, sizeof(struct fc_rnid_general_topology_data_s)); @@ -526,76 +488,111 @@ bfa_fs_port_get_gen_topo_data(struct bfa_fcs_port_s *port, } static void -bfa_fcs_port_online_actions(struct bfa_fcs_port_s *port) +bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port) { + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + bfa_trc(port->fcs, port->fabric->oper_type); __port_action[port->fabric->fab_type].init(port); __port_action[port->fabric->fab_type].online(port); - bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_ONLINE); - bfa_fcb_port_online(port->fcs->bfad, port->port_cfg.roles, - port->fabric->vf_drv, (port->vport == NULL) ? - NULL : port->vport->vport_drv); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + BFA_LOG(KERN_INFO, bfad, log_level, + "Logical port online: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + + bfad->bfad_flags |= BFAD_PORT_ONLINE; } static void -bfa_fcs_port_offline_actions(struct bfa_fcs_port_s *port) +bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port) { - struct list_head *qe, *qen; + struct list_head *qe, *qen; struct bfa_fcs_rport_s *rport; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; bfa_trc(port->fcs, port->fabric->oper_type); __port_action[port->fabric->fab_type].offline(port); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) - bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DISCONNECT); + BFA_LOG(KERN_ERR, bfad, log_level, + "Logical port lost fabric connectivity: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); else - bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_OFFLINE); - bfa_fcb_port_offline(port->fcs->bfad, port->port_cfg.roles, - port->fabric->vf_drv, - (port->vport == NULL) ? 
NULL : port->vport->vport_drv); + BFA_LOG(KERN_INFO, bfad, log_level, + "Logical port taken offline: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); list_for_each_safe(qe, qen, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; + rport = (struct bfa_fcs_rport_s *) qe; bfa_fcs_rport_offline(rport); } } static void -bfa_fcs_port_unknown_init(struct bfa_fcs_port_s *port) +bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port) { bfa_assert(0); } static void -bfa_fcs_port_unknown_online(struct bfa_fcs_port_s *port) +bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port) { bfa_assert(0); } static void -bfa_fcs_port_unknown_offline(struct bfa_fcs_port_s *port) +bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port) { bfa_assert(0); } static void -bfa_fcs_port_deleted(struct bfa_fcs_port_s *port) +bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs) { - bfa_fcs_port_aen_post(port, BFA_LPORT_AEN_DELETE); + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; - /* - * Base port will be deleted by the OS driver - */ + bfa_trc(port->fcs, rx_fchs->d_id); + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, 0); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} +static void +bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port) +{ + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + BFA_LOG(KERN_INFO, bfad, log_level, + "Logical port deleted: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); + + /* Base port will be deleted by the OS driver */ if (port->vport) { - bfa_fcb_port_delete(port->fcs->bfad, port->port_cfg.roles, - port->fabric->vf_drv, - port->vport ? port->vport->vport_drv : NULL); + bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles, + port->fabric->vf_drv, + port->vport ? port->vport->vport_drv : NULL); bfa_fcs_vport_delete_comp(port->vport); } else { - bfa_fcs_fabric_port_delete_comp(port->fabric); + bfa_fcs_fabric_port_delete_comp(port->fabric); } } @@ -608,7 +605,7 @@ bfa_fcs_port_deleted(struct bfa_fcs_port_s *port) * Module initialization */ void -bfa_fcs_port_modinit(struct bfa_fcs_s *fcs) +bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs) { } @@ -617,25 +614,25 @@ bfa_fcs_port_modinit(struct bfa_fcs_s *fcs) * Module cleanup */ void -bfa_fcs_port_modexit(struct bfa_fcs_s *fcs) +bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs) { bfa_fcs_modexit_comp(fcs); } /** - * Unsolicited frame receive handling. + * Unsolicited frame receive handling. 
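+ * Dispatches incoming PLOGI, ECHO, RNID and ABTS frames, matches other
+ * frames to an rport by source PID, and rejects unhandled ELS commands
+ * with an LS_RJT.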
*/ void -bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, - u16 len) +bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, + struct fchs_s *fchs, u16 len) { - u32 pid = fchs->s_id; + u32 pid = fchs->s_id; struct bfa_fcs_rport_s *rport = NULL; - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_stats(lport, uf_recvs); - if (!bfa_fcs_port_is_online(lport)) { + if (!bfa_fcs_lport_is_online(lport)) { bfa_stats(lport, uf_recv_drops); return; } @@ -648,7 +645,7 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_PLOGI)) { - bfa_fcs_port_plogi(lport, fchs, (struct fc_logi_s *) els_cmd); + bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd); return; } @@ -656,8 +653,8 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, * Handle ECHO separately. */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) { - bfa_fcs_port_echo(lport, fchs, - (struct fc_echo_s *) els_cmd, len); + bfa_fcs_lport_echo(lport, fchs, + (struct fc_echo_s *)els_cmd, len); return; } @@ -665,15 +662,21 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, * Handle RNID separately. */ if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) { - bfa_fcs_port_rnid(lport, fchs, + bfa_fcs_lport_rnid(lport, fchs, (struct fc_rnid_cmd_s *) els_cmd, len); return; } + if (fchs->type == FC_TYPE_BLS) { + if ((fchs->routing == FC_RTG_BASIC_LINK) && + (fchs->cat_info == FC_CAT_ABTS)) + bfa_fcs_lport_abts_acc(lport, fchs); + return; + } /** * look for a matching remote port ID */ - rport = bfa_fcs_port_get_rport_by_pid(lport, pid); + rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); if (rport) { bfa_trc(rport->fcs, fchs->s_id); bfa_trc(rport->fcs, fchs->d_id); @@ -694,7 +697,7 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, bfa_trc(lport->fcs, els_cmd->els_code); if (els_cmd->els_code == FC_ELS_RSCN) { - bfa_fcs_port_scn_process_rscn(lport, fchs, len); + bfa_fcs_lport_scn_process_rscn(lport, fchs, len); return; } @@ -702,7 +705,6 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, /** * @todo Handle LOGO frames received. */ - bfa_trc(lport->fcs, els_cmd->els_code); return; } @@ -710,14 +712,13 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, /** * @todo Handle PRLI frames received. */ - bfa_trc(lport->fcs, els_cmd->els_code); return; } /** * Unhandled ELS frames. Send a LS_RJT. 
*/ - bfa_fcs_port_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, + bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, FC_LS_RJT_EXP_NO_ADDL_INFO); } @@ -726,13 +727,13 @@ bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, * PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * -bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid) +bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid) { struct bfa_fcs_rport_s *rport; - struct list_head *qe; + struct list_head *qe; list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; + rport = (struct bfa_fcs_rport_s *) qe; if (rport->pid == pid) return rport; } @@ -745,13 +746,13 @@ bfa_fcs_port_get_rport_by_pid(struct bfa_fcs_port_s *port, u32 pid) * PWWN based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * -bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn) +bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) { struct bfa_fcs_rport_s *rport; - struct list_head *qe; + struct list_head *qe; list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; + rport = (struct bfa_fcs_rport_s *) qe; if (wwn_is_equal(rport->pwwn, pwwn)) return rport; } @@ -764,13 +765,13 @@ bfa_fcs_port_get_rport_by_pwwn(struct bfa_fcs_port_s *port, wwn_t pwwn) * NWWN based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * -bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn) +bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) { struct bfa_fcs_rport_s *rport; - struct list_head *qe; + struct list_head *qe; list_for_each(qe, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; + rport = (struct bfa_fcs_rport_s *) qe; if (wwn_is_equal(rport->nwwn, nwwn)) return rport; } @@ -783,8 +784,9 @@ bfa_fcs_port_get_rport_by_nwwn(struct bfa_fcs_port_s *port, wwn_t nwwn) * Called by rport module when new rports are discovered. */ void -bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port, - struct bfa_fcs_rport_s *rport) +bfa_fcs_lport_add_rport( + struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport) { list_add_tail(&rport->qe, &port->rport_q); port->num_rports++; @@ -794,8 +796,9 @@ bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port, * Called by rport module to when rports are deleted. */ void -bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port, - struct bfa_fcs_rport_s *rport) +bfa_fcs_lport_del_rport( + struct bfa_fcs_lport_s *port, + struct bfa_fcs_rport_s *rport) { bfa_assert(bfa_q_is_on_q(&port->rport_q, rport)); list_del(&rport->qe); @@ -809,7 +812,7 @@ bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port, * Called by vport for virtual ports when FDISC is complete. */ void -bfa_fcs_port_online(struct bfa_fcs_port_s *port) +bfa_fcs_lport_online(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); } @@ -819,7 +822,7 @@ bfa_fcs_port_online(struct bfa_fcs_port_s *port) * Called by vport for virtual ports when virtual port becomes offline. */ void -bfa_fcs_port_offline(struct bfa_fcs_port_s *port) +bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); } @@ -831,40 +834,32 @@ bfa_fcs_port_offline(struct bfa_fcs_port_s *port) * bfa_fcs_vport_delete_comp() for vports on completion. 
*/ void -bfa_fcs_port_delete(struct bfa_fcs_port_s *port) +bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port) { bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); } -/** - * Called by fabric in private loop topology to process LIP event. - */ -void -bfa_fcs_port_lip(struct bfa_fcs_port_s *port) -{ -} - /** * Return TRUE if port is online, else return FALSE */ bfa_boolean_t -bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) +bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port) { - return bfa_sm_cmp_state(port, bfa_fcs_port_sm_online); + return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); } /** - * Attach time initialization of logical ports. + * Attach time initialization of logical ports. */ void -bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, - uint16_t vf_id, struct bfa_fcs_vport_s *vport) +bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_fcs_vport_s *vport) { lport->fcs = fcs; lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); lport->vport = vport; lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : - bfa_lps_get_tag(lport->fabric->lps); + bfa_lps_get_tag(lport->fabric->lps); INIT_LIST_HEAD(&lport->rport_q); lport->num_rports = 0; @@ -876,21 +871,26 @@ bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, */ void -bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, - struct bfa_port_cfg_s *port_cfg) +bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, + struct bfa_lport_cfg_s *port_cfg) { struct bfa_fcs_vport_s *vport = lport->vport; + struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; bfa_os_assign(lport->port_cfg, *port_cfg); - lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport, - lport->port_cfg.roles, - lport->fabric->vf_drv, - vport ? vport->vport_drv : NULL); + lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport, + lport->port_cfg.roles, + lport->fabric->vf_drv, + vport ? 
vport->vport_drv : NULL); - bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport)); + BFA_LOG(KERN_INFO, bfad, log_level, + "New logical port created: WWN = %s Role = %s\n", + lpwwn_buf, "Initiator"); - bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); + bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit); bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } @@ -899,10 +899,11 @@ bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, */ void -bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, - struct bfa_port_attr_s *port_attr) +bfa_fcs_lport_get_attr( + struct bfa_fcs_lport_s *port, + struct bfa_lport_attr_s *port_attr) { - if (bfa_sm_cmp_state(port, bfa_fcs_port_sm_online)) + if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) port_attr->pid = port->pid; else port_attr->pid = 0; @@ -913,25 +914,4895 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); port_attr->authfail = - bfa_fcs_fabric_is_auth_failed(port->fabric); - port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); + bfa_fcs_fabric_is_auth_failed(port->fabric); + port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port); memcpy(port_attr->fabric_ip_addr, - bfa_fcs_port_get_fabric_ipaddr(port), - BFA_FCS_FABRIC_IPADDR_SZ); + bfa_fcs_lport_get_fabric_ipaddr(port), + BFA_FCS_FABRIC_IPADDR_SZ); if (port->vport != NULL) { - port_attr->port_type = BFA_PPORT_TYPE_VPORT; + port_attr->port_type = BFA_PORT_TYPE_VPORT; port_attr->fpma_mac = bfa_lps_get_lp_mac(port->vport->lps); - } else + } else { port_attr->fpma_mac = bfa_lps_get_lp_mac(port->fabric->lps); - + } } else { - port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN; - port_attr->state = BFA_PORT_UNINIT; + port_attr->port_type = BFA_PORT_TYPE_UNKNOWN; + port_attr->state = BFA_LPORT_UNINIT; + } +} + +/** + * bfa_fcs_lport_fab port fab functions + */ + +/** + * Called by port to initialize fabric services of the base port. + */ +static void +bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_ns_init(port); + bfa_fcs_lport_scn_init(port); + bfa_fcs_lport_ms_init(port); +} + +/** + * Called by port to notify transition to online state. + */ +static void +bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_ns_online(port); + bfa_fcs_lport_scn_online(port); +} + +/** + * Called by port to notify transition to offline state. + */ +static void +bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port) +{ + bfa_fcs_lport_ns_offline(port); + bfa_fcs_lport_scn_offline(port); + bfa_fcs_lport_ms_offline(port); +} + +/** + * bfa_fcs_lport_n2n functions + */ + +/** + * Called by fcs/port to initialize N2N topology. + */ +static void +bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port) +{ +} + +/** + * Called by fcs/port to notify transition to online state. + */ +static void +bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n; + struct bfa_lport_cfg_s *pcfg = &port->port_cfg; + struct bfa_fcs_rport_s *rport; + + bfa_trc(port->fcs, pcfg->pwwn); + + /* + * If our PWWN is > than that of the r-port, we have to initiate PLOGI + * and assign an Address. if not, we need to wait for its PLOGI. + * + * If our PWWN is < than that of the remote port, it will send a PLOGI + * with the PIDs assigned. The rport state machine take care of this + * incoming PLOGI. 
+ */ + if (memcmp + ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, + sizeof(wwn_t)) > 0) { + port->pid = N2N_LOCAL_PID; + /** + * First, check if we know the device by pwwn. + */ + rport = bfa_fcs_lport_get_rport_by_pwwn(port, + n2n_port->rem_port_wwn); + if (rport) { + bfa_trc(port->fcs, rport->pid); + bfa_trc(port->fcs, rport->pwwn); + rport->pid = N2N_REMOTE_PID; + bfa_fcs_rport_online(rport); + return; + } + + /* + * In n2n there can be only one rport. Delete the old one + * whose pid should be zero, because it is offline. + */ + if (port->num_rports > 0) { + rport = bfa_fcs_lport_get_rport_by_pid(port, 0); + bfa_assert(rport != NULL); + if (rport) { + bfa_trc(port->fcs, rport->pwwn); + bfa_fcs_rport_delete(rport); + } + } + bfa_fcs_rport_create(port, N2N_REMOTE_PID); + } +} + +/** + * Called by fcs/port to notify transition to offline state. + */ +static void +bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n; + + bfa_trc(port->fcs, port->pid); + port->pid = 0; + n2n_port->rem_port_wwn = 0; + n2n_port->reply_oxid = 0; +} + +#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 + +/* + * forward declarations + */ +static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_fdmi_timeout(void *arg); +static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld); +static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld); +static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld); +static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s * + fdmi, u8 *pyld); +static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_hba_attr_s *hba_attr); +static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_port_attr_s *port_attr); +/** + * fcs_fdmi_sm FCS FDMI state machine + */ + +/** + * FDMI State Machine events + */ +enum port_fdmi_event { + FDMISM_EVENT_PORT_ONLINE = 1, + FDMISM_EVENT_PORT_OFFLINE = 2, + FDMISM_EVENT_RSP_OK = 4, + FDMISM_EVENT_RSP_ERROR = 5, + FDMISM_EVENT_TIMEOUT = 6, + FDMISM_EVENT_RHBA_SENT = 7, + FDMISM_EVENT_RPRT_SENT = 8, + FDMISM_EVENT_RPA_SENT = 9, +}; + +static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_sending_rhba( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rhba_retry( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void 
bfa_fcs_lport_fdmi_sm_sending_rprt( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rprt_retry( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_sending_rpa( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_rpa_retry( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +static void bfa_fcs_lport_fdmi_sm_disabled( + struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event); +/** + * Start in offline state - awaiting MS to send start. + */ +static void +bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + fdmi->retry_cnt = 0; + + switch (event) { + case FDMISM_EVENT_PORT_ONLINE: + if (port->vport) { + /* + * For Vports, register a new port. + */ + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_sending_rprt); + bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL); + } else { + /* + * For a base port, we should first register the HBA + * atribute. The HBA attribute also contains the base + * port registration. + */ + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_sending_rhba); + bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL); + } + break; + + case FDMISM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RHBA_SENT: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->fcxp_wqe); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RSP_ERROR: + /* + * if max retries have not been reached, start timer for a + * delayed retry + */ + if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_rhba_retry); + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->timer, + bfa_fcs_lport_fdmi_timeout, fdmi, + BFA_FCS_RETRY_TIMEOUT); + } else { + /* + * set state to offline + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + } + break; + + case FDMISM_EVENT_RSP_OK: + /* + * Initiate Register Port Attributes + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa); + fdmi->retry_cnt = 0; + bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(fdmi->fcxp); + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static 
void +bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba); + bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_timer_stop(&fdmi->timer); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +/* +* RPRT : Register Port + */ +static void +bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RPRT_SENT: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->fcxp_wqe); + break; + + default: + bfa_sm_fault(port->fcs, event); } +} + +static void +bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RSP_ERROR: + /* + * if max retries have not been reached, start timer for a + * delayed retry + */ + if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { + bfa_sm_set_state(fdmi, + bfa_fcs_lport_fdmi_sm_rprt_retry); + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->timer, + bfa_fcs_lport_fdmi_timeout, fdmi, + BFA_FCS_RETRY_TIMEOUT); + } else { + /* + * set state to offline + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + fdmi->retry_cnt = 0; + } + break; + + case FDMISM_EVENT_RSP_OK: + fdmi->retry_cnt = 0; + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(fdmi->fcxp); + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } } +static void +bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt); + bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_timer_stop(&fdmi->timer); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +/* + * Register Port Attributes + */ +static void +bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RPA_SENT: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->fcxp_wqe); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_RSP_ERROR: + /* + * if max retries have not been reached, start timer for a + * delayed retry + */ + if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry); + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), + &fdmi->timer, + bfa_fcs_lport_fdmi_timeout, fdmi, + BFA_FCS_RETRY_TIMEOUT); + } else { + /* + * set state to offline + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + fdmi->retry_cnt = 0; + } + break; + + case FDMISM_EVENT_RSP_OK: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online); + fdmi->retry_cnt = 0; + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(fdmi->fcxp); + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa); + bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL); + break; + + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + bfa_timer_stop(&fdmi->timer); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + switch (event) { + case FDMISM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} +/** + * FDMI is disabled state. + */ +static void +bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + /* No op State. It can only be enabled at Driver Init. */ +} + +/** +* RHBA : Register HBA Attributes. 
+ */ +static void +bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fchs_s fchs; + int len, attr_len; + struct bfa_fcxp_s *fcxp; + u8 *pyld; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, + bfa_fcs_lport_fdmi_send_rhba, fdmi); + return; + } + fdmi->fcxp = fcxp; + + pyld = bfa_fcxp_get_reqbuf(fcxp); + bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); + + len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), + FDMI_RHBA); + + attr_len = + bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + + 1)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, (len + attr_len), &fchs, + bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT); +} + +static u16 +bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct bfa_fcs_fdmi_hba_attr_s hba_attr; + struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; + struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld; + struct fdmi_attr_s *attr; + u8 *curr_ptr; + u16 len, count; + + /* + * get hba attributes + */ + bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); + + rhba->hba_id = bfa_fcs_lport_get_pwwn(port); + rhba->port_list.num_ports = bfa_os_htonl(1); + rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port); + + len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); + + count = 0; + len += sizeof(rhba->hba_attr_blk.attr_count); + + /* + * fill out the invididual entries of the HBA attrib Block + */ + curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr; + + /* + * Node Name + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME); + attr->len = sizeof(wwn_t); + memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len); + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Manufacturer + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER); + attr->len = (u16) strlen(fcs_hba_attr->manufacturer); + memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Serial Number + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM); + attr->len = (u16) strlen(fcs_hba_attr->serial_num); + memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Model + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL); + attr->len = (u16) strlen(fcs_hba_attr->model); + 
memcpy(attr->value, fcs_hba_attr->model, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Model Desc + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC); + attr->len = (u16) strlen(fcs_hba_attr->model_desc); + memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * H/W Version + */ + if (fcs_hba_attr->hw_version[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION); + attr->len = (u16) strlen(fcs_hba_attr->hw_version); + memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + } + + /* + * Driver Version + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION); + attr->len = (u16) strlen(fcs_hba_attr->driver_version); + memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len;; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Option Rom Version + */ + if (fcs_hba_attr->option_rom_ver[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION); + attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); + memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + } + + /* + * f/w Version = driver version + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION); + attr->len = (u16) strlen(fcs_hba_attr->driver_version); + memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * OS Name + */ + if (fcs_hba_attr->os_name[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME); + attr->len = (u16) strlen(fcs_hba_attr->os_name); + memcpy(attr->value, fcs_hba_attr->os_name, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) 
+ sizeof(attr->len) + attr->len; + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + } + + /* + * MAX_CT_PAYLOAD + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT); + attr->len = sizeof(fcs_hba_attr->max_ct_pyld); + memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); + len += attr->len; + count++; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Update size of payload + */ + len += ((sizeof(attr->type) + + sizeof(attr->len)) * count); + + rhba->hba_attr_blk.attr_count = bfa_os_htonl(count); + return len; +} + +static void +bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = + (struct bfa_fcs_lport_fdmi_s *) cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); +} + +/** +* RPRT : Register Port + */ +static void +bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fchs_s fchs; + u16 len, attr_len; + struct bfa_fcxp_s *fcxp; + u8 *pyld; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, + bfa_fcs_lport_fdmi_send_rprt, fdmi); + return; + } + fdmi->fcxp = fcxp; + + pyld = bfa_fcxp_get_reqbuf(fcxp); + bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); + + len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), + FDMI_RPRT); + + attr_len = + bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + + 1)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len + attr_len, &fchs, + bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); +} + +/** + * This routine builds Port Attribute Block that used in RPA, RPRT commands. 
+ */ +static u16 +bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, + u8 *pyld) +{ + struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; + struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld; + struct fdmi_attr_s *attr; + u8 *curr_ptr; + u16 len; + u8 count = 0; + + /* + * get port attributes + */ + bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); + + len = sizeof(port_attrib->attr_count); + + /* + * fill out the invididual entries + */ + curr_ptr = (u8 *) &port_attrib->port_attr; + + /* + * FC4 Types + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES); + attr->len = sizeof(fcs_port_attr.supp_fc4_types); + memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + ++count; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * Supported Speed + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED); + attr->len = sizeof(fcs_port_attr.supp_speed); + memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + ++count; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * current Port Speed + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED); + attr->len = sizeof(fcs_port_attr.curr_speed); + memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + ++count; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * max frame size + */ + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE); + attr->len = sizeof(fcs_port_attr.max_frm_size); + memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + ++count; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + + /* + * OS Device Name + */ + if (fcs_port_attr.os_device_name[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME); + attr->len = (u16) strlen(fcs_port_attr.os_device_name); + memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + ++count; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + } + /* + * Host Name + */ + if (fcs_port_attr.host_name[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME); + attr->len = (u16) strlen(fcs_port_attr.host_name); + memcpy(attr->value, fcs_port_attr.host_name, attr->len); + attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable + *fields need + *to be 4 byte + *aligned */ + curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; + len += attr->len; + ++count; + attr->len = + bfa_os_htons(attr->len + sizeof(attr->type) + + sizeof(attr->len)); + } + + /* + * Update size of payload + */ + port_attrib->attr_count = bfa_os_htonl(count); + len += ((sizeof(attr->type) + + sizeof(attr->len)) * count); + return len; +} + 
+static u16 +bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld; + u16 len; + + rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs)); + rprt->port_name = bfa_fcs_lport_get_pwwn(port); + + len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi, + (u8 *) &rprt->port_attr_blk); + + len += sizeof(rprt->hba_id) + sizeof(rprt->port_name); + + return len; +} + +static void +bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = + (struct bfa_fcs_lport_fdmi_s *) cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); +} + +/** +* RPA : Register Port Attributes. + */ +static void +bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fchs_s fchs; + u16 len, attr_len; + struct bfa_fcxp_s *fcxp; + u8 *pyld; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, + bfa_fcs_lport_fdmi_send_rpa, fdmi); + return; + } + fdmi->fcxp = fcxp; + + pyld = bfa_fcxp_get_reqbuf(fcxp); + bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); + + len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), + FDMI_RPA); + + attr_len = + bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + + 1)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len + attr_len, &fchs, + bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT); +} + +static u16 +bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld; + u16 len; + + rpa->port_name = bfa_fcs_lport_get_pwwn(port); + + len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi, + (u8 *) &rpa->port_attr_blk); + + len += sizeof(rpa->port_name); + + return len; +} + +static void +bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = + (struct bfa_fcs_lport_fdmi_s *) cbarg; + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); +} + +static void +bfa_fcs_lport_fdmi_timeout(void *arg) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg; + + bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); +} + +void +bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_hba_attr_s *hba_attr) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; + + bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); + + bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, + hba_attr->manufacturer); + bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc, + hba_attr->serial_num); + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, + hba_attr->model); + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, + hba_attr->model_desc); + bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, + hba_attr->hw_version); + bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc, + hba_attr->option_rom_ver); + bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, + hba_attr->fw_version); + + strncpy(hba_attr->driver_version, (char *)driver_info->version, + sizeof(hba_attr->driver_version)); + + strncpy(hba_attr->os_name, driver_info->host_os_name, + sizeof(hba_attr->os_name)); + + /* + * If there is a patch level, append it + * to the os name along with a separator + */ + if (driver_info->host_os_patch[0] != '\0') { + strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, + sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); + strncat(hba_attr->os_name, 
driver_info->host_os_patch, + sizeof(driver_info->host_os_patch)); + } + + hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ); +} + +void +bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, + struct bfa_fcs_fdmi_port_attr_s *port_attr) +{ + struct bfa_fcs_lport_s *port = fdmi->ms->port; + struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; + struct bfa_port_attr_s pport_attr; + + bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); + + /* + * get pport attributes from hal + */ + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); + + /* + * get FC4 type Bitmask + */ + fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types); + + /* + * Supported Speeds + */ + port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS); + + /* + * Current Speed + */ + port_attr->curr_speed = bfa_os_htonl(pport_attr.speed); + + /* + * Max PDU Size. + */ + port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ); + + /* + * OS device Name + */ + strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name, + sizeof(port_attr->os_device_name)); + + /* + * Host name + */ + strncpy(port_attr->host_name, (char *)driver_info->host_machine_name, + sizeof(port_attr->host_name)); + +} + + +void +bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; + + fdmi->ms = ms; + if (ms->port->fcs->fdmi_enabled) + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline); + else + bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled); +} + +void +bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; + + fdmi->ms = ms; + bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE); +} + +void +bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms) +{ + struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi; + + fdmi->ms = ms; + bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE); +} + +#define BFA_FCS_MS_CMD_MAX_RETRIES 2 + +/* + * forward declarations + */ +static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ms_timeout(void *arg); +static void bfa_fcs_lport_ms_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); + +static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ms_gmal_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ms_gfn_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +/** + * fcs_ms_sm FCS MS state machine + */ + +/** + * MS State Machine events + */ +enum port_ms_event { + MSSM_EVENT_PORT_ONLINE = 1, + MSSM_EVENT_PORT_OFFLINE = 2, + MSSM_EVENT_RSP_OK = 3, + MSSM_EVENT_RSP_ERROR = 4, + MSSM_EVENT_TIMEOUT = 5, + MSSM_EVENT_FCXP_SENT = 6, + MSSM_EVENT_PORT_FABRIC_RSCN = 7 +}; + +static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void 
bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event); +/** + * Start in offline state - awaiting NS to send start. + */ +static void +bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_PORT_ONLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending); + bfa_fcs_lport_ms_send_plogi(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_FCXP_SENT: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->fcxp_wqe); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry); + ms->port->stats.ms_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->timer, bfa_fcs_lport_ms_timeout, ms, + BFA_FCS_RETRY_TIMEOUT); + break; + + case MSSM_EVENT_RSP_OK: + /* + * since plogi is done, now invoke MS related sub-modules + */ + bfa_fcs_lport_fdmi_online(ms); + + /** + * if this is a Vport, go to online state. + */ + if (ms->port->vport) { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); + break; + } + + /* + * For a base port we need to get the + * switch's IP address. + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending); + bfa_fcs_lport_ms_send_gmal(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_discard(ms->fcxp); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending); + bfa_fcs_lport_ms_send_plogi(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_timer_stop(&ms->timer); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + break; + + case MSSM_EVENT_PORT_FABRIC_RSCN: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + ms->retry_cnt = 0; + bfa_fcs_lport_ms_send_gfn(ms, NULL); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_FCXP_SENT: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->fcxp_wqe); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry); + ms->port->stats.ms_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->timer, bfa_fcs_lport_ms_timeout, ms, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + bfa_fcs_lport_ms_send_gfn(ms, NULL); + ms->retry_cnt = 0; + } + break; + + case MSSM_EVENT_RSP_OK: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + bfa_fcs_lport_ms_send_gfn(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_discard(ms->fcxp); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending); + bfa_fcs_lport_ms_send_gmal(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_timer_stop(&ms->timer); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} +/** + * ms_pvt MS local functions + */ + +static void +bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ms_s *ms = ms_cbarg; + bfa_fcs_lport_t *port = ms->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, + bfa_fcs_lport_ms_send_gmal, ms); + return; + } + ms->fcxp = fcxp; + + len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), + bfa_lps_get_peer_nwwn(port->fabric->lps)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ms_gmal_response, (void *)ms, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; + bfa_fcs_lport_t *port = ms->port; + struct ct_hdr_s *cthdr = NULL; + struct fcgs_gmal_resp_s *gmal_resp; + struct fcgs_gmal_entry_s *gmal_entry; + u32 num_entries; + u8 *rsp_str; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); + + num_entries = bfa_os_ntohl(gmal_resp->ms_len); + if (num_entries == 0) { + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + /* + * The response could contain multiple Entries. + * Entries for SNMP interface, etc. + * We look for the entry with a telnet prefix. + * First "http://" entry refers to IP addr + */ + + gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma; + while (num_entries > 0) { + if (strncmp(gmal_entry->prefix, + CT_GMAL_RESP_PREFIX_HTTP, + sizeof(gmal_entry->prefix)) == 0) { + + /* + * if the IP address is terminating with a '/', + * remove it. + * Byte 0 consists of the length of the string. 
+ */ + rsp_str = &(gmal_entry->prefix[0]); + if (rsp_str[gmal_entry->len-1] == '/') + rsp_str[gmal_entry->len-1] = 0; + + /* copy IP Address to fabric */ + strncpy(bfa_fcs_lport_get_fabric_ipaddr(port), + gmal_entry->ip_addr, + BFA_FCS_FABRIC_IPADDR_SZ); + break; + } else { + --num_entries; + ++gmal_entry; + } + } + + bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); +} + +static void +bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_FCXP_SENT: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->fcxp_wqe); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry); + ms->port->stats.ms_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), + &ms->timer, bfa_fcs_lport_ms_timeout, ms, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); + ms->retry_cnt = 0; + } + break; + + case MSSM_EVENT_RSP_OK: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_fcxp_discard(ms->fcxp); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, + enum port_ms_event event) +{ + bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); + bfa_trc(ms->port->fcs, event); + + switch (event) { + case MSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. Re-send + */ + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending); + bfa_fcs_lport_ms_send_gfn(ms, NULL); + break; + + case MSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + bfa_timer_stop(&ms->timer); + break; + + default: + bfa_sm_fault(ms->port->fcs, event); + } +} +/** + * ms_pvt MS local functions + */ + +static void +bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ms_s *ms = ms_cbarg; + bfa_fcs_lport_t *port = ms->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, + bfa_fcs_lport_ms_send_gfn, ms); + return; + } + ms->fcxp = fcxp; + + len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), + bfa_lps_get_peer_nwwn(port->fabric->lps)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ms_gfn_response, (void *)ms, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; + bfa_fcs_lport_t *port = ms->port; + struct ct_hdr_s *cthdr = NULL; + wwn_t *gfn_resp; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + gfn_resp = (wwn_t *)(cthdr + 1); + /* check if it has actually changed */ + if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port), + gfn_resp, sizeof(wwn_t)) != 0)) { + bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp); + } + bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); + return; + } + + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); +} + +/** + * ms_pvt MS local functions + */ + +static void +bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ms_s *ms = ms_cbarg; + struct bfa_fcs_lport_s *port = ms->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + port->stats.ms_plogi_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, + bfa_fcs_lport_ms_send_plogi, ms); + return; + } + ms->fcxp = fcxp; + + len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_os_hton3b(FC_MGMT_SERVER), + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ms_plogi_response, (void *)ms, + FC_MAX_PDUSZ, FC_ELS_TOV); + + port->stats.ms_plogi_sent++; + bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg; + struct bfa_fcs_lport_s *port = ms->port; + struct fc_els_cmd_s *els_cmd; + struct fc_ls_rjt_s *ls_rjt; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + port->stats.ms_plogi_rsp_err++; + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + switch (els_cmd->els_code) { + + case FC_ELS_ACC: + if (rsp_len < sizeof(struct fc_logi_s)) { + bfa_trc(port->fcs, rsp_len); + port->stats.ms_plogi_acc_err++; + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + break; + } + port->stats.ms_plogi_accepts++; + bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); + break; + + case FC_ELS_LS_RJT: + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(port->fcs, ls_rjt->reason_code); + bfa_trc(port->fcs, ls_rjt->reason_code_expl); + + port->stats.ms_rejects++; + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + break; + + default: + port->stats.ms_plogi_unknown_rsp++; + bfa_trc(port->fcs, els_cmd->els_code); + bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); + } +} + +static void +bfa_fcs_lport_ms_timeout(void *arg) +{ + struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg; + + ms->port->stats.ms_timeouts++; + bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT); +} + + +void +bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + ms->port = port; + bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline); + + /* + * Invoke init routines of sub modules. + */ + bfa_fcs_lport_fdmi_init(ms); +} + +void +bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + ms->port = port; + bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); + bfa_fcs_lport_fdmi_offline(ms); +} + +void +bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + ms->port = port; + bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE); +} +void +bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); + + /* todo. 
Handle this only when in Online state */ + if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online)) + bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); +} + +/** + * @page ns_sm_info VPORT NS State Machine + * + * @section ns_sm_interactions VPORT NS State Machine Interactions + * + * @section ns_sm VPORT NS State Machine + * img ns_sm.jpg + */ + +/* + * forward declarations + */ +static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_ns_timeout(void *arg); +static void bfa_fcs_lport_ns_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_ns_process_gidft_pids( + struct bfa_fcs_lport_s *port, + u32 *pid_buf, u32 n_pids); + +static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); +/** + * fcs_ns_sm FCS nameserver interface state machine + */ + +/** + * VPort NS State Machine events + */ +enum vport_ns_event { + NSSM_EVENT_PORT_ONLINE = 1, + NSSM_EVENT_PORT_OFFLINE = 2, + NSSM_EVENT_PLOGI_SENT = 3, + NSSM_EVENT_RSP_OK = 4, + NSSM_EVENT_RSP_ERROR = 5, + NSSM_EVENT_TIMEOUT = 6, + NSSM_EVENT_NS_QUERY = 7, + NSSM_EVENT_RSPNID_SENT = 8, + NSSM_EVENT_RFTID_SENT = 9, + NSSM_EVENT_RFFID_SENT = 10, + NSSM_EVENT_GIDFT_SENT = 11, +}; + +static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_rspn_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_rft_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static 
void bfa_fcs_lport_ns_sm_sending_rff_id( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_sending_gid_ft( + struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event); +/** + * Start in offline state - awaiting linkup + */ +static void +bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_PORT_ONLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending); + bfa_fcs_lport_ns_send_plogi(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_PLOGI_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); + bfa_fcs_lport_ns_send_rspn_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending); + bfa_fcs_lport_ns_send_plogi(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSPNID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id); + bfa_fcs_lport_ns_send_rft_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_fcxp_discard(ns->fcxp); + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + /* + * Retry Timer Expired. 
Re-send + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); + bfa_fcs_lport_ns_send_rspn_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RFTID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + /* Now move to register FC4 Features */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id); + bfa_fcs_lport_ns_send_rff_id(ns, NULL); + break; + + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id); + bfa_fcs_lport_ns_send_rft_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RFFID_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + + /* + * If min cfg mode is enabled, we donot initiate rport + * discovery with the fabric. Instead, we will retrieve the + * boot targets from HAL/FW. + */ + if (__fcs_min_cfg(ns->port->fcs)) { + bfa_fcs_lport_ns_boot_target_disc(ns->port); + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online); + return; + } + + /* + * If the port role is Initiator Mode issue NS query. + * If it is Target Mode, skip this and go to online. 
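+		 * In either case the management server (MS) state machine is
+		 * started below once RFF_ID has been accepted.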
+ */ + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { + bfa_sm_set_state(ns, + bfa_fcs_lport_ns_sm_sending_gid_ft); + bfa_fcs_lport_ns_send_gid_ft(ns, NULL); + } + /* + * kick off mgmt srvr state machine + */ + bfa_fcs_lport_ms_online(ns->port); + break; + + case NSSM_EVENT_RSP_ERROR: + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id); + bfa_fcs_lport_ns_send_rff_id(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} +static void +bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_GIDFT_SENT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->fcxp_wqe); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_RSP_OK: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online); + break; + + case NSSM_EVENT_RSP_ERROR: + /* + * TBD: for certain reject codes, we don't need to retry + */ + /* + * Start timer for a delayed retry + */ + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry); + ns->port->stats.ns_retries++; + bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), + &ns->timer, bfa_fcs_lport_ns_timeout, ns, + BFA_FCS_RETRY_TIMEOUT); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_fcxp_discard(ns->fcxp); + break; + + case NSSM_EVENT_NS_QUERY: + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_TIMEOUT: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft); + bfa_fcs_lport_ns_send_gid_ft(ns, NULL); + break; + + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + bfa_timer_stop(&ns->timer); + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + +static void +bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, + enum vport_ns_event event) +{ + bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); + bfa_trc(ns->port->fcs, event); + + switch (event) { + case NSSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); + 
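+		/*
+		 * No NS exchange or retry timer is outstanding in the online
+		 * state, so there is nothing else to cancel here.
+		 */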
break; + + case NSSM_EVENT_NS_QUERY: + /* + * If the port role is Initiator Mode issue NS query. + * If it is Target Mode, skip this and go to online. + */ + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { + bfa_sm_set_state(ns, + bfa_fcs_lport_ns_sm_sending_gid_ft); + bfa_fcs_lport_ns_send_gid_ft(ns, NULL); + }; + break; + + default: + bfa_sm_fault(ns->port->fcs, event); + } +} + + + +/** + * ns_pvt Nameserver local functions + */ + +static void +bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + +fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + port->stats.ns_plogi_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_plogi, ns); + return; + } + ns->fcxp = fcxp; + + len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_os_hton3b(FC_NAME_SERVER), + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_plogi_response, (void *)ns, + FC_MAX_PDUSZ, FC_ELS_TOV); + port->stats.ns_plogi_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT); +} + +static void +bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + /* struct fc_logi_s *plogi_resp; */ + struct fc_els_cmd_s *els_cmd; + struct fc_ls_rjt_s *ls_rjt; + + bfa_trc(port->fcs, req_status); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_plogi_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + switch (els_cmd->els_code) { + + case FC_ELS_ACC: + if (rsp_len < sizeof(struct fc_logi_s)) { + bfa_trc(port->fcs, rsp_len); + port->stats.ns_plogi_acc_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + break; + } + port->stats.ns_plogi_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + break; + + case FC_ELS_LS_RJT: + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(port->fcs, ls_rjt->reason_code); + bfa_trc(port->fcs, ls_rjt->reason_code_expl); + + port->stats.ns_rejects++; + + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + break; + + default: + port->stats.ns_plogi_unknown_rsp++; + bfa_trc(port->fcs, els_cmd->els_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + } +} + +/** + * Register the symbolic port name. + */ +static void +bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + u8 symbl[256]; + u8 *psymbl = &symbl[0]; + + bfa_os_memset(symbl, 0, sizeof(symbl)); + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + port->stats.ns_rspnid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rspn_id, ns); + return; + } + ns->fcxp = fcxp; + + /* + * for V-Port, form a Port Symbolic Name + */ + if (port->vport) { + /** + * For Vports, we append the vport's port symbolic name + * to that of the base port. + */ + + strncpy((char *)psymbl, + (char *) & + (bfa_fcs_lport_get_psym_name + (bfa_fcs_get_base_port(port->fcs))), + strlen((char *) & + bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port + (port->fcs)))); + + /* Ensure we have a null terminating string. */ + ((char *)psymbl)[strlen((char *) & + bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port + (port->fcs)))] = 0; + strncat((char *)psymbl, + (char *) &(bfa_fcs_lport_get_psym_name(port)), + strlen((char *) &bfa_fcs_lport_get_psym_name(port))); + } else { + psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port)); + } + + len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, psymbl); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rspn_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rspnid_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT); +} + +static void +bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rspnid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rspnid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rspnid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} + +/** + * Register FC4-Types + */ +static void +bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + port->stats.ns_rftid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rft_id, ns); + return; + } + ns->fcxp = fcxp; + + len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rft_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rftid_sent++; + bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT); +} + +static void +bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rftid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rftid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rftid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} + +/** + * Register FC4-Features : Should be done after RFT_ID + */ +static void +bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + u8 fc4_ftrs = 0; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + port->stats.ns_rffid_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_rff_id, ns); + return; + } + ns->fcxp = fcxp; + + if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) + fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; + + len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, + FC_TYPE_FCP, fc4_ftrs); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_rff_id_response, (void *)ns, + FC_MAX_PDUSZ, FC_FCCT_TOV); + + port->stats.ns_rffid_sent++; + bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT); +} + +static void +bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_rffid_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + port->stats.ns_rffid_accepts++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + return; + } + + port->stats.ns_rffid_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + + if (cthdr->reason_code == CT_RSN_NOT_SUPP) { + /* if this command is not supported, we don't retry */ + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + } else + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); +} +/** + * Query Fabric for FC4-Types Devices. + * +* TBD : Need to use a local (FCS private) response buffer, since the response + * can be larger than 2K. + */ +static void +bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_ns_s *ns = ns_cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + + fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + port->stats.ns_gidft_alloc_wait++; + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, + bfa_fcs_lport_ns_send_gid_ft, ns); + return; + } + ns->fcxp = fcxp; + + /* + * This query is only initiated for FCP initiator mode. 
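+	 * Since the accept payload can be larger than a single frame, the
+	 * response size passed to bfa_fcxp_send() below is
+	 * bfa_fcxp_get_maxrsp() rather than FC_MAX_PDUSZ.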
+ */ + len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + ns->port->pid, FC_TYPE_FCP); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_ns_gid_ft_response, (void *)ns, + bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV); + + port->stats.ns_gidft_sent++; + + bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT); +} + +static void +bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg; + struct bfa_fcs_lport_s *port = ns->port; + struct ct_hdr_s *cthdr = NULL; + u32 n_pids; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + port->stats.ns_gidft_rsp_err++; + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + return; + } + + if (resid_len != 0) { + /* + * TBD : we will need to allocate a larger buffer & retry the + * command + */ + bfa_trc(port->fcs, rsp_len); + bfa_trc(port->fcs, resid_len); + return; + } + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + switch (cthdr->cmd_rsp_code) { + + case CT_RSP_ACCEPT: + + port->stats.ns_gidft_accepts++; + n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32)); + bfa_trc(port->fcs, n_pids); + bfa_fcs_lport_ns_process_gidft_pids(port, + (u32 *) (cthdr + 1), + n_pids); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + break; + + case CT_RSP_REJECT: + + /* + * Check the reason code & explanation. + * There may not have been any FC4 devices in the fabric + */ + port->stats.ns_gidft_rejects++; + bfa_trc(port->fcs, cthdr->reason_code); + bfa_trc(port->fcs, cthdr->exp_code); + + if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF) + && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) { + + bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); + } else { + /* + * for all other errors, retry + */ + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + } + break; + + default: + port->stats.ns_gidft_unknown_rsp++; + bfa_trc(port->fcs, cthdr->cmd_rsp_code); + bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); + } +} + +/** + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] port - pointer to bfa_fcs_lport_t. + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_lport_ns_timeout(void *arg) +{ + struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg; + + ns->port->stats.ns_timeouts++; + bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT); +} + +/* + * Process the PID list in GID_FT response + */ +static void +bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf, + u32 n_pids) +{ + struct fcgs_gidft_resp_s *gidft_entry; + struct bfa_fcs_rport_s *rport; + u32 ii; + + for (ii = 0; ii < n_pids; ii++) { + gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii]; + + if (gidft_entry->pid == port->pid) + continue; + + /* + * Check if this rport already exists + */ + rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid); + if (rport == NULL) { + /* + * this is a new device. create rport + */ + rport = bfa_fcs_rport_create(port, gidft_entry->pid); + } else { + /* + * this rport already exists + */ + bfa_fcs_rport_scn(rport); + } + + bfa_trc(port->fcs, gidft_entry->pid); + + /* + * if the last entry bit is set, bail out. 
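+		 * (the name server sets this bit on the final port ID entry
+		 * of the GID_FT accept payload)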
+ */ + if (gidft_entry->last) + return; + } +} + +/** + * fcs_ns_public FCS nameserver public interfaces + */ + +/* + * Functions called by port/fab. + * These will send relevant Events to the ns state machine. + */ +void +bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + ns->port = port; + bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline); +} + +void +bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + ns->port = port; + bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE); +} + +void +bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + ns->port = port; + bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE); +} + +void +bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); + + bfa_trc(port->fcs, port->pid); + bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); +} + +void +bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) +{ + + struct bfa_fcs_rport_s *rport; + u8 nwwns; + wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; + int ii; + + bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns); + + for (ii = 0 ; ii < nwwns; ++ii) { + rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); + bfa_assert(rport); + } +} + +/** + * FCS SCN + */ + +#define FC_QOS_RSCN_EVENT 0x0c +#define FC_FABRIC_NAME_RSCN_EVENT 0x0d + +/* + * forward declarations + */ +static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_lport_scn_scr_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); +static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs); +static void bfa_fcs_lport_scn_timeout(void *arg); + +/** + * fcs_scm_sm FCS SCN state machine + */ + +/** + * VPort SCN State Machine events + */ +enum port_scn_event { + SCNSM_EVENT_PORT_ONLINE = 1, + SCNSM_EVENT_PORT_OFFLINE = 2, + SCNSM_EVENT_RSP_OK = 3, + SCNSM_EVENT_RSP_ERROR = 4, + SCNSM_EVENT_TIMEOUT = 5, + SCNSM_EVENT_SCR_SENT = 6, +}; + +static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_sending_scr( + struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); +static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event); + +/** + * Starting state - awaiting link up. 
+ */ +static void +bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_PORT_ONLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr); + bfa_fcs_lport_scn_send_scr(scn, NULL); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_SCR_SENT: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe); + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + struct bfa_fcs_lport_s *port = scn->port; + + switch (event) { + case SCNSM_EVENT_RSP_OK: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online); + break; + + case SCNSM_EVENT_RSP_ERROR: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry); + bfa_timer_start(port->fcs->bfa, &scn->timer, + bfa_fcs_lport_scn_timeout, scn, + BFA_FCS_RETRY_TIMEOUT); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + bfa_fcxp_discard(scn->fcxp); + break; + + default: + bfa_sm_fault(port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_TIMEOUT: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr); + bfa_fcs_lport_scn_send_scr(scn, NULL); + break; + + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + bfa_timer_stop(&scn->timer); + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + +static void +bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, + enum port_scn_event event) +{ + switch (event) { + case SCNSM_EVENT_PORT_OFFLINE: + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); + break; + + default: + bfa_sm_fault(scn->port->fcs, event); + } +} + + + +/** + * fcs_scn_private FCS SCN private functions + */ + +/** + * This routine will be called to send a SCR command. + */ +static void +bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_lport_scn_s *scn = scn_cbarg; + struct bfa_fcs_lport_s *port = scn->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(port->fcs, port->pid); + bfa_trc(port->fcs, port->port_cfg.pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe, + bfa_fcs_lport_scn_send_scr, scn); + return; + } + scn->fcxp = fcxp; + + /* Handle VU registrations for Base port only */ + if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { + len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_lps_is_brcd_fabric(port->fabric->lps), + port->pid, 0); + } else { + len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + BFA_FALSE, + port->pid, 0); + } + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, + bfa_fcs_lport_scn_scr_response, + (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV); + + bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT); +} + +static void +bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp, + void *cbarg, bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg; + struct bfa_fcs_lport_s *port = scn->port; + struct fc_els_cmd_s *els_cmd; + struct fc_ls_rjt_s *ls_rjt; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(port->fcs, req_status); + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); + return; + } + + els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); + + switch (els_cmd->els_code) { + + case FC_ELS_ACC: + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK); + break; + + case FC_ELS_LS_RJT: + + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(port->fcs, ls_rjt->reason_code); + bfa_trc(port->fcs, ls_rjt->reason_code_expl); + + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); + break; + + default: + bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); + } +} + +/* + * Send a LS Accept + */ +static void +bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, + struct fchs_s *rx_fchs) +{ + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + struct bfa_rport_s *bfa_rport = NULL; + int len; + + bfa_trc(port->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id); + + bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/** + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] vport - pointer to bfa_fcs_lport_t. 
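+ * (note: the argument actually passed in is the port's SCN structure,
+ * struct bfa_fcs_lport_scn_s, and there is no output parameter)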
+ * param[out] vport_status - pointer to return vport status in + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_lport_scn_timeout(void *arg) +{ + struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg; + + bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT); +} + + + +/** + * fcs_scn_public FCS state change notification public interfaces + */ + +/* + * Functions called by port/fab + */ +void +bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); + + scn->port = port; + bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline); +} + +void +bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); + + scn->port = port; + bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE); +} + +void +bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port) +{ + struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); + + scn->port = port; + bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE); +} + +static void +bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid) +{ + struct bfa_fcs_rport_s *rport; + + bfa_trc(port->fcs, rpid); + + /** + * If this is an unknown device, then it just came online. + * Otherwise let rport handle the RSCN event. + */ + rport = bfa_fcs_lport_get_rport_by_pid(port, rpid); + if (rport == NULL) { + /* + * If min cfg mode is enabled, we donot need to + * discover any new rports. + */ + if (!__fcs_min_cfg(port->fcs)) + rport = bfa_fcs_rport_create(port, rpid); + } else + bfa_fcs_rport_scn(rport); +} + +/** + * rscn format based PID comparison + */ +#define __fc_pid_match(__c0, __c1, __fmt) \ + (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \ + (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \ + ((__c0)[0] == (__c1)[0])) || \ + (((__fmt) == FC_RSCN_FORMAT_AREA) && \ + ((__c0)[0] == (__c1)[0]) && \ + ((__c0)[1] == (__c1)[1]))) + +static void +bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port, + enum fc_rscn_format format, + u32 rscn_pid) +{ + struct bfa_fcs_rport_s *rport; + struct list_head *qe, *qe_next; + u8 *c0, *c1; + + bfa_trc(port->fcs, format); + bfa_trc(port->fcs, rscn_pid); + + c0 = (u8 *) &rscn_pid; + + list_for_each_safe(qe, qe_next, &port->rport_q) { + rport = (struct bfa_fcs_rport_s *) qe; + c1 = (u8 *) &rport->pid; + if (__fc_pid_match(c0, c1, format)) + bfa_fcs_rport_scn(rport); + } +} + + +void +bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, + struct fchs_s *fchs, u32 len) +{ + struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1); + int num_entries; + u32 rscn_pid; + bfa_boolean_t nsquery = BFA_FALSE, found; + int i = 0, j; + + num_entries = + (bfa_os_ntohs(rscn->payldlen) - + sizeof(u32)) / sizeof(rscn->event[0]); + + bfa_trc(port->fcs, num_entries); + + port->stats.num_rscn++; + + bfa_fcs_lport_scn_send_ls_acc(port, fchs); + + for (i = 0; i < num_entries; i++) { + rscn_pid = rscn->event[i].portid; + + bfa_trc(port->fcs, rscn->event[i].format); + bfa_trc(port->fcs, rscn_pid); + + /* check for duplicate entries in the list */ + found = BFA_FALSE; + for (j = 0; j < i; j++) { + if (rscn->event[j].portid == rscn_pid) { + found = BFA_TRUE; + break; + } + } + + /* if found in down the list, pid has been already processed */ + if (found) { + bfa_trc(port->fcs, rscn_pid); + continue; + } + + switch (rscn->event[i].format) { + case FC_RSCN_FORMAT_PORTID: + if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) { + /* + * Ignore this event. 
+ * f/w would have processed it + */ + bfa_trc(port->fcs, rscn_pid); + } else { + port->stats.num_portid_rscn++; + bfa_fcs_lport_scn_portid_rscn(port, rscn_pid); + } + break; + + case FC_RSCN_FORMAT_FABRIC: + if (rscn->event[i].qualifier == + FC_FABRIC_NAME_RSCN_EVENT) { + bfa_fcs_lport_ms_fabric_rscn(port); + break; + } + /* !!!!!!!!! Fall Through !!!!!!!!!!!!! */ + + case FC_RSCN_FORMAT_AREA: + case FC_RSCN_FORMAT_DOMAIN: + nsquery = BFA_TRUE; + bfa_fcs_lport_scn_multiport_rscn(port, + rscn->event[i].format, + rscn_pid); + break; + + + default: + bfa_assert(0); + nsquery = BFA_TRUE; + } + } + + /** + * If any of area, domain or fabric RSCN is received, do a fresh discovery + * to find new devices. + */ + if (nsquery) + bfa_fcs_lport_ns_query(port); +} + +/** + * BFA FCS port + */ +/** + * fcs_port_api BFA FCS port API + */ +struct bfa_fcs_lport_s * +bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) +{ + return &fcs->fabric.bport; +} + +wwn_t +bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, + int nrports, bfa_boolean_t bwwn) +{ + struct list_head *qh, *qe; + struct bfa_fcs_rport_s *rport = NULL; + int i; + struct bfa_fcs_s *fcs; + + if (port == NULL || nrports == 0) + return (wwn_t) 0; + + fcs = port->fcs; + bfa_trc(fcs, (u32) nrports); + + i = 0; + qh = &port->rport_q; + qe = bfa_q_first(qh); + + while ((qe != qh) && (i < nrports)) { + rport = (struct bfa_fcs_rport_s *) qe; + if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { + qe = bfa_q_next(qe); + bfa_trc(fcs, (u32) rport->pwwn); + bfa_trc(fcs, rport->pid); + bfa_trc(fcs, i); + continue; + } + + if (bwwn) { + if (!memcmp(&wwn, &rport->pwwn, 8)) + break; + } else { + if (i == index) + break; + } + + i++; + qe = bfa_q_next(qe); + } + + bfa_trc(fcs, i); + if (rport) + return rport->pwwn; + else + return (wwn_t) 0; +} + +void +bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port, + wwn_t rport_wwns[], int *nrports) +{ + struct list_head *qh, *qe; + struct bfa_fcs_rport_s *rport = NULL; + int i; + struct bfa_fcs_s *fcs; + + if (port == NULL || rport_wwns == NULL || *nrports == 0) + return; + + fcs = port->fcs; + bfa_trc(fcs, (u32) *nrports); + + i = 0; + qh = &port->rport_q; + qe = bfa_q_first(qh); + + while ((qe != qh) && (i < *nrports)) { + rport = (struct bfa_fcs_rport_s *) qe; + if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { + qe = bfa_q_next(qe); + bfa_trc(fcs, (u32) rport->pwwn); + bfa_trc(fcs, rport->pid); + bfa_trc(fcs, i); + continue; + } + + rport_wwns[i] = rport->pwwn; + + i++; + qe = bfa_q_next(qe); + } + + bfa_trc(fcs, i); + *nrports = i; +} + +/* + * Iterate's through all the rport's in the given port to + * determine the maximum operating speed. + * + * !!!! To be used in TRL Functionality only !!!! 
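+ *
+ * When ratelim (TRL) is enabled, an rport whose RPSC speed is unknown is
+ * assumed to run at the configured default ratelim speed; the scan stops
+ * early once an rport at 8Gbps or above the local port speed is found.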
+ */ +bfa_port_speed_t +bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port) +{ + struct list_head *qh, *qe; + struct bfa_fcs_rport_s *rport = NULL; + struct bfa_fcs_s *fcs; + bfa_port_speed_t max_speed = 0; + struct bfa_port_attr_s port_attr; + bfa_port_speed_t port_speed, rport_speed; + bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa); + + + if (port == NULL) + return 0; + + fcs = port->fcs; + + /* Get Physical port's current speed */ + bfa_fcport_get_attr(port->fcs->bfa, &port_attr); + port_speed = port_attr.speed; + bfa_trc(fcs, port_speed); + + qh = &port->rport_q; + qe = bfa_q_first(qh); + + while (qe != qh) { + rport = (struct bfa_fcs_rport_s *) qe; + if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) || + (bfa_fcs_rport_get_state(rport) == + BFA_RPORT_OFFLINE)) { + qe = bfa_q_next(qe); + continue; + } + + rport_speed = rport->rpf.rpsc_speed; + if ((trl_enabled) && (rport_speed == + BFA_PORT_SPEED_UNKNOWN)) { + /* Use default ratelim speed setting */ + rport_speed = + bfa_fcport_get_ratelim_speed(port->fcs->bfa); + } + + if ((rport_speed == BFA_PORT_SPEED_8GBPS) || + (rport_speed > port_speed)) { + max_speed = rport_speed; + break; + } else if (rport_speed > max_speed) { + max_speed = rport_speed; + } + + qe = bfa_q_next(qe); + } + + bfa_trc(fcs, max_speed); + return max_speed; +} + +struct bfa_fcs_lport_s * +bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) +{ + struct bfa_fcs_vport_s *vport; + bfa_fcs_vf_t *vf; + + bfa_assert(fcs != NULL); + + vf = bfa_fcs_vf_lookup(fcs, vf_id); + if (vf == NULL) { + bfa_trc(fcs, vf_id); + return NULL; + } + + if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) + return &vf->bport; + + vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); + if (vport) + return &vport->lport; + + return NULL; +} + +/* + * API corresponding to NPIV_VPORT_GETINFO. 
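+ * Reports physical port attributes when port->vport is NULL and virtual
+ * port attributes otherwise.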
+ */ +void +bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, + struct bfa_lport_info_s *port_info) +{ + + bfa_trc(port->fcs, port->fabric->fabric_name); + + if (port->vport == NULL) { + /* + * This is a Physical port + */ + port_info->port_type = BFA_LPORT_TYPE_PHYSICAL; + + /* + * @todo : need to fix the state & reason + */ + port_info->port_state = 0; + port_info->offline_reason = 0; + + port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); + port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); + + port_info->max_vports_supp = + bfa_lps_get_max_vport(port->fcs->bfa); + port_info->num_vports_inuse = + bfa_fcs_fabric_vport_count(port->fabric); + port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; + port_info->num_rports_inuse = port->num_rports; + } else { + /* + * This is a virtual port + */ + port_info->port_type = BFA_LPORT_TYPE_VIRTUAL; + + /* + * @todo : need to fix the state & reason + */ + port_info->port_state = 0; + port_info->offline_reason = 0; + + port_info->port_wwn = bfa_fcs_lport_get_pwwn(port); + port_info->node_wwn = bfa_fcs_lport_get_nwwn(port); + } +} + +void +bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, + struct bfa_lport_stats_s *port_stats) +{ + *port_stats = fcs_port->stats; +} + +void +bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) +{ + bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); +} + +/** + * FCS virtual port state machine + */ + +#define __vport_fcs(__vp) ((__vp)->lport.fcs) +#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn) +#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn) +#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa) +#define __vport_fcid(__vp) ((__vp)->lport.pid) +#define __vport_fabric(__vp) ((__vp)->lport.fabric) +#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id) + +#define BFA_FCS_VPORT_MAX_RETRIES 5 +/* + * Forward declarations + */ +static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport); +static void bfa_fcs_vport_timeout(void *vport_arg); +static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); +static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); + +/** + * fcs_vport_sm FCS virtual port state machine + */ + +/** + * VPort State Machine events + */ +enum bfa_fcs_vport_event { + BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */ + BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */ + BFA_FCS_VPORT_SM_START = 3, /* vport start request */ + BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */ + BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */ + BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */ + BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */ + BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */ + BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */ + BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */ + BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */ + BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/ + BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */ +}; + +static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void 
bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); +static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event); + +static struct bfa_sm_table_s vport_sm_table[] = { + {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT}, + {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED}, + {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE}, + {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC}, + {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY}, + {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE}, + {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING}, + {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP}, + {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO}, + {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} +}; + +/** + * Beginning state. + */ +static void +bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_CREATE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); + bfa_fcs_fabric_addvport(__vport_fabric(vport), vport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * Created state - a start event is required to start up the state machine. + */ +static void +bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_START: + if (bfa_fcs_fabric_is_online(__vport_fabric(vport)) + && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) { + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); + bfa_fcs_vport_do_fdisc(vport); + } else { + /** + * Fabric is offline or not NPIV capable, stay in + * offline state. + */ + vport->vport_stats.fab_no_npiv++; + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + } + break; + + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_ONLINE: + case BFA_FCS_VPORT_SM_OFFLINE: + /** + * Ignore ONLINE/OFFLINE events from fabric + * till vport is started. + */ + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * Offline state - awaiting ONLINE event from fabric SM. + */ +static void +bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_ONLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); + vport->fdisc_retries = 0; + bfa_fcs_vport_do_fdisc(vport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + /* + * This can happen if the vport couldn't be initialzied + * due the fact that the npiv was not enabled on the switch. + * In that case we will put the vport in offline state. 
+ * However, the link can go down and cause the this event to + * be sent when we are already offline. Ignore it. + */ + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + + +/** + * FDISC is sent and awaiting reply from fabric. + */ +static void +bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_lps_discard(vport->lps); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + bfa_lps_discard(vport->lps); + break; + + case BFA_FCS_VPORT_SM_RSP_OK: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_online); + bfa_fcs_lport_online(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_RSP_ERROR: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry); + bfa_timer_start(__vport_bfa(vport), &vport->timer, + bfa_fcs_vport_timeout, vport, + BFA_FCS_RETRY_TIMEOUT); + break; + + case BFA_FCS_VPORT_SM_RSP_FAILED: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + break; + + case BFA_FCS_VPORT_SM_RSP_DUP_WWN: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_error); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * FDISC attempt failed - a timer is active to retry FDISC. + */ +static void +bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_timer_stop(&vport->timer); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + bfa_timer_stop(&vport->timer); + break; + + case BFA_FCS_VPORT_SM_TIMEOUT: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); + vport->vport_stats.fdisc_retries++; + vport->fdisc_retries++; + bfa_fcs_vport_do_fdisc(vport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * Vport is online (FDISC is complete). + */ +static void +bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); + bfa_fcs_lport_delete(&vport->lport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); + bfa_lps_discard(vport->lps); + bfa_fcs_lport_offline(&vport->lport); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * Vport is being deleted - awaiting lport delete completion to send + * LOGO to fabric. + */ +static void +bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + break; + + case BFA_FCS_VPORT_SM_DELCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo); + bfa_fcs_vport_do_logo(vport); + break; + + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * Error State. 
+ * This state will be set when the Vport Creation fails due + * to errors like Dup WWN. In this state only operation allowed + * is a Vport Delete. + */ +static void +bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELETE: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); + bfa_fcs_lport_delete(&vport->lport); + break; + + default: + bfa_trc(__vport_fcs(vport), event); + } +} + +/** + * Lport cleanup is in progress since vport is being deleted. Fabric is + * offline, so no LOGO is needed to complete vport deletion. + */ +static void +bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_DELCOMP: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); + bfa_fcs_vport_free(vport); + break; + + case BFA_FCS_VPORT_SM_DELETE: + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + +/** + * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup + * is done. + */ +static void +bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, + enum bfa_fcs_vport_event event) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), event); + + switch (event) { + case BFA_FCS_VPORT_SM_OFFLINE: + bfa_lps_discard(vport->lps); + /* + * !!! fall through !!! + */ + + case BFA_FCS_VPORT_SM_RSP_OK: + case BFA_FCS_VPORT_SM_RSP_ERROR: + bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); + bfa_fcs_vport_free(vport); + break; + + case BFA_FCS_VPORT_SM_DELETE: + break; + + default: + bfa_sm_fault(__vport_fcs(vport), event); + } +} + + + +/** + * fcs_vport_private FCS virtual port private functions + */ +/** + * This routine will be called to send a FDISC command. + */ +static void +bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) +{ + bfa_lps_fdisc(vport->lps, vport, + bfa_fcport_get_maxfrsize(__vport_bfa(vport)), + __vport_pwwn(vport), __vport_nwwn(vport)); + vport->vport_stats.fdisc_sent++; +} + +static void +bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) +{ + u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps); + u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps); + + bfa_trc(__vport_fcs(vport), lsrjt_rsn); + bfa_trc(__vport_fcs(vport), lsrjt_expl); + + /* For certain reason codes, we don't want to retry. */ + switch (bfa_lps_get_lsrjt_expl(vport->lps)) { + case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ + case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); + break; + + case FC_LS_RJT_EXP_INSUFF_RES: + /* + * This means max logins per port/switch setting on the + * switch was exceeded. + */ + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); + break; + + default: + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + } +} + +/** + * Called to send a logout to the fabric. Used when a V-Port is + * deleted/stopped. 
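+ * The LOGO is issued through the LPS module (bfa_lps_fdisclogo()); its
+ * completion is reported back via bfa_cb_lps_fdisclogo_comp().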
+ */ +static void +bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport) +{ + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + + vport->vport_stats.logo_sent++; + bfa_lps_fdisclogo(vport->lps); +} + + +/** + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] vport - pointer to bfa_fcs_vport_t. + * param[out] vport_status - pointer to return vport status in + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_vport_timeout(void *vport_arg) +{ + struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg; + + vport->vport_stats.fdisc_timeouts++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT); +} + +static void +bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) +{ + struct bfad_vport_s *vport_drv = + (struct bfad_vport_s *)vport->vport_drv; + + bfa_fcs_fabric_delvport(__vport_fabric(vport), vport); + + if (vport_drv->comp_del) + complete(vport_drv->comp_del); + + bfa_lps_delete(vport->lps); +} + + + +/** + * fcs_vport_public FCS virtual port public interfaces + */ + +/** + * Online notification from fabric SM. + */ +void +bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) +{ + vport->vport_stats.fab_online++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); +} + +/** + * Offline notification from fabric SM. + */ +void +bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) +{ + vport->vport_stats.fab_offline++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); +} + +/** + * Cleanup notification from fabric SM on link timer expiry. + */ +void +bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) +{ + vport->vport_stats.fab_cleanup++; +} +/** + * delete notification from fabric SM. To be invoked from within FCS. + */ +void +bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); +} + +/** + * Delete completion callback from associated lport + */ +void +bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP); +} + + + +/** + * fcs_vport_api Virtual port API + */ + +/** + * Use this function to instantiate a new FCS vport object. This + * function will not trigger any HW initialization process (which will be + * done in vport_start() call) + * + * param[in] vport - pointer to bfa_fcs_vport_t. This space + * needs to be allocated by the driver. + * param[in] fcs - FCS instance + * param[in] vport_cfg - vport configuration + * param[in] vf_id - VF_ID if vport is created within a VF. + * FC_VF_ID_NULL to specify base fabric. + * param[in] vport_drv - Opaque handle back to the driver's vport + * structure + * + * retval BFA_STATUS_OK - on success. + * retval BFA_STATUS_FAILED - on failure. 
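+ *
+ * Note: on failure this call actually returns one of BFA_STATUS_INVALID_WWN,
+ * BFA_STATUS_VPORT_WWN_BP, BFA_STATUS_VPORT_EXISTS or BFA_STATUS_VPORT_MAX.
+ *
+ * A minimal usage sketch; the bfad_vport and port_cfg names below are
+ * illustrative driver-side objects, not part of this API:
+ *
+ *	if (bfa_fcs_vport_create(&bfad_vport->fcs_vport, fcs, FC_VF_ID_NULL,
+ *			&port_cfg, bfad_vport) == BFA_STATUS_OK)
+ *		bfa_fcs_vport_start(&bfad_vport->fcs_vport);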
+ */ +bfa_status_t +bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_lport_cfg_s *vport_cfg, + struct bfad_vport_s *vport_drv) +{ + if (vport_cfg->pwwn == 0) + return BFA_STATUS_INVALID_WWN; + + if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) + return BFA_STATUS_VPORT_WWN_BP; + + if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL) + return BFA_STATUS_VPORT_EXISTS; + + if (bfa_fcs_fabric_vport_count(&fcs->fabric) == + bfa_lps_get_max_vport(fcs->bfa)) + return BFA_STATUS_VPORT_MAX; + + vport->lps = bfa_lps_alloc(fcs->bfa); + if (!vport->lps) + return BFA_STATUS_VPORT_MAX; + + vport->vport_drv = vport_drv; + vport_cfg->preboot_vp = BFA_FALSE; + + bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); + bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport); + bfa_fcs_lport_init(&vport->lport, vport_cfg); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); + + return BFA_STATUS_OK; +} + +/** + * Use this function to instantiate a new FCS PBC vport object. This + * function will not trigger any HW initialization process (which will be + * done in vport_start() call) + * + * param[in] vport - pointer to bfa_fcs_vport_t. This space + * needs to be allocated by the driver. + * param[in] fcs - FCS instance + * param[in] vport_cfg - vport configuration + * param[in] vf_id - VF_ID if vport is created within a VF. + * FC_VF_ID_NULL to specify base fabric. + * param[in] vport_drv - Opaque handle back to the driver's vport + * structure + * + * retval BFA_STATUS_OK - on success. + * retval BFA_STATUS_FAILED - on failure. + */ +bfa_status_t +bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, + u16 vf_id, struct bfa_lport_cfg_s *vport_cfg, + struct bfad_vport_s *vport_drv) +{ + bfa_status_t rc; + + rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv); + vport->lport.port_cfg.preboot_vp = BFA_TRUE; + + return rc; +} + +/** + * Use this function to findout if this is a pbc vport or not. + * + * @param[in] vport - pointer to bfa_fcs_vport_t. + * + * @returns None + */ +bfa_boolean_t +bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) +{ + + if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE)) + return BFA_TRUE; + else + return BFA_FALSE; + +} + +/** + * Use this function initialize the vport. + * + * @param[in] vport - pointer to bfa_fcs_vport_t. + * + * @returns None + */ +bfa_status_t +bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START); + + return BFA_STATUS_OK; +} + +/** + * Use this function quiese the vport object. This function will return + * immediately, when the vport is actually stopped, the + * bfa_drv_vport_stop_cb() will be called. + * + * param[in] vport - pointer to bfa_fcs_vport_t. + * + * return None + */ +bfa_status_t +bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport) +{ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP); + + return BFA_STATUS_OK; +} + +/** + * Use this function to delete a vport object. Fabric object should + * be stopped before this function call. + * + * !!!!!!! Donot invoke this from within FCS !!!!!!! + * + * param[in] vport - pointer to bfa_fcs_vport_t. + * + * return None + */ +bfa_status_t +bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport) +{ + + if (vport->lport.port_cfg.preboot_vp) + return BFA_STATUS_PBC; + + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); + + return BFA_STATUS_OK; +} + +/** + * Use this function to get vport's current status info. 
+ * + * param[in] vport pointer to bfa_fcs_vport_t. + * param[out] attr pointer to return vport attributes + * + * return None + */ +void +bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, + struct bfa_vport_attr_s *attr) +{ + if (vport == NULL || attr == NULL) + return; + + bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s)); + + bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); + attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); +} + +/** + * Use this function to get vport's statistics. + * + * param[in] vport pointer to bfa_fcs_vport_t. + * param[out] stats pointer to return vport statistics in + * + * return None + */ +void +bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, + struct bfa_vport_stats_s *stats) +{ + *stats = vport->vport_stats; +} + +/** + * Use this function to clear vport's statistics. + * + * param[in] vport pointer to bfa_fcs_vport_t. + * + * return None + */ +void +bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport) +{ + bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); +} + +/** + * Lookup a virtual port. Excludes base port from lookup. + */ +struct bfa_fcs_vport_s * +bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn) +{ + struct bfa_fcs_vport_s *vport; + struct bfa_fcs_fabric_s *fabric; + + bfa_trc(fcs, vf_id); + bfa_trc(fcs, vpwwn); + + fabric = bfa_fcs_vf_lookup(fcs, vf_id); + if (!fabric) { + bfa_trc(fcs, vf_id); + return NULL; + } + + vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn); + return vport; +} + +/** + * FDISC Response + */ +void +bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) +{ + struct bfa_fcs_vport_s *vport = uarg; + + bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); + bfa_trc(__vport_fcs(vport), status); + + switch (status) { + case BFA_STATUS_OK: + /* + * Initialiaze the V-Port fields + */ + __vport_fcid(vport) = bfa_lps_get_pid(vport->lps); + vport->vport_stats.fdisc_accepts++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); + break; + + case BFA_STATUS_INVALID_MAC: + /* Only for CNA */ + vport->vport_stats.fdisc_acc_bad++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + + break; + + case BFA_STATUS_EPROTOCOL: + switch (bfa_lps_get_extstatus(vport->lps)) { + case BFA_EPROTO_BAD_ACCEPT: + vport->vport_stats.fdisc_acc_bad++; + break; + + case BFA_EPROTO_UNKNOWN_RSP: + vport->vport_stats.fdisc_unknown_rsp++; + break; + + default: + break; + } + + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + break; + + case BFA_STATUS_FABRIC_RJT: + vport->vport_stats.fdisc_rejects++; + bfa_fcs_vport_fdisc_rejected(vport); + break; + + default: + vport->vport_stats.fdisc_rsp_err++; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + } +} + +/** + * LOGO response + */ +void +bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg) +{ + struct bfa_fcs_vport_s *vport = uarg; + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); +} + +/** + * Received clear virtual link + */ +void +bfa_cb_lps_cvl_event(void *bfad, void *uarg) +{ + struct bfa_fcs_vport_s *vport = uarg; + + /* Send an Offline followed by an ONLINE */ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); +} diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c deleted file mode 100644 index 3c27788cd527..000000000000 --- a/drivers/scsi/bfa/bfa_fcs_port.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 
- * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_pport.c BFA FCS PPORT ( physical port) - */ - -#include -#include -#include -#include "fcs_trcmod.h" -#include "fcs.h" -#include "fcs_fabric.h" -#include "fcs_port.h" - -BFA_TRC_FILE(FCS, PPORT); - -static void -bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event) -{ - struct bfa_fcs_s *fcs = cbarg; - - bfa_trc(fcs, event); - - switch (event) { - case BFA_PPORT_LINKUP: - bfa_fcs_fabric_link_up(&fcs->fabric); - break; - - case BFA_PPORT_LINKDOWN: - bfa_fcs_fabric_link_down(&fcs->fabric); - break; - - case BFA_PPORT_TRUNK_LINKDOWN: - bfa_assert(0); - break; - - default: - bfa_assert(0); - } -} - -void -bfa_fcs_pport_attach(struct bfa_fcs_s *fcs) -{ - bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs); -} diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c new file mode 100644 index 000000000000..635f0cd88714 --- /dev/null +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -0,0 +1,3126 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/** + * rport.c Remote port implementation. 
+ */ + +#include "bfa_fcs.h" +#include "bfa_fcbuild.h" +#include "bfad_drv.h" + +BFA_TRC_FILE(FCS, RPORT); + +static u32 +bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; + /* In millisecs */ +/* + * forward declarations + */ +static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc( + struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid); +static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport); +static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, + struct fc_logi_s *plogi); +static void bfa_fcs_rport_timeout(void *arg); +static void bfa_fcs_rport_send_plogi(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_plogi_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_send_adisc(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_adisc_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_gidpn_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_gpnid_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs); +static void bfa_fcs_rport_send_logo(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg); +static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len); +static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u8 reason_code, + u8 reason_code_expl); +static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len); +static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); +/** + * fcs_rport_sm FCS rport state machine events + */ + +enum rport_event { + RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */ + RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ + RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ + RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ + RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ + RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ + RPSM_EVENT_DELETE = 7, /* RPORT delete request */ + RPSM_EVENT_SCN = 8, /* state change notification */ + RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ + RPSM_EVENT_FAILED = 10, /* Request to rport failed. 
*/ + RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ + RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ + RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ + RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ + RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ + RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ + RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ + RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continously */ +}; + +static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, + enum rport_event event); +static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, + enum rport_event event); + +static struct bfa_sm_table_s rport_sm_table[] = { + {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, + {BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI}, + {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE}, + {BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY}, + {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI}, + {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE}, + {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, + {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, + {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, + {BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC}, + {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, + 
{BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, + {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, + {BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE}, + {BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV}, + {BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO}, + {BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO}, + {BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE}, + {BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC}, + {BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC}, + {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, +}; + +/** + * Beginning state. + */ +static void +bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_PLOGI_SEND: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_ADDRESS_DISC: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * PLOGI is being sent. + */ +static void +bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN: + /* query the NS */ + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * PLOGI is being sent. 
+ */ +static void +bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_SCN: + /** + * Ignore, SCN is possibly online notification. + */ + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_HCB_OFFLINE: + /** + * Ignore BFA callback, on a PLOGI receive we call bfa offline. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * PLOGI is sent. + */ +static void +bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_TIMEOUT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_LOGO_RCVD: + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN: + bfa_timer_stop(&rport->timer); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_stop(&rport->timer); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_hal_online(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * PLOGI is sent. + */ +static void +bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + rport->plogi_retries = 0; + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + /* + * !! fall through !! + */ + case RPSM_EVENT_PRLO_RCVD: + if (rport->prlo == BFA_TRUE) + bfa_fcs_rport_send_prlo_acc(rport); + + bfa_fcxp_discard(rport->fcxp); + /* + * !! fall through !! 
+ */ + case RPSM_EVENT_FAILED: + if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { + rport->plogi_retries++; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + BFA_FCS_RETRY_TIMEOUT); + } else { + bfa_stats(rport->port, rport_del_max_plogi_retry); + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + break; + + case RPSM_EVENT_PLOGI_RETRY: + rport->plogi_retries = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + (FC_RA_TOV * 1000)); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_SCN: + bfa_fcxp_discard(rport->fcxp); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_online(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * PLOGI is complete. Awaiting BFA rport online callback. FC-4s + * are offline. + */ +static void +bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_ONLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); + bfa_fcs_rport_online_action(rport); + break; + + case RPSM_EVENT_PRLO_RCVD: + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); + bfa_rport_offline(rport->bfa_rport); + break; + + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); + bfa_rport_offline(rport->bfa_rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_rport_offline(rport->bfa_rport); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); + bfa_rport_offline(rport->bfa_rport); + break; + + case RPSM_EVENT_SCN: + /** + * @todo + * Ignore SCN - PLOGI just completed, FC-4 login should detect + * device failures. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is ONLINE. FC-4s active. 
+ */ +static void +bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_SCN: + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsquery_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); + bfa_fcs_rport_send_adisc(rport, NULL); + } + break; + + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * An SCN event is received in ONLINE state. NS query is being sent + * prior to ADISC authentication with rport. FC-4s are paused. + */ +static void +bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_SCN: + /** + * ignore SCN, wait for response to query itself + */ + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * An SCN event is received in ONLINE state. NS query is sent to rport. + * FC-4s are paused. 
+ */ +static void +bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); + bfa_fcs_rport_send_adisc(rport, NULL); + break; + + case RPSM_EVENT_FAILED: + rport->ns_retries++; + if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsquery_sending); + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_offline_action(rport); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_SCN: + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + case RPSM_EVENT_ADDRESS_CHANGE: + case RPSM_EVENT_PLOGI_RCVD: + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * An SCN event is received in ONLINE state. ADISC is being sent for + * authenticating with rport. FC-4s are paused. + */ +static void +bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_SCN: + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * An SCN event is received in ONLINE state. ADISC is to rport. + * FC-4s are paused. + */ +static void +bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); + break; + + case RPSM_EVENT_PLOGI_RCVD: + /** + * Too complex to cleanup FC-4 & rport and then acc to PLOGI. + * At least go offline when a PLOGI is received. + */ + bfa_fcxp_discard(rport->fcxp); + /* + * !!! fall through !!! 
+ */ + + case RPSM_EVENT_FAILED: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_SCN: + /** + * already processing RSCN + */ + break; + + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_offline_action(rport); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_offline_action(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport has sent LOGO. Awaiting FC-4 offline completion callback. + */ +static void +bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); + bfa_rport_offline(rport->bfa_rport); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_ADDRESS_CHANGE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * LOGO needs to be sent to rport. Awaiting FC-4 offline completion + * callback. + */ +static void +bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); + bfa_rport_offline(rport->bfa_rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is going offline. Awaiting FC-4 offline completion callback. + */ +static void +bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FC4_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); + bfa_rport_offline(rport->bfa_rport); + break; + + case RPSM_EVENT_SCN: + case RPSM_EVENT_LOGO_IMP: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_ADDRESS_CHANGE: + /** + * rport is already going offline. + * SCN - ignore and wait till transitioning to offline state + */ + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is offline. FC-4s are offline. Awaiting BFA rport offline + * callback. 
+ */ +static void +bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + case RPSM_EVENT_ADDRESS_CHANGE: + if (bfa_fcs_lport_is_online(rport->port)) { + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + } + } else { + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_SCN: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + /** + * Ignore, already offline. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is offline. FC-4s are offline. Awaiting BFA rport offline + * callback to send LOGO accept. + */ +static void +bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + case RPSM_EVENT_ADDRESS_CHANGE: + if (rport->pid && (rport->prlo == BFA_TRUE)) + bfa_fcs_rport_send_prlo_acc(rport); + if (rport->pid && (rport->prlo == BFA_FALSE)) + bfa_fcs_rport_send_logo_acc(rport); + /* + * If the lport is online and if the rport is not a well + * known address port, + * we try to re-discover the r-port. + */ + if (bfa_fcs_lport_is_online(rport->port) && + (!BFA_FCS_PID_IS_WKA(rport->pid))) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + /* + * if it is not a well known address, reset the + * pid to 0. + */ + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + } + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); + break; + + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + /** + * Ignore - already processing a LOGO. + */ + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is being deleted. FC-4s are offline. + * Awaiting BFA rport offline + * callback to send LOGO. + */ +static void +bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_HCB_OFFLINE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending); + bfa_fcs_rport_send_logo(rport, NULL); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_ADDRESS_CHANGE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is being deleted. FC-4s are offline. LOGO is being sent. 
+ */ +static void +bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + /* Once LOGO is sent, we donot wait for the response */ + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_SCN: + case RPSM_EVENT_ADDRESS_CHANGE: + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport is offline. FC-4s are offline. BFA rport is offline. + * Timer active to delete stale rport. + */ +static void +bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_TIMEOUT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_SCN: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + bfa_timer_stop(&rport->timer); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_LOGO_IMP: + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_hal_online(rport); + break; + + case RPSM_EVENT_PLOGI_SEND: + bfa_timer_stop(&rport->timer); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport address has changed. Nameserver discovery request is being sent. 
+ */ +static void +bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_SCN: + case RPSM_EVENT_LOGO_RCVD: + case RPSM_EVENT_PRLO_RCVD: + case RPSM_EVENT_PLOGI_SEND: + break; + + case RPSM_EVENT_ADDRESS_CHANGE: + rport->ns_retries = 0; /* reset the retry count */ + break; + + case RPSM_EVENT_LOGO_IMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); + bfa_fcs_rport_hal_online(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Nameserver discovery failed. Waiting for timeout to retry. + */ +static void +bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_TIMEOUT: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_SCN: + case RPSM_EVENT_ADDRESS_CHANGE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); + bfa_timer_stop(&rport->timer); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_stop(&rport->timer); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + case RPSM_EVENT_LOGO_RCVD: + bfa_fcs_rport_send_logo_acc(rport); + break; + case RPSM_EVENT_PRLO_RCVD: + bfa_fcs_rport_send_prlo_acc(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_timer_stop(&rport->timer); + bfa_fcs_rport_hal_online(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +/** + * Rport address has changed. Nameserver discovery request is sent. 
+ */ +static void +bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, + enum rport_event event) +{ + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPSM_EVENT_ACCEPTED: + case RPSM_EVENT_ADDRESS_CHANGE: + if (rport->pid) { + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); + bfa_fcs_rport_send_plogi(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_nsdisc(rport, NULL); + } + break; + + case RPSM_EVENT_FAILED: + rport->ns_retries++; + if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + bfa_fcs_rport_send_nsdisc(rport, NULL); + } else { + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + }; + break; + + case RPSM_EVENT_DELETE: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_free(rport); + break; + + case RPSM_EVENT_PLOGI_RCVD: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_send_plogiacc(rport, NULL); + break; + + case RPSM_EVENT_LOGO_IMP: + rport->pid = 0; + bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); + bfa_fcxp_discard(rport->fcxp); + bfa_timer_start(rport->fcs->bfa, &rport->timer, + bfa_fcs_rport_timeout, rport, + bfa_fcs_rport_del_timeout); + break; + + + case RPSM_EVENT_PRLO_RCVD: + bfa_fcs_rport_send_prlo_acc(rport); + break; + case RPSM_EVENT_SCN: + /** + * ignore, wait for NS query response + */ + break; + + case RPSM_EVENT_LOGO_RCVD: + /** + * Not logged-in yet. Accept LOGO. + */ + bfa_fcs_rport_send_logo_acc(rport); + break; + + case RPSM_EVENT_PLOGI_COMP: + bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); + bfa_fcxp_discard(rport->fcxp); + bfa_fcs_rport_hal_online(rport); + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + + + +/** + * fcs_rport_private FCS RPORT provate functions + */ + +static void +bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_plogi, rport); + return; + } + rport->fcxp = fcxp; + + len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, + (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV); + + rport->stats.plogis++; + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct fc_logi_s *plogi_rsp; + struct fc_ls_rjt_s *ls_rjt; + struct bfa_fcs_rport_s *twin; + struct list_head *qe; + + bfa_trc(rport->fcs, rport->pwwn); + + /* + * Sanity Checks + */ + if (req_status != BFA_STATUS_OK) { + bfa_trc(rport->fcs, req_status); + rport->stats.plogi_failed++; + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + return; + } + + plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); + + /** + * Check for failure first. + */ + if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + + bfa_trc(rport->fcs, ls_rjt->reason_code); + bfa_trc(rport->fcs, ls_rjt->reason_code_expl); + + if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) && + (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) { + rport->stats.rjt_insuff_res++; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY); + return; + } + + rport->stats.plogi_rejects++; + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + return; + } + + /** + * PLOGI is complete. Make sure this device is not one of the known + * device with a new FC port address. + */ + list_for_each(qe, &rport->port->rport_q) { + twin = (struct bfa_fcs_rport_s *) qe; + if (twin == rport) + continue; + if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) { + bfa_trc(rport->fcs, twin->pid); + bfa_trc(rport->fcs, rport->pid); + + /* Update plogi stats in twin */ + twin->stats.plogis += rport->stats.plogis; + twin->stats.plogi_rejects += + rport->stats.plogi_rejects; + twin->stats.plogi_timeouts += + rport->stats.plogi_timeouts; + twin->stats.plogi_failed += + rport->stats.plogi_failed; + twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; + twin->stats.plogi_accs++; + + bfa_fcs_rport_delete(rport); + + bfa_fcs_rport_update(twin, plogi_rsp); + twin->pid = rsp_fchs->s_id; + bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP); + return; + } + } + + /** + * Normal login path -- no evil twins. + */ + rport->stats.plogi_accs++; + bfa_fcs_rport_update(rport, plogi_rsp); + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); +} + +static void +bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->reply_oxid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_plogiacc, rport); + return; + } + rport->fcxp = fcxp; + + len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rport->pid, bfa_fcs_lport_get_fcid(port), + rport->reply_oxid, port->port_cfg.pwwn, + port->port_cfg.nwwn, + bfa_fcport_get_maxfrsize(port->fcs->bfa)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); + + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + + fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_adisc, rport); + return; + } + rport->fcxp = fcxp; + + len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), 0, + port->port_cfg.pwwn, port->port_cfg.nwwn); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, + rport, FC_MAX_PDUSZ, FC_ELS_TOV); + + rport->stats.adisc_sent++; + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + void *pld = bfa_fcxp_get_rspbuf(fcxp); + struct fc_ls_rjt_s *ls_rjt; + + if (req_status != BFA_STATUS_OK) { + bfa_trc(rport->fcs, req_status); + rport->stats.adisc_failed++; + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + return; + } + + if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn, + rport->nwwn) == FC_PARSE_OK) { + rport->stats.adisc_accs++; + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); + return; + } + + rport->stats.adisc_rejects++; + ls_rjt = pld; + bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code); + bfa_trc(rport->fcs, ls_rjt->reason_code); + bfa_trc(rport->fcs, ls_rjt->reason_code_expl); + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); +} + +static void +bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + bfa_cb_fcxp_send_t cbfn; + + bfa_trc(rport->fcs, rport->pid); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_nsdisc, rport); + return; + } + rport->fcxp = fcxp; + + if (rport->pwwn) { + len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, rport->pwwn); + cbfn = bfa_fcs_rport_gidpn_response; + } else { + len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + bfa_fcs_lport_get_fcid(port), 0, rport->pid); + cbfn = bfa_fcs_rport_gpnid_response; + } + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, cbfn, + (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV); + + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +static void +bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct ct_hdr_s *cthdr; + struct fcgs_gidpn_resp_s *gidpn_rsp; + struct bfa_fcs_rport_s *twin; + struct list_head *qe; + + bfa_trc(rport->fcs, rport->pwwn); + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + /* Check if the pid is the same as before. */ + gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1); + + if (gidpn_rsp->dap == rport->pid) { + /* Device is online */ + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); + } else { + /* + * Device's PID has changed. We need to cleanup + * and re-login. If there is another device with + * the the newly discovered pid, send an scn notice + * so that its new pid can be discovered. + */ + list_for_each(qe, &rport->port->rport_q) { + twin = (struct bfa_fcs_rport_s *) qe; + if (twin == rport) + continue; + if (gidpn_rsp->dap == twin->pid) { + bfa_trc(rport->fcs, twin->pid); + bfa_trc(rport->fcs, rport->pid); + + twin->pid = 0; + bfa_sm_send_event(twin, + RPSM_EVENT_ADDRESS_CHANGE); + } + } + rport->pid = gidpn_rsp->dap; + bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE); + } + return; + } + + /* + * Reject Response + */ + switch (cthdr->reason_code) { + case CT_RSN_LOGICAL_BUSY: + /* + * Need to retry + */ + bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); + break; + + case CT_RSN_UNABLE_TO_PERF: + /* + * device doesn't exist : Start timer to cleanup this later. + */ + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + + default: + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + } +} + +static void +bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + struct ct_hdr_s *cthdr; + + bfa_trc(rport->fcs, rport->pwwn); + + cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); + cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + + if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { + bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); + return; + } + + /* + * Reject Response + */ + switch (cthdr->reason_code) { + case CT_RSN_LOGICAL_BUSY: + /* + * Need to retry + */ + bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); + break; + + case CT_RSN_UNABLE_TO_PERF: + /* + * device doesn't exist : Start timer to cleanup this later. 
+ */ + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + + default: + bfa_sm_send_event(rport, RPSM_EVENT_FAILED); + break; + } +} + +/** + * Called to send a logout to the rport. + */ +static void +bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + u16 len; + + bfa_trc(rport->fcs, rport->pid); + + port = rport->port; + + fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, + bfa_fcs_rport_send_logo, rport); + return; + } + rport->fcxp = fcxp; + + len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), 0, + bfa_fcs_lport_get_pwwn(port)); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, + rport, FC_MAX_PDUSZ, FC_ELS_TOV); + + rport->stats.logos++; + bfa_fcxp_discard(rport->fcxp); + bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); +} + +/** + * Send ACC for a LOGO received. + */ +static void +bfa_fcs_rport_send_logo_acc(void *rport_cbarg) +{ + struct bfa_fcs_rport_s *rport = rport_cbarg; + struct bfa_fcs_lport_s *port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + u16 len; + + bfa_trc(rport->fcs, rport->pid); + + port = rport->port; + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + rport->stats.logo_rcvd++; + len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rport->pid, bfa_fcs_lport_get_fcid(port), + rport->reply_oxid); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); +} + +/** + * brief + * This routine will be called by bfa_timer on timer timeouts. + * + * param[in] rport - pointer to bfa_fcs_lport_ns_t. + * param[out] rport_status - pointer to return vport status in + * + * return + * void + * + * Special Considerations: + * + * note + */ +static void +bfa_fcs_rport_timeout(void *arg) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg; + + rport->stats.plogi_timeouts++; + bfa_stats(rport->port, rport_plogi_timeouts); + bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); +} + +static void +bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len) +{ + struct bfa_fcxp_s *fcxp; + struct fchs_s fchs; + struct bfa_fcs_lport_s *port = rport->port; + struct fc_prli_s *prli; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + rport->stats.prli_rcvd++; + + /* + * We are in Initiator Mode + */ + prli = (struct fc_prli_s *) (rx_fchs + 1); + + if (prli->parampage.servparams.target) { + /* + * PRLI from a target ? + * Send the Acc. + * PRLI sent by us will be used to transition the IT nexus, + * once the response is received from the target. 
+ */ + bfa_trc(port->fcs, rx_fchs->s_id); + rport->scsi_function = BFA_RPORT_TARGET; + } else { + bfa_trc(rport->fcs, prli->parampage.type); + rport->scsi_function = BFA_RPORT_INITIATOR; + bfa_fcs_itnim_is_initiator(rport->itnim); + } + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, port->port_cfg.roles); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); +} + +static void +bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len) +{ + struct bfa_fcxp_s *fcxp; + struct fchs_s fchs; + struct bfa_fcs_lport_s *port = rport->port; + struct fc_rpsc_speed_info_s speeds; + struct bfa_port_attr_s pport_attr; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + rport->stats.rpsc_rcvd++; + speeds.port_speed_cap = + RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G | + RPSC_SPEED_CAP_8G; + + /* + * get curent speed from pport attributes from BFA + */ + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); + + speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, &speeds); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); +} + +static void +bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, + struct fchs_s *rx_fchs, u16 len) +{ + struct bfa_fcxp_s *fcxp; + struct fchs_s fchs; + struct bfa_fcs_lport_s *port = rport->port; + struct fc_adisc_s *adisc; + + bfa_trc(port->fcs, rx_fchs->s_id); + bfa_trc(port->fcs, rx_fchs->d_id); + + rport->stats.adisc_rcvd++; + + adisc = (struct fc_adisc_s *) (rx_fchs + 1); + + /* + * Accept if the itnim for this rport is online. + * Else reject the ADISC. 
+ */ + if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + + len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, port->port_cfg.pwwn, + port->port_cfg.nwwn); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); + } else { + rport->stats.adisc_rejected++; + bfa_fcs_rport_send_ls_rjt(rport, rx_fchs, + FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, + FC_LS_RJT_EXP_LOGIN_REQUIRED); + } +} + +static void +bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfa_rport_info_s rport_info; + + rport_info.pid = rport->pid; + rport_info.local_pid = port->pid; + rport_info.lp_tag = port->lp_tag; + rport_info.vf_id = port->fabric->vf_id; + rport_info.vf_en = port->fabric->is_vf; + rport_info.fc_class = rport->fc_cos; + rport_info.cisc = rport->cisc; + rport_info.max_frmsz = rport->maxfrsize; + bfa_rport_online(rport->bfa_rport, &rport_info); +} + +static struct bfa_fcs_rport_s * +bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) +{ + struct bfa_fcs_s *fcs = port->fcs; + struct bfa_fcs_rport_s *rport; + struct bfad_rport_s *rport_drv; + + /** + * allocate rport + */ + if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) + != BFA_STATUS_OK) { + bfa_trc(fcs, rpid); + return NULL; + } + + /* + * Initialize r-port + */ + rport->port = port; + rport->fcs = fcs; + rport->rp_drv = rport_drv; + rport->pid = rpid; + rport->pwwn = pwwn; + + /** + * allocate BFA rport + */ + rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); + if (!rport->bfa_rport) { + bfa_trc(fcs, rpid); + kfree(rport_drv); + return NULL; + } + + /** + * allocate FC-4s + */ + bfa_assert(bfa_fcs_lport_is_initiator(port)); + + if (bfa_fcs_lport_is_initiator(port)) { + rport->itnim = bfa_fcs_itnim_create(rport); + if (!rport->itnim) { + bfa_trc(fcs, rpid); + bfa_rport_delete(rport->bfa_rport); + kfree(rport_drv); + return NULL; + } + } + + bfa_fcs_lport_add_rport(port, rport); + + bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); + + /* Initialize the Rport Features(RPF) Sub Module */ + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_init(rport); + + return rport; +} + + +static void +bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + + /** + * - delete FC-4s + * - delete BFA rport + * - remove from queue of rports + */ + if (bfa_fcs_lport_is_initiator(port)) { + bfa_fcs_itnim_delete(rport->itnim); + if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_offline(rport); + } + + bfa_rport_delete(rport->bfa_rport); + bfa_fcs_lport_del_rport(port, rport); + kfree(rport->rp_drv); +} + +static void +bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + rport->stats.onlines++; + + if (bfa_fcs_lport_is_initiator(port)) { + bfa_fcs_itnim_rport_online(rport->itnim); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_online(rport); + }; + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + wwn2str(rpwwn_buf, rport->pwwn); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + BFA_LOG(KERN_INFO, bfad, log_level, + "Remote port (WWN = %s) online for logical port (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); 
+} + +static void +bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; + char lpwwn_buf[BFA_STRING_32]; + char rpwwn_buf[BFA_STRING_32]; + + rport->stats.offlines++; + + wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); + wwn2str(rpwwn_buf, rport->pwwn); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) { + if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) + BFA_LOG(KERN_ERR, bfad, log_level, + "Remote port (WWN = %s) connectivity lost for " + "logical port (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + else + BFA_LOG(KERN_INFO, bfad, log_level, + "Remote port (WWN = %s) offlined by " + "logical port (WWN = %s)\n", + rpwwn_buf, lpwwn_buf); + } + + if (bfa_fcs_lport_is_initiator(port)) { + bfa_fcs_itnim_rport_offline(rport->itnim); + if (!BFA_FCS_PID_IS_WKA(rport->pid)) + bfa_fcs_rpf_rport_offline(rport); + } +} + +/** + * Update rport parameters from PLOGI or PLOGI accept. + */ +static void +bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) +{ + bfa_fcs_lport_t *port = rport->port; + + /** + * - port name + * - node name + */ + rport->pwwn = plogi->port_name; + rport->nwwn = plogi->node_name; + + /** + * - class of service + */ + rport->fc_cos = 0; + if (plogi->class3.class_valid) + rport->fc_cos = FC_CLASS_3; + + if (plogi->class2.class_valid) + rport->fc_cos |= FC_CLASS_2; + + /** + * - CISC + * - MAX receive frame size + */ + rport->cisc = plogi->csp.cisc; + rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz); + + bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); + bfa_trc(port->fcs, port->fabric->bb_credit); + /** + * Direct Attach P2P mode : + * This is to handle a bug (233476) in IBM targets in Direct Attach + * Mode. Basically, in FLOGI Accept the target would have + * erroneously set the BB Credit to the value used in the FLOGI + * sent by the HBA. It uses the correct value (its own BB credit) + * in PLOGI. + */ + if ((!bfa_fcs_fabric_is_switched(port->fabric)) && + (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { + + bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); + bfa_trc(port->fcs, port->fabric->bb_credit); + + port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); + bfa_fcport_set_tx_bbcredit(port->fcs->bfa, + port->fabric->bb_credit); + } + +} + +/** + * Called to handle LOGO received from an existing remote port. + */ +static void +bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) +{ + rport->reply_oxid = fchs->ox_id; + bfa_trc(rport->fcs, rport->reply_oxid); + + rport->prlo = BFA_FALSE; + rport->stats.logo_rcvd++; + bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD); +} + + + +/** + * fcs_rport_public FCS rport public interfaces + */ + +/** + * Called by bport/vport to create a remote port instance for a discovered + * remote device. + * + * @param[in] port - base port or vport + * @param[in] rpid - remote port ID + * + * @return None + */ +struct bfa_fcs_rport_s * +bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid) +{ + struct bfa_fcs_rport_s *rport; + + bfa_trc(port->fcs, rpid); + rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid); + if (!rport) + return NULL; + + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); + return rport; +} + +/** + * Called to create a rport for which only the wwn is known. 
+ * + * @param[in] port - base port + * @param[in] rpwwn - remote port wwn + * + * @return None + */ +struct bfa_fcs_rport_s * +bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_rport_s *rport; + bfa_trc(port->fcs, rpwwn); + rport = bfa_fcs_rport_alloc(port, rpwwn, 0); + if (!rport) + return NULL; + + bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); + return rport; +} +/** + * Called by bport in private loop topology to indicate that a + * rport has been discovered and plogi has been completed. + * + * @param[in] port - base port or vport + * @param[in] rpid - remote port ID + */ +void +bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, + struct fc_logi_s *plogi) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); + if (!rport) + return; + + bfa_fcs_rport_update(rport, plogi); + + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); +} + +/** + * Called by bport/vport to handle PLOGI received from a new remote port. + * If an existing rport does a plogi, it will be handled separately. + */ +void +bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, + struct fc_logi_s *plogi) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id); + if (!rport) + return; + + bfa_fcs_rport_update(rport, plogi); + + rport->reply_oxid = fchs->ox_id; + bfa_trc(rport->fcs, rport->reply_oxid); + + rport->stats.plogi_rcvd++; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); +} + +static int +wwn_compare(wwn_t wwn1, wwn_t wwn2) +{ + u8 *b1 = (u8 *) &wwn1; + u8 *b2 = (u8 *) &wwn2; + int i; + + for (i = 0; i < sizeof(wwn_t); i++) { + if (b1[i] < b2[i]) + return -1; + if (b1[i] > b2[i]) + return 1; + } + return 0; +} + +/** + * Called by bport/vport to handle PLOGI received from an existing + * remote port. + */ +void +bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, + struct fc_logi_s *plogi) +{ + /** + * @todo Handle P2P and initiator-initiator. + */ + + bfa_fcs_rport_update(rport, plogi); + + rport->reply_oxid = rx_fchs->ox_id; + bfa_trc(rport->fcs, rport->reply_oxid); + + /** + * In Switched fabric topology, + * PLOGI to each other. If our pwwn is smaller, ignore it, + * if it is not a well known address. + * If the link topology is N2N, + * this Plogi should be accepted. + */ + if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) && + (bfa_fcs_fabric_is_switched(rport->port->fabric)) && + (!BFA_FCS_PID_IS_WKA(rport->pid))) { + bfa_trc(rport->fcs, rport->pid); + return; + } + + rport->stats.plogi_rcvd++; + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); +} + +/** + * Called by bport/vport to delete a remote port instance. + * + * Rport delete is called under the following conditions: + * - vport is deleted + * - vf is deleted + * - explicit request from OS to delete rport + */ +void +bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport) +{ + bfa_sm_send_event(rport, RPSM_EVENT_DELETE); +} + +/** + * Called by bport/vport to when a target goes offline. + * + */ +void +bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport) +{ + bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); +} + +/** + * Called by bport in n2n when a target (attached port) becomes online. 
+ * + */ +void +bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport) +{ + bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); +} +/** + * Called by bport/vport to notify SCN for the remote port + */ +void +bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) +{ + rport->stats.rscns++; + bfa_sm_send_event(rport, RPSM_EVENT_SCN); +} + +/** + * Called by fcpim to notify that the ITN cleanup is done. + */ +void +bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport) +{ + bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); +} + +/** + * Called by fcptm to notify that the ITN cleanup is done. + */ +void +bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport) +{ + bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); +} + +/** + * brief + * This routine BFA callback for bfa_rport_online() call. + * + * param[in] cb_arg - rport struct. + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_online(void *cbarg) +{ + + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); +} + +/** + * brief + * This routine BFA callback for bfa_rport_offline() call. + * + * param[in] rport - + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_offline(void *cbarg) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); +} + +/** + * brief + * This routine is a static BFA callback when there is a QoS flow_id + * change notification + * + * param[in] rport - + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_qos_scn_flowid(void *cbarg, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + + bfa_trc(rport->fcs, rport->pwwn); +} + +/** + * brief + * This routine is a static BFA callback when there is a QoS priority + * change notification + * + * param[in] rport - + * + * return + * void + * + * Special Considerations: + * + * note + */ +void +bfa_cb_rport_qos_scn_prio(void *cbarg, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr) +{ + struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg; + + bfa_trc(rport->fcs, rport->pwwn); +} + +/** + * Called to process any unsolicted frames from this remote port + */ +void +bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport) +{ + bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); +} + +/** + * Called to process any unsolicted frames from this remote port + */ +void +bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, + struct fchs_s *fchs, u16 len) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct fc_els_cmd_s *els_cmd; + + bfa_trc(rport->fcs, fchs->s_id); + bfa_trc(rport->fcs, fchs->d_id); + bfa_trc(rport->fcs, fchs->type); + + if (fchs->type != FC_TYPE_ELS) + return; + + els_cmd = (struct fc_els_cmd_s *) (fchs + 1); + + bfa_trc(rport->fcs, els_cmd->els_code); + + switch (els_cmd->els_code) { + case FC_ELS_LOGO: + bfa_stats(port, plogi_rcvd); + bfa_fcs_rport_process_logo(rport, fchs); + break; + + case FC_ELS_ADISC: + bfa_stats(port, adisc_rcvd); + bfa_fcs_rport_process_adisc(rport, fchs, len); + break; + + case FC_ELS_PRLO: + bfa_stats(port, prlo_rcvd); + if (bfa_fcs_lport_is_initiator(port)) + bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len); + break; + + case FC_ELS_PRLI: + bfa_stats(port, prli_rcvd); + 
bfa_fcs_rport_process_prli(rport, fchs, len); + break; + + case FC_ELS_RPSC: + bfa_stats(port, rpsc_rcvd); + bfa_fcs_rport_process_rpsc(rport, fchs, len); + break; + + default: + bfa_stats(port, un_handled_els_rcvd); + bfa_fcs_rport_send_ls_rjt(rport, fchs, + FC_LS_RJT_RSN_CMD_NOT_SUPP, + FC_LS_RJT_EXP_NO_ADDL_INFO); + break; + } +} + +/* send best case acc to prlo */ +static void +bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(rport->fcs, rport->pid); + + fcxp = bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) + return; + len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rport->pid, bfa_fcs_lport_get_fcid(port), + rport->reply_oxid, 0); + + bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, + port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, + NULL, NULL, FC_MAX_PDUSZ, 0); +} + +/* + * Send a LS reject + */ +static void +bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, + u8 reason_code, u8 reason_code_expl) +{ + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + struct bfa_fcxp_s *fcxp; + int len; + + bfa_trc(rport->fcs, rx_fchs->s_id); + + fcxp = bfa_fcs_fcxp_alloc(rport->fcs); + if (!fcxp) + return; + + len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), + rx_fchs->s_id, bfa_fcs_lport_get_fcid(port), + rx_fchs->ox_id, reason_code, reason_code_expl); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, + BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, + FC_MAX_PDUSZ, 0); +} + +/** + * Return state of rport. + */ +int +bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) +{ + return bfa_sm_to_state(rport_sm_table, rport->sm); +} + +/** + * brief + * Called by the Driver to set rport delete/ageout timeout + * + * param[in] rport timeout value in seconds. + * + * return None + */ +void +bfa_fcs_rport_set_del_timeout(u8 rport_tmo) +{ + /* convert to Millisecs */ + if (rport_tmo > 0) + bfa_fcs_rport_del_timeout = rport_tmo * 1000; +} +void +bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id) +{ + bfa_trc(rport->fcs, rport->pid); + + rport->prlo = BFA_TRUE; + rport->reply_oxid = ox_id; + bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); +} + + + +/** + * Remote port implementation. + */ + +/** + * fcs_rport_api FCS rport API. + */ + +/** + * Direct API to add a target by port wwn. This interface is used, for + * example, by bios when target pwwn is known from boot lun configuration. + */ +bfa_status_t +bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, + struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv) +{ + bfa_trc(port->fcs, *pwwn); + + return BFA_STATUS_OK; +} + +/** + * Direct API to remove a target and its associated resources. This + * interface is used, for example, by driver to remove target + * ports from the target list for a VM. + */ +bfa_status_t +bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in) +{ + + struct bfa_fcs_rport_s *rport; + + bfa_trc(rport_in->fcs, rport_in->pwwn); + + rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn); + if (rport == NULL) { + /* + * TBD Error handling + */ + bfa_trc(rport_in->fcs, rport_in->pid); + return BFA_STATUS_UNKNOWN_RWWN; + } + + /* + * TBD if this remote port is online, send a logo + */ + return BFA_STATUS_OK; + +} + +/** + * Remote device status for display/debug. 
+ */ +void +bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, + struct bfa_rport_attr_s *rport_attr) +{ + struct bfa_rport_qos_attr_s qos_attr; + bfa_fcs_lport_t *port = rport->port; + bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; + + bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); + + rport_attr->pid = rport->pid; + rport_attr->pwwn = rport->pwwn; + rport_attr->nwwn = rport->nwwn; + rport_attr->cos_supported = rport->fc_cos; + rport_attr->df_sz = rport->maxfrsize; + rport_attr->state = bfa_fcs_rport_get_state(rport); + rport_attr->fc_cos = rport->fc_cos; + rport_attr->cisc = rport->cisc; + rport_attr->scsi_function = rport->scsi_function; + rport_attr->curr_speed = rport->rpf.rpsc_speed; + rport_attr->assigned_speed = rport->rpf.assigned_speed; + + bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr); + rport_attr->qos_attr = qos_attr; + + rport_attr->trl_enforced = BFA_FALSE; + if (bfa_fcport_is_ratelim(port->fcs->bfa)) { + if (rport_speed == BFA_PORT_SPEED_UNKNOWN) { + /* Use default ratelim speed setting */ + rport_speed = + bfa_fcport_get_ratelim_speed(rport->fcs->bfa); + } + + if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) + rport_attr->trl_enforced = BFA_TRUE; + } +} + +/** + * Per remote device statistics. + */ +void +bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, + struct bfa_rport_stats_s *stats) +{ + *stats = rport->stats; +} + +void +bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport) +{ + bfa_os_memset((char *)&rport->stats, 0, + sizeof(struct bfa_rport_stats_s)); +} + +struct bfa_fcs_rport_s * +bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn); + if (rport == NULL) { + /* + * TBD Error handling + */ + } + + return rport; +} + +struct bfa_fcs_rport_s * +bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) +{ + struct bfa_fcs_rport_s *rport; + + rport = bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn); + if (rport == NULL) { + /* + * TBD Error handling + */ + } + + return rport; +} + +/* + * This API is to set the Rport's speed. Should be used when RPSC is not + * supported by the rport. + */ +void +bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed) +{ + rport->rpf.assigned_speed = speed; + + /* Set this speed in f/w only if the RPSC speed is not available */ + if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) + bfa_rport_speed(rport->bfa_rport, speed); +} + + + +/** + * Remote port features (RPF) implementation. 
+ */ + +#define BFA_FCS_RPF_RETRIES (3) +#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */ + +static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg, + struct bfa_fcxp_s *fcxp_alloced); +static void bfa_fcs_rpf_rpsc2_response(void *fcsarg, + struct bfa_fcxp_s *fcxp, + void *cbarg, + bfa_status_t req_status, + u32 rsp_len, + u32 resid_len, + struct fchs_s *rsp_fchs); + +static void bfa_fcs_rpf_timeout(void *arg); + +/** + * fcs_rport_ftrs_sm FCS rport state machine events + */ + +enum rpf_event { + RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */ + RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */ + RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */ + RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */ + RPFSM_EVENT_RPSC_COMP = 5, + RPFSM_EVENT_RPSC_FAIL = 6, + RPFSM_EVENT_RPSC_ERROR = 7, +}; + +static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); +static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, + enum rpf_event event); + +static void +bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPORT_ONLINE: + /* Send RPSC2 to a Brocade fabric only. */ + if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && + ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) || + (bfa_fcs_fabric_get_switch_oui(fabric) == + BFA_FCS_BRCD_SWITCH_OUI))) { + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); + rpf->rpsc_retries = 0; + bfa_fcs_rpf_send_rpsc2(rpf, NULL); + } + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_FCXP_SENT: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPSC_COMP: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); + /* Update speed info in f/w via BFA */ + if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN) + bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); + else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN) + bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); + break; + + case RPFSM_EVENT_RPSC_FAIL: + /* RPSC not supported by rport */ + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); + break; + + case RPFSM_EVENT_RPSC_ERROR: + /* need to retry...delayed a bit. 
*/ + if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) { + bfa_timer_start(rport->fcs->bfa, &rpf->timer, + bfa_fcs_rpf_timeout, rpf, + BFA_FCS_RPF_RETRY_TIMEOUT); + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry); + } else { + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); + } + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + bfa_fcxp_discard(rpf->fcxp); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_TIMEOUT: + /* re-send the RPSC */ + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); + bfa_fcs_rpf_send_rpsc2(rpf, NULL); + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_timer_stop(&rpf->timer); + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPORT_OFFLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); + rpf->rpsc_retries = 0; + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} + +static void +bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) +{ + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pwwn); + bfa_trc(rport->fcs, rport->pid); + bfa_trc(rport->fcs, event); + + switch (event) { + case RPFSM_EVENT_RPORT_ONLINE: + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); + bfa_fcs_rpf_send_rpsc2(rpf, NULL); + break; + + case RPFSM_EVENT_RPORT_OFFLINE: + break; + + default: + bfa_sm_fault(rport->fcs, event); + } +} +/** + * Called when Rport is created. + */ +void +bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport) +{ + struct bfa_fcs_rpf_s *rpf = &rport->rpf; + + bfa_trc(rport->fcs, rport->pid); + rpf->rport = rport; + + bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); +} + +/** + * Called when Rport becomes online + */ +void +bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport) +{ + bfa_trc(rport->fcs, rport->pid); + + if (__fcs_min_cfg(rport->port->fcs)) + return; + + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) + bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); +} + +/** + * Called when Rport becomes offline + */ +void +bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport) +{ + bfa_trc(rport->fcs, rport->pid); + + if (__fcs_min_cfg(rport->port->fcs)) + return; + + rport->rpf.rpsc_speed = 0; + bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE); +} + +static void +bfa_fcs_rpf_timeout(void *arg) +{ + struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg; + struct bfa_fcs_rport_s *rport = rpf->rport; + + bfa_trc(rport->fcs, rport->pid); + bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT); +} + +static void +bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced) +{ + struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg; + struct bfa_fcs_rport_s *rport = rpf->rport; + struct bfa_fcs_lport_s *port = rport->port; + struct fchs_s fchs; + int len; + struct bfa_fcxp_s *fcxp; + + bfa_trc(rport->fcs, rport->pwwn); + + fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); + if (!fcxp) { + bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe, + bfa_fcs_rpf_send_rpsc2, rpf); + return; + } + rpf->fcxp = fcxp; + + len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, + bfa_fcs_lport_get_fcid(port), &rport->pid, 1); + + bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, + FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response, + rpf, FC_MAX_PDUSZ, FC_ELS_TOV); + rport->stats.rpsc_sent++; + bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT); + +} + +static void +bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg; + struct bfa_fcs_rport_s *rport = rpf->rport; + struct fc_ls_rjt_s *ls_rjt; + struct fc_rpsc2_acc_s *rpsc2_acc; + u16 num_ents; + + bfa_trc(rport->fcs, req_status); + + if (req_status != BFA_STATUS_OK) { + bfa_trc(rport->fcs, req_status); + if (req_status == BFA_STATUS_ETIMER) + rport->stats.rpsc_failed++; + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); + return; + } + + rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); + if (rpsc2_acc->els_cmd == FC_ELS_ACC) { + rport->stats.rpsc_accs++; + num_ents = bfa_os_ntohs(rpsc2_acc->num_pids); + bfa_trc(rport->fcs, num_ents); + if (num_ents > 0) { + bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); + bfa_trc(rport->fcs, + bfa_os_ntohs(rpsc2_acc->port_info[0].pid)); + bfa_trc(rport->fcs, + bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); + bfa_trc(rport->fcs, + bfa_os_ntohs(rpsc2_acc->port_info[0].index)); + bfa_trc(rport->fcs, + rpsc2_acc->port_info[0].type); + + if (rpsc2_acc->port_info[0].speed == 0) { + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); + return; + } + + rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( + bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); + + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); + } + } else { + ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); + bfa_trc(rport->fcs, ls_rjt->reason_code); + bfa_trc(rport->fcs, ls_rjt->reason_code_expl); + rport->stats.rpsc_rejects++; + if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); + else + bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); + } +} diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c deleted file mode 100644 index 3d57d48bbae4..000000000000 --- a/drivers/scsi/bfa/bfa_fcs_uf.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_uf.c BFA FCS UF ( Unsolicited Frames) - */ - -#include -#include -#include -#include "fcs.h" -#include "fcs_trcmod.h" -#include "fcs_fabric.h" -#include "fcs_uf.h" - -BFA_TRC_FILE(FCS, UF); - -/** - * BFA callback for unsolicited frame receive handler. 
- * - * @param[in] cbarg callback arg for receive handler - * @param[in] uf unsolicited frame descriptor - * - * @return None - */ -static void -bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) -{ - struct bfa_fcs_s *fcs = (struct bfa_fcs_s *) cbarg; - struct fchs_s *fchs = bfa_uf_get_frmbuf(uf); - u16 len = bfa_uf_get_frmlen(uf); - struct fc_vft_s *vft; - struct bfa_fcs_fabric_s *fabric; - - /** - * check for VFT header - */ - if (fchs->routing == FC_RTG_EXT_HDR && - fchs->cat_info == FC_CAT_VFT_HDR) { - bfa_stats(fcs, uf.tagged); - vft = bfa_uf_get_frmbuf(uf); - if (fcs->port_vfid == vft->vf_id) - fabric = &fcs->fabric; - else - fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); - - /** - * drop frame if vfid is unknown - */ - if (!fabric) { - bfa_assert(0); - bfa_stats(fcs, uf.vfid_unknown); - bfa_uf_free(uf); - return; - } - - /** - * skip vft header - */ - fchs = (struct fchs_s *) (vft + 1); - len -= sizeof(struct fc_vft_s); - - bfa_trc(fcs, vft->vf_id); - } else { - bfa_stats(fcs, uf.untagged); - fabric = &fcs->fabric; - } - - bfa_trc(fcs, ((u32 *) fchs)[0]); - bfa_trc(fcs, ((u32 *) fchs)[1]); - bfa_trc(fcs, ((u32 *) fchs)[2]); - bfa_trc(fcs, ((u32 *) fchs)[3]); - bfa_trc(fcs, ((u32 *) fchs)[4]); - bfa_trc(fcs, ((u32 *) fchs)[5]); - bfa_trc(fcs, len); - - bfa_fcs_fabric_uf_recv(fabric, fchs, len); - bfa_uf_free(uf); -} - -void -bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) -{ - bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); -} diff --git a/drivers/scsi/bfa/bfa_fcxp.c b/drivers/scsi/bfa/bfa_fcxp.c deleted file mode 100644 index 8258f88bfee6..000000000000 --- a/drivers/scsi/bfa/bfa_fcxp.c +++ /dev/null @@ -1,774 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include -#include - -BFA_TRC_FILE(HAL, FCXP); -BFA_MODULE(fcxp); - -/** - * forward declarations - */ -static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); -static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, - struct bfi_fcxp_send_rsp_s *fcxp_rsp); -static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, - struct bfa_fcxp_s *fcxp, struct fchs_s *fchs); -static void bfa_fcxp_qresume(void *cbarg); -static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, - struct bfi_fcxp_send_req_s *send_req); - -/** - * fcxp_pvt BFA FCXP private functions - */ - -static void -claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) -{ - u8 *dm_kva = NULL; - u64 dm_pa; - u32 buf_pool_sz; - - dm_kva = bfa_meminfo_dma_virt(mi); - dm_pa = bfa_meminfo_dma_phys(mi); - - buf_pool_sz = mod->req_pld_sz * mod->num_fcxps; - - /* - * Initialize the fcxp req payload list - */ - mod->req_pld_list_kva = dm_kva; - mod->req_pld_list_pa = dm_pa; - dm_kva += buf_pool_sz; - dm_pa += buf_pool_sz; - bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz); - - /* - * Initialize the fcxp rsp payload list - */ - buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps; - mod->rsp_pld_list_kva = dm_kva; - mod->rsp_pld_list_pa = dm_pa; - dm_kva += buf_pool_sz; - dm_pa += buf_pool_sz; - bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); - - bfa_meminfo_dma_virt(mi) = dm_kva; - bfa_meminfo_dma_phys(mi) = dm_pa; -} - -static void -claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) -{ - u16 i; - struct bfa_fcxp_s *fcxp; - - fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); - bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); - - INIT_LIST_HEAD(&mod->fcxp_free_q); - INIT_LIST_HEAD(&mod->fcxp_active_q); - - mod->fcxp_list = fcxp; - - for (i = 0; i < mod->num_fcxps; i++) { - fcxp->fcxp_mod = mod; - fcxp->fcxp_tag = i; - - list_add_tail(&fcxp->qe, &mod->fcxp_free_q); - bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp); - fcxp->reqq_waiting = BFA_FALSE; - - fcxp = fcxp + 1; - } - - bfa_meminfo_kva(mi) = (void *)fcxp; -} - -static void -bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len) -{ - u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs; - - if (num_fcxp_reqs == 0) - return; - - /* - * Account for req/rsp payload - */ - *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; - if (cfg->drvcfg.min_cfg) - *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; - else - *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs; - - /* - * Account for fcxp structs - */ - *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs; -} - -static void -bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) -{ - struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - - bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); - mod->bfa = bfa; - mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; - - /** - * Initialize FCXP request and response payload sizes. 
- */ - mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; - if (!cfg->drvcfg.min_cfg) - mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ; - - INIT_LIST_HEAD(&mod->wait_q); - - claim_fcxp_req_rsp_mem(mod, meminfo); - claim_fcxps_mem(mod, meminfo); -} - -static void -bfa_fcxp_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_fcxp_start(struct bfa_s *bfa) -{ -} - -static void -bfa_fcxp_stop(struct bfa_s *bfa) -{ -} - -static void -bfa_fcxp_iocdisable(struct bfa_s *bfa) -{ - struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - struct bfa_fcxp_s *fcxp; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &mod->fcxp_active_q) { - fcxp = (struct bfa_fcxp_s *) qe; - if (fcxp->caller == NULL) { - fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, - BFA_STATUS_IOC_FAILURE, 0, 0, NULL); - bfa_fcxp_free(fcxp); - } else { - fcxp->rsp_status = BFA_STATUS_IOC_FAILURE; - bfa_cb_queue(bfa, &fcxp->hcb_qe, - __bfa_fcxp_send_cbfn, fcxp); - } - } -} - -static struct bfa_fcxp_s * -bfa_fcxp_get(struct bfa_fcxp_mod_s *fm) -{ - struct bfa_fcxp_s *fcxp; - - bfa_q_deq(&fm->fcxp_free_q, &fcxp); - - if (fcxp) - list_add_tail(&fcxp->qe, &fm->fcxp_active_q); - - return fcxp; -} - -static void -bfa_fcxp_put(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - struct bfa_fcxp_wqe_s *wqe; - - bfa_q_deq(&mod->wait_q, &wqe); - if (wqe) { - bfa_trc(mod->bfa, fcxp->fcxp_tag); - wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); - return; - } - - bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); - list_del(&fcxp->qe); - list_add_tail(&fcxp->qe, &mod->fcxp_free_q); -} - -static void -bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - /* discarded fcxp completion */ -} - -static void -__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcxp_s *fcxp = cbarg; - - if (complete) { - fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, - fcxp->rsp_status, fcxp->rsp_len, - fcxp->residue_len, &fcxp->rsp_fchs); - } else { - bfa_fcxp_free(fcxp); - } -} - -static void -hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) -{ - struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - struct bfa_fcxp_s *fcxp; - u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag); - - bfa_trc(bfa, fcxp_tag); - - fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len); - - /** - * @todo f/w should not set residue to non-0 when everything - * is received. 
- */ - if (fcxp_rsp->req_status == BFA_STATUS_OK) - fcxp_rsp->residue_len = 0; - else - fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len); - - fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); - - bfa_assert(fcxp->send_cbfn != NULL); - - hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); - - if (fcxp->send_cbfn != NULL) { - if (fcxp->caller == NULL) { - bfa_trc(mod->bfa, fcxp->fcxp_tag); - - fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, - fcxp_rsp->req_status, fcxp_rsp->rsp_len, - fcxp_rsp->residue_len, &fcxp_rsp->fchs); - /* - * fcxp automatically freed on return from the callback - */ - bfa_fcxp_free(fcxp); - } else { - bfa_trc(mod->bfa, fcxp->fcxp_tag); - fcxp->rsp_status = fcxp_rsp->req_status; - fcxp->rsp_len = fcxp_rsp->rsp_len; - fcxp->residue_len = fcxp_rsp->residue_len; - fcxp->rsp_fchs = fcxp_rsp->fchs; - - bfa_cb_queue(bfa, &fcxp->hcb_qe, - __bfa_fcxp_send_cbfn, fcxp); - } - } else { - bfa_trc(bfa, fcxp_tag); - } -} - -static void -hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa) -{ - union bfi_addr_u sga_zero = { {0} }; - - sge->sg_len = reqlen; - sge->flags = BFI_SGE_DATA_LAST; - bfa_dma_addr_set(sge[0].sga, req_pa); - bfa_sge_to_be(sge); - sge++; - - sge->sga = sga_zero; - sge->sg_len = reqlen; - sge->flags = BFI_SGE_PGDLEN; - bfa_sge_to_be(sge); -} - -static void -hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, - struct fchs_s *fchs) -{ - /* - * TODO: TX ox_id - */ - if (reqlen > 0) { - if (fcxp->use_ireqbuf) { - u32 pld_w0 = - *((u32 *) BFA_FCXP_REQ_PLD(fcxp)); - - bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, - BFA_PL_EID_TX, - reqlen + sizeof(struct fchs_s), fchs, pld_w0); - } else { - bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, - BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s), - fchs); - } - } else { - bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, - reqlen + sizeof(struct fchs_s), fchs); - } -} - -static void -hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, - struct bfi_fcxp_send_rsp_s *fcxp_rsp) -{ - if (fcxp_rsp->rsp_len > 0) { - if (fcxp->use_irspbuf) { - u32 pld_w0 = - *((u32 *) BFA_FCXP_RSP_PLD(fcxp)); - - bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, - BFA_PL_EID_RX, - (u16) fcxp_rsp->rsp_len, - &fcxp_rsp->fchs, pld_w0); - } else { - bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, - BFA_PL_EID_RX, - (u16) fcxp_rsp->rsp_len, - &fcxp_rsp->fchs); - } - } else { - bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, - (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs); - } -} - -/** - * Handler to resume sending fcxp when space in available in cpe queue. - */ -static void -bfa_fcxp_qresume(void *cbarg) -{ - struct bfa_fcxp_s *fcxp = cbarg; - struct bfa_s *bfa = fcxp->fcxp_mod->bfa; - struct bfi_fcxp_send_req_s *send_req; - - fcxp->reqq_waiting = BFA_FALSE; - send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); - bfa_fcxp_queue(fcxp, send_req); -} - -/** - * Queue fcxp send request to foimrware. 
- */ -static void -bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) -{ - struct bfa_s *bfa = fcxp->fcxp_mod->bfa; - struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; - struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; - struct bfa_rport_s *rport = reqi->bfa_rport; - - bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, - bfa_lpuid(bfa)); - - send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag); - if (rport) { - send_req->rport_fw_hndl = rport->fw_handle; - send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz); - if (send_req->max_frmsz == 0) - send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); - } else { - send_req->rport_fw_hndl = 0; - send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); - } - - send_req->vf_id = bfa_os_htons(reqi->vf_id); - send_req->lp_tag = reqi->lp_tag; - send_req->class = reqi->class; - send_req->rsp_timeout = rspi->rsp_timeout; - send_req->cts = reqi->cts; - send_req->fchs = reqi->fchs; - - send_req->req_len = bfa_os_htonl(reqi->req_tot_len); - send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen); - - /* - * setup req sgles - */ - if (fcxp->use_ireqbuf == 1) { - hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len, - BFA_FCXP_REQ_PLD_PA(fcxp)); - } else { - if (fcxp->nreq_sgles > 0) { - bfa_assert(fcxp->nreq_sgles == 1); - hal_fcxp_set_local_sges(send_req->req_sge, - reqi->req_tot_len, - fcxp->req_sga_cbfn(fcxp->caller, - 0)); - } else { - bfa_assert(reqi->req_tot_len == 0); - hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); - } - } - - /* - * setup rsp sgles - */ - if (fcxp->use_irspbuf == 1) { - bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ); - - hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, - BFA_FCXP_RSP_PLD_PA(fcxp)); - - } else { - if (fcxp->nrsp_sgles > 0) { - bfa_assert(fcxp->nrsp_sgles == 1); - hal_fcxp_set_local_sges(send_req->rsp_sge, - rspi->rsp_maxlen, - fcxp->rsp_sga_cbfn(fcxp->caller, - 0)); - } else { - bfa_assert(rspi->rsp_maxlen == 0); - hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); - } - } - - hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); - - bfa_reqq_produce(bfa, BFA_REQQ_FCXP); - - bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); - bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); -} - - -/** - * hal_fcxp_api BFA FCXP API - */ - -/** - * Allocate an FCXP instance to send a response or to send a request - * that has a response. Request/response buffers are allocated by caller. - * - * @param[in] bfa BFA bfa instance - * @param[in] nreq_sgles Number of SG elements required for request - * buffer. 0, if fcxp internal buffers are used. - * Use bfa_fcxp_get_reqbuf() to get the - * internal req buffer. - * @param[in] req_sgles SG elements describing request buffer. Will be - * copied in by BFA and hence can be freed on - * return from this function. - * @param[in] get_req_sga function ptr to be called to get a request SG - * Address (given the sge index). - * @param[in] get_req_sglen function ptr to be called to get a request SG - * len (given the sge index). - * @param[in] get_rsp_sga function ptr to be called to get a response SG - * Address (given the sge index). - * @param[in] get_rsp_sglen function ptr to be called to get a response SG - * len (given the sge index). - * - * @return FCXP instance. NULL on failure. 
- */ -struct bfa_fcxp_s * -bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, - int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, - bfa_fcxp_get_sglen_t req_sglen_cbfn, - bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, - bfa_fcxp_get_sglen_t rsp_sglen_cbfn) -{ - struct bfa_fcxp_s *fcxp = NULL; - u32 nreq_sgpg, nrsp_sgpg; - - bfa_assert(bfa != NULL); - - fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); - if (fcxp == NULL) - return NULL; - - bfa_trc(bfa, fcxp->fcxp_tag); - - fcxp->caller = caller; - - if (nreq_sgles == 0) { - fcxp->use_ireqbuf = 1; - } else { - bfa_assert(req_sga_cbfn != NULL); - bfa_assert(req_sglen_cbfn != NULL); - - fcxp->use_ireqbuf = 0; - fcxp->req_sga_cbfn = req_sga_cbfn; - fcxp->req_sglen_cbfn = req_sglen_cbfn; - - fcxp->nreq_sgles = nreq_sgles; - - /* - * alloc required sgpgs - */ - if (nreq_sgles > BFI_SGE_INLINE) { - nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles); - - if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg) - != BFA_STATUS_OK) { - /* - * TODO - */ - } - } - } - - if (nrsp_sgles == 0) { - fcxp->use_irspbuf = 1; - } else { - bfa_assert(rsp_sga_cbfn != NULL); - bfa_assert(rsp_sglen_cbfn != NULL); - - fcxp->use_irspbuf = 0; - fcxp->rsp_sga_cbfn = rsp_sga_cbfn; - fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn; - - fcxp->nrsp_sgles = nrsp_sgles; - /* - * alloc required sgpgs - */ - if (nrsp_sgles > BFI_SGE_INLINE) { - nrsp_sgpg = BFA_SGPG_NPAGE(nreq_sgles); - - if (bfa_sgpg_malloc - (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg) - != BFA_STATUS_OK) { - /* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe, - nrsp_sgpg); */ - /* - * TODO - */ - } - } - } - - return fcxp; -} - -/** - * Get the internal request buffer pointer - * - * @param[in] fcxp BFA fcxp pointer - * - * @return pointer to the internal request buffer - */ -void * -bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - void *reqbuf; - - bfa_assert(fcxp->use_ireqbuf == 1); - reqbuf = ((u8 *)mod->req_pld_list_kva) + - fcxp->fcxp_tag * mod->req_pld_sz; - return reqbuf; -} - -u32 -bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - - return mod->req_pld_sz; -} - -/** - * Get the internal response buffer pointer - * - * @param[in] fcxp BFA fcxp pointer - * - * @return pointer to the internal request buffer - */ -void * -bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - void *rspbuf; - - bfa_assert(fcxp->use_irspbuf == 1); - - rspbuf = ((u8 *)mod->rsp_pld_list_kva) + - fcxp->fcxp_tag * mod->rsp_pld_sz; - return rspbuf; -} - -/** - * Free the BFA FCXP - * - * @param[in] fcxp BFA fcxp pointer - * - * @return void - */ -void -bfa_fcxp_free(struct bfa_fcxp_s *fcxp) -{ - struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; - - bfa_assert(fcxp != NULL); - bfa_trc(mod->bfa, fcxp->fcxp_tag); - bfa_fcxp_put(fcxp); -} - -/** - * Send a FCXP request - * - * @param[in] fcxp BFA fcxp pointer - * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports - * @param[in] vf_id virtual Fabric ID - * @param[in] lp_tag lport tag - * @param[in] cts use Continous sequence - * @param[in] cos fc Class of Service - * @param[in] reqlen request length, does not include FCHS length - * @param[in] fchs fc Header Pointer. The header content will be copied - * in by BFA. 
- * - * @param[in] cbfn call back function to be called on receiving - * the response - * @param[in] cbarg arg for cbfn - * @param[in] rsp_timeout - * response timeout - * - * @return bfa_status_t - */ -void -bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, - u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos, - u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn, - void *cbarg, u32 rsp_maxlen, u8 rsp_timeout) -{ - struct bfa_s *bfa = fcxp->fcxp_mod->bfa; - struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; - struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; - struct bfi_fcxp_send_req_s *send_req; - - bfa_trc(bfa, fcxp->fcxp_tag); - - /** - * setup request/response info - */ - reqi->bfa_rport = rport; - reqi->vf_id = vf_id; - reqi->lp_tag = lp_tag; - reqi->class = cos; - rspi->rsp_timeout = rsp_timeout; - reqi->cts = cts; - reqi->fchs = *fchs; - reqi->req_tot_len = reqlen; - rspi->rsp_maxlen = rsp_maxlen; - fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp; - fcxp->send_cbarg = cbarg; - - /** - * If no room in CPE queue, wait for space in request queue - */ - send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); - if (!send_req) { - bfa_trc(bfa, fcxp->fcxp_tag); - fcxp->reqq_waiting = BFA_TRUE; - bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe); - return; - } - - bfa_fcxp_queue(fcxp, send_req); -} - -/** - * Abort a BFA FCXP - * - * @param[in] fcxp BFA fcxp pointer - * - * @return void - */ -bfa_status_t -bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) -{ - bfa_assert(0); - return BFA_STATUS_OK; -} - -void -bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, - bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg) -{ - struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - - bfa_assert(list_empty(&mod->fcxp_free_q)); - - wqe->alloc_cbfn = alloc_cbfn; - wqe->alloc_cbarg = alloc_cbarg; - list_add_tail(&wqe->qe, &mod->wait_q); -} - -void -bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) -{ - struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - - bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe)); - list_del(&wqe->qe); -} - -void -bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) -{ - /** - * If waiting for room in request queue, cancel reqq wait - * and free fcxp. - */ - if (fcxp->reqq_waiting) { - fcxp->reqq_waiting = BFA_FALSE; - bfa_reqq_wcancel(&fcxp->reqq_wqe); - bfa_fcxp_free(fcxp); - return; - } - - fcxp->send_cbfn = bfa_fcxp_null_comp; -} - - - -/** - * hal_fcxp_public BFA FCXP public functions - */ - -void -bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) -{ - switch (msg->mhdr.msg_id) { - case BFI_FCXP_I2H_SEND_RSP: - hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg); - break; - - default: - bfa_trc(bfa, msg->mhdr.msg_id); - bfa_assert(0); - } -} - -u32 -bfa_fcxp_get_maxrsp(struct bfa_s *bfa) -{ - struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - - return mod->rsp_pld_sz; -} - - diff --git a/drivers/scsi/bfa/bfa_fcxp_priv.h b/drivers/scsi/bfa/bfa_fcxp_priv.h deleted file mode 100644 index 4cda49397da0..000000000000 --- a/drivers/scsi/bfa/bfa_fcxp_priv.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FCXP_PRIV_H__ -#define __BFA_FCXP_PRIV_H__ - -#include -#include -#include -#include - -#define BFA_FCXP_MIN (1) -#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256) -#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256) - -struct bfa_fcxp_mod_s { - struct bfa_s *bfa; /* backpointer to BFA */ - struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */ - u16 num_fcxps; /* max num FCXP requests */ - struct list_head fcxp_free_q; /* free FCXPs */ - struct list_head fcxp_active_q; /* active FCXPs */ - void *req_pld_list_kva; /* list of FCXP req pld */ - u64 req_pld_list_pa; /* list of FCXP req pld */ - void *rsp_pld_list_kva; /* list of FCXP resp pld */ - u64 rsp_pld_list_pa; /* list of FCXP resp pld */ - struct list_head wait_q; /* wait queue for free fcxp */ - u32 req_pld_sz; - u32 rsp_pld_sz; -}; - -#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod) -#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag]) - -typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp, - void *cb_arg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs); - -/** - * Information needed for a FCXP request - */ -struct bfa_fcxp_req_info_s { - struct bfa_rport_s *bfa_rport; /* Pointer to the bfa rport that was - *returned from bfa_rport_create(). - *This could be left NULL for WKA or for - *FCXP interactions before the rport - *nexus is established - */ - struct fchs_s fchs; /* request FC header structure */ - u8 cts; /* continous sequence */ - u8 class; /* FC class for the request/response */ - u16 max_frmsz; /* max send frame size */ - u16 vf_id; /* vsan tag if applicable */ - u8 lp_tag; /* lport tag */ - u32 req_tot_len; /* request payload total length */ -}; - -struct bfa_fcxp_rsp_info_s { - struct fchs_s rsp_fchs; /* Response frame's FC header will - * be *sent back in this field */ - u8 rsp_timeout; /* timeout in seconds, 0-no response - */ - u8 rsvd2[3]; - u32 rsp_maxlen; /* max response length expected */ -}; - -struct bfa_fcxp_s { - struct list_head qe; /* fcxp queue element */ - bfa_sm_t sm; /* state machine */ - void *caller; /* driver or fcs */ - struct bfa_fcxp_mod_s *fcxp_mod; - /* back pointer to fcxp mod */ - u16 fcxp_tag; /* internal tag */ - struct bfa_fcxp_req_info_s req_info; - /* request info */ - struct bfa_fcxp_rsp_info_s rsp_info; - /* response info */ - u8 use_ireqbuf; /* use internal req buf */ - u8 use_irspbuf; /* use internal rsp buf */ - u32 nreq_sgles; /* num request SGLEs */ - u32 nrsp_sgles; /* num response SGLEs */ - struct list_head req_sgpg_q; /* SG pages for request buf */ - struct list_head req_sgpg_wqe; /* wait queue for req SG page */ - struct list_head rsp_sgpg_q; /* SG pages for response buf */ - struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */ - - bfa_fcxp_get_sgaddr_t req_sga_cbfn; - /* SG elem addr user function */ - bfa_fcxp_get_sglen_t req_sglen_cbfn; - /* SG elem len user function */ - bfa_fcxp_get_sgaddr_t rsp_sga_cbfn; - /* SG elem addr user function */ - bfa_fcxp_get_sglen_t rsp_sglen_cbfn; - /* SG elem len user function */ - 
bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */ - void *send_cbarg; /* callback arg */ - struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES]; - /* req SG elems */ - struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; - /* rsp SG elems */ - u8 rsp_status; /* comp: rsp status */ - u32 rsp_len; /* comp: actual response len */ - u32 residue_len; /* comp: residual rsp length */ - struct fchs_s rsp_fchs; /* comp: response fchs */ - struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */ - struct bfa_reqq_wait_s reqq_wqe; - bfa_boolean_t reqq_waiting; -}; - -#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp)) - -#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs)) -#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp)) - -#define BFA_FCXP_REQ_PLD_PA(_fcxp) \ - ((_fcxp)->fcxp_mod->req_pld_list_pa + \ - ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag)) - -#define BFA_FCXP_RSP_PLD_PA(_fcxp) \ - ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \ - ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag)) - -void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -#endif /* __BFA_FCXP_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_fwimg_priv.h b/drivers/scsi/bfa/bfa_fwimg_priv.h deleted file mode 100644 index d33e19e54395..000000000000 --- a/drivers/scsi/bfa/bfa_fwimg_priv.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FWIMG_PRIV_H__ -#define __BFA_FWIMG_PRIV_H__ - -#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ -#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) - -/** - * BFI FW image type - */ -enum { - BFI_IMAGE_CB_FC, - BFI_IMAGE_CT_FC, - BFI_IMAGE_CT_CNA, - BFI_IMAGE_MAX, -}; - -extern u32 *bfi_image_get_chunk(int type, uint32_t off); -extern u32 bfi_image_get_size(int type); -extern u32 bfi_image_ct_fc_size; -extern u32 bfi_image_ct_cna_size; -extern u32 bfi_image_cb_fc_size; -extern u32 *bfi_image_ct_fc; -extern u32 *bfi_image_ct_cna; -extern u32 *bfi_image_cb_fc; - - -#endif /* __BFA_FWIMG_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index edfd729445cf..c787d3af0886 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,15 +15,15 @@ * General Public License for more details. 
*/ -#include -#include +#include "bfa_modules.h" +#include "bfi_cbreg.h" void bfa_hwcb_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); - int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); + int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index a357fb3066fd..c97ebafec5ea 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,9 +15,8 @@ * General Public License for more details. */ -#include -#include -#include +#include "bfa_modules.h" +#include "bfi_ctreg.h" BFA_TRC_FILE(HAL, IOCFC_CT); @@ -53,7 +52,7 @@ bfa_hwct_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); - int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); + int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); @@ -87,7 +86,7 @@ bfa_hwct_reginit(struct bfa_s *bfa) void bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) { - u32 r32; + u32 r32; r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c deleted file mode 100644 index 493678889b24..000000000000 --- a/drivers/scsi/bfa/bfa_intr.c +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#include -#include -#include -#include -#include - -BFA_TRC_FILE(HAL, INTR); - -static void -bfa_msix_errint(struct bfa_s *bfa, u32 intr) -{ - bfa_ioc_error_isr(&bfa->ioc); -} - -static void -bfa_msix_lpu(struct bfa_s *bfa) -{ - bfa_ioc_mbox_isr(&bfa->ioc); -} - -static void -bfa_reqq_resume(struct bfa_s *bfa, int qid) -{ - struct list_head *waitq, *qe, *qen; - struct bfa_reqq_wait_s *wqe; - - waitq = bfa_reqq(bfa, qid); - list_for_each_safe(qe, qen, waitq) { - /** - * Callback only as long as there is room in request queue - */ - if (bfa_reqq_full(bfa, qid)) - break; - - list_del(qe); - wqe = (struct bfa_reqq_wait_s *) qe; - wqe->qresume(wqe->cbarg); - } -} - -void -bfa_msix_all(struct bfa_s *bfa, int vec) -{ - bfa_intx(bfa); -} - -/** - * hal_intr_api - */ -bfa_boolean_t -bfa_intx(struct bfa_s *bfa) -{ - u32 intr, qintr; - int queue; - - intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); - if (!intr) - return BFA_FALSE; - - /** - * RME completion queue interrupt - */ - qintr = intr & __HFN_INT_RME_MASK; - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); - - for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { - if (intr & (__HFN_INT_RME_Q0 << queue)) - bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); - } - intr &= ~qintr; - if (!intr) - return BFA_TRUE; - - /** - * CPE completion queue interrupt - */ - qintr = intr & __HFN_INT_CPE_MASK; - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); - - for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { - if (intr & (__HFN_INT_CPE_Q0 << queue)) - bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1)); - } - intr &= ~qintr; - if (!intr) - return BFA_TRUE; - - bfa_msix_lpu_err(bfa, intr); - - return BFA_TRUE; -} - -void -bfa_isr_enable(struct bfa_s *bfa) -{ - u32 intr_unmask; - int pci_func = bfa_ioc_pcifn(&bfa->ioc); - - bfa_trc(bfa, pci_func); - - bfa_msix_install(bfa); - intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | - __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | - __HFN_INT_LL_HALT); - - if (pci_func == 0) - intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | - __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | - __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | - __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | - __HFN_INT_MBOX_LPU0); - else - intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | - __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | - __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | - __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | - __HFN_INT_MBOX_LPU1); - - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); - bfa->iocfc.intr_mask = ~intr_unmask; - bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); -} - -void -bfa_isr_disable(struct bfa_s *bfa) -{ - bfa_isr_mode_set(bfa, BFA_FALSE); - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); - bfa_msix_uninstall(bfa); -} - -void -bfa_msix_reqq(struct bfa_s *bfa, int qid) -{ - struct list_head *waitq; - - qid &= (BFI_IOC_MAX_CQS - 1); - - bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); - - /** - * Resume any pending requests in the corresponding reqq. 
- */ - waitq = bfa_reqq(bfa, qid); - if (!list_empty(waitq)) - bfa_reqq_resume(bfa, qid); -} - -void -bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - bfa_trc(bfa, m->mhdr.msg_class); - bfa_trc(bfa, m->mhdr.msg_id); - bfa_trc(bfa, m->mhdr.mtag.i2htok); - bfa_assert(0); - bfa_trc_stop(bfa->trcmod); -} - -void -bfa_msix_rspq(struct bfa_s *bfa, int qid) -{ - struct bfi_msg_s *m; - u32 pi, ci; - struct list_head *waitq; - - bfa_trc_fp(bfa, qid); - - qid &= (BFI_IOC_MAX_CQS - 1); - - bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); - - ci = bfa_rspq_ci(bfa, qid); - pi = bfa_rspq_pi(bfa, qid); - - bfa_trc_fp(bfa, ci); - bfa_trc_fp(bfa, pi); - - if (bfa->rme_process) { - while (ci != pi) { - m = bfa_rspq_elem(bfa, qid, ci); - bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); - - bfa_isrs[m->mhdr.msg_class] (bfa, m); - - CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); - } - } - - /** - * update CI - */ - bfa_rspq_ci(bfa, qid) = pi; - bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); - bfa_os_mmiowb(); - - /** - * Resume any pending requests in the corresponding reqq. - */ - waitq = bfa_reqq(bfa, qid); - if (!list_empty(waitq)) - bfa_reqq_resume(bfa, qid); -} - -void -bfa_msix_lpu_err(struct bfa_s *bfa, int vec) -{ - u32 intr, curr_value; - - intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); - - if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) - bfa_msix_lpu(bfa); - - intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | - __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); - - if (intr) { - if (intr & __HFN_INT_LL_HALT) { - /** - * If LL_HALT bit is set then FW Init Halt LL Port - * Register needs to be cleared as well so Interrupt - * Status Register will be cleared. - */ - curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); - curr_value &= ~__FW_INIT_HALT_P; - bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); - } - - if (intr & __HFN_INT_ERR_PSS) { - /** - * ERR_PSS bit needs to be cleared as well in case - * interrups are shared so driver's interrupt handler is - * still called eventhough it is already masked out. - */ - curr_value = bfa_reg_read( - bfa->ioc.ioc_regs.pss_err_status_reg); - curr_value &= __PSS_ERR_STATUS_SET; - bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, - curr_value); - } - - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); - bfa_msix_errint(bfa, intr); - } -} - -void -bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func) -{ - bfa_isrs[mc] = isr_func; -} - - diff --git a/drivers/scsi/bfa/bfa_intr_priv.h b/drivers/scsi/bfa/bfa_intr_priv.h deleted file mode 100644 index 5fc301cf4d1b..000000000000 --- a/drivers/scsi/bfa/bfa_intr_priv.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_INTR_PRIV_H__ -#define __BFA_INTR_PRIV_H__ - -/** - * Message handler - */ -typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); -void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); -void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); - - -#define bfa_reqq_pi(__bfa, __reqq) ((__bfa)->iocfc.req_cq_pi[__reqq]) -#define bfa_reqq_ci(__bfa, __reqq) \ - (*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva)) - -#define bfa_reqq_full(__bfa, __reqq) \ - (((bfa_reqq_pi(__bfa, __reqq) + 1) & \ - ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) == \ - bfa_reqq_ci(__bfa, __reqq)) - -#define bfa_reqq_next(__bfa, __reqq) \ - (bfa_reqq_full(__bfa, __reqq) ? NULL : \ - ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \ - + bfa_reqq_pi((__bfa), (__reqq))))) - -#define bfa_reqq_produce(__bfa, __reqq) do { \ - (__bfa)->iocfc.req_cq_pi[__reqq]++; \ - (__bfa)->iocfc.req_cq_pi[__reqq] &= \ - ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ - bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \ - (__bfa)->iocfc.req_cq_pi[__reqq]); \ - bfa_os_mmiowb(); \ -} while (0) - -#define bfa_rspq_pi(__bfa, __rspq) \ - (*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva)) - -#define bfa_rspq_ci(__bfa, __rspq) ((__bfa)->iocfc.rsp_cq_ci[__rspq]) -#define bfa_rspq_elem(__bfa, __rspq, __ci) \ - (&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci]) - -#define CQ_INCR(__index, __size) do { \ - (__index)++; \ - (__index) &= ((__size) - 1); \ -} while (0) - -/** - * Queue element to wait for room in request queue. FIFO order is - * maintained when fullfilling requests. - */ -struct bfa_reqq_wait_s { - struct list_head qe; - void (*qresume) (void *cbarg); - void *cbarg; -}; - -/** - * Circular queue usage assignments - */ -enum { - BFA_REQQ_IOC = 0, /* all low-priority IOC msgs */ - BFA_REQQ_FCXP = 0, /* all FCXP messages */ - BFA_REQQ_LPS = 0, /* all lport service msgs */ - BFA_REQQ_PORT = 0, /* all port messages */ - BFA_REQQ_FLASH = 0, /* for flash module */ - BFA_REQQ_DIAG = 0, /* for diag module */ - BFA_REQQ_RPORT = 0, /* all port messages */ - BFA_REQQ_SBOOT = 0, /* all san boot messages */ - BFA_REQQ_QOS_LO = 1, /* all low priority IO */ - BFA_REQQ_QOS_MD = 2, /* all medium priority IO */ - BFA_REQQ_QOS_HI = 3, /* all high priority IO */ -}; - -static inline void -bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), - void *cbarg) -{ - wqe->qresume = qresume; - wqe->cbarg = cbarg; -} - -#define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) - -/** - * static inline void - * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe) - */ -#define bfa_reqq_wait(__bfa, __reqq, __wqe) do { \ - \ - struct list_head *waitq = bfa_reqq(__bfa, __reqq); \ - \ - bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \ - bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \ - \ - list_add_tail(&(__wqe)->qe, waitq); \ -} while (0) - -#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) - -#endif /* __BFA_INTR_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 8e78f20110a5..6795b247791a 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,35 +15,33 @@ * General Public License for more details. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "bfa_ioc.h" +#include "bfi_ctreg.h" +#include "bfa_defs.h" +#include "bfa_defs_svc.h" +#include "bfad_drv.h" BFA_TRC_FILE(CNA, IOC); /** * IOC local definitions */ -#define BFA_IOC_TOV 2000 /* msecs */ -#define BFA_IOC_HWSEM_TOV 500 /* msecs */ -#define BFA_IOC_HB_TOV 500 /* msecs */ -#define BFA_IOC_HWINIT_MAX 2 -#define BFA_IOC_FWIMG_MINSZ (16 * 1024) -#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV +#define BFA_IOC_TOV 3000 /* msecs */ +#define BFA_IOC_HWSEM_TOV 500 /* msecs */ +#define BFA_IOC_HB_TOV 500 /* msecs */ +#define BFA_IOC_HWINIT_MAX 2 +#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV #define bfa_ioc_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ bfa_ioc_timeout, (__ioc), BFA_IOC_TOV) #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) +#define bfa_hb_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \ + bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) +#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) + #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) #define BFA_DBG_FWTRC_LEN \ (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ @@ -55,100 +53,226 @@ BFA_TRC_FILE(CNA, IOC); * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */ -#define bfa_ioc_firmware_lock(__ioc) \ +#define bfa_ioc_firmware_lock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) -#define bfa_ioc_firmware_unlock(__ioc) \ +#define bfa_ioc_firmware_unlock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) -#define bfa_ioc_notify_hbfail(__ioc) \ +#define bfa_ioc_notify_hbfail(__ioc) \ ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) -#define bfa_ioc_is_optrom(__ioc) \ - (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) -bfa_boolean_t bfa_auto_recover = BFA_TRUE; +#ifdef BFA_IOC_IS_UEFI +#define bfa_ioc_is_bios_optrom(__ioc) (0) +#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI +#else +#define bfa_ioc_is_bios_optrom(__ioc) \ + (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) +#define bfa_ioc_is_uefi(__ioc) (0) +#endif + +#define bfa_ioc_mbox_cmd_pending(__ioc) \ + (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ + bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd)) + +bfa_boolean_t bfa_auto_recover = BFA_TRUE; /* * forward declarations */ -static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); -static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); -static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); -static void bfa_ioc_timeout(void *ioc); -static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); -static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); -static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); -static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); -static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); -static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); -static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); -static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); -static void bfa_ioc_recover(struct bfa_ioc_s *ioc); -static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); -static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); -static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); +static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); +static void 
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); +static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); +static void bfa_ioc_timeout(void *ioc); +static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); +static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); +static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); +static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); +static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); +static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); +static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); +static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); +static void bfa_ioc_recover(struct bfa_ioc_s *ioc); +static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); +static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); +static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); +static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc); +static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc); +static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); +static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); /** - * bfa_ioc_sm + * hal_ioc_sm */ /** - * IOC state machine events + * IOC state machine definitions/declarations */ enum ioc_event { - IOC_E_ENABLE = 1, /* IOC enable request */ - IOC_E_DISABLE = 2, /* IOC disable request */ - IOC_E_TIMEOUT = 3, /* f/w response timeout */ - IOC_E_FWREADY = 4, /* f/w initialization done */ - IOC_E_FWRSP_GETATTR = 5, /* IOC get attribute response */ - IOC_E_FWRSP_ENABLE = 6, /* enable f/w response */ - IOC_E_FWRSP_DISABLE = 7, /* disable f/w response */ - IOC_E_HBFAIL = 8, /* heartbeat failure */ - IOC_E_HWERROR = 9, /* hardware error interrupt */ - IOC_E_SEMLOCKED = 10, /* h/w semaphore is locked */ - IOC_E_DETACH = 11, /* driver detach cleanup */ + IOC_E_RESET = 1, /* IOC reset request */ + IOC_E_ENABLE = 2, /* IOC enable request */ + IOC_E_DISABLE = 3, /* IOC disable request */ + IOC_E_DETACH = 4, /* driver detach cleanup */ + IOC_E_ENABLED = 5, /* f/w enabled */ + IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ + IOC_E_DISABLED = 7, /* f/w disabled */ + IOC_E_FAILED = 8, /* failure notice by iocpf sm */ + IOC_E_HBFAIL = 9, /* heartbeat failure */ + IOC_E_HWERROR = 10, /* hardware error interrupt */ + IOC_E_TIMEOUT = 11, /* timeout */ }; +bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); -bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); static struct bfa_sm_table_s ioc_sm_table[] = { + {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, - {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, - {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH}, - 
{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT}, - {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT}, - {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT}, + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, - {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, + {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, }; +/** + * IOCPF state machine definitions/declarations + */ + +#define bfa_iocpf_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ + bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV) +#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer) + +#define bfa_iocpf_recovery_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ + bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER) + +#define bfa_sem_timer_start(__ioc) \ + bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \ + bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV) +#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer) + +/* + * Forward declareations for iocpf state machine + */ +static void bfa_iocpf_enable(struct bfa_ioc_s *ioc); +static void bfa_iocpf_disable(struct bfa_ioc_s *ioc); +static void bfa_iocpf_fail(struct bfa_ioc_s *ioc); +static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc); +static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc); +static void bfa_iocpf_stop(struct bfa_ioc_s *ioc); +static void bfa_iocpf_timeout(void *ioc_arg); +static void bfa_iocpf_sem_timeout(void *ioc_arg); + +/** + * IOCPF state machine events + */ +enum iocpf_event { + IOCPF_E_ENABLE = 1, /* IOCPF enable request */ + IOCPF_E_DISABLE = 2, /* IOCPF disable request */ + IOCPF_E_STOP = 3, /* stop on driver detach */ + IOCPF_E_FWREADY = 4, /* f/w initialization done */ + IOCPF_E_FWRSP_ENABLE = 5, /* enable f/w response */ + IOCPF_E_FWRSP_DISABLE = 6, /* disable f/w response */ + IOCPF_E_FAIL = 7, /* failure notice by ioc sm */ + IOCPF_E_INITFAIL = 8, /* init fail notice by ioc sm */ + IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */ + IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */ + IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ +}; + +/** + * IOCPF states + */ +enum bfa_iocpf_state { + BFA_IOCPF_RESET = 1, /* IOC is in reset state */ + BFA_IOCPF_SEMWAIT = 2, /* Waiting for IOC h/w semaphore */ + BFA_IOCPF_HWINIT = 3, /* IOC h/w is being initialized */ + BFA_IOCPF_READY = 4, /* IOCPF is initialized */ + BFA_IOCPF_INITFAIL = 5, /* IOCPF failed */ + BFA_IOCPF_FAIL = 6, /* IOCPF failed */ + BFA_IOCPF_DISABLING = 7, /* IOCPF is being disabled */ + BFA_IOCPF_DISABLED = 8, /* IOCPF is disabled */ + BFA_IOCPF_FWMISMATCH = 9, /* IOC f/w different from drivers */ +}; + +bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail, struct 
bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); + +static struct bfa_sm_table_s iocpf_sm_table[] = { + {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, + {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, + {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, + {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, +}; + +/** + * IOC State Machine + */ + +/** + * Beginning state. IOC uninit state. + */ + +static void +bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc) +{ +} + +/** + * IOC is in uninit state. + */ +static void +bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_RESET: + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + break; + + default: + bfa_sm_fault(ioc, event); + } +} /** * Reset entry actions -- initialize state machine */ static void bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) { - ioc->retry_count = 0; - ioc->auto_recover = bfa_auto_recover; + bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); } /** - * Beginning state. IOC is in reset state. + * IOC is in reset state. */ static void bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) @@ -157,7 +281,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) switch (event) { case IOC_E_ENABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); break; case IOC_E_DISABLE: @@ -165,6 +289,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) break; case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); break; default: @@ -172,46 +297,209 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) } } + +static void +bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) +{ + bfa_iocpf_enable(ioc); +} + /** - * Semaphore should be acquired for version check. + * Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. */ static void -bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc) +bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) { - bfa_ioc_hw_sem_get(ioc); + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_FAILED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); + break; + + case IOC_E_HWERROR: + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) +{ + bfa_ioc_timer_start(ioc); + bfa_ioc_send_getattr(ioc); } /** - * Awaiting h/w semaphore to continue with version check. + * IOC configuration in progress. Timer is active. 
*/ static void -bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { - case IOC_E_SEMLOCKED: - if (bfa_ioc_firmware_lock(ioc)) { - ioc->retry_count = 0; - bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); - } else { - bfa_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); - } + case IOC_E_FWRSP_GETATTR: + bfa_ioc_timer_stop(ioc); + bfa_ioc_check_attr_wwns(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + break; + + case IOC_E_FAILED: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); + break; + + case IOC_E_HWERROR: + bfa_ioc_timer_stop(ioc); + /* fall through */ + + case IOC_E_TIMEOUT: + bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); + bfa_iocpf_getattrfail(ioc); break; case IOC_E_DISABLE: - bfa_ioc_disable_comp(ioc); + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); + bfa_ioc_hb_monitor(ioc); + BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n"); +} + +static void +bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_ioc_hb_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_FAILED: + bfa_ioc_hb_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + break; + + case IOC_E_HWERROR: + bfa_ioc_hb_stop(ioc); + /* !!! fall through !!! */ + + case IOC_E_HBFAIL: + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + bfa_iocpf_fail(ioc); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) +{ + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + bfa_iocpf_disable(ioc); + BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); +} + +/** + * IOC is being disabled + */ +static void +bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_DISABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + break; + + case IOC_E_HWERROR: /* - * fall through + * No state change. Will move to disabled state + * after iocpf sm completes failure processing and + * moves to disabled state. */ + bfa_iocpf_fail(ioc); + break; - case IOC_E_DETACH: - bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + default: + bfa_sm_fault(ioc, event); + } +} + +/** + * IOC disable completion entry. + */ +static void +bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) +{ + bfa_ioc_disable_comp(ioc); +} + +static void +bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) +{ + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); break; - case IOC_E_FWREADY: + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); break; default: @@ -219,48 +507,138 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event) } } + +static void +bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) +{ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); +} + /** - * Notify enable completion callback and generate mismatch AEN. 
+ * Hardware initialization failed. */ static void -bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc) +bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) { + bfa_trc(ioc, event); + + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_FAILED: + /** + * Initialization failure during iocpf init retry. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(ioc, event); + } +} + + +static void +bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) +{ + struct list_head *qe; + struct bfa_ioc_hbfail_notify_s *notify; + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; + /** - * Provide enable completion callback and AEN notification only once. + * Notify driver and common modules registered for notification. */ - if (ioc->retry_count == 0) { - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH); + ioc->cbfn->hbfail_cbfn(ioc->bfa); + list_for_each(qe, &ioc->hb_notify_q) { + notify = (struct bfa_ioc_hbfail_notify_s *) qe; + notify->cbfn(notify->cbarg); } - ioc->retry_count++; - bfa_ioc_timer_start(ioc); + + BFA_LOG(KERN_CRIT, bfad, log_level, + "Heart Beat of IOC has failed\n"); } /** - * Awaiting firmware version match. + * IOC failure. */ static void -bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) { bfa_trc(ioc, event); switch (event) { - case IOC_E_TIMEOUT: - bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); + + case IOC_E_FAILED: + /** + * Initialization failure during iocpf recovery. + * !!! Fall through !!! + */ + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); break; case IOC_E_DISABLE: - bfa_ioc_disable_comp(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_HWERROR: /* - * fall through + * HB failure notification, ignore. */ + break; + default: + bfa_sm_fault(ioc, event); + } +} - case IOC_E_DETACH: - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + + +/** + * IOCPF State Machine + */ + + +/** + * Reset entry actions -- initialize state machine + */ +static void +bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) +{ + iocpf->retry_count = 0; + iocpf->auto_recover = bfa_auto_recover; +} + +/** + * Beginning state. IOC is in reset state. + */ +static void +bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); break; - case IOC_E_FWREADY: + case IOCPF_E_STOP: break; default: @@ -269,31 +647,44 @@ bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event) } /** - * Request for semaphore. + * Semaphore should be acquired for version check. */ static void -bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) { - bfa_ioc_hw_sem_get(ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); } /** - * Awaiting semaphore for h/w initialzation. + * Awaiting h/w semaphore to continue with version check. 
*/ static void -bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_SEMLOCKED: - ioc->retry_count = 0; - bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_firmware_lock(ioc)) { + iocpf->retry_count = 0; + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); + } break; - case IOC_E_DISABLE: + case IOCPF_E_DISABLE: bfa_ioc_hw_sem_get_cancel(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; default: @@ -301,51 +692,81 @@ bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event) } } - +/** + * Notify enable completion callback. + */ static void -bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf) { - bfa_ioc_timer_start(ioc); - bfa_ioc_reset(ioc, BFA_FALSE); + /* + * Call only the first time sm enters fwmismatch state. + */ + if (iocpf->retry_count == 0) + bfa_ioc_pf_fwmismatch(iocpf->ioc); + + iocpf->retry_count++; + bfa_iocpf_timer_start(iocpf->ioc); } /** - * Hardware is being initialized. Interrupts are enabled. - * Holding hardware semaphore lock. + * Awaiting firmware version match. */ static void -bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_FWREADY: - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); break; - case IOC_E_HWERROR: - bfa_ioc_timer_stop(ioc); - /* - * fall through - */ + case IOCPF_E_DISABLE: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; - case IOC_E_TIMEOUT: - ioc->retry_count++; - if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { - bfa_ioc_timer_start(ioc); - bfa_ioc_reset(ioc, BFA_TRUE); - break; - } + case IOCPF_E_STOP: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; - bfa_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); + default: + bfa_sm_fault(ioc, event); + } +} + +/** + * Request for semaphore. + */ +static void +bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/** + * Awaiting semaphore for h/w initialzation. 
+ */ +static void +bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) +{ + struct bfa_ioc_s *ioc = iocpf->ioc; + + bfa_trc(ioc, event); + + switch (event) { + case IOCPF_E_SEMLOCKED: + iocpf->retry_count = 0; + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); break; - case IOC_E_DISABLE: - bfa_ioc_hw_sem_release(ioc); - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; default: @@ -355,55 +776,54 @@ bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event) static void -bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) { - bfa_ioc_timer_start(ioc); - bfa_ioc_send_enable(ioc); + bfa_iocpf_timer_start(iocpf->ioc); + bfa_ioc_reset(iocpf->ioc, BFA_FALSE); } /** - * Host IOC function is being enabled, awaiting response from firmware. - * Semaphore is acquired. + * Hardware is being initialized. Interrupts are enabled. + * Holding hardware semaphore lock. */ static void -bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_FWRSP_ENABLE: - bfa_ioc_timer_stop(ioc); - bfa_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + case IOCPF_E_FWREADY: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); break; - case IOC_E_HWERROR: - bfa_ioc_timer_stop(ioc); + case IOCPF_E_INITFAIL: + bfa_iocpf_timer_stop(ioc); /* - * fall through + * !!! fall through !!! */ - case IOC_E_TIMEOUT: - ioc->retry_count++; - if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, - BFI_IOC_UNINIT); - bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); + case IOCPF_E_TIMEOUT: + iocpf->retry_count++; + if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { + bfa_iocpf_timer_start(ioc); + bfa_ioc_reset(ioc, BFA_TRUE); break; } bfa_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); - break; + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); - case IOC_E_DISABLE: - bfa_ioc_timer_stop(ioc); - bfa_ioc_hw_sem_release(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + if (event == IOCPF_E_TIMEOUT) + bfa_ioc_pf_failed(ioc); break; - case IOC_E_FWREADY: - bfa_ioc_send_enable(ioc); + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_release(ioc); + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; default: @@ -413,40 +833,60 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) static void -bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) { - bfa_ioc_timer_start(ioc); - bfa_ioc_send_getattr(ioc); + bfa_iocpf_timer_start(iocpf->ioc); + bfa_ioc_send_enable(iocpf->ioc); } /** - * IOC configuration in progress. Timer is active. + * Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. 
*/ static void -bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_FWRSP_GETATTR: - bfa_ioc_timer_stop(ioc); - bfa_ioc_check_attr_wwns(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + case IOCPF_E_FWRSP_ENABLE: + bfa_iocpf_timer_stop(ioc); + bfa_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); break; - case IOC_E_HWERROR: - bfa_ioc_timer_stop(ioc); + case IOCPF_E_INITFAIL: + bfa_iocpf_timer_stop(ioc); /* - * fall through + * !!! fall through !!! */ - case IOC_E_TIMEOUT: - bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); + case IOCPF_E_TIMEOUT: + iocpf->retry_count++; + if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { + bfa_reg_write(ioc->ioc_regs.ioc_fwstate, + BFI_IOC_UNINIT); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + break; + } + + bfa_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); + + if (event == IOCPF_E_TIMEOUT) + bfa_ioc_pf_failed(ioc); break; - case IOC_E_DISABLE: - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + case IOCPF_E_DISABLE: + bfa_iocpf_timer_stop(ioc); + bfa_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + case IOCPF_E_FWREADY: + bfa_ioc_send_enable(ioc); break; default: @@ -455,41 +895,40 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) } + static void -bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) { - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); - bfa_ioc_hb_monitor(ioc); - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); + bfa_ioc_pf_enabled(iocpf->ioc); } static void -bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_ENABLE: + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + case IOCPF_E_GETATTRFAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); break; - case IOC_E_DISABLE: - bfa_ioc_hb_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + case IOCPF_E_FAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); break; - case IOC_E_HWERROR: - case IOC_E_FWREADY: - /** - * Hard error or IOC recovery by other function. - * Treat it same as heartbeat failure. - */ - bfa_ioc_hb_stop(ioc); - /* - * !!! fall through !!! 
- */ + case IOCPF_E_FWREADY: + if (bfa_ioc_is_operational(ioc)) + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + else + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); - case IOC_E_HBFAIL: - bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); + bfa_ioc_pf_failed(ioc); break; default: @@ -499,36 +938,41 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) static void -bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) { - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE); - bfa_ioc_timer_start(ioc); - bfa_ioc_send_disable(ioc); + bfa_iocpf_timer_start(iocpf->ioc); + bfa_ioc_send_disable(iocpf->ioc); } /** * IOC is being disabled */ static void -bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_FWRSP_DISABLE: - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + case IOCPF_E_FWRSP_DISABLE: + case IOCPF_E_FWREADY: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; - case IOC_E_HWERROR: - bfa_ioc_timer_stop(ioc); + case IOCPF_E_FAIL: + bfa_iocpf_timer_stop(ioc); /* * !!! fall through !!! */ - case IOC_E_TIMEOUT: + case IOCPF_E_TIMEOUT: bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_FWRSP_ENABLE: break; default: @@ -540,31 +984,26 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) * IOC disable completion entry. */ static void -bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) { - bfa_ioc_disable_comp(ioc); + bfa_ioc_pf_disabled(iocpf->ioc); } static void -bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_ENABLE: - bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); - break; - - case IOC_E_DISABLE: - ioc->cbfn->disable_cbfn(ioc->bfa); + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); break; - case IOC_E_FWREADY: - break; - - case IOC_E_DETACH: + case IOCPF_E_STOP: bfa_ioc_firmware_unlock(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; default: @@ -574,34 +1013,35 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) static void -bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) { - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); - bfa_ioc_timer_start(ioc); + bfa_iocpf_timer_start(iocpf->ioc); } /** * Hardware initialization failed. 
*/ static void -bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - case IOC_E_DISABLE: - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + case IOCPF_E_DISABLE: + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; - case IOC_E_DETACH: - bfa_ioc_timer_stop(ioc); + case IOCPF_E_STOP: + bfa_iocpf_timer_stop(ioc); bfa_ioc_firmware_unlock(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); break; - case IOC_E_TIMEOUT: - bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); break; default: @@ -611,80 +1051,47 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) static void -bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) +bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) { - struct list_head *qe; - struct bfa_ioc_hbfail_notify_s *notify; - /** * Mark IOC as failed in hardware and stop firmware. */ - bfa_ioc_lpu_stop(ioc); - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); + bfa_ioc_lpu_stop(iocpf->ioc); + bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); /** * Notify other functions on HB failure. */ - bfa_ioc_notify_hbfail(ioc); - - /** - * Notify driver and common modules registered for notification. - */ - ioc->cbfn->hbfail_cbfn(ioc->bfa); - list_for_each(qe, &ioc->hb_notify_q) { - notify = (struct bfa_ioc_hbfail_notify_s *)qe; - notify->cbfn(notify->cbarg); - } + bfa_ioc_notify_hbfail(iocpf->ioc); /** * Flush any queued up mailbox requests. */ - bfa_ioc_mbox_hbfail(ioc); - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL); + bfa_ioc_mbox_hbfail(iocpf->ioc); - /** - * Trigger auto-recovery after a delay. - */ - if (ioc->auto_recover) { - bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, - bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER); - } + if (iocpf->auto_recover) + bfa_iocpf_recovery_timer_start(iocpf->ioc); } /** - * IOC heartbeat failure. + * IOC is in failed state. */ static void -bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) +bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) { + struct bfa_ioc_s *ioc = iocpf->ioc; + bfa_trc(ioc, event); switch (event) { - - case IOC_E_ENABLE: - ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + case IOCPF_E_DISABLE: + if (iocpf->auto_recover) + bfa_iocpf_timer_stop(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; - case IOC_E_DISABLE: - if (ioc->auto_recover) - bfa_ioc_timer_stop(ioc); - bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); - break; - - case IOC_E_TIMEOUT: - bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); - break; - - case IOC_E_FWREADY: - /** - * Recovery is already initiated by other function. - */ - break; - - case IOC_E_HWERROR: - /* - * HB failure notification, ignore. 
- */ + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); break; default: @@ -695,14 +1102,14 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) /** - * bfa_ioc_pvt BFA IOC private functions + * hal_ioc_pvt BFA IOC private functions */ static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) { - struct list_head *qe; - struct bfa_ioc_hbfail_notify_s *notify; + struct list_head *qe; + struct bfa_ioc_hbfail_notify_s *notify; ioc->cbfn->disable_cbfn(ioc->bfa); @@ -710,25 +1117,17 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) * Notify common modules registered for notification. */ list_for_each(qe, &ioc->hb_notify_q) { - notify = (struct bfa_ioc_hbfail_notify_s *)qe; + notify = (struct bfa_ioc_hbfail_notify_s *) qe; notify->cbfn(notify->cbarg); } } -void -bfa_ioc_sem_timeout(void *ioc_arg) -{ - struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; - - bfa_ioc_hw_sem_get(ioc); -} - bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg) { u32 r32; int cnt = 0; -#define BFA_SEM_SPINCNT 3000 +#define BFA_SEM_SPINCNT 3000 r32 = bfa_reg_read(sem_reg); @@ -754,7 +1153,7 @@ bfa_ioc_sem_release(bfa_os_addr_t sem_reg) static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) { - u32 r32; + u32 r32; /** * First read to the semaphore register will return 0, subsequent reads @@ -762,12 +1161,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) */ r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); if (r32 == 0) { - bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); return; } - bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, - ioc, BFA_IOC_HWSEM_TOV); + bfa_sem_timer_start(ioc); } void @@ -779,7 +1177,7 @@ bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) { - bfa_timer_stop(&ioc->sem_timer); + bfa_sem_timer_stop(ioc); } /** @@ -788,14 +1186,18 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) static void bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) { - u32 pss_ctl; - int i; + u32 pss_ctl; + int i; #define PSS_LMEM_INIT_TIME 10000 pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LMEM_RESET; pss_ctl |= __PSS_LMEM_INIT_EN; - pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */ + + /* + * i2c workaround 12.5khz clock + */ + pss_ctl |= __PSS_I2C_CLK_DIV(3UL); bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); /** @@ -821,7 +1223,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) static void bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) { - u32 pss_ctl; + u32 pss_ctl; /** * Take processor out of reset. @@ -835,7 +1237,7 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) { - u32 pss_ctl; + u32 pss_ctl; /** * Put processors in reset. 
@@ -852,10 +1254,10 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { - u32 pgnum, pgoff; - u32 loff = 0; - int i; - u32 *fwsig = (u32 *) fwhdr; + u32 pgnum, pgoff; + u32 loff = 0; + int i; + u32 *fwsig = (u32 *) fwhdr; pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); @@ -863,7 +1265,8 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); i++) { - fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); + fwsig[i] = + bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); loff += sizeof(u32); } } @@ -875,10 +1278,10 @@ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { struct bfi_ioc_image_hdr_s *drv_fwhdr; - int i; + int i; drv_fwhdr = (struct bfi_ioc_image_hdr_s *) - bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); + bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) { @@ -897,21 +1300,20 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) * Return true if current running version is valid. Firmware signature and * execution context (driver/bios) must match. */ -static bfa_boolean_t -bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) +static bfa_boolean_t +bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) { struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; /** * If bios/efi boot (flash based) -- return true */ - if (bfa_ioc_is_optrom(ioc)) + if (bfa_ioc_is_bios_optrom(ioc)) return BFA_TRUE; bfa_ioc_fwver_get(ioc, &fwhdr); drv_fwhdr = (struct bfi_ioc_image_hdr_s *) - bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); - + bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); if (fwhdr.signature != drv_fwhdr->signature) { bfa_trc(ioc, fwhdr.signature); @@ -919,9 +1321,9 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) return BFA_FALSE; } - if (fwhdr.exec != drv_fwhdr->exec) { - bfa_trc(ioc, fwhdr.exec); - bfa_trc(ioc, drv_fwhdr->exec); + if (bfa_os_swap32(fwhdr.param) != boot_env) { + bfa_trc(ioc, fwhdr.param); + bfa_trc(ioc, boot_env); return BFA_FALSE; } @@ -934,7 +1336,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) static void bfa_ioc_msgflush(struct bfa_ioc_s *ioc) { - u32 r32; + u32 r32; r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); if (r32) @@ -946,7 +1348,9 @@ static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) { enum bfi_ioc_state ioc_fwstate; - bfa_boolean_t fwvalid; + bfa_boolean_t fwvalid; + u32 boot_type; + u32 boot_env; ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); @@ -955,14 +1359,33 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) bfa_trc(ioc, ioc_fwstate); + boot_type = BFI_BOOT_TYPE_NORMAL; + boot_env = BFI_BOOT_LOADER_OS; + + /** + * Flash based firmware boot BIOS env. + */ + if (bfa_ioc_is_bios_optrom(ioc)) { + boot_type = BFI_BOOT_TYPE_FLASH; + boot_env = BFI_BOOT_LOADER_BIOS; + } + + /** + * Flash based firmware boot UEFI env. + */ + if (bfa_ioc_is_uefi(ioc)) { + boot_type = BFI_BOOT_TYPE_FLASH; + boot_env = BFI_BOOT_LOADER_UEFI; + } + /** * check if firmware is valid */ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 
- BFA_FALSE : bfa_ioc_fwver_valid(ioc); + BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env); if (!fwvalid) { - bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); + bfa_ioc_boot(ioc, boot_type, boot_env); return; } @@ -971,7 +1394,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) * just wait for an initialization completion interrupt. */ if (ioc_fwstate == BFI_IOC_INITING) { - bfa_trc(ioc, ioc_fwstate); ioc->cbfn->reset_cbfn(ioc->bfa); return; } @@ -985,8 +1407,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) * is loaded. */ if (ioc_fwstate == BFI_IOC_DISABLED || - (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { - bfa_trc(ioc, ioc_fwstate); + (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { /** * When using MSI-X any pending firmware ready event should @@ -994,20 +1415,20 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) */ bfa_ioc_msgflush(ioc); ioc->cbfn->reset_cbfn(ioc->bfa); - bfa_fsm_send_event(ioc, IOC_E_FWREADY); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); return; } /** * Initialize the h/w for any other states. */ - bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); + bfa_ioc_boot(ioc, boot_type, boot_env); } static void bfa_ioc_timeout(void *ioc_arg) { - struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; bfa_trc(ioc, 0); bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); @@ -1016,8 +1437,8 @@ bfa_ioc_timeout(void *ioc_arg) void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) { - u32 *msgp = (u32 *) ioc_msg; - u32 i; + u32 *msgp = (u32 *) ioc_msg; + u32 i; bfa_trc(ioc, msgp[0]); bfa_trc(ioc, len); @@ -1038,17 +1459,20 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) * write 1 to mailbox CMD to trigger LPU event */ bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); - (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); + (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); } static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc) { struct bfi_ioc_ctrl_req_s enable_req; + struct bfa_timeval_s tv; bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfa_ioc_portid(ioc)); enable_req.ioc_class = ioc->ioc_mc; + bfa_os_gettimeofday(&tv); + enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec); bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); } @@ -1065,7 +1489,7 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc) static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) { - struct bfi_ioc_getattr_req_s attr_req; + struct bfi_ioc_getattr_req_s attr_req; bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, bfa_ioc_portid(ioc)); @@ -1077,12 +1501,11 @@ static void bfa_ioc_hb_check(void *cbarg) { struct bfa_ioc_s *ioc = cbarg; - u32 hb_count; + u32 hb_count; hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); if (ioc->hb_count == hb_count) { - bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, - hb_count); + printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count); bfa_ioc_recover(ioc); return; } else { @@ -1090,61 +1513,54 @@ bfa_ioc_hb_check(void *cbarg) } bfa_ioc_mbox_poll(ioc); - bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, - ioc, BFA_IOC_HB_TOV); + bfa_hb_timer_start(ioc); } static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) { ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); - bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, - BFA_IOC_HB_TOV); + bfa_hb_timer_start(ioc); } static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) { - 
bfa_timer_stop(&ioc->ioc_timer); + bfa_hb_timer_stop(ioc); } + /** - * Initiate a full firmware download. + * Initiate a full firmware download. */ static void bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, - u32 boot_param) + u32 boot_env) { - u32 *fwimg; - u32 pgnum, pgoff; - u32 loff = 0; - u32 chunkno = 0; - u32 i; + u32 *fwimg; + u32 pgnum, pgoff; + u32 loff = 0; + u32 chunkno = 0; + u32 i; /** * Initialize LMEM first before code download */ bfa_ioc_lmem_init(ioc); - /** - * Flash based firmware boot - */ - bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); - if (bfa_ioc_is_optrom(ioc)) - boot_type = BFI_BOOT_TYPE_FLASH; - fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); - + bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); + fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); - for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { + for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { chunkno = BFA_IOC_FLASH_CHUNK_NO(i); - fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), + fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } @@ -1162,7 +1578,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, + pgnum); } } @@ -1171,105 +1588,274 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, /* * Set boot type and boot param at the end. - */ + */ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, bfa_os_swap32(boot_type)); - bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, - bfa_os_swap32(boot_param)); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, + bfa_os_swap32(boot_env)); +} + +static void +bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) +{ + bfa_ioc_hwinit(ioc, force); +} + +/** + * Update BFA configuration from firmware configuration. + */ +static void +bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) +{ + struct bfi_ioc_attr_s *attr = ioc->attr; + + attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); + attr->card_type = bfa_os_ntohl(attr->card_type); + attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); + + bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); +} + +/** + * Attach time initialization of mbox logic. + */ +static void +bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + int mc; + + INIT_LIST_HEAD(&mod->cmd_q); + for (mc = 0; mc < BFI_MC_MAX; mc++) { + mod->mbhdlr[mc].cbfn = NULL; + mod->mbhdlr[mc].cbarg = ioc->bfa; + } +} + +/** + * Mbox poll timer -- restarts any pending mailbox requests. + */ +static void +bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd_s *cmd; + u32 stat; + + /** + * If no command pending, do nothing + */ + if (list_empty(&mod->cmd_q)) + return; + + /** + * If previous command is not yet fetched by firmware, do nothing + */ + stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) + return; + + /** + * Enqueue command to firmware. + */ + bfa_q_deq(&mod->cmd_q, &cmd); + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); +} + +/** + * Cleanup any pending requests. 
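
A note on the chunked download loop in bfa_ioc_download_fw() above: the firmware image is handed to the driver as fixed-size chunks (BFI_FLASH_CHUNK_SZ_WORDS words each), and the loop walks a flat word index, re-fetching the chunk base whenever BFA_IOC_FLASH_CHUNK_NO(i) changes and indexing into it with BFA_IOC_FLASH_OFFSET_IN_CHUNK(i). The sketch below is a minimal userspace illustration of that indexing only; CHUNK_SZ_WORDS is made up and get_chunk() is a stand-in for bfa_cb_image_get_chunk().

#include <stdio.h>

#define CHUNK_SZ_WORDS   8                      /* stand-in for BFI_FLASH_CHUNK_SZ_WORDS */
#define CHUNK_NO(i)      ((i) / CHUNK_SZ_WORDS) /* as BFA_IOC_FLASH_CHUNK_NO() */
#define OFF_IN_CHUNK(i)  ((i) % CHUNK_SZ_WORDS) /* as BFA_IOC_FLASH_OFFSET_IN_CHUNK() */

static unsigned int image[32];                  /* fake firmware image, 4 chunks */

/* stand-in for bfa_cb_image_get_chunk(): base of the chunk starting at word 'off' */
static unsigned int *get_chunk(unsigned int off)
{
	return &image[off];
}

int main(void)
{
	unsigned int i, chunkno = 0;
	unsigned int *fwimg;

	for (i = 0; i < 32; i++)
		image[i] = i;

	fwimg = get_chunk(chunkno * CHUNK_SZ_WORDS);

	/* walk the flat word index, switching chunks on the fly */
	for (i = 0; i < 32; i++) {
		if (CHUNK_NO(i) != chunkno) {
			chunkno = CHUNK_NO(i);
			fwimg = get_chunk(chunkno * CHUNK_SZ_WORDS);
		}
		printf("word %2u -> chunk %u value %u\n",
		       i, chunkno, fwimg[OFF_IN_CHUNK(i)]);
	}
	return 0;
}
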
+ */ +static void +bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) +{ + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd_s *cmd; + + while (!list_empty(&mod->cmd_q)) + bfa_q_deq(&mod->cmd_q, &cmd); +} + +/** + * Read data from SMEM to host through PCI memmap + * + * @param[in] ioc memory for IOC + * @param[in] tbuf app memory to store data from smem + * @param[in] soff smem offset + * @param[in] sz size of smem in bytes + */ +static bfa_status_t +bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) +{ + u32 pgnum, loff, r32; + int i, len; + u32 *buf = tbuf; + + pgnum = bfa_ioc_smem_pgnum(ioc, soff); + loff = bfa_ioc_smem_pgoff(ioc, soff); + bfa_trc(ioc, pgnum); + bfa_trc(ioc, loff); + bfa_trc(ioc, sz); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { + bfa_trc(ioc, 0); + return BFA_STATUS_FAILED; + } + + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + + len = sz/sizeof(u32); + bfa_trc(ioc, len); + for (i = 0; i < len; i++) { + r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); + buf[i] = bfa_os_ntohl(r32); + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + } + } + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, + bfa_ioc_smem_pgnum(ioc, 0)); + /* + * release semaphore. + */ + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + bfa_trc(ioc, pgnum); + return BFA_STATUS_OK; +} + +/** + * Clear SMEM data from host through PCI memmap + * + * @param[in] ioc memory for IOC + * @param[in] soff smem offset + * @param[in] sz size of smem in bytes + */ +static bfa_status_t +bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) +{ + int i, len; + u32 pgnum, loff; + + pgnum = bfa_ioc_smem_pgnum(ioc, soff); + loff = bfa_ioc_smem_pgoff(ioc, soff); + bfa_trc(ioc, pgnum); + bfa_trc(ioc, loff); + bfa_trc(ioc, sz); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) { + bfa_trc(ioc, 0); + return BFA_STATUS_FAILED; + } + + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + + len = sz/sizeof(u32); /* len in words */ + bfa_trc(ioc, len); + for (i = 0; i < len; i++) { + bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + } + } + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, + bfa_ioc_smem_pgnum(ioc, 0)); + + /* + * release semaphore. + */ + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + bfa_trc(ioc, pgnum); + return BFA_STATUS_OK; } +/** + * hal iocpf to ioc interface + */ static void -bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) +bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc) { - bfa_ioc_hwinit(ioc, force); + bfa_fsm_send_event(ioc, IOC_E_ENABLED); } -/** - * Update BFA configuration from firmware configuration. 
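
The new bfa_ioc_smem_read()/bfa_ioc_smem_clr() helpers above access shared memory through a paged PCI window: a page-select register plus an in-page offset, with the offset wrapped by PSS_SMEM_PGOFF() and the page number bumped whenever the offset wraps back to zero. A rough userspace sketch of that paging arithmetic follows; the 2 KB page size is hypothetical and a plain array stands in for the memory-mapped window.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ      2048                       /* hypothetical window size in bytes */
#define PGOFF(off)   ((off) & (PAGE_SZ - 1))    /* stand-in for PSS_SMEM_PGOFF() */

static uint32_t smem[4 * PAGE_SZ / 4];          /* fake shared memory, 4 pages */
static uint32_t cur_page;                       /* stand-in for the page-select register */

static uint32_t window_read(uint32_t loff)      /* read a word through the current page */
{
	return smem[(cur_page * PAGE_SZ + loff) / 4];
}

/* copy 'sz' bytes starting at smem offset 'soff' into 'buf', crossing pages as needed */
static void smem_read(uint32_t *buf, uint32_t soff, uint32_t sz)
{
	uint32_t pgnum = soff / PAGE_SZ;
	uint32_t loff = PGOFF(soff);
	uint32_t i, len = sz / 4;

	cur_page = pgnum;                       /* "write" the page-select register */
	for (i = 0; i < len; i++) {
		buf[i] = window_read(loff);
		loff += 4;
		loff = PGOFF(loff);             /* handle page offset wrap-around */
		if (loff == 0)
			cur_page = ++pgnum;     /* move the window to the next page */
	}
}

int main(void)
{
	uint32_t out[16], i;

	for (i = 0; i < sizeof(smem) / 4; i++)
		smem[i] = i;
	smem_read(out, PAGE_SZ - 8, sizeof(out));   /* read across a page boundary */
	for (i = 0; i < 16; i++)
		printf("%u ", out[i]);
	printf("\n");
	return 0;
}
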
- */ static void -bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) +bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc) { - struct bfi_ioc_attr_s *attr = ioc->attr; - - attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); - attr->card_type = bfa_os_ntohl(attr->card_type); - attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); - - bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); + bfa_fsm_send_event(ioc, IOC_E_DISABLED); } -/** - * Attach time initialization of mbox logic. - */ static void -bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) +bfa_ioc_pf_failed(struct bfa_ioc_s *ioc) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - int mc; - - INIT_LIST_HEAD(&mod->cmd_q); - for (mc = 0; mc < BFI_MC_MAX; mc++) { - mod->mbhdlr[mc].cbfn = NULL; - mod->mbhdlr[mc].cbarg = ioc->bfa; - } + bfa_fsm_send_event(ioc, IOC_E_FAILED); } -/** - * Mbox poll timer -- restarts any pending mailbox requests. - */ static void -bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) +bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - struct bfa_mbox_cmd_s *cmd; - u32 stat; - + struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; /** - * If no command pending, do nothing + * Provide enable completion callback. */ - if (list_empty(&mod->cmd_q)) - return; + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + BFA_LOG(KERN_WARNING, bfad, log_level, + "Running firmware version is incompatible " + "with the driver version\n"); +} - /** - * If previous command is not yet fetched by firmware, do nothing - */ - stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); - if (stat) - return; - /** - * Enqueue command to firmware. - */ - bfa_q_deq(&mod->cmd_q, &cmd); - bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); -} /** - * Cleanup any pending requests. + * hal_ioc_public */ -static void -bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) + +bfa_status_t +bfa_ioc_pll_init(struct bfa_ioc_s *ioc) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - struct bfa_mbox_cmd_s *cmd; - while (!list_empty(&mod->cmd_q)) - bfa_q_deq(&mod->cmd_q, &cmd); -} + /* + * Hold semaphore so that nobody can access the chip during init. + */ + bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); -/** - * bfa_ioc_public - */ + bfa_ioc_pll_init_asic(ioc); + + ioc->pllinit = BFA_TRUE; + /* + * release semaphore. + */ + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} /** * Interface used by diag module to do firmware boot with memory test * as the entry vector. */ void -bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) +bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { - bfa_os_addr_t rb; + bfa_os_addr_t rb; bfa_ioc_stats(ioc, ioc_boots); @@ -1280,7 +1866,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) * Initialize IOC state of all functions on a chip reset. 
*/ rb = ioc->pcidev.pci_bar_kva; - if (boot_param == BFI_BOOT_TYPE_MEMTEST) { + if (boot_type == BFI_BOOT_TYPE_MEMTEST) { bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); } else { @@ -1289,7 +1875,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) } bfa_ioc_msgflush(ioc); - bfa_ioc_download_fw(ioc, boot_type, boot_param); + bfa_ioc_download_fw(ioc, boot_type, boot_env); /** * Enable interrupts just before starting LPU @@ -1308,18 +1894,29 @@ bfa_ioc_auto_recover(bfa_boolean_t auto_recover) } + bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); } +bfa_boolean_t +bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) +{ + u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); + + return ((r32 != BFI_IOC_UNINIT) && + (r32 != BFI_IOC_INITING) && + (r32 != BFI_IOC_MEMTEST)); +} + void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) { - u32 *msgp = mbmsg; - u32 r32; - int i; + u32 *msgp = mbmsg; + u32 r32; + int i; /** * read the MBOX msg @@ -1341,9 +1938,10 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) { - union bfi_ioc_i2h_msg_u *msg; + union bfi_ioc_i2h_msg_u *msg; + struct bfa_iocpf_s *iocpf = &ioc->iocpf; - msg = (union bfi_ioc_i2h_msg_u *)m; + msg = (union bfi_ioc_i2h_msg_u *) m; bfa_ioc_stats(ioc, ioc_isrs); @@ -1352,15 +1950,15 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) break; case BFI_IOC_I2H_READY_EVENT: - bfa_fsm_send_event(ioc, IOC_E_FWREADY); + bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY); break; case BFI_IOC_I2H_ENABLE_REPLY: - bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); break; case BFI_IOC_I2H_DISABLE_REPLY: - bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); break; case BFI_IOC_I2H_GETATTR_REPLY: @@ -1378,29 +1976,24 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) * * @param[in] ioc memory for IOC * @param[in] bfa driver instance structure - * @param[in] trcmod kernel trace module - * @param[in] aen kernel aen event module - * @param[in] logm kernel logging module */ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, - struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod, - struct bfa_aen_s *aen, struct bfa_log_mod_s *logm) -{ - ioc->bfa = bfa; - ioc->cbfn = cbfn; - ioc->timer_mod = timer_mod; - ioc->trcmod = trcmod; - ioc->aen = aen; - ioc->logm = logm; - ioc->fcmode = BFA_FALSE; - ioc->pllinit = BFA_FALSE; + struct bfa_timer_mod_s *timer_mod) +{ + ioc->bfa = bfa; + ioc->cbfn = cbfn; + ioc->timer_mod = timer_mod; + ioc->fcmode = BFA_FALSE; + ioc->pllinit = BFA_FALSE; ioc->dbg_fwsave_once = BFA_TRUE; + ioc->iocpf.ioc = ioc; bfa_ioc_mbox_attach(ioc); INIT_LIST_HEAD(&ioc->hb_notify_q); - bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(ioc, IOC_E_RESET); } /** @@ -1421,10 +2014,10 @@ void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, enum bfi_mclass mc) { - ioc->ioc_mc = mc; - ioc->pcidev = *pcidev; - ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); - ioc->cna = ioc->ctdev && !ioc->fcmode; + ioc->ioc_mc = mc; + ioc->pcidev = *pcidev; + ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); + ioc->cna = ioc->ctdev && !ioc->fcmode; /** * Set asic specific interfaces. 
See bfa_ioc_cb.c and bfa_ioc_ct.c @@ -1445,14 +2038,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, * @param[in] dm_pa physical address of IOC dma memory */ void -bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) +bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) { /** * dma memory for firmware attribute */ ioc->attr_dma.kva = dm_kva; ioc->attr_dma.pa = dm_pa; - ioc->attr = (struct bfi_ioc_attr_s *)dm_kva; + ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; } /** @@ -1490,7 +2083,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc) int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) { -return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; + return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; } /** @@ -1500,8 +2093,8 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) { - ioc->dbg_fwsave = dbg_fwsave; - ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); + ioc->dbg_fwsave = dbg_fwsave; + ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover); } u32 @@ -1525,8 +2118,8 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr) void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - int mc; + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + int mc; for (mc = 0; mc < BFI_MC_MAX; mc++) mod->mbhdlr[mc].cbfn = mcfuncs[mc]; @@ -1539,10 +2132,10 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - mod->mbhdlr[mc].cbfn = cbfn; - mod->mbhdlr[mc].cbarg = cbarg; + mod->mbhdlr[mc].cbfn = cbfn; + mod->mbhdlr[mc].cbarg = cbarg; } /** @@ -1555,8 +2148,8 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - u32 stat; + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + u32 stat; /** * If a previous command is pending, queue new command @@ -1587,9 +2180,9 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) { - struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; - struct bfi_mbmsg_s m; - int mc; + struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; + struct bfi_mbmsg_s m; + int mc; bfa_ioc_msgget(ioc, &m); @@ -1621,16 +2214,14 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc) ioc->port_id = bfa_ioc_pcifn(ioc); } -#ifndef BFA_BIOS_BUILD - /** * return true if IOC is disabled */ bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) { - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) - || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || + bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } /** @@ -1639,9 +2230,9 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) { - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) - || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) - || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || + bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) || + bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); } #define bfa_ioc_state_disabled(__sm) \ @@ -1659,8 +2250,8 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) { - u32 
ioc_state; - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + u32 ioc_state; + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) return BFA_FALSE; @@ -1669,16 +2260,18 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; - ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); - if (!bfa_ioc_state_disabled(ioc_state)) - return BFA_FALSE; + if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { + ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); + if (!bfa_ioc_state_disabled(ioc_state)) + return BFA_FALSE; + } return BFA_TRUE; } /** * Add to IOC heartbeat failure notification queue. To be used by common - * modules such as + * modules such as cee, port, diag. */ void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, @@ -1692,7 +2285,7 @@ void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, struct bfa_adapter_attr_s *ad_attr) { - struct bfi_ioc_attr_s *ioc_attr; + struct bfi_ioc_attr_s *ioc_attr; ioc_attr = ioc->attr; @@ -1719,7 +2312,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, ad_attr->prototype = 0; ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); - ad_attr->mac = bfa_ioc_get_mac(ioc); + ad_attr->mac = bfa_ioc_get_mac(ioc); ad_attr->pcie_gen = ioc_attr->pcie_gen; ad_attr->pcie_lanes = ioc_attr->pcie_lanes; @@ -1729,6 +2322,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); ad_attr->cna_capable = ioc->cna; + ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna; } enum bfa_ioc_type_e @@ -1782,7 +2376,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) { bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, - BFA_VERSION_LEN); + BFA_VERSION_LEN); } void @@ -1795,7 +2389,7 @@ bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) { - struct bfi_ioc_attr_s *ioc_attr; + struct bfi_ioc_attr_s *ioc_attr; bfa_assert(model); bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); @@ -1805,14 +2399,48 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) /** * model name */ - snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", - BFA_MFG_NAME, ioc_attr->card_type); + bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + BFA_MFG_NAME, ioc_attr->card_type); } enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc) { - return bfa_sm_to_state(ioc_sm_table, ioc->fsm); + enum bfa_iocpf_state iocpf_st; + enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + + if (ioc_st == BFA_IOC_ENABLING || + ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { + + iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + switch (iocpf_st) { + case BFA_IOCPF_SEMWAIT: + ioc_st = BFA_IOC_SEMWAIT; + break; + + case BFA_IOCPF_HWINIT: + ioc_st = BFA_IOC_HWINIT; + break; + + case BFA_IOCPF_FWMISMATCH: + ioc_st = BFA_IOC_FWMISMATCH; + break; + + case BFA_IOCPF_FAIL: + ioc_st = BFA_IOC_FAIL; + break; + + case BFA_IOCPF_INITFAIL: + ioc_st = BFA_IOC_INITFAIL; + break; + + default: + break; + } + } + + return ioc_st; } void @@ -1833,7 +2461,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) } /** - * bfa_wwn_public + * hal_wwn_public */ wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) @@ -1857,10 +2485,10 @@ mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc) { /* - * Currently mfg mac is used as FCoE enode mac (not configured by PBC) + * Check the 
IOC type and return the appropriate MAC */ if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE) - return bfa_ioc_get_mfg_mac(ioc); + return ioc->attr->fcoe_mac; else return ioc->attr->mac; } @@ -1880,12 +2508,16 @@ bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc) mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) { - mac_t mac; + mac_t m; - mac = ioc->attr->mfg_mac; - mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); + m = ioc->attr->mfg_mac; + if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type)) + m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc); + else + bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]), + bfa_ioc_pcifn(ioc)); - return mac; + return m; } bfa_boolean_t @@ -1894,47 +2526,13 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); } -/** - * Send AEN notification - */ -void -bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) -{ - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = ioc->logm; - s32 inst_num = 0; - enum bfa_ioc_type_e ioc_type; - - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num); - - memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); - memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); - ioc_type = bfa_ioc_get_type(ioc); - switch (ioc_type) { - case BFA_IOC_TYPE_FC: - aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); - break; - case BFA_IOC_TYPE_FCoE: - aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); - aen_data.ioc.mac = bfa_ioc_get_mac(ioc); - break; - case BFA_IOC_TYPE_LL: - aen_data.ioc.mac = bfa_ioc_get_mac(ioc); - break; - default: - bfa_assert(ioc_type == BFA_IOC_TYPE_FC); - break; - } - aen_data.ioc.ioc_type = ioc_type; -} - /** * Retrieve saved firmware trace from a prior IOC failure. */ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) { - int tlen; + int tlen; if (ioc->dbg_fwsave_len == 0) return BFA_STATUS_ENOFSAVE; @@ -1963,57 +2561,145 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) { - u32 pgnum; - u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); - int i, tlen; - u32 *tbuf = trcdata, r32; + u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc)); + int tlen; + bfa_status_t status; bfa_trc(ioc, *trclen); - pgnum = bfa_ioc_smem_pgnum(ioc, loff); - loff = bfa_ioc_smem_pgoff(ioc, loff); - - /* - * Hold semaphore to serialize pll init and fwtrc. 
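
bfa_ioc_get_mfg_mac() above now derives the per-function MAC either by bumping only the last byte (older card models) or, via bfa_mfg_increment_wwn_mac(), by adding the PCI function number into the low bytes of the address. That helper is not shown in this hunk, so the sketch below only illustrates the assumed behaviour: add an offset to the low 24 bits with carry across the last three bytes.

#include <stdio.h>
#include <stdint.h>

/* add 'inc' to the low 24 bits of a MAC address, carrying across the last three
 * bytes; this mirrors what bfa_mfg_increment_wwn_mac() is assumed to do with &mac[3] */
static void mac_add_low24(uint8_t mac[6], uint32_t inc)
{
	uint32_t low = ((uint32_t)mac[3] << 16) | ((uint32_t)mac[4] << 8) | mac[5];

	low = (low + inc) & 0xffffff;
	mac[3] = (low >> 16) & 0xff;
	mac[4] = (low >> 8) & 0xff;
	mac[5] = low & 0xff;
}

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x05, 0x1e, 0x00, 0x00, 0xff };  /* example base MAC */
	unsigned int pcifn = 1;               /* per-function offset, as bfa_ioc_pcifn() */

	mac_add_low24(mac, pcifn);            /* carries from 0x0000ff to 0x000100 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
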
- */ - if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) - return BFA_STATUS_FAILED; - - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); - tlen = *trclen; if (tlen > BFA_DBG_FWTRC_LEN) tlen = BFA_DBG_FWTRC_LEN; - tlen /= sizeof(u32); - bfa_trc(ioc, tlen); + status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen); + *trclen = tlen; + return status; +} - for (i = 0; i < tlen; i++) { - r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); - tbuf[i] = bfa_os_ntohl(r32); - loff += sizeof(u32); +static void +bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc) +{ + struct bfa_mbox_cmd_s cmd; + struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg; - /** - * handle page offset wrap around - */ - loff = PSS_SMEM_PGOFF(loff); - if (loff == 0) { - pgnum++; - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); - } + bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC, + bfa_ioc_portid(ioc)); + req->ioc_class = ioc->ioc_mc; + bfa_ioc_mbox_queue(ioc, &cmd); +} + +static void +bfa_ioc_fwsync(struct bfa_ioc_s *ioc) +{ + u32 fwsync_iter = 1000; + + bfa_ioc_send_fwsync(ioc); + + /** + * After sending a fw sync mbox command wait for it to + * take effect. We will not wait for a response because + * 1. fw_sync mbox cmd doesn't have a response. + * 2. Even if we implement that, interrupts might not + * be enabled when we call this function. + * So, just keep checking if any mbox cmd is pending, and + * after waiting for a reasonable amount of time, go ahead. + * It is possible that fw has crashed and the mbox command + * is never acknowledged. + */ + while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0) + fwsync_iter--; +} + +/** + * Dump firmware smem + */ +bfa_status_t +bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, + u32 *offset, int *buflen) +{ + u32 loff; + int dlen; + bfa_status_t status; + u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc); + + if (*offset >= smem_len) { + *offset = *buflen = 0; + return BFA_STATUS_EINVAL; } - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, - bfa_ioc_smem_pgnum(ioc, 0)); - /* - * release semaphore. + loff = *offset; + dlen = *buflen; + + /** + * First smem read, sync smem before proceeding + * No need to sync before reading every chunk. 
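
bfa_ioc_fwsync() above sends a mailbox command that has no reply and then simply polls "is a mailbox command still pending?" for a bounded number of iterations, so a crashed firmware cannot hang the caller. A stripped-down sketch of that bounded-poll idiom, with a hypothetical cmd_pending() predicate standing in for bfa_ioc_mbox_cmd_pending():

#include <stdio.h>

/* hypothetical stand-in for bfa_ioc_mbox_cmd_pending(): pretend the command
 * stays pending for a few polls and is then fetched by the firmware */
static int polls_left = 3;

static int cmd_pending(void)
{
	return polls_left-- > 0;
}

int main(void)
{
	unsigned int iter = 1000;       /* upper bound, as in bfa_ioc_fwsync() */

	/* no reply is expected; just give the firmware a bounded window to fetch it */
	while (cmd_pending() && iter > 0)
		iter--;

	printf("stopped polling after %u iterations\n", 1000 - iter);
	return 0;
}
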
*/ - bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + if (loff == 0) + bfa_ioc_fwsync(ioc); - bfa_trc(ioc, pgnum); + if ((loff + dlen) >= smem_len) + dlen = smem_len - loff; - *trclen = tlen * sizeof(u32); - return BFA_STATUS_OK; + status = bfa_ioc_smem_read(ioc, buf, loff, dlen); + + if (status != BFA_STATUS_OK) { + *offset = *buflen = 0; + return status; + } + + *offset += dlen; + + if (*offset >= smem_len) + *offset = 0; + + *buflen = dlen; + + return status; +} + +/** + * Firmware statistics + */ +bfa_status_t +bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats) +{ + u32 loff = BFI_IOC_FWSTATS_OFF + \ + BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); + int tlen; + bfa_status_t status; + + if (ioc->stats_busy) { + bfa_trc(ioc, ioc->stats_busy); + return BFA_STATUS_DEVBUSY; + } + ioc->stats_busy = BFA_TRUE; + + tlen = sizeof(struct bfa_fw_stats_s); + status = bfa_ioc_smem_read(ioc, stats, loff, tlen); + + ioc->stats_busy = BFA_FALSE; + return status; +} + +bfa_status_t +bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc) +{ + u32 loff = BFI_IOC_FWSTATS_OFF + \ + BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc)); + int tlen; + bfa_status_t status; + + if (ioc->stats_busy) { + bfa_trc(ioc, ioc->stats_busy); + return BFA_STATUS_DEVBUSY; + } + ioc->stats_busy = BFA_TRUE; + + tlen = sizeof(struct bfa_fw_stats_s); + status = bfa_ioc_smem_clr(ioc, loff, tlen); + + ioc->stats_busy = BFA_FALSE; + return status; } /** @@ -2022,7 +2708,7 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) static void bfa_ioc_debug_save(struct bfa_ioc_s *ioc) { - int tlen; + int tlen; if (ioc->dbg_fwsave_len) { tlen = ioc->dbg_fwsave_len; @@ -2050,11 +2736,135 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) { if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) return; +} + +/** + * hal_iocpf_pvt BFA IOC PF private functions + */ - if (ioc->attr->nwwn == 0) - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN); - if (ioc->attr->pwwn == 0) - bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN); +static void +bfa_iocpf_enable(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); } -#endif +static void +bfa_iocpf_disable(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); +} + +static void +bfa_iocpf_fail(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); +} + +static void +bfa_iocpf_initfail(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); +} + +static void +bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); +} + +static void +bfa_iocpf_stop(struct bfa_ioc_s *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); +} + +static void +bfa_iocpf_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_trc(ioc, 0); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); +} + +static void +bfa_iocpf_sem_timeout(void *ioc_arg) +{ + struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg; + + bfa_ioc_hw_sem_get(ioc); +} + +/** + * bfa timer function + */ +void +bfa_timer_init(struct bfa_timer_mod_s *mod) +{ + INIT_LIST_HEAD(&mod->timer_q); +} + +void +bfa_timer_beat(struct bfa_timer_mod_s *mod) +{ + struct list_head *qh = &mod->timer_q; + struct list_head *qe, *qe_next; + struct bfa_timer_s *elem; + struct list_head timedout_q; + + INIT_LIST_HEAD(&timedout_q); + + qe = bfa_q_next(qh); + + while (qe != qh) { + qe_next = bfa_q_next(qe); + + elem = (struct bfa_timer_s *) qe; + if (elem->timeout <= BFA_TIMER_FREQ) { + elem->timeout = 0; + 
list_del(&elem->qe); + list_add_tail(&elem->qe, &timedout_q); + } else { + elem->timeout -= BFA_TIMER_FREQ; + } + + qe = qe_next; /* go to next elem */ + } + + /* + * Pop all the timeout entries + */ + while (!list_empty(&timedout_q)) { + bfa_q_deq(&timedout_q, &elem); + elem->timercb(elem->arg); + } +} + +/** + * Should be called with lock protection + */ +void +bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, + void (*timercb) (void *), void *arg, unsigned int timeout) +{ + + bfa_assert(timercb != NULL); + bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer)); + + timer->timeout = timeout; + timer->timercb = timercb; + timer->arg = arg; + + list_add_tail(&timer->qe, &mod->timer_q); +} + +/** + * Should be called with lock protection + */ +void +bfa_timer_stop(struct bfa_timer_s *timer) +{ + bfa_assert(!list_empty(&timer->qe)); + + list_del(&timer->qe); +} diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index cae05b251c99..288c5801aace 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -18,18 +18,74 @@ #ifndef __BFA_IOC_H__ #define __BFA_IOC_H__ -#include -#include -#include -#include -#include +#include "bfa_os_inc.h" +#include "bfa_cs.h" +#include "bfi.h" + +/** + * BFA timer declarations + */ +typedef void (*bfa_timer_cbfn_t)(void *); + +/** + * BFA timer data structure + */ +struct bfa_timer_s { + struct list_head qe; + bfa_timer_cbfn_t timercb; + void *arg; + int timeout; /**< in millisecs. */ +}; + +/** + * Timer module structure + */ +struct bfa_timer_mod_s { + struct list_head timer_q; +}; + +#define BFA_TIMER_FREQ 200 /**< specified in millisecs */ + +void bfa_timer_beat(struct bfa_timer_mod_s *mod); +void bfa_timer_init(struct bfa_timer_mod_s *mod); +void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, + bfa_timer_cbfn_t timercb, void *arg, + unsigned int timeout); +void bfa_timer_stop(struct bfa_timer_s *timer); + +/** + * Generic Scatter Gather Element used by driver + */ +struct bfa_sge_s { + u32 sg_len; + void *sg_addr; +}; + +#define bfa_sge_word_swap(__sge) do { \ + ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \ + ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \ + ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \ +} while (0) + +#define bfa_swap_words(_x) ( \ + ((_x) << 32) | ((_x) >> 32)) + +#ifdef __BIGENDIAN +#define bfa_sge_to_be(_x) +#define bfa_sge_to_le(_x) bfa_sge_word_swap(_x) +#define bfa_sgaddr_le(_x) bfa_swap_words(_x) +#else +#define bfa_sge_to_be(_x) bfa_sge_word_swap(_x) +#define bfa_sge_to_le(_x) +#define bfa_sgaddr_le(_x) (_x) +#endif /** * PCI device information required by IOC */ struct bfa_pcidev_s { - int pci_slot; - u8 pci_func; + int pci_slot; + u8 pci_func; u16 device_id; bfa_os_addr_t pci_bar_kva; }; @@ -39,13 +95,18 @@ struct bfa_pcidev_s { * Address */ struct bfa_dma_s { - void *kva; /*! Kernel virtual address */ - u64 pa; /*! Physical address */ + void *kva; /* ! Kernel virtual address */ + u64 pa; /* ! Physical address */ }; #define BFA_DMA_ALIGN_SZ 256 #define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) +/** + * smem size for Crossbow and Catapult + */ +#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ +#define BFI_SMEM_CT_SIZE 0x280000U /* ! 
2.5MB for catapult */ #define bfa_dma_addr_set(dma_addr, pa) \ @@ -101,7 +162,7 @@ struct bfa_ioc_regs_s { * IOC Mailbox structures */ struct bfa_mbox_cmd_s { - struct list_head qe; + struct list_head qe; u32 msg[BFI_IOC_MSGSZ]; }; @@ -110,8 +171,8 @@ struct bfa_mbox_cmd_s { */ typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); struct bfa_ioc_mbox_mod_s { - struct list_head cmd_q; /* pending mbox queue */ - int nmclass; /* number of handlers */ + struct list_head cmd_q; /* pending mbox queue */ + int nmclass; /* number of handlers */ struct { bfa_ioc_mbox_mcfunc_t cbfn; /* message handlers */ void *cbarg; @@ -149,49 +210,54 @@ struct bfa_ioc_hbfail_notify_s { (__notify)->cbarg = (__cbarg); \ } while (0) +struct bfa_iocpf_s { + bfa_fsm_t fsm; + struct bfa_ioc_s *ioc; + u32 retry_count; + bfa_boolean_t auto_recover; +}; + struct bfa_ioc_s { bfa_fsm_t fsm; struct bfa_s *bfa; struct bfa_pcidev_s pcidev; - struct bfa_timer_mod_s *timer_mod; - struct bfa_timer_s ioc_timer; - struct bfa_timer_s sem_timer; + struct bfa_timer_mod_s *timer_mod; + struct bfa_timer_s ioc_timer; + struct bfa_timer_s sem_timer; + struct bfa_timer_s hb_timer; u32 hb_count; - u32 retry_count; struct list_head hb_notify_q; void *dbg_fwsave; int dbg_fwsave_len; bfa_boolean_t dbg_fwsave_once; enum bfi_mclass ioc_mc; - struct bfa_ioc_regs_s ioc_regs; + struct bfa_ioc_regs_s ioc_regs; struct bfa_trc_mod_s *trcmod; - struct bfa_aen_s *aen; - struct bfa_log_mod_s *logm; struct bfa_ioc_drv_stats_s stats; - bfa_boolean_t auto_recover; bfa_boolean_t fcmode; bfa_boolean_t ctdev; bfa_boolean_t cna; bfa_boolean_t pllinit; + bfa_boolean_t stats_busy; /* outstanding stats */ u8 port_id; - struct bfa_dma_s attr_dma; struct bfi_ioc_attr_s *attr; struct bfa_ioc_cbfn_s *cbfn; struct bfa_ioc_mbox_mod_s mbox_mod; - struct bfa_ioc_hwif_s *ioc_hwif; + struct bfa_ioc_hwif_s *ioc_hwif; + struct bfa_iocpf_s iocpf; }; struct bfa_ioc_hwif_s { - bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc); - bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); - void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); - void (*ioc_reg_init) (struct bfa_ioc_s *ioc); - void (*ioc_map_port) (struct bfa_ioc_s *ioc); - void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, - bfa_boolean_t msix); - void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); - void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); + bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode); + bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); + void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); + void (*ioc_reg_init) (struct bfa_ioc_s *ioc); + void (*ioc_map_port) (struct bfa_ioc_s *ioc); + void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, + bfa_boolean_t msix); + void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); + void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); }; #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) @@ -206,18 +272,19 @@ struct bfa_ioc_hwif_s { #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) #define bfa_ioc_speed_sup(__ioc) \ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) -#define bfa_ioc_get_nports(__ioc) \ +#define bfa_ioc_get_nports(__ioc) \ BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) -#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) -#define BFA_IOC_FWIMG_MINSZ (16 * 1024) -#define BFA_IOC_FWIMG_TYPE(__ioc) \ - (((__ioc)->ctdev) ? \ - (((__ioc)->fcmode) ? 
BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \ +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) +#define BFA_IOC_FWIMG_MINSZ (16 * 1024) +#define BFA_IOC_FWIMG_TYPE(__ioc) \ + (((__ioc)->ctdev) ? \ + (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \ BFI_IMAGE_CB_FC) - -#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) -#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ + (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE) +#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) /** @@ -235,18 +302,28 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, /** * IOC interfaces */ -#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc)) -#define bfa_ioc_isr_mode_set(__ioc, __msix) \ + +#define bfa_ioc_pll_init_asic(__ioc) \ + ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ + (__ioc)->fcmode)) + +bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); +bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); +bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb); +bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); + +#define bfa_ioc_isr_mode_set(__ioc, __msix) \ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) -#define bfa_ioc_ownership_reset(__ioc) \ +#define bfa_ioc_ownership_reset(__ioc) \ ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) + void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); + void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, - struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, - struct bfa_trc_mod_s *trcmod, - struct bfa_aen_s *aen, struct bfa_log_mod_s *logm); + struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod); +void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); void bfa_ioc_detach(struct bfa_ioc_s *ioc); void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, enum bfi_mclass mc); @@ -256,21 +333,22 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc); void bfa_ioc_disable(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); -void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); +void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, + u32 boot_param); void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); +bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); -void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc); enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver); void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model); void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, - char *manufacturer); + char *manufacturer); void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev); enum bfa_ioc_state 
bfa_ioc_get_state(struct bfa_ioc_s *ioc); @@ -284,6 +362,8 @@ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc); bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen); +bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, + u32 *offset, int *buflen); u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr); void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); @@ -297,7 +377,8 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr); bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr); -void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event); +bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats); +bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); /* * bfa mfg wwn API functions @@ -310,5 +391,68 @@ wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc); mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc); -#endif /* __BFA_IOC_H__ */ +/* + * F/W Image Size & Chunk + */ +extern u32 bfi_image_ct_fc_size; +extern u32 bfi_image_ct_cna_size; +extern u32 bfi_image_cb_fc_size; +extern u32 *bfi_image_ct_fc; +extern u32 *bfi_image_ct_cna; +extern u32 *bfi_image_cb_fc; + +static inline u32 * +bfi_image_ct_fc_get_chunk(u32 off) +{ return (u32 *)(bfi_image_ct_fc + off); } + +static inline u32 * +bfi_image_ct_cna_get_chunk(u32 off) +{ return (u32 *)(bfi_image_ct_cna + off); } +static inline u32 * +bfi_image_cb_fc_get_chunk(u32 off) +{ return (u32 *)(bfi_image_cb_fc + off); } + +static inline u32* +bfa_cb_image_get_chunk(int type, u32 off) +{ + switch (type) { + case BFI_IMAGE_CT_FC: + return bfi_image_ct_fc_get_chunk(off); break; + case BFI_IMAGE_CT_CNA: + return bfi_image_ct_cna_get_chunk(off); break; + case BFI_IMAGE_CB_FC: + return bfi_image_cb_fc_get_chunk(off); break; + default: return 0; + } +} + +static inline u32 +bfa_cb_image_get_size(int type) +{ + switch (type) { + case BFI_IMAGE_CT_FC: + return bfi_image_ct_fc_size; break; + case BFI_IMAGE_CT_CNA: + return bfi_image_ct_cna_size; break; + case BFI_IMAGE_CB_FC: + return bfi_image_cb_fc_size; break; + default: return 0; + } +} + +/** + * CNA TRCMOD declaration + */ +/* + * !!! Only append to the enums defined here to avoid any versioning + * !!! needed between trace utility and driver version + */ +enum { + BFA_TRC_CNA_PORT = 1, + BFA_TRC_CNA_IOC = 2, + BFA_TRC_CNA_IOC_CB = 3, + BFA_TRC_CNA_IOC_CT = 4, +}; + +#endif /* __BFA_IOC_H__ */ diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index 324bdde7ea2e..d7ac864d8539 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,22 +15,15 @@ * General Public License for more details. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "bfa_ioc.h" +#include "bfi_cbreg.h" +#include "bfa_defs.h" BFA_TRC_FILE(CNA, IOC_CB); /* * forward declarations */ -static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); @@ -95,6 +88,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { * Host <-> LPU mailbox command/status registers */ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } }; @@ -154,6 +148,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) /** * Initialize IOC to port mapping. */ + static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) { @@ -161,6 +156,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) * For crossbow, port id is same as pci function. */ ioc->port_id = bfa_ioc_pcifn(ioc); + bfa_trc(ioc, ioc->port_id); } @@ -172,87 +168,69 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) { } -static bfa_status_t -bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc) +/** + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) { - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - u32 pll_sclk, pll_fclk; /* - * Hold semaphore so that nobody can access the chip during init. + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. */ - bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + bfa_ioc_hw_sem_release(ioc); +} + + + +bfa_status_t +bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) +{ + u32 pll_sclk, pll_fclk; pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | - __APP_PLL_212_P0_1(3U) | - __APP_PLL_212_JITLMT0_1(3U) | - __APP_PLL_212_CNTLMT0_1(3U); + __APP_PLL_212_P0_1(3U) | + __APP_PLL_212_JITLMT0_1(3U) | + __APP_PLL_212_CNTLMT0_1(3U); pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | - __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | - __APP_PLL_400_JITLMT0_1(3U) | - __APP_PLL_400_CNTLMT0_1(3U); - + __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | + __APP_PLL_400_JITLMT0_1(3U) | + __APP_PLL_400_CNTLMT0_1(3U); bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_212_BYPASS | - __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_400_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_400_BYPASS | - __APP_PLL_400_LOGIC_SOFT_RESET); + bfa_reg_write(rb + APP_PLL_212_CTL_REG, + __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(rb + APP_PLL_212_CTL_REG, + __APP_PLL_212_BYPASS | + __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(rb + APP_PLL_400_CTL_REG, + __APP_PLL_400_LOGIC_SOFT_RESET); + 
bfa_reg_write(rb + APP_PLL_400_CTL_REG, + __APP_PLL_400_BYPASS | + __APP_PLL_400_LOGIC_SOFT_RESET); bfa_os_udelay(2); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_400_LOGIC_SOFT_RESET); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET); - - /** - * Wait for PLLs to lock. - */ + bfa_reg_write(rb + APP_PLL_212_CTL_REG, + __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(rb + APP_PLL_400_CTL_REG, + __APP_PLL_400_LOGIC_SOFT_RESET); + bfa_reg_write(rb + APP_PLL_212_CTL_REG, + pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(rb + APP_PLL_400_CTL_REG, + pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET); bfa_os_udelay(2000); bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); - - /* - * release semaphore. - */ - bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk); + bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk); return BFA_STATUS_OK; } - -/** - * Cleanup hw semaphore and usecnt registers - */ -static void -bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) -{ - - /* - * Read the hw sem reg to make sure that it is locked - * before we clear it. If it is not locked, writing 1 - * will lock it instead of clearing it. - */ - bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); - bfa_ioc_hw_sem_release(ioc); -} diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 68f027da001e..f21b82c5f64c 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,22 +15,15 @@ * General Public License for more details. 
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "bfa_ioc.h" +#include "bfi_ctreg.h" +#include "bfa_defs.h" BFA_TRC_FILE(CNA, IOC_CT); /* * forward declarations */ -static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); @@ -78,7 +71,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) /** * If bios boot (flash based) -- do not increment usage count */ - if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) + if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < + BFA_IOC_FWIMG_MINSZ) return BFA_TRUE; bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); @@ -136,7 +130,8 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) /** * If bios boot (flash based) -- do not decrement usage count */ - if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) + if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < + BFA_IOC_FWIMG_MINSZ) return; /** @@ -308,16 +303,47 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) bfa_reg_write(rb + FNC_PERS_REG, r32); } -static bfa_status_t -bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) +/** + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) { - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - u32 pll_sclk, pll_fclk, r32; + + if (ioc->cna) { + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + } /* - * Hold semaphore so that nobody can access the chip during init. + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. 
*/ - bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + bfa_ioc_hw_sem_release(ioc); +} + + + +/* + * Check the firmware state to know if pll_init has been completed already + */ +bfa_boolean_t +bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb) +{ + if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || + (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) + return BFA_TRUE; + + return BFA_FALSE; +} + +bfa_status_t +bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) +{ + u32 pll_sclk, pll_fclk, r32; pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST | __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) | @@ -327,70 +353,50 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | __APP_PLL_425_JITLMT0_1(3U) | __APP_PLL_425_CNTLMT0_1(1U); - - /** - * For catapult, choose operational mode FC/FCoE - */ - if (ioc->fcmode) { + if (fcmode) { bfa_reg_write((rb + OP_MODE), 0); bfa_reg_write((rb + ETH_MAC_SER_REG), __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | __APP_EMS_CHANNEL_SEL); } else { - ioc->pllinit = BFA_TRUE; bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); bfa_reg_write((rb + ETH_MAC_SER_REG), - __APP_EMS_REFCKBUFEN1); + __APP_EMS_REFCKBUFEN1); } - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | + bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | + bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | + bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | + bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); - - /** - * Wait for PLLs to lock. - */ bfa_reg_read(rb + HOSTFN0_INT_MSK); bfa_os_udelay(2000); bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | + bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | __APP_PLL_312_ENABLE); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | + bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | __APP_PLL_425_ENABLE); - - /** - * PSS memory reset is asserted at power-on-reset. 
Need to clear - * this before running EDRAM BISTR - */ - if (ioc->cna) { + if (!fcmode) { bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P); bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P); } - r32 = bfa_reg_read((rb + PSS_CTL_REG)); r32 &= ~__PSS_LMEM_RESET; bfa_reg_write((rb + PSS_CTL_REG), r32); bfa_os_udelay(1000); - - if (ioc->cna) { + if (!fcmode) { bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0); bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0); } @@ -398,39 +404,6 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); bfa_os_udelay(1000); r32 = bfa_reg_read((rb + MBIST_STAT_REG)); - bfa_trc(ioc, r32); - - /** - * Clear BISTR - */ bfa_reg_write((rb + MBIST_CTL_REG), 0); - - /* - * release semaphore. - */ - bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); - return BFA_STATUS_OK; } - -/** - * Cleanup hw semaphore and usecnt registers - */ -static void -bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) -{ - - if (ioc->cna) { - bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); - bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); - } - - /* - * Read the hw sem reg to make sure that it is locked - * before we clear it. If it is not locked, writing 1 - * will lock it instead of clearing it. - */ - bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); - bfa_ioc_hw_sem_release(ioc); -} diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c deleted file mode 100644 index 90820be99864..000000000000 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ /dev/null @@ -1,927 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "bfa_callback_priv.h" -#include "bfad_drv.h" - -BFA_TRC_FILE(HAL, IOCFC); - -/** - * IOC local definitions - */ -#define BFA_IOCFC_TOV 5000 /* msecs */ - -enum { - BFA_IOCFC_ACT_NONE = 0, - BFA_IOCFC_ACT_INIT = 1, - BFA_IOCFC_ACT_STOP = 2, - BFA_IOCFC_ACT_DISABLE = 3, -}; - -/* - * forward declarations - */ -static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); -static void bfa_iocfc_disable_cbfn(void *bfa_arg); -static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); -static void bfa_iocfc_reset_cbfn(void *bfa_arg); -static void bfa_iocfc_stats_clear(void *bfa_arg); -static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, - struct bfa_fw_stats_s *s); -static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete); -static void bfa_iocfc_stats_clr_timeout(void *bfa_arg); -static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete); -static void bfa_iocfc_stats_timeout(void *bfa_arg); - -static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; - -/** - * bfa_ioc_pvt BFA IOC private functions - */ - -static void -bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) -{ - int i, per_reqq_sz, per_rspq_sz; - - per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); - per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); - - /* - * Calculate CQ size - */ - for (i = 0; i < cfg->fwcfg.num_cqs; i++) { - *dm_len = *dm_len + per_reqq_sz; - *dm_len = *dm_len + per_rspq_sz; - } - - /* - * Calculate Shadow CI/PI size - */ - for (i = 0; i < cfg->fwcfg.num_cqs; i++) - *dm_len += (2 * BFA_CACHELINE_SZ); -} - -static void -bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) -{ - *dm_len += - BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); - *dm_len += - BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), - BFA_CACHELINE_SZ); - *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ); -} - -/** - * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ - */ -static void -bfa_iocfc_send_cfg(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfg_req_s cfg_req; - struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; - struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; - int i; - - bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); - bfa_trc(bfa, cfg->fwcfg.num_cqs); - - bfa_iocfc_reset_queues(bfa); - - /** - * initialize IOC configuration info - */ - cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; - cfg_info->num_cqs = cfg->fwcfg.num_cqs; - - bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); - bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa); - - /** - * dma map REQ and RSP circular queues and shadow pointers - */ - for (i = 0; i < cfg->fwcfg.num_cqs; i++) { - bfa_dma_be_addr_set(cfg_info->req_cq_ba[i], - iocfc->req_cq_ba[i].pa); - bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], - iocfc->req_cq_shadow_ci[i].pa); - cfg_info->req_cq_elems[i] = - bfa_os_htons(cfg->drvcfg.num_reqq_elems); - - bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], - iocfc->rsp_cq_ba[i].pa); - bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], - iocfc->rsp_cq_shadow_pi[i].pa); - cfg_info->rsp_cq_elems[i] = - bfa_os_htons(cfg->drvcfg.num_rspq_elems); - } - - /** - * Enable interrupt coalescing if it is driver init path - * and not ioc disable/enable path. 
- */ - if (!iocfc->cfgdone) - cfg_info->intr_attr.coalesce = BFA_TRUE; - - iocfc->cfgdone = BFA_FALSE; - - /** - * dma map IOC configuration itself - */ - bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, - bfa_lpuid(bfa)); - bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa); - - bfa_ioc_mbox_send(&bfa->ioc, &cfg_req, - sizeof(struct bfi_iocfc_cfg_req_s)); -} - -static void -bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_pcidev_s *pcidev) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - bfa->bfad = bfad; - iocfc->bfa = bfa; - iocfc->action = BFA_IOCFC_ACT_NONE; - - bfa_os_assign(iocfc->cfg, *cfg); - - /** - * Initialize chip specific handlers. - */ - if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { - iocfc->hwif.hw_reginit = bfa_hwct_reginit; - iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; - iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; - iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; - iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; - iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall; - iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set; - iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; - iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range; - } else { - iocfc->hwif.hw_reginit = bfa_hwcb_reginit; - iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; - iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; - iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; - iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; - iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall; - iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set; - iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs; - iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range; - } - - iocfc->hwif.hw_reginit(bfa); - bfa->msix.nvecs = 0; -} - -static void -bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo) -{ - u8 *dm_kva; - u64 dm_pa; - int i, per_reqq_sz, per_rspq_sz; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - int dbgsz; - - dm_kva = bfa_meminfo_dma_virt(meminfo); - dm_pa = bfa_meminfo_dma_phys(meminfo); - - /* - * First allocate dma memory for IOC. 
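bfa_iocfc_init_mem above binds either the CT or the CB register handlers into a per-instance table of function pointers, keyed off the ASIC id, so that the rest of the IOCFC code can dispatch through iocfc->hwif without further chip checks. A sketch of the same pattern in isolation, assuming made-up names (hw_ops, is_ct_asic, the device id) rather than the driver's:

#include <stdio.h>
#include <stdbool.h>

/* Per-chip operations bound once at attach time (illustrative). */
struct hw_ops {
	void (*reginit)(void);
	void (*msix_install)(void);
};

static void ct_reginit(void)      { puts("CT register init"); }
static void ct_msix_install(void) { puts("CT MSI-X install"); }
static void cb_reginit(void)      { puts("CB register init"); }
static void cb_msix_install(void) { puts("CB MSI-X install"); }

static bool is_ct_asic(unsigned devid)
{
	return devid == 0x14;	/* illustrative device id, not Brocade's */
}

static void bind_hw_ops(struct hw_ops *ops, unsigned devid)
{
	if (is_ct_asic(devid)) {
		ops->reginit = ct_reginit;
		ops->msix_install = ct_msix_install;
	} else {
		ops->reginit = cb_reginit;
		ops->msix_install = cb_msix_install;
	}
	/* From here on, callers dispatch through ops->... only. */
	ops->reginit();
}

int main(void)
{
	struct hw_ops ops;

	bind_hw_ops(&ops, 0x14);
	ops.msix_install();
	return 0;
}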
- */ - bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); - dm_kva += bfa_ioc_meminfo(); - dm_pa += bfa_ioc_meminfo(); - - /* - * Claim DMA-able memory for the request/response queues and for shadow - * ci/pi registers - */ - per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); - per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ), - BFA_DMA_ALIGN_SZ); - - for (i = 0; i < cfg->fwcfg.num_cqs; i++) { - iocfc->req_cq_ba[i].kva = dm_kva; - iocfc->req_cq_ba[i].pa = dm_pa; - bfa_os_memset(dm_kva, 0, per_reqq_sz); - dm_kva += per_reqq_sz; - dm_pa += per_reqq_sz; - - iocfc->rsp_cq_ba[i].kva = dm_kva; - iocfc->rsp_cq_ba[i].pa = dm_pa; - bfa_os_memset(dm_kva, 0, per_rspq_sz); - dm_kva += per_rspq_sz; - dm_pa += per_rspq_sz; - } - - for (i = 0; i < cfg->fwcfg.num_cqs; i++) { - iocfc->req_cq_shadow_ci[i].kva = dm_kva; - iocfc->req_cq_shadow_ci[i].pa = dm_pa; - dm_kva += BFA_CACHELINE_SZ; - dm_pa += BFA_CACHELINE_SZ; - - iocfc->rsp_cq_shadow_pi[i].kva = dm_kva; - iocfc->rsp_cq_shadow_pi[i].pa = dm_pa; - dm_kva += BFA_CACHELINE_SZ; - dm_pa += BFA_CACHELINE_SZ; - } - - /* - * Claim DMA-able memory for the config info page - */ - bfa->iocfc.cfg_info.kva = dm_kva; - bfa->iocfc.cfg_info.pa = dm_pa; - bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva; - dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); - dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ); - - /* - * Claim DMA-able memory for the config response - */ - bfa->iocfc.cfgrsp_dma.kva = dm_kva; - bfa->iocfc.cfgrsp_dma.pa = dm_pa; - bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva; - - dm_kva += - BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), - BFA_CACHELINE_SZ); - dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s), - BFA_CACHELINE_SZ); - - /* - * Claim DMA-able memory for iocfc stats - */ - bfa->iocfc.stats_kva = dm_kva; - bfa->iocfc.stats_pa = dm_pa; - bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva; - dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ); - dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ); - - bfa_meminfo_dma_virt(meminfo) = dm_kva; - bfa_meminfo_dma_phys(meminfo) = dm_pa; - - dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover); - if (dbgsz > 0) { - bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); - bfa_meminfo_kva(meminfo) += dbgsz; - } -} - -/** - * Start BFA submodules. - */ -static void -bfa_iocfc_start_submod(struct bfa_s *bfa) -{ - int i; - - bfa->rme_process = BFA_TRUE; - - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->start(bfa); -} - -/** - * Disable BFA submodules. 
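bfa_iocfc_mem_claim above walks one pre-allocated DMA region with a pair of cursors, a kernel-virtual one and a physical one, handing each ring and config page its slice and advancing both cursors by the same amount. The sketch below shows that carving pattern on its own; dma_cursor, claim and the sizes are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Paired virtual/physical cursors into one DMA-able region (illustrative). */
struct dma_cursor {
	uint8_t  *kva;	/* CPU-visible address */
	uint64_t  pa;	/* device-visible address */
};

/* Hand out 'len' bytes and advance both cursors in lock step. */
static void claim(struct dma_cursor *cur, size_t len,
		  uint8_t **kva_out, uint64_t *pa_out)
{
	*kva_out = cur->kva;
	*pa_out  = cur->pa;
	memset(cur->kva, 0, len);	/* rings start out zeroed */
	cur->kva += len;
	cur->pa  += len;
}

int main(void)
{
	uint8_t block[4096];
	struct dma_cursor cur = { block, 0x10000000ULL };
	uint8_t *req_kva, *rsp_kva;
	uint64_t req_pa, rsp_pa;

	claim(&cur, 1024, &req_kva, &req_pa);	/* request ring */
	claim(&cur, 1024, &rsp_kva, &rsp_pa);	/* response ring */
	printf("req pa 0x%llx, rsp pa 0x%llx\n",
	       (unsigned long long)req_pa, (unsigned long long)rsp_pa);
	return 0;
}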
- */ -static void -bfa_iocfc_disable_submod(struct bfa_s *bfa) -{ - int i; - - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->iocdisable(bfa); -} - -static void -bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) -{ - struct bfa_s *bfa = bfa_arg; - - if (complete) { - if (bfa->iocfc.cfgdone) - bfa_cb_init(bfa->bfad, BFA_STATUS_OK); - else - bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); - } else { - if (bfa->iocfc.cfgdone) - bfa->iocfc.action = BFA_IOCFC_ACT_NONE; - } -} - -static void -bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl) -{ - struct bfa_s *bfa = bfa_arg; - struct bfad_s *bfad = bfa->bfad; - - if (compl) - complete(&bfad->comp); - - else - bfa->iocfc.action = BFA_IOCFC_ACT_NONE; -} - -static void -bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) -{ - struct bfa_s *bfa = bfa_arg; - struct bfad_s *bfad = bfa->bfad; - - if (compl) - complete(&bfad->disable_comp); -} - -/** - * Update BFA configuration from firmware configuration. - */ -static void -bfa_iocfc_cfgrsp(struct bfa_s *bfa) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; - - fwcfg->num_cqs = fwcfg->num_cqs; - fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); - fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); - fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); - fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); - fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); - - iocfc->cfgdone = BFA_TRUE; - - /** - * Configuration is complete - initialize/start submodules - */ - bfa_fcport_init(bfa); - - if (iocfc->action == BFA_IOCFC_ACT_INIT) - bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa); - else - bfa_iocfc_start_submod(bfa); -} - -static void -bfa_iocfc_stats_clear(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_stats_req_s stats_req; - - bfa_timer_start(bfa, &iocfc->stats_timer, - bfa_iocfc_stats_clr_timeout, bfa, - BFA_IOCFC_TOV); - - bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ, - bfa_lpuid(bfa)); - bfa_ioc_mbox_send(&bfa->ioc, &stats_req, - sizeof(struct bfi_iocfc_stats_req_s)); -} - -static void -bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s) -{ - u32 *dip = (u32 *) d; - u32 *sip = (u32 *) s; - int i; - - for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++) - dip[i] = bfa_os_ntohl(sip[i]); -} - -static void -bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete) -{ - struct bfa_s *bfa = bfa_arg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - if (complete) { - bfa_ioc_clr_stats(&bfa->ioc); - iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status); - } else { - iocfc->stats_busy = BFA_FALSE; - iocfc->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_iocfc_stats_clr_timeout(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - bfa_trc(bfa, 0); - - iocfc->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa); -} - -static void -bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete) -{ - struct bfa_s *bfa = bfa_arg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - if (complete) { - if (iocfc->stats_status == BFA_STATUS_OK) { - bfa_os_memset(iocfc->stats_ret, 0, - sizeof(*iocfc->stats_ret)); - bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats, - iocfc->fw_stats); - } - iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status); 
- } else { - iocfc->stats_busy = BFA_FALSE; - iocfc->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_iocfc_stats_timeout(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - bfa_trc(bfa, 0); - - iocfc->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa); -} - -static void -bfa_iocfc_stats_query(struct bfa_s *bfa) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_stats_req_s stats_req; - - bfa_timer_start(bfa, &iocfc->stats_timer, - bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV); - - bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ, - bfa_lpuid(bfa)); - bfa_ioc_mbox_send(&bfa->ioc, &stats_req, - sizeof(struct bfi_iocfc_stats_req_s)); -} - -void -bfa_iocfc_reset_queues(struct bfa_s *bfa) -{ - int q; - - for (q = 0; q < BFI_IOC_MAX_CQS; q++) { - bfa_reqq_ci(bfa, q) = 0; - bfa_reqq_pi(bfa, q) = 0; - bfa_rspq_ci(bfa, q) = 0; - bfa_rspq_pi(bfa, q) = 0; - } -} - -/** - * IOC enable request is complete - */ -static void -bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) -{ - struct bfa_s *bfa = bfa_arg; - - if (status != BFA_STATUS_OK) { - bfa_isr_disable(bfa); - if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) - bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, - bfa_iocfc_init_cb, bfa); - return; - } - - bfa_iocfc_send_cfg(bfa); -} - -/** - * IOC disable request is complete - */ -static void -bfa_iocfc_disable_cbfn(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - - bfa_isr_disable(bfa); - bfa_iocfc_disable_submod(bfa); - - if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP) - bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, - bfa); - else { - bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE); - bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, - bfa); - } -} - -/** - * Notify sub-modules of hardware failure. - */ -static void -bfa_iocfc_hbfail_cbfn(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - - bfa->rme_process = BFA_FALSE; - - bfa_isr_disable(bfa); - bfa_iocfc_disable_submod(bfa); - - if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) - bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb, - bfa); -} - -/** - * Actions on chip-reset completion. - */ -static void -bfa_iocfc_reset_cbfn(void *bfa_arg) -{ - struct bfa_s *bfa = bfa_arg; - - bfa_iocfc_reset_queues(bfa); - bfa_isr_enable(bfa); -} - - - -/** - * bfa_ioc_public - */ - -/** - * Query IOC memory requirement information. - */ -void -bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) -{ - /* dma memory for IOC */ - *dm_len += bfa_ioc_meminfo(); - - bfa_iocfc_fw_cfg_sz(cfg, dm_len); - bfa_iocfc_cqs_sz(cfg, dm_len); - *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); -} - -/** - * Query IOC memory requirement information. - */ -void -bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) -{ - int i; - - bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn; - bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn; - bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn; - bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn; - - bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod, - bfa->trcmod, bfa->aen, bfa->logm); - - /** - * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 
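The statistics path earlier in this file (bfa_iocfc_stats_swap and the cfgrsp field conversions) treats the firmware block as an array of big-endian 32-bit words and converts it in place with ntohl. A self-contained sketch of that word-wise swap, assuming an illustrative fw_stats layout and the userspace ntohl/htonl from arpa/inet.h in place of the kernel helpers:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl(); userspace stand-ins */

/* Illustrative firmware stats layout: any struct made only of u32 words. */
struct fw_stats {
	uint32_t ioc_isrs;
	uint32_t mbox_cmds;
	uint32_t cq_drops;
};

/* Convert a big-endian stats block to host order, one 32-bit word at a time. */
static void stats_swap(struct fw_stats *dst, const struct fw_stats *src)
{
	const uint32_t *s = (const uint32_t *)src;
	uint32_t *d = (uint32_t *)dst;
	size_t i, n = sizeof(*src) / sizeof(uint32_t);

	for (i = 0; i < n; i++)
		d[i] = ntohl(s[i]);
}

int main(void)
{
	struct fw_stats wire = { htonl(7), htonl(42), htonl(1) }, host;

	stats_swap(&host, &wire);
	printf("%u %u %u\n", host.ioc_isrs, host.mbox_cmds, host.cq_drops);
	return 0;
}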
- */ - if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) - bfa_ioc_set_fcmode(&bfa->ioc); - - bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); - bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); - - bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); - bfa_iocfc_mem_claim(bfa, cfg, meminfo); - bfa_timer_init(&bfa->timer_mod); - - INIT_LIST_HEAD(&bfa->comp_q); - for (i = 0; i < BFI_IOC_MAX_CQS; i++) - INIT_LIST_HEAD(&bfa->reqq_waitq[i]); -} - -/** - * Query IOC memory requirement information. - */ -void -bfa_iocfc_detach(struct bfa_s *bfa) -{ - bfa_ioc_detach(&bfa->ioc); -} - -/** - * Query IOC memory requirement information. - */ -void -bfa_iocfc_init(struct bfa_s *bfa) -{ - bfa->iocfc.action = BFA_IOCFC_ACT_INIT; - bfa_ioc_enable(&bfa->ioc); -} - -/** - * IOC start called from bfa_start(). Called to start IOC operations - * at driver instantiation for this instance. - */ -void -bfa_iocfc_start(struct bfa_s *bfa) -{ - if (bfa->iocfc.cfgdone) - bfa_iocfc_start_submod(bfa); -} - -/** - * IOC stop called from bfa_stop(). Called only when driver is unloaded - * for this instance. - */ -void -bfa_iocfc_stop(struct bfa_s *bfa) -{ - bfa->iocfc.action = BFA_IOCFC_ACT_STOP; - - bfa->rme_process = BFA_FALSE; - bfa_ioc_disable(&bfa->ioc); -} - -void -bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) -{ - struct bfa_s *bfa = bfaarg; - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - union bfi_iocfc_i2h_msg_u *msg; - - msg = (union bfi_iocfc_i2h_msg_u *) m; - bfa_trc(bfa, msg->mh.msg_id); - - switch (msg->mh.msg_id) { - case BFI_IOCFC_I2H_CFG_REPLY: - iocfc->cfg_reply = &msg->cfg_reply; - bfa_iocfc_cfgrsp(bfa); - break; - - case BFI_IOCFC_I2H_GET_STATS_RSP: - if (iocfc->stats_busy == BFA_FALSE - || iocfc->stats_status == BFA_STATUS_ETIMER) - break; - - bfa_timer_stop(&iocfc->stats_timer); - iocfc->stats_status = BFA_STATUS_OK; - bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, - bfa); - break; - case BFI_IOCFC_I2H_CLEAR_STATS_RSP: - /* - * check for timer pop before processing the rsp - */ - if (iocfc->stats_busy == BFA_FALSE - || iocfc->stats_status == BFA_STATUS_ETIMER) - break; - - bfa_timer_stop(&iocfc->stats_timer); - iocfc->stats_status = BFA_STATUS_OK; - bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, - bfa_iocfc_stats_clr_cb, bfa); - break; - case BFI_IOCFC_I2H_UPDATEQ_RSP: - iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); - break; - default: - bfa_assert(0); - } -} - -#ifndef BFA_BIOS_BUILD -void -bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr) -{ - bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr); -} - -u64 -bfa_adapter_get_id(struct bfa_s *bfa) -{ - return bfa_ioc_get_adid(&bfa->ioc); -} - -void -bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; - - attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? - bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) : - bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay); - - attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? 
- bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) : - bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency); - - attr->config = iocfc->cfg; - -} - -bfa_status_t -bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_set_intr_req_s *m; - - iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; - iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay); - iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency); - - if (!bfa_iocfc_is_operational(bfa)) - return BFA_STATUS_OK; - - m = bfa_reqq_next(bfa, BFA_REQQ_IOC); - if (!m) - return BFA_STATUS_DEVBUSY; - - bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ, - bfa_lpuid(bfa)); - m->coalesce = iocfc->cfginfo->intr_attr.coalesce; - m->delay = iocfc->cfginfo->intr_attr.delay; - m->latency = iocfc->cfginfo->intr_attr.latency; - - - bfa_trc(bfa, attr->delay); - bfa_trc(bfa, attr->latency); - - bfa_reqq_produce(bfa, BFA_REQQ_IOC); - return BFA_STATUS_OK; -} - -void -bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); - bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); -} - -bfa_status_t -bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, - bfa_cb_ioc_t cbfn, void *cbarg) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - if (iocfc->stats_busy) { - bfa_trc(bfa, iocfc->stats_busy); - return BFA_STATUS_DEVBUSY; - } - - if (!bfa_iocfc_is_operational(bfa)) { - bfa_trc(bfa, 0); - return BFA_STATUS_IOC_NON_OP; - } - - iocfc->stats_busy = BFA_TRUE; - iocfc->stats_ret = stats; - iocfc->stats_cbfn = cbfn; - iocfc->stats_cbarg = cbarg; - - bfa_iocfc_stats_query(bfa); - - return BFA_STATUS_OK; -} - -bfa_status_t -bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - - if (iocfc->stats_busy) { - bfa_trc(bfa, iocfc->stats_busy); - return BFA_STATUS_DEVBUSY; - } - - if (!bfa_iocfc_is_operational(bfa)) { - bfa_trc(bfa, 0); - return BFA_STATUS_IOC_NON_OP; - } - - iocfc->stats_busy = BFA_TRUE; - iocfc->stats_cbfn = cbfn; - iocfc->stats_cbarg = cbarg; - - bfa_iocfc_stats_clear(bfa); - return BFA_STATUS_OK; -} - -/** - * Enable IOC after it is disabled. - */ -void -bfa_iocfc_enable(struct bfa_s *bfa) -{ - bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, - "IOC Enable"); - bfa_ioc_enable(&bfa->ioc); -} - -void -bfa_iocfc_disable(struct bfa_s *bfa) -{ - bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, - "IOC Disable"); - bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE; - - bfa->rme_process = BFA_FALSE; - bfa_ioc_disable(&bfa->ioc); -} - - -bfa_boolean_t -bfa_iocfc_is_operational(struct bfa_s *bfa) -{ - return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; -} - -/** - * Return boot target port wwns -- read from boot information in flash. 
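bfa_iocfc_get_stats and bfa_iocfc_clear_stats above follow a single-outstanding-request discipline: the call is rejected with DEVBUSY if a query is already in flight and with IOC_NON_OP if the IOC is down; otherwise the caller's callback is recorded and the mailbox request goes out. A stripped-down sketch of that guard, with illustrative names and status codes:

#include <stdio.h>
#include <stdbool.h>

enum status { STS_OK, STS_DEVBUSY, STS_NON_OP };

typedef void (*stats_cb_t)(void *arg, enum status sts);

/* Illustrative per-instance state; the driver keeps this in bfa_iocfc_s. */
struct stats_ctx {
	bool        busy;
	bool        operational;
	stats_cb_t  cbfn;
	void       *cbarg;
};

static void send_stats_request(struct stats_ctx *ctx)
{
	/* Stand-in for the mailbox send plus timeout timer in the driver. */
	(void)ctx;
	printf("stats request sent\n");
}

static enum status stats_query(struct stats_ctx *ctx, stats_cb_t cbfn, void *arg)
{
	if (ctx->busy)
		return STS_DEVBUSY;	/* only one query outstanding */
	if (!ctx->operational)
		return STS_NON_OP;

	ctx->busy = true;
	ctx->cbfn = cbfn;
	ctx->cbarg = arg;
	send_stats_request(ctx);
	return STS_OK;
}

static void on_done(void *arg, enum status sts)
{
	(void)arg;
	printf("done: %d\n", sts);
}

int main(void)
{
	struct stats_ctx ctx = { .busy = false, .operational = true };

	if (stats_query(&ctx, on_done, NULL) == STS_OK)
		printf("query accepted\n");
	return 0;
}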
- */ -void -bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - int i; - - if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) { - bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns); - *nwwns = cfgrsp->pbc_cfg.nbluns; - for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++) - wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn; - - return; - } - - *nwwns = cfgrsp->bootwwns.nwwns; - memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); -} - -void -bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - - pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; - pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; - pbcfg->speed = cfgrsp->pbc_cfg.port_speed; - memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); -} - -int -bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; - - memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport)); - return cfgrsp->pbc_cfg.nvports; -} - - -#endif - - diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h deleted file mode 100644 index 74a6a048d1fd..000000000000 --- a/drivers/scsi/bfa/bfa_iocfc.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_IOCFC_H__ -#define __BFA_IOCFC_H__ - -#include -#include -#include -#include -#include - -#define BFA_REQQ_NELEMS_MIN (4) -#define BFA_RSPQ_NELEMS_MIN (4) - -struct bfa_iocfc_regs_s { - bfa_os_addr_t intr_status; - bfa_os_addr_t intr_mask; - bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS]; - bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS]; - bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS]; - bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS]; -}; - -/** - * MSIX vector handlers - */ -#define BFA_MSIX_MAX_VECTORS 22 -typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec); -struct bfa_msix_s { - int nvecs; - bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS]; -}; - -/** - * Chip specific interfaces - */ -struct bfa_hwif_s { - void (*hw_reginit)(struct bfa_s *bfa); - void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); - void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); - void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); - void (*hw_msix_install)(struct bfa_s *bfa); - void (*hw_msix_uninstall)(struct bfa_s *bfa); - void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix); - void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap, - u32 *nvecs, u32 *maxvec); - void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start, - u32 *end); -}; -typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status); - -struct bfa_iocfc_s { - struct bfa_s *bfa; - struct bfa_iocfc_cfg_s cfg; - int action; - - u32 req_cq_pi[BFI_IOC_MAX_CQS]; - u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; - - struct bfa_cb_qe_s init_hcb_qe; - struct bfa_cb_qe_s stop_hcb_qe; - struct bfa_cb_qe_s dis_hcb_qe; - struct bfa_cb_qe_s stats_hcb_qe; - bfa_boolean_t cfgdone; - - struct bfa_dma_s cfg_info; - struct bfi_iocfc_cfg_s *cfginfo; - struct bfa_dma_s cfgrsp_dma; - struct bfi_iocfc_cfgrsp_s *cfgrsp; - struct bfi_iocfc_cfg_reply_s *cfg_reply; - - u8 *stats_kva; - u64 stats_pa; - struct bfa_fw_stats_s *fw_stats; - struct bfa_timer_s stats_timer; /* timer */ - struct bfa_iocfc_stats_s *stats_ret; /* driver stats location */ - bfa_status_t stats_status; /* stats/statsclr status */ - bfa_boolean_t stats_busy; /* outstanding stats */ - bfa_cb_ioc_t stats_cbfn; /* driver callback function */ - void *stats_cbarg; /* user callback arg */ - - struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS]; - struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS]; - struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS]; - struct bfa_dma_s rsp_cq_shadow_pi[BFI_IOC_MAX_CQS]; - struct bfa_iocfc_regs_s bfa_regs; /* BFA device registers */ - struct bfa_hwif_s hwif; - - bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */ - void *updateq_cbarg; /* bios callback arg */ - u32 intr_mask; -}; - -#define bfa_lpuid(__bfa) bfa_ioc_portid(&(__bfa)->ioc) -#define bfa_msix_init(__bfa, __nvecs) \ - ((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs)) -#define bfa_msix_install(__bfa) \ - ((__bfa)->iocfc.hwif.hw_msix_install(__bfa)) -#define bfa_msix_uninstall(__bfa) \ - ((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa)) -#define bfa_isr_mode_set(__bfa, __msix) \ - ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix)) -#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \ - ((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \ - __nvecs, __maxvec)) -#define bfa_msix_get_rme_range(__bfa, __start, __end) \ - ((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end)) - -/* - * FC specific IOC functions. 
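The bfa_msix_s table above holds up to BFA_MSIX_MAX_VECTORS (22) handlers, one per MSI-X vector, installed at init time and indexed directly by the vector that fired. A sketch of that dispatch shape, assuming illustrative handler names and vector assignments:

#include <stdio.h>

#define MAX_VECS 22	/* illustrative cap, mirrors BFA_MSIX_MAX_VECTORS */

struct adapter;		/* opaque illustrative device context */
typedef void (*msix_handler_t)(struct adapter *ad, int vec);

struct msix_table {
	int            nvecs;
	msix_handler_t handler[MAX_VECS];
};

static void handle_reqq(struct adapter *ad, int vec)
{
	(void)ad;
	printf("reqq ack, vec %d\n", vec);
}

static void handle_rspq(struct adapter *ad, int vec)
{
	(void)ad;
	printf("rspq completion, vec %d\n", vec);
}

/* The ISR just indexes the table with the vector that fired. */
static void msix_isr(struct msix_table *t, struct adapter *ad, int vec)
{
	if (vec < t->nvecs && t->handler[vec])
		t->handler[vec](ad, vec);
}

int main(void)
{
	struct msix_table t = { .nvecs = 2,
				.handler = { handle_reqq, handle_rspq } };

	msix_isr(&t, NULL, 1);
	return 0;
}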
- */ -void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len); -void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, - struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, - struct bfa_pcidev_s *pcidev); -void bfa_iocfc_detach(struct bfa_s *bfa); -void bfa_iocfc_init(struct bfa_s *bfa); -void bfa_iocfc_start(struct bfa_s *bfa); -void bfa_iocfc_stop(struct bfa_s *bfa); -void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg); -void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa); -bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa); -void bfa_iocfc_reset_queues(struct bfa_s *bfa); -void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba, - u32 reqq_sci, u32 rspq_spi, - bfa_cb_iocfc_t cbfn, void *cbarg); - -void bfa_msix_all(struct bfa_s *bfa, int vec); -void bfa_msix_reqq(struct bfa_s *bfa, int vec); -void bfa_msix_rspq(struct bfa_s *bfa, int vec); -void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); - -void bfa_hwcb_reginit(struct bfa_s *bfa); -void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq); -void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); -void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); -void bfa_hwcb_msix_install(struct bfa_s *bfa); -void bfa_hwcb_msix_uninstall(struct bfa_s *bfa); -void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); -void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, - u32 *nvecs, u32 *maxvec); -void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end); -void bfa_hwct_reginit(struct bfa_s *bfa); -void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); -void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); -void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); -void bfa_hwct_msix_install(struct bfa_s *bfa); -void bfa_hwct_msix_uninstall(struct bfa_s *bfa); -void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); -void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, - u32 *nvecs, u32 *maxvec); -void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end); - -void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len); -void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi, - bfa_boolean_t mincfg); -void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); -void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, - struct bfa_boot_pbc_s *pbcfg); -int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, - struct bfi_pbc_vport_s *pbc_vport); - -#endif /* __BFA_IOCFC_H__ */ - diff --git a/drivers/scsi/bfa/bfa_iocfc_q.c b/drivers/scsi/bfa/bfa_iocfc_q.c deleted file mode 100644 index 500a17df40b2..000000000000 --- a/drivers/scsi/bfa/bfa_iocfc_q.c +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include "bfa_intr_priv.h" - -BFA_TRC_FILE(HAL, IOCFC_Q); - -void -bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba, - u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn, - void *cbarg) -{ - struct bfa_iocfc_s *iocfc = &bfa->iocfc; - struct bfi_iocfc_updateq_req_s updateq_req; - - iocfc->updateq_cbfn = cbfn; - iocfc->updateq_cbarg = cbarg; - - bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ, - bfa_lpuid(bfa)); - - updateq_req.reqq_ba = bfa_os_htonl(reqq_ba); - updateq_req.rspq_ba = bfa_os_htonl(rspq_ba); - updateq_req.reqq_sci = bfa_os_htonl(reqq_sci); - updateq_req.rspq_spi = bfa_os_htonl(rspq_spi); - - bfa_ioc_mbox_send(&bfa->ioc, &updateq_req, - sizeof(struct bfi_iocfc_updateq_req_s)); -} diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c deleted file mode 100644 index bdfdc19915f8..000000000000 --- a/drivers/scsi/bfa/bfa_ioim.c +++ /dev/null @@ -1,1364 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include - -BFA_TRC_FILE(HAL, IOIM); - -/* - * forward declarations. - */ -static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); -static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim); -static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim); -static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); -static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); -static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); - -/** - * bfa_ioim_sm - */ - -/** - * IO state machine events - */ -enum bfa_ioim_event { - BFA_IOIM_SM_START = 1, /* io start request from host */ - BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */ - BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */ - BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */ - BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */ - BFA_IOIM_SM_FREE = 6, /* io resource is freed */ - BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */ - BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */ - BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */ - BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */ - BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */ - BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */ - BFA_IOIM_SM_HCB = 13, /* bfa callback complete */ - BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */ - BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */ - BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */ - BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */ - BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */ -}; - -/* - * forward declaration of IO state machine - */ -static 
void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); -static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, - enum bfa_ioim_event event); - -/** - * IO is not started (unallocated). - */ -static void -bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc_fp(ioim->bfa, ioim->iotag); - bfa_trc_fp(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_START: - if (!bfa_itnim_is_online(ioim->itnim)) { - if (!bfa_itnim_hold_io(ioim->itnim)) { - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, - &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_pathtov, ioim); - } else { - list_del(&ioim->qe); - list_add_tail(&ioim->qe, - &ioim->itnim->pending_q); - } - break; - } - - if (ioim->nsges > BFI_SGE_INLINE) { - if (!bfa_ioim_sge_setup(ioim)) { - bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); - return; - } - } - - if (!bfa_ioim_send_ioreq(ioim)) { - bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); - break; - } - - bfa_sm_set_state(ioim, bfa_ioim_sm_active); - break; - - case BFA_IOIM_SM_IOTOV: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_pathtov, ioim); - break; - - case BFA_IOIM_SM_ABORT: - /** - * IO in pending queue can get abort requests. Complete abort - * requests immediately. - */ - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO is waiting for SG pages. 
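The IOIM state machine above encodes each state as a function that receives the I/O and an event; transitions are just assignments of a new state function, and events are delivered by calling through the current pointer (bfa_sm_set_state / bfa_sm_send_event). A compact, self-contained sketch of that style of state machine, with a made-up two-state example rather than the driver's states:

#include <stdio.h>

enum io_event { EV_START, EV_COMP, EV_ABORT };

struct io;
typedef void (*io_state_t)(struct io *io, enum io_event ev);

struct io {
	io_state_t state;	/* current state is just a function pointer */
	int        tag;
};

static void sm_uninit(struct io *io, enum io_event ev);
static void sm_active(struct io *io, enum io_event ev);

#define sm_set_state(io, st)	((io)->state = (st))
#define sm_send_event(io, ev)	((io)->state((io), (ev)))

static void sm_uninit(struct io *io, enum io_event ev)
{
	switch (ev) {
	case EV_START:
		printf("io %d: uninit -> active\n", io->tag);
		sm_set_state(io, sm_active);
		break;
	default:
		printf("io %d: unexpected event %d in uninit\n", io->tag, ev);
	}
}

static void sm_active(struct io *io, enum io_event ev)
{
	switch (ev) {
	case EV_COMP:
	case EV_ABORT:
		printf("io %d: active -> uninit on event %d\n", io->tag, ev);
		sm_set_state(io, sm_uninit);
		break;
	default:
		printf("io %d: unexpected event %d in active\n", io->tag, ev);
	}
}

int main(void)
{
	struct io io = { .state = sm_uninit, .tag = 1 };

	sm_send_event(&io, EV_START);
	sm_send_event(&io, EV_COMP);
	return 0;
}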
- */ -static void -bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_SGALLOCED: - if (!bfa_ioim_send_ioreq(ioim)) { - bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); - break; - } - bfa_sm_set_state(ioim, bfa_ioim_sm_active); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_ABORT: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO is active. - */ -static void -bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc_fp(ioim->bfa, ioim->iotag); - bfa_trc_fp(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_COMP_GOOD: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, - __bfa_cb_ioim_good_comp, ioim); - break; - - case BFA_IOIM_SM_COMP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, - ioim); - break; - - case BFA_IOIM_SM_DONE: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp, - ioim); - break; - - case BFA_IOIM_SM_ABORT: - ioim->iosp->abort_explicit = BFA_TRUE; - ioim->io_cbfn = __bfa_cb_ioim_abort; - - if (bfa_ioim_send_abort(ioim)) - bfa_sm_set_state(ioim, bfa_ioim_sm_abort); - else { - bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull); - bfa_reqq_wait(ioim->bfa, ioim->reqq, - &ioim->iosp->reqq_wait); - } - break; - - case BFA_IOIM_SM_CLEANUP: - ioim->iosp->abort_explicit = BFA_FALSE; - ioim->io_cbfn = __bfa_cb_ioim_failed; - - if (bfa_ioim_send_abort(ioim)) - bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); - else { - bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); - bfa_reqq_wait(ioim->bfa, ioim->reqq, - &ioim->iosp->reqq_wait); - } - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO is being aborted, waiting for completion from firmware. 
- */ -static void -bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_COMP_GOOD: - case BFA_IOIM_SM_COMP: - case BFA_IOIM_SM_DONE: - case BFA_IOIM_SM_FREE: - break; - - case BFA_IOIM_SM_ABORT_DONE: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_ABORT_COMP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_COMP_UTAG: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); - ioim->iosp->abort_explicit = BFA_FALSE; - - if (bfa_ioim_send_abort(ioim)) - bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); - else { - bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); - bfa_reqq_wait(ioim->bfa, ioim->reqq, - &ioim->iosp->reqq_wait); - } - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO is being cleaned up (implicit abort), waiting for completion from - * firmware. - */ -static void -bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_COMP_GOOD: - case BFA_IOIM_SM_COMP: - case BFA_IOIM_SM_DONE: - case BFA_IOIM_SM_FREE: - break; - - case BFA_IOIM_SM_ABORT: - /** - * IO is already being aborted implicitly - */ - ioim->io_cbfn = __bfa_cb_ioim_abort; - break; - - case BFA_IOIM_SM_ABORT_DONE: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_ABORT_COMP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_COMP_UTAG: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - case BFA_IOIM_SM_CLEANUP: - /** - * IO can be in cleanup state already due to TM command. 2nd cleanup - * request comes from ITN offline event. 
- */ - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO is waiting for room in request CQ - */ -static void -bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_QRESUME: - bfa_sm_set_state(ioim, bfa_ioim_sm_active); - bfa_ioim_send_ioreq(ioim); - break; - - case BFA_IOIM_SM_ABORT: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * Active IO is being aborted, waiting for room in request CQ. - */ -static void -bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_QRESUME: - bfa_sm_set_state(ioim, bfa_ioim_sm_abort); - bfa_ioim_send_abort(ioim); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); - ioim->iosp->abort_explicit = BFA_FALSE; - bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); - break; - - case BFA_IOIM_SM_COMP_GOOD: - case BFA_IOIM_SM_COMP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_DONE: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, - ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * Active IO is being cleaned up, waiting for room in request CQ. 
- */ -static void -bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_QRESUME: - bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); - bfa_ioim_send_abort(ioim); - break; - - case BFA_IOIM_SM_ABORT: - /** - * IO is already being cleaned up implicitly - */ - ioim->io_cbfn = __bfa_cb_ioim_abort; - break; - - case BFA_IOIM_SM_COMP_GOOD: - case BFA_IOIM_SM_COMP: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_DONE: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - bfa_reqq_wcancel(&ioim->iosp->reqq_wait); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, - ioim); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO bfa callback is pending. - */ -static void -bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc_fp(ioim->bfa, ioim->iotag); - bfa_trc_fp(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_HCB: - bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); - bfa_ioim_free(ioim); - bfa_cb_ioim_resfree(ioim->bfa->bfad); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO bfa callback is pending. IO resource cannot be freed. - */ -static void -bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_HCB: - bfa_sm_set_state(ioim, bfa_ioim_sm_resfree); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q); - break; - - case BFA_IOIM_SM_FREE: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - -/** - * IO is completed, waiting resource free from firmware. 
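The *_qfull states above park an I/O when the request CQ has no free entry and register a wait element whose resume callback (bfa_ioim_qresume) re-drives the I/O once space frees up. The sketch below models that wait/resume shape with illustrative names; unlike the driver, the resumed callback here does not re-attempt the post, it only reports that it was woken.

#include <stdio.h>
#include <stdbool.h>

/* Wait element parked on a request queue when it is full (illustrative). */
struct reqq_wait {
	void (*resume)(void *arg);
	void  *arg;
	struct reqq_wait *next;
};

struct reqq {
	int free_slots;
	struct reqq_wait *waiters;
};

static bool reqq_post(struct reqq *q, struct reqq_wait *w)
{
	if (q->free_slots > 0) {
		q->free_slots--;
		return true;		/* the message would be queued here */
	}
	w->next = q->waiters;		/* no room: park and wait for resume */
	q->waiters = w;
	return false;
}

/* Called when the firmware consumes entries and space frees up. */
static void reqq_resume(struct reqq *q, int freed)
{
	q->free_slots += freed;
	while (q->waiters && q->free_slots > 0) {
		struct reqq_wait *w = q->waiters;

		q->waiters = w->next;
		w->resume(w->arg);	/* real driver re-posts from here */
	}
}

static void io_qresume(void *arg)
{
	printf("io %d resumed\n", *(int *)arg);
}

int main(void)
{
	struct reqq q = { .free_slots = 0 };
	int tag = 7;
	struct reqq_wait w = { io_qresume, &tag, NULL };

	if (!reqq_post(&q, &w))
		printf("queue full, io %d parked\n", tag);
	reqq_resume(&q, 1);
	return 0;
}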
- */ -static void -bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, event); - - switch (event) { - case BFA_IOIM_SM_FREE: - bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); - bfa_ioim_free(ioim); - bfa_cb_ioim_resfree(ioim->bfa->bfad); - break; - - case BFA_IOIM_SM_CLEANUP: - bfa_ioim_notify_cleanup(ioim); - break; - - case BFA_IOIM_SM_HWFAIL: - break; - - default: - bfa_sm_fault(ioim->bfa, event); - } -} - - - -/** - * bfa_ioim_private - */ - -static void -__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio); -} - -static void -__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - struct bfi_ioim_rsp_s *m; - u8 *snsinfo = NULL; - u8 sns_len = 0; - s32 residue = 0; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; - if (m->io_status == BFI_IOIM_STS_OK) { - /** - * setup sense information, if present - */ - if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION - && m->sns_len) { - sns_len = m->sns_len; - snsinfo = ioim->iosp->snsinfo; - } - - /** - * setup residue value correctly for normal completions - */ - if (m->resid_flags == FCP_RESID_UNDER) - residue = bfa_os_ntohl(m->residue); - if (m->resid_flags == FCP_RESID_OVER) { - residue = bfa_os_ntohl(m->residue); - residue = -residue; - } - } - - bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status, - m->scsi_status, sns_len, snsinfo, residue); -} - -static void -__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED, - 0, 0, NULL, 0); -} - -static void -__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV, - 0, 0, NULL, 0); -} - -static void -__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_ioim_s *ioim = cbarg; - - if (!complete) { - bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB); - return; - } - - bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio); -} - -static void -bfa_ioim_sgpg_alloced(void *cbarg) -{ - struct bfa_ioim_s *ioim = cbarg; - - ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); - list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); - bfa_ioim_sgpg_setup(ioim); - bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); -} - -/** - * Send I/O request to firmware. 
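__bfa_cb_ioim_comp above derives the SCSI data residue from the FCP response: on an underrun the byte-swapped residue is reported as a positive count, on an overrun as a negative one, and zero otherwise. A small sketch of that computation; the flag values are illustrative, not the on-the-wire FCP encodings:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl(); userspace stand-ins */

/* Illustrative flag values; the FCP response defines the real encoding. */
enum resid_flags { RESID_NONE = 0, RESID_UNDER = 1, RESID_OVER = 2 };

/* Positive residue for an underrun, negative for an overrun, 0 otherwise. */
static int32_t fcp_residue(enum resid_flags flags, uint32_t wire_residue)
{
	int32_t residue = 0;

	if (flags == RESID_UNDER)
		residue = (int32_t)ntohl(wire_residue);
	else if (flags == RESID_OVER)
		residue = -(int32_t)ntohl(wire_residue);
	return residue;
}

int main(void)
{
	printf("under: %d, over: %d\n",
	       fcp_residue(RESID_UNDER, htonl(512)),
	       fcp_residue(RESID_OVER, htonl(512)));
	return 0;
}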
- */ -static bfa_boolean_t -bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) -{ - struct bfa_itnim_s *itnim = ioim->itnim; - struct bfi_ioim_req_s *m; - static struct fcp_cmnd_s cmnd_z0 = { 0 }; - struct bfi_sge_s *sge; - u32 pgdlen = 0; - u64 addr; - struct scatterlist *sg; - struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(ioim->bfa, ioim->reqq); - if (!m) { - bfa_reqq_wait(ioim->bfa, ioim->reqq, - &ioim->iosp->reqq_wait); - return BFA_FALSE; - } - - /** - * build i/o request message next - */ - m->io_tag = bfa_os_htons(ioim->iotag); - m->rport_hdl = ioim->itnim->rport->fw_handle; - m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); - - /** - * build inline IO SG element here - */ - sge = &m->sges[0]; - if (ioim->nsges) { - sg = (struct scatterlist *)scsi_sglist(cmnd); - addr = bfa_os_sgaddr(sg_dma_address(sg)); - sge->sga = *(union bfi_addr_u *) &addr; - pgdlen = sg_dma_len(sg); - sge->sg_len = pgdlen; - sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? - BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; - bfa_sge_to_be(sge); - sge++; - } - - if (ioim->nsges > BFI_SGE_INLINE) { - sge->sga = ioim->sgpg->sgpg_pa; - } else { - sge->sga.a32.addr_lo = 0; - sge->sga.a32.addr_hi = 0; - } - sge->sg_len = pgdlen; - sge->flags = BFI_SGE_PGDLEN; - bfa_sge_to_be(sge); - - /** - * set up I/O command parameters - */ - bfa_os_assign(m->cmnd, cmnd_z0); - m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); - m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); - bfa_os_assign(m->cmnd.cdb, - *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio)); - m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio)); - - /** - * set up I/O message header - */ - switch (m->cmnd.iodir) { - case FCP_IODIR_READ: - bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa)); - bfa_stats(itnim, input_reqs); - break; - case FCP_IODIR_WRITE: - bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa)); - bfa_stats(itnim, output_reqs); - break; - case FCP_IODIR_RW: - bfa_stats(itnim, input_reqs); - bfa_stats(itnim, output_reqs); - default: - bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); - } - if (itnim->seq_rec || - (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1))) - bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); - -#ifdef IOIM_ADVANCED - m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio); - m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); - m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); - - /** - * Handle large CDB (>16 bytes). - */ - m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - - FCP_CMND_CDB_LEN) / sizeof(u32); - if (m->cmnd.addl_cdb_len) { - bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *) - bfa_cb_ioim_get_cdb(ioim->dio) + 1, - m->cmnd.addl_cdb_len * sizeof(u32)); - fcp_cmnd_fcpdl(&m->cmnd) = - bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio)); - } -#endif - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(ioim->bfa, ioim->reqq); - return BFA_TRUE; -} - -/** - * Setup any additional SG pages needed.Inline SG element is setup - * at queuing time. 
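bfa_ioim_send_ioreq and bfa_ioim_sgpg_setup above tag each scatter-gather element according to its position: plain DATA in the middle of a page, DATA_CPL when the transfer continues on another SG page, and DATA_LAST on the final element of the I/O. The sketch below reproduces just that flag selection with illustrative types:

#include <stdio.h>
#include <stdint.h>

/* Illustrative SG element flags mirroring the inline/chained split above. */
enum sge_flags { SGE_DATA, SGE_DATA_CPL, SGE_DATA_LAST, SGE_LINK };

struct sge {
	uint64_t addr;
	uint32_t len;
	enum sge_flags flags;
};

/*
 * Fill 'nsges' elements starting at position 'first_index' of a transfer
 * with 'total_sges' elements overall: every element but the last on this
 * page is DATA, the last one is DATA_CPL if the transfer continues on
 * another page, or DATA_LAST if it is the final element of the I/O.
 */
static void fill_sges(struct sge *sges, int nsges, int total_sges,
		      int first_index, const uint64_t *addrs, const uint32_t *lens)
{
	int i;

	for (i = 0; i < nsges; i++) {
		sges[i].addr = addrs[i];
		sges[i].len  = lens[i];
		if (i < nsges - 1)
			sges[i].flags = SGE_DATA;
		else if (first_index + i < total_sges - 1)
			sges[i].flags = SGE_DATA_CPL;
		else
			sges[i].flags = SGE_DATA_LAST;
	}
}

int main(void)
{
	uint64_t addrs[3] = { 0x1000, 0x2000, 0x3000 };
	uint32_t lens[3]  = { 4096, 4096, 512 };
	struct sge sges[3];
	int i;

	fill_sges(sges, 3, 3, 0, addrs, lens);
	for (i = 0; i < 3; i++)
		printf("sge %d: len %u flags %d\n", i, sges[i].len, sges[i].flags);
	return 0;
}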
- */ -static bfa_boolean_t -bfa_ioim_sge_setup(struct bfa_ioim_s *ioim) -{ - u16 nsgpgs; - - bfa_assert(ioim->nsges > BFI_SGE_INLINE); - - /** - * allocate SG pages needed - */ - nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); - if (!nsgpgs) - return BFA_TRUE; - - if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs) - != BFA_STATUS_OK) { - bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs); - return BFA_FALSE; - } - - ioim->nsgpgs = nsgpgs; - bfa_ioim_sgpg_setup(ioim); - - return BFA_TRUE; -} - -static void -bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim) -{ - int sgeid, nsges, i; - struct bfi_sge_s *sge; - struct bfa_sgpg_s *sgpg; - u32 pgcumsz; - u64 addr; - struct scatterlist *sg; - struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; - - sgeid = BFI_SGE_INLINE; - ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q); - - sg = scsi_sglist(cmnd); - sg = sg_next(sg); - - do { - sge = sgpg->sgpg->sges; - nsges = ioim->nsges - sgeid; - if (nsges > BFI_SGPG_DATA_SGES) - nsges = BFI_SGPG_DATA_SGES; - - pgcumsz = 0; - for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) { - addr = bfa_os_sgaddr(sg_dma_address(sg)); - sge->sga = *(union bfi_addr_u *) &addr; - sge->sg_len = sg_dma_len(sg); - pgcumsz += sge->sg_len; - - /** - * set flags - */ - if (i < (nsges - 1)) - sge->flags = BFI_SGE_DATA; - else if (sgeid < (ioim->nsges - 1)) - sge->flags = BFI_SGE_DATA_CPL; - else - sge->flags = BFI_SGE_DATA_LAST; - } - - sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); - - /** - * set the link element of each page - */ - if (sgeid == ioim->nsges) { - sge->flags = BFI_SGE_PGDLEN; - sge->sga.a32.addr_lo = 0; - sge->sga.a32.addr_hi = 0; - } else { - sge->flags = BFI_SGE_LINK; - sge->sga = sgpg->sgpg_pa; - } - sge->sg_len = pgcumsz; - } while (sgeid < ioim->nsges); -} - -/** - * Send I/O abort request to firmware. - */ -static bfa_boolean_t -bfa_ioim_send_abort(struct bfa_ioim_s *ioim) -{ - struct bfi_ioim_abort_req_s *m; - enum bfi_ioim_h2i msgop; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(ioim->bfa, ioim->reqq); - if (!m) - return BFA_FALSE; - - /** - * build i/o request message next - */ - if (ioim->iosp->abort_explicit) - msgop = BFI_IOIM_H2I_IOABORT_REQ; - else - msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; - - bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); - m->io_tag = bfa_os_htons(ioim->iotag); - m->abort_tag = ++ioim->abort_tag; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(ioim->bfa, ioim->reqq); - return BFA_TRUE; -} - -/** - * Call to resume any I/O requests waiting for room in request queue. - */ -static void -bfa_ioim_qresume(void *cbarg) -{ - struct bfa_ioim_s *ioim = cbarg; - - bfa_fcpim_stats(ioim->fcpim, qresumes); - bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME); -} - - -static void -bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) -{ - /** - * Move IO from itnim queue to fcpim global queue since itnim will be - * freed. - */ - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - - if (!ioim->iosp->tskim) { - if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) { - bfa_cb_dequeue(&ioim->hcb_qe); - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q); - } - bfa_itnim_iodone(ioim->itnim); - } else - bfa_tskim_iodone(ioim->iosp->tskim); -} - -/** - * or after the link comes back. 
- */ -void -bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) -{ - /** - * If path tov timer expired, failback with PATHTOV status - these - * IO requests are not normally retried by IO stack. - * - * Otherwise device cameback online and fail it with normal failed - * status so that IO stack retries these failed IO requests. - */ - if (iotov) - ioim->io_cbfn = __bfa_cb_ioim_pathtov; - else - ioim->io_cbfn = __bfa_cb_ioim_failed; - - bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - - /** - * Move IO to fcpim global queue since itnim will be - * freed. - */ - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); -} - - - -/** - * bfa_ioim_friend - */ - -/** - * Memory allocation and initialization. - */ -void -bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) -{ - struct bfa_ioim_s *ioim; - struct bfa_ioim_sp_s *iosp; - u16 i; - u8 *snsinfo; - u32 snsbufsz; - - /** - * claim memory first - */ - ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); - fcpim->ioim_arr = ioim; - bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs); - - iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo); - fcpim->ioim_sp_arr = iosp; - bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); - - /** - * Claim DMA memory for per IO sense data. - */ - snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; - fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo); - bfa_meminfo_dma_phys(minfo) += snsbufsz; - - fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo); - bfa_meminfo_dma_virt(minfo) += snsbufsz; - snsinfo = fcpim->snsbase.kva; - bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); - - /** - * Initialize ioim free queues - */ - INIT_LIST_HEAD(&fcpim->ioim_free_q); - INIT_LIST_HEAD(&fcpim->ioim_resfree_q); - INIT_LIST_HEAD(&fcpim->ioim_comp_q); - - for (i = 0; i < fcpim->num_ioim_reqs; - i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) { - /* - * initialize IOIM - */ - bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s)); - ioim->iotag = i; - ioim->bfa = fcpim->bfa; - ioim->fcpim = fcpim; - ioim->iosp = iosp; - iosp->snsinfo = snsinfo; - INIT_LIST_HEAD(&ioim->sgpg_q); - bfa_reqq_winit(&ioim->iosp->reqq_wait, - bfa_ioim_qresume, ioim); - bfa_sgpg_winit(&ioim->iosp->sgpg_wqe, - bfa_ioim_sgpg_alloced, ioim); - bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); - - list_add_tail(&ioim->qe, &fcpim->ioim_free_q); - } -} - -/** - * Driver detach time call. 
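bfa_ioim_attach above claims the I/O descriptor array and the per-I/O sense buffers out of the module's memory block and then threads every descriptor onto a free queue, so that bfa_ioim_alloc can later hand one out in constant time. A standalone sketch of that pool-initialization pattern, using an illustrative intrusive free list instead of the driver's list heads:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative I/O descriptor with an intrusive free-list link. */
struct io_desc {
	int             tag;
	struct io_desc *next;
};

struct io_pool {
	struct io_desc *arr;
	struct io_desc *free_list;
	int             num;
};

/* Carve 'num' descriptors out of a caller-provided block and free-list them. */
static void pool_attach(struct io_pool *pool, void *mem, int num)
{
	struct io_desc *d = mem;
	int i;

	pool->arr = d;
	pool->free_list = NULL;
	pool->num = num;
	for (i = num - 1; i >= 0; i--) {	/* push in reverse so tag 0 pops first */
		memset(&d[i], 0, sizeof(d[i]));
		d[i].tag = i;
		d[i].next = pool->free_list;
		pool->free_list = &d[i];
	}
}

static struct io_desc *pool_alloc(struct io_pool *pool)
{
	struct io_desc *d = pool->free_list;

	if (d)
		pool->free_list = d->next;
	return d;
}

int main(void)
{
	struct io_pool pool;
	void *mem = calloc(8, sizeof(struct io_desc));

	pool_attach(&pool, mem, 8);
	printf("first tag: %d\n", pool_alloc(&pool)->tag);
	free(mem);
	return 0;
}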
- */ -void -bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim) -{ -} - -void -bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; - struct bfa_ioim_s *ioim; - u16 iotag; - enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; - - iotag = bfa_os_ntohs(rsp->io_tag); - - ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); - bfa_assert(ioim->iotag == iotag); - - bfa_trc(ioim->bfa, ioim->iotag); - bfa_trc(ioim->bfa, rsp->io_status); - bfa_trc(ioim->bfa, rsp->reuse_io_tag); - - if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) - bfa_os_assign(ioim->iosp->comp_rspmsg, *m); - - switch (rsp->io_status) { - case BFI_IOIM_STS_OK: - bfa_fcpim_stats(fcpim, iocomp_ok); - if (rsp->reuse_io_tag == 0) - evt = BFA_IOIM_SM_DONE; - else - evt = BFA_IOIM_SM_COMP; - break; - - case BFI_IOIM_STS_TIMEDOUT: - case BFI_IOIM_STS_ABORTED: - rsp->io_status = BFI_IOIM_STS_ABORTED; - bfa_fcpim_stats(fcpim, iocomp_aborted); - if (rsp->reuse_io_tag == 0) - evt = BFA_IOIM_SM_DONE; - else - evt = BFA_IOIM_SM_COMP; - break; - - case BFI_IOIM_STS_PROTO_ERR: - bfa_fcpim_stats(fcpim, iocom_proto_err); - bfa_assert(rsp->reuse_io_tag); - evt = BFA_IOIM_SM_COMP; - break; - - case BFI_IOIM_STS_SQER_NEEDED: - bfa_fcpim_stats(fcpim, iocom_sqer_needed); - bfa_assert(rsp->reuse_io_tag == 0); - evt = BFA_IOIM_SM_SQRETRY; - break; - - case BFI_IOIM_STS_RES_FREE: - bfa_fcpim_stats(fcpim, iocom_res_free); - evt = BFA_IOIM_SM_FREE; - break; - - case BFI_IOIM_STS_HOST_ABORTED: - bfa_fcpim_stats(fcpim, iocom_hostabrts); - if (rsp->abort_tag != ioim->abort_tag) { - bfa_trc(ioim->bfa, rsp->abort_tag); - bfa_trc(ioim->bfa, ioim->abort_tag); - return; - } - - if (rsp->reuse_io_tag) - evt = BFA_IOIM_SM_ABORT_COMP; - else - evt = BFA_IOIM_SM_ABORT_DONE; - break; - - case BFI_IOIM_STS_UTAG: - bfa_fcpim_stats(fcpim, iocom_utags); - evt = BFA_IOIM_SM_COMP_UTAG; - break; - - default: - bfa_assert(0); - } - - bfa_sm_send_event(ioim, evt); -} - -void -bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m; - struct bfa_ioim_s *ioim; - u16 iotag; - - iotag = bfa_os_ntohs(rsp->io_tag); - - ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); - bfa_assert(ioim->iotag == iotag); - - bfa_trc_fp(ioim->bfa, ioim->iotag); - bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); -} - -/** - * Called by itnim to clean up IO while going offline. - */ -void -bfa_ioim_cleanup(struct bfa_ioim_s *ioim) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_fcpim_stats(ioim->fcpim, io_cleanups); - - ioim->iosp->tskim = NULL; - bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); -} - -void -bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_fcpim_stats(ioim->fcpim, io_tmaborts); - - ioim->iosp->tskim = tskim; - bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); -} - -/** - * IOC failure handling. - */ -void -bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) -{ - bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); -} - -/** - * IO offline TOV popped. Fail the pending IO. - */ -void -bfa_ioim_tov(struct bfa_ioim_s *ioim) -{ - bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV); -} - - - -/** - * bfa_ioim_api - */ - -/** - * Allocate IOIM resource for initiator mode I/O request. 
- */ -struct bfa_ioim_s * -bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, - struct bfa_itnim_s *itnim, u16 nsges) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - struct bfa_ioim_s *ioim; - - /** - * alocate IOIM resource - */ - bfa_q_deq(&fcpim->ioim_free_q, &ioim); - if (!ioim) { - bfa_fcpim_stats(fcpim, no_iotags); - return NULL; - } - - ioim->dio = dio; - ioim->itnim = itnim; - ioim->nsges = nsges; - ioim->nsgpgs = 0; - - bfa_stats(fcpim, total_ios); - bfa_stats(itnim, ios); - fcpim->ios_active++; - - list_add_tail(&ioim->qe, &itnim->io_q); - bfa_trc_fp(ioim->bfa, ioim->iotag); - - return ioim; -} - -void -bfa_ioim_free(struct bfa_ioim_s *ioim) -{ - struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; - - bfa_trc_fp(ioim->bfa, ioim->iotag); - bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit)); - - bfa_assert_fp(list_empty(&ioim->sgpg_q) - || (ioim->nsges > BFI_SGE_INLINE)); - - if (ioim->nsgpgs > 0) - bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); - - bfa_stats(ioim->itnim, io_comps); - fcpim->ios_active--; - - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &fcpim->ioim_free_q); -} - -void -bfa_ioim_start(struct bfa_ioim_s *ioim) -{ - bfa_trc_fp(ioim->bfa, ioim->iotag); - - /** - * Obtain the queue over which this request has to be issued - */ - ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? - bfa_cb_ioim_get_reqq(ioim->dio) : - bfa_itnim_get_reqq(ioim); - - bfa_sm_send_event(ioim, BFA_IOIM_SM_START); -} - -/** - * Driver I/O abort request. - */ -void -bfa_ioim_abort(struct bfa_ioim_s *ioim) -{ - bfa_trc(ioim->bfa, ioim->iotag); - bfa_fcpim_stats(ioim->fcpim, io_aborts); - bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT); -} - - diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c deleted file mode 100644 index a914ff255135..000000000000 --- a/drivers/scsi/bfa/bfa_itnim.c +++ /dev/null @@ -1,1088 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include -#include "bfa_fcpim_priv.h" - -BFA_TRC_FILE(HAL, ITNIM); - -#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ - ((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))) - -#define bfa_fcpim_additn(__itnim) \ - list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) -#define bfa_fcpim_delitn(__itnim) do { \ - bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ - list_del(&(__itnim)->qe); \ - bfa_assert(list_empty(&(__itnim)->io_q)); \ - bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \ - bfa_assert(list_empty(&(__itnim)->pending_q)); \ -} while (0) - -#define bfa_itnim_online_cb(__itnim) do { \ - if ((__itnim)->bfa->fcs) \ - bfa_cb_itnim_online((__itnim)->ditn); \ - else { \ - bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ - __bfa_cb_itnim_online, (__itnim)); \ - } \ -} while (0) - -#define bfa_itnim_offline_cb(__itnim) do { \ - if ((__itnim)->bfa->fcs) \ - bfa_cb_itnim_offline((__itnim)->ditn); \ - else { \ - bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ - __bfa_cb_itnim_offline, (__itnim)); \ - } \ -} while (0) - -#define bfa_itnim_sler_cb(__itnim) do { \ - if ((__itnim)->bfa->fcs) \ - bfa_cb_itnim_sler((__itnim)->ditn); \ - else { \ - bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \ - __bfa_cb_itnim_sler, (__itnim)); \ - } \ -} while (0) - -/* - * forward declarations - */ -static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); -static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim); -static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim); -static void bfa_itnim_cleanp_comp(void *itnim_cbarg); -static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim); -static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete); -static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim); -static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim); -static void bfa_itnim_iotov(void *itnim_arg); -static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); -static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); -static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); - -/** - * bfa_itnim_sm BFA itnim state machine - */ - - -enum bfa_itnim_event { - BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ - BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ - BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */ - BFA_ITNIM_SM_FWRSP = 4, /* firmware response */ - BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */ - BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */ - BFA_ITNIM_SM_SLER = 7, /* second level error recovery */ - BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */ - BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ -}; - -static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s 
*itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); -static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event); - -/** - * Beginning/unallocated state - no events expected. - */ -static void -bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_CREATE: - bfa_sm_set_state(itnim, bfa_itnim_sm_created); - itnim->is_online = BFA_FALSE; - bfa_fcpim_additn(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Beginning state, only online event expected. - */ -static void -bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_ONLINE: - if (bfa_itnim_send_fwcreate(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_fcpim_delitn(itnim); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Waiting for itnim create response from firmware. 
- */ -static void -bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_FWRSP: - bfa_sm_set_state(itnim, bfa_itnim_sm_online); - itnim->is_online = BFA_TRUE; - bfa_itnim_iotov_online(itnim); - bfa_itnim_online_cb(itnim); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending); - break; - - case BFA_ITNIM_SM_OFFLINE: - if (bfa_itnim_send_fwdelete(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -static void -bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_QRESUME: - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); - bfa_itnim_send_fwcreate(itnim); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_reqq_wcancel(&itnim->reqq_wait); - bfa_fcpim_delitn(itnim); - break; - - case BFA_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_itnim_sm_offline); - bfa_reqq_wcancel(&itnim->reqq_wait); - bfa_itnim_offline_cb(itnim); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - bfa_reqq_wcancel(&itnim->reqq_wait); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Waiting for itnim create response from firmware, a delete is pending. - */ -static void -bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_FWRSP: - if (bfa_itnim_send_fwdelete(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_fcpim_delitn(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Online state - normal parking state. - */ -static void -bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); - itnim->is_online = BFA_FALSE; - bfa_itnim_iotov_start(itnim); - bfa_itnim_cleanup(itnim); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); - itnim->is_online = BFA_FALSE; - bfa_itnim_cleanup(itnim); - break; - - case BFA_ITNIM_SM_SLER: - bfa_sm_set_state(itnim, bfa_itnim_sm_sler); - itnim->is_online = BFA_FALSE; - bfa_itnim_iotov_start(itnim); - bfa_itnim_sler_cb(itnim); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - itnim->is_online = BFA_FALSE; - bfa_itnim_iotov_start(itnim); - bfa_itnim_iocdisable_cleanup(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Second level error recovery need. 
- */ -static void -bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline); - bfa_itnim_cleanup(itnim); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); - bfa_itnim_cleanup(itnim); - bfa_itnim_iotov_delete(itnim); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - bfa_itnim_iocdisable_cleanup(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Going offline. Waiting for active IO cleanup. - */ -static void -bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_CLEANUP: - if (bfa_itnim_send_fwdelete(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete); - bfa_itnim_iotov_delete(itnim); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - bfa_itnim_iocdisable_cleanup(itnim); - bfa_itnim_offline_cb(itnim); - break; - - case BFA_ITNIM_SM_SLER: - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Deleting itnim. Waiting for active IO cleanup. - */ -static void -bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_CLEANUP: - if (bfa_itnim_send_fwdelete(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - bfa_itnim_iocdisable_cleanup(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. - */ -static void -bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_FWRSP: - bfa_sm_set_state(itnim, bfa_itnim_sm_offline); - bfa_itnim_offline_cb(itnim); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - bfa_itnim_offline_cb(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -static void -bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_QRESUME: - bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete); - bfa_itnim_send_fwdelete(itnim); - break; - - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - bfa_reqq_wcancel(&itnim->reqq_wait); - bfa_itnim_offline_cb(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Offline state. 
- */ -static void -bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_itnim_iotov_delete(itnim); - bfa_fcpim_delitn(itnim); - break; - - case BFA_ITNIM_SM_ONLINE: - if (bfa_itnim_send_fwcreate(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * IOC h/w failed state. - */ -static void -bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_itnim_iotov_delete(itnim); - bfa_fcpim_delitn(itnim); - break; - - case BFA_ITNIM_SM_OFFLINE: - bfa_itnim_offline_cb(itnim); - break; - - case BFA_ITNIM_SM_ONLINE: - if (bfa_itnim_send_fwcreate(itnim)) - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate); - else - bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull); - break; - - case BFA_ITNIM_SM_HWFAIL: - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -/** - * Itnim is deleted, waiting for firmware response to delete. - */ -static void -bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_FWRSP: - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_fcpim_delitn(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - -static void -bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, - enum bfa_itnim_event event) -{ - bfa_trc(itnim->bfa, itnim->rport->rport_tag); - bfa_trc(itnim->bfa, event); - - switch (event) { - case BFA_ITNIM_SM_QRESUME: - bfa_sm_set_state(itnim, bfa_itnim_sm_deleting); - bfa_itnim_send_fwdelete(itnim); - break; - - case BFA_ITNIM_SM_HWFAIL: - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - bfa_reqq_wcancel(&itnim->reqq_wait); - bfa_fcpim_delitn(itnim); - break; - - default: - bfa_sm_fault(itnim->bfa, event); - } -} - - - -/** - * bfa_itnim_private - */ - -/** - * Initiate cleanup of all IOs on an IOC failure. - */ -static void -bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) -{ - struct bfa_tskim_s *tskim; - struct bfa_ioim_s *ioim; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &itnim->tsk_q) { - tskim = (struct bfa_tskim_s *) qe; - bfa_tskim_iocdisable(tskim); - } - - list_for_each_safe(qe, qen, &itnim->io_q) { - ioim = (struct bfa_ioim_s *) qe; - bfa_ioim_iocdisable(ioim); - } - - /** - * For IO request in pending queue, we pretend an early timeout. - */ - list_for_each_safe(qe, qen, &itnim->pending_q) { - ioim = (struct bfa_ioim_s *) qe; - bfa_ioim_tov(ioim); - } - - list_for_each_safe(qe, qen, &itnim->io_cleanup_q) { - ioim = (struct bfa_ioim_s *) qe; - bfa_ioim_iocdisable(ioim); - } -} - -/** - * IO cleanup completion - */ -static void -bfa_itnim_cleanp_comp(void *itnim_cbarg) -{ - struct bfa_itnim_s *itnim = itnim_cbarg; - - bfa_stats(itnim, cleanup_comps); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); -} - -/** - * Initiate cleanup of all IOs. 
- */ -static void -bfa_itnim_cleanup(struct bfa_itnim_s *itnim) -{ - struct bfa_ioim_s *ioim; - struct bfa_tskim_s *tskim; - struct list_head *qe, *qen; - - bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim); - - list_for_each_safe(qe, qen, &itnim->io_q) { - ioim = (struct bfa_ioim_s *) qe; - - /** - * Move IO to a cleanup queue from active queue so that a later - * TM will not pickup this IO. - */ - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &itnim->io_cleanup_q); - - bfa_wc_up(&itnim->wc); - bfa_ioim_cleanup(ioim); - } - - list_for_each_safe(qe, qen, &itnim->tsk_q) { - tskim = (struct bfa_tskim_s *) qe; - bfa_wc_up(&itnim->wc); - bfa_tskim_cleanup(tskim); - } - - bfa_wc_wait(&itnim->wc); -} - -static void -__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_itnim_s *itnim = cbarg; - - if (complete) - bfa_cb_itnim_online(itnim->ditn); -} - -static void -__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_itnim_s *itnim = cbarg; - - if (complete) - bfa_cb_itnim_offline(itnim->ditn); -} - -static void -__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_itnim_s *itnim = cbarg; - - if (complete) - bfa_cb_itnim_sler(itnim->ditn); -} - -/** - * Call to resume any I/O requests waiting for room in request queue. - */ -static void -bfa_itnim_qresume(void *cbarg) -{ - struct bfa_itnim_s *itnim = cbarg; - - bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); -} - - - - -/** - * bfa_itnim_public - */ - -void -bfa_itnim_iodone(struct bfa_itnim_s *itnim) -{ - bfa_wc_down(&itnim->wc); -} - -void -bfa_itnim_tskdone(struct bfa_itnim_s *itnim) -{ - bfa_wc_down(&itnim->wc); -} - -void -bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) -{ - /** - * ITN memory - */ - *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); -} - -void -bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) -{ - struct bfa_s *bfa = fcpim->bfa; - struct bfa_itnim_s *itnim; - int i; - - INIT_LIST_HEAD(&fcpim->itnim_q); - - itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo); - fcpim->itnim_arr = itnim; - - for (i = 0; i < fcpim->num_itnims; i++, itnim++) { - bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s)); - itnim->bfa = bfa; - itnim->fcpim = fcpim; - itnim->reqq = BFA_REQQ_QOS_LO; - itnim->rport = BFA_RPORT_FROM_TAG(bfa, i); - itnim->iotov_active = BFA_FALSE; - bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim); - - INIT_LIST_HEAD(&itnim->io_q); - INIT_LIST_HEAD(&itnim->io_cleanup_q); - INIT_LIST_HEAD(&itnim->pending_q); - INIT_LIST_HEAD(&itnim->tsk_q); - INIT_LIST_HEAD(&itnim->delay_comp_q); - bfa_sm_set_state(itnim, bfa_itnim_sm_uninit); - } - - bfa_meminfo_kva(minfo) = (u8 *) itnim; -} - -void -bfa_itnim_iocdisable(struct bfa_itnim_s *itnim) -{ - bfa_stats(itnim, ioc_disabled); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL); -} - -static bfa_boolean_t -bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) -{ - struct bfi_itnim_create_req_s *m; - - itnim->msg_no++; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(itnim->bfa, itnim->reqq); - if (!m) { - bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ, - bfa_lpuid(itnim->bfa)); - m->fw_handle = itnim->rport->fw_handle; - m->class = FC_CLASS_3; - m->seq_rec = itnim->seq_rec; - m->msg_no = itnim->msg_no; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(itnim->bfa, itnim->reqq); - return BFA_TRUE; -} - 
-static bfa_boolean_t -bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) -{ - struct bfi_itnim_delete_req_s *m; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(itnim->bfa, itnim->reqq); - if (!m) { - bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ, - bfa_lpuid(itnim->bfa)); - m->fw_handle = itnim->rport->fw_handle; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(itnim->bfa, itnim->reqq); - return BFA_TRUE; -} - -/** - * Cleanup all pending failed inflight requests. - */ -static void -bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) -{ - struct bfa_ioim_s *ioim; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &itnim->delay_comp_q) { - ioim = (struct bfa_ioim_s *)qe; - bfa_ioim_delayed_comp(ioim, iotov); - } -} - -/** - * Start all pending IO requests. - */ -static void -bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) -{ - struct bfa_ioim_s *ioim; - - bfa_itnim_iotov_stop(itnim); - - /** - * Abort all inflight IO requests in the queue - */ - bfa_itnim_delayed_comp(itnim, BFA_FALSE); - - /** - * Start all pending IO requests. - */ - while (!list_empty(&itnim->pending_q)) { - bfa_q_deq(&itnim->pending_q, &ioim); - list_add_tail(&ioim->qe, &itnim->io_q); - bfa_ioim_start(ioim); - } -} - -/** - * Fail all pending IO requests - */ -static void -bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) -{ - struct bfa_ioim_s *ioim; - - /** - * Fail all inflight IO requests in the queue - */ - bfa_itnim_delayed_comp(itnim, BFA_TRUE); - - /** - * Fail any pending IO requests. - */ - while (!list_empty(&itnim->pending_q)) { - bfa_q_deq(&itnim->pending_q, &ioim); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_ioim_tov(ioim); - } -} - -/** - * IO TOV timer callback. Fail any pending IO requests. - */ -static void -bfa_itnim_iotov(void *itnim_arg) -{ - struct bfa_itnim_s *itnim = itnim_arg; - - itnim->iotov_active = BFA_FALSE; - - bfa_cb_itnim_tov_begin(itnim->ditn); - bfa_itnim_iotov_cleanup(itnim); - bfa_cb_itnim_tov(itnim->ditn); -} - -/** - * Start IO TOV timer for failing back pending IO requests in offline state. - */ -static void -bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) -{ - if (itnim->fcpim->path_tov > 0) { - - itnim->iotov_active = BFA_TRUE; - bfa_assert(bfa_itnim_hold_io(itnim)); - bfa_timer_start(itnim->bfa, &itnim->timer, - bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); - } -} - -/** - * Stop IO TOV timer. - */ -static void -bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) -{ - if (itnim->iotov_active) { - itnim->iotov_active = BFA_FALSE; - bfa_timer_stop(&itnim->timer); - } -} - -/** - * Stop IO TOV timer. - */ -static void -bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim) -{ - bfa_boolean_t pathtov_active = BFA_FALSE; - - if (itnim->iotov_active) - pathtov_active = BFA_TRUE; - - bfa_itnim_iotov_stop(itnim); - if (pathtov_active) - bfa_cb_itnim_tov_begin(itnim->ditn); - bfa_itnim_iotov_cleanup(itnim); - if (pathtov_active) - bfa_cb_itnim_tov(itnim->ditn); -} - - - -/** - * bfa_itnim_public - */ - -/** - * Itnim interrupt processing. 
- */ -void -bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - union bfi_itnim_i2h_msg_u msg; - struct bfa_itnim_s *itnim; - - bfa_trc(bfa, m->mhdr.msg_id); - - msg.msg = m; - - switch (m->mhdr.msg_id) { - case BFI_ITNIM_I2H_CREATE_RSP: - itnim = BFA_ITNIM_FROM_TAG(fcpim, - msg.create_rsp->bfa_handle); - bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); - bfa_stats(itnim, create_comps); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); - break; - - case BFI_ITNIM_I2H_DELETE_RSP: - itnim = BFA_ITNIM_FROM_TAG(fcpim, - msg.delete_rsp->bfa_handle); - bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); - bfa_stats(itnim, delete_comps); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); - break; - - case BFI_ITNIM_I2H_SLER_EVENT: - itnim = BFA_ITNIM_FROM_TAG(fcpim, - msg.sler_event->bfa_handle); - bfa_stats(itnim, sler_events); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER); - break; - - default: - bfa_trc(bfa, m->mhdr.msg_id); - bfa_assert(0); - } -} - - - -/** - * bfa_itnim_api - */ - -struct bfa_itnim_s * -bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - struct bfa_itnim_s *itnim; - - itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); - bfa_assert(itnim->rport == rport); - - itnim->ditn = ditn; - - bfa_stats(itnim, creates); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE); - - return itnim; -} - -void -bfa_itnim_delete(struct bfa_itnim_s *itnim) -{ - bfa_stats(itnim, deletes); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE); -} - -void -bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec) -{ - itnim->seq_rec = seq_rec; - bfa_stats(itnim, onlines); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE); -} - -void -bfa_itnim_offline(struct bfa_itnim_s *itnim) -{ - bfa_stats(itnim, offlines); - bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); -} - -/** - * Return true if itnim is considered offline for holding off IO request. - * IO is not held if itnim is being deleted. - */ -bfa_boolean_t -bfa_itnim_hold_io(struct bfa_itnim_s *itnim) -{ - return - itnim->fcpim->path_tov && itnim->iotov_active && - (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) || - bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) || - bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) || - bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) || - bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) || - bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)) - ; -} - -void -bfa_itnim_get_stats(struct bfa_itnim_s *itnim, - struct bfa_itnim_hal_stats_s *stats) -{ - *stats = itnim->stats; -} - -void -bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) -{ - bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats)); -} - - diff --git a/drivers/scsi/bfa/bfa_log.c b/drivers/scsi/bfa/bfa_log.c deleted file mode 100644 index e7514016c9c6..000000000000 --- a/drivers/scsi/bfa/bfa_log.c +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License for more details. - */ - -/** - * bfa_log.c BFA log library - */ - -#include -#include - -/* - * global log info structure - */ -struct bfa_log_info_s { - u32 start_idx; /* start index for a module */ - u32 total_count; /* total count for a module */ - enum bfa_log_severity level; /* global log level */ - bfa_log_cb_t cbfn; /* callback function */ -}; - -static struct bfa_log_info_s bfa_log_info[BFA_LOG_MODULE_ID_MAX + 1]; -static u32 bfa_log_msg_total_count; -static int bfa_log_initialized; - -static char *bfa_log_severity[] = - { "[none]", "[critical]", "[error]", "[warn]", "[info]", "" }; - -/** - * BFA log library initialization - * - * The log library initialization includes the following, - * - set log instance name and callback function - * - read the message array generated from xml files - * - calculate start index for each module - * - calculate message count for each module - * - perform error checking - * - * @param[in] log_mod - log module info - * @param[in] instance_name - instance name - * @param[in] cbfn - callback function - * - * It return 0 on success, or -1 on failure - */ -int -bfa_log_init(struct bfa_log_mod_s *log_mod, char *instance_name, - bfa_log_cb_t cbfn) -{ - struct bfa_log_msgdef_s *msg; - u32 pre_mod_id = 0; - u32 cur_mod_id = 0; - u32 i, pre_idx, idx, msg_id; - - /* - * set instance name - */ - if (log_mod) { - strncpy(log_mod->instance_info, instance_name, - sizeof(log_mod->instance_info)); - log_mod->cbfn = cbfn; - for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) - log_mod->log_level[i] = BFA_LOG_WARNING; - } - - if (bfa_log_initialized) - return 0; - - for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) { - bfa_log_info[i].start_idx = 0; - bfa_log_info[i].total_count = 0; - bfa_log_info[i].level = BFA_LOG_WARNING; - bfa_log_info[i].cbfn = cbfn; - } - - pre_idx = 0; - idx = 0; - msg = bfa_log_msg_array; - msg_id = BFA_LOG_GET_MSG_ID(msg); - pre_mod_id = BFA_LOG_GET_MOD_ID(msg_id); - while (msg_id != 0) { - cur_mod_id = BFA_LOG_GET_MOD_ID(msg_id); - - if (cur_mod_id > BFA_LOG_MODULE_ID_MAX) { - cbfn(log_mod, msg_id, - "%s%s log: module id %u out of range\n", - BFA_LOG_CAT_NAME, - bfa_log_severity[BFA_LOG_ERROR], - cur_mod_id); - return -1; - } - - if (pre_mod_id > BFA_LOG_MODULE_ID_MAX) { - cbfn(log_mod, msg_id, - "%s%s log: module id %u out of range\n", - BFA_LOG_CAT_NAME, - bfa_log_severity[BFA_LOG_ERROR], - pre_mod_id); - return -1; - } - - if (cur_mod_id != pre_mod_id) { - bfa_log_info[pre_mod_id].start_idx = pre_idx; - bfa_log_info[pre_mod_id].total_count = idx - pre_idx; - pre_mod_id = cur_mod_id; - pre_idx = idx; - } - - idx++; - msg++; - msg_id = BFA_LOG_GET_MSG_ID(msg); - } - - bfa_log_info[cur_mod_id].start_idx = pre_idx; - bfa_log_info[cur_mod_id].total_count = idx - pre_idx; - bfa_log_msg_total_count = idx; - - cbfn(log_mod, msg_id, "%s%s log: init OK, msg total count %u\n", - BFA_LOG_CAT_NAME, - bfa_log_severity[BFA_LOG_INFO], bfa_log_msg_total_count); - - bfa_log_initialized = 1; - - return 0; -} - -/** - * BFA log set log level for a module - * - * @param[in] log_mod - log module info - * @param[in] mod_id - module id - * @param[in] log_level - log severity level - * - * It return BFA_STATUS_OK on success, or > 0 on failure - */ -bfa_status_t -bfa_log_set_level(struct bfa_log_mod_s *log_mod, int mod_id, - enum bfa_log_severity log_level) -{ - if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX) - return BFA_STATUS_EINVAL; - - if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX) - return 
BFA_STATUS_EINVAL; - - if (log_mod) - log_mod->log_level[mod_id] = log_level; - else - bfa_log_info[mod_id].level = log_level; - - return BFA_STATUS_OK; -} - -/** - * BFA log set log level for all modules - * - * @param[in] log_mod - log module info - * @param[in] log_level - log severity level - * - * It return BFA_STATUS_OK on success, or > 0 on failure - */ -bfa_status_t -bfa_log_set_level_all(struct bfa_log_mod_s *log_mod, - enum bfa_log_severity log_level) -{ - int mod_id = BFA_LOG_UNUSED_ID + 1; - - if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX) - return BFA_STATUS_EINVAL; - - if (log_mod) { - for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++) - log_mod->log_level[mod_id] = log_level; - } else { - for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++) - bfa_log_info[mod_id].level = log_level; - } - - return BFA_STATUS_OK; -} - -/** - * BFA log set log level for all aen sub-modules - * - * @param[in] log_mod - log module info - * @param[in] log_level - log severity level - * - * It return BFA_STATUS_OK on success, or > 0 on failure - */ -bfa_status_t -bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod, - enum bfa_log_severity log_level) -{ - int mod_id = BFA_LOG_AEN_MIN + 1; - - if (log_mod) { - for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++) - log_mod->log_level[mod_id] = log_level; - } else { - for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++) - bfa_log_info[mod_id].level = log_level; - } - - return BFA_STATUS_OK; -} - -/** - * BFA log get log level for a module - * - * @param[in] log_mod - log module info - * @param[in] mod_id - module id - * - * It returns log level or -1 on error - */ -enum bfa_log_severity -bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id) -{ - if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX) - return BFA_LOG_INVALID; - - if (log_mod) - return log_mod->log_level[mod_id]; - else - return bfa_log_info[mod_id].level; -} - -enum bfa_log_severity -bfa_log_get_msg_level(struct bfa_log_mod_s *log_mod, u32 msg_id) -{ - struct bfa_log_msgdef_s *msg; - u32 mod = BFA_LOG_GET_MOD_ID(msg_id); - u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1; - - if (!bfa_log_initialized) - return BFA_LOG_INVALID; - - if (mod > BFA_LOG_MODULE_ID_MAX) - return BFA_LOG_INVALID; - - if (idx >= bfa_log_info[mod].total_count) { - bfa_log_info[mod].cbfn(log_mod, msg_id, - "%s%s log: inconsistent idx %u vs. total count %u\n", - BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx, - bfa_log_info[mod].total_count); - return BFA_LOG_INVALID; - } - - msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx; - if (msg_id != BFA_LOG_GET_MSG_ID(msg)) { - bfa_log_info[mod].cbfn(log_mod, msg_id, - "%s%s log: inconsistent msg id %u array msg id %u\n", - BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], - msg_id, BFA_LOG_GET_MSG_ID(msg)); - return BFA_LOG_INVALID; - } - - return BFA_LOG_GET_SEVERITY(msg); -} - -/** - * BFA log message handling - * - * BFA log message handling finds the message based on message id and prints - * out the message based on its format and arguments. It also does prefix - * the severity etc. - * - * @param[in] log_mod - log module info - * @param[in] msg_id - message id - * @param[in] ... - message arguments - * - * It return 0 on success, or -1 on errors - */ -int -bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...) 
-{ - va_list ap; - char buf[256]; - struct bfa_log_msgdef_s *msg; - int log_level; - u32 mod = BFA_LOG_GET_MOD_ID(msg_id); - u32 idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1; - - if (!bfa_log_initialized) - return -1; - - if (mod > BFA_LOG_MODULE_ID_MAX) - return -1; - - if (idx >= bfa_log_info[mod].total_count) { - bfa_log_info[mod]. - cbfn - (log_mod, msg_id, - "%s%s log: inconsistent idx %u vs. total count %u\n", - BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx, - bfa_log_info[mod].total_count); - return -1; - } - - msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx; - if (msg_id != BFA_LOG_GET_MSG_ID(msg)) { - bfa_log_info[mod]. - cbfn - (log_mod, msg_id, - "%s%s log: inconsistent msg id %u array msg id %u\n", - BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], - msg_id, BFA_LOG_GET_MSG_ID(msg)); - return -1; - } - - log_level = log_mod ? log_mod->log_level[mod] : bfa_log_info[mod].level; - if ((BFA_LOG_GET_SEVERITY(msg) > log_level) && - (msg->attributes != BFA_LOG_ATTR_NONE)) - return 0; - - va_start(ap, msg_id); - bfa_os_vsprintf(buf, BFA_LOG_GET_MSG_FMT_STRING(msg), ap); - va_end(ap); - - if (log_mod) - log_mod->cbfn(log_mod, msg_id, "%s[%s]%s%s %s: %s\n", - BFA_LOG_CAT_NAME, log_mod->instance_info, - bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)], - (msg->attributes & BFA_LOG_ATTR_AUDIT) - ? " (audit) " : "", msg->msg_value, buf); - else - bfa_log_info[mod].cbfn(log_mod, msg_id, "%s%s%s %s: %s\n", - BFA_LOG_CAT_NAME, - bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)], - (msg->attributes & BFA_LOG_ATTR_AUDIT) ? - " (audit) " : "", msg->msg_value, buf); - - return 0; -} - diff --git a/drivers/scsi/bfa/bfa_log_module.c b/drivers/scsi/bfa/bfa_log_module.c deleted file mode 100644 index cf577ef7cb97..000000000000 --- a/drivers/scsi/bfa/bfa_log_module.c +++ /dev/null @@ -1,537 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct bfa_log_msgdef_s bfa_log_msg_array[] = { - - -/* messages define for BFA_AEN_CAT_ADAPTER Module */ -{BFA_AEN_ADAPTER_ADD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ADAPTER_ADD", - "New adapter found: SN = %s, base port WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_ADAPTER_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_ADAPTER_REMOVE", - "Adapter removed: SN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for BFA_AEN_CAT_AUDIT Module */ -{BFA_AEN_AUDIT_AUTH_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_ENABLE", - "Authentication enabled for base port: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_AUDIT_AUTH_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_DISABLE", - "Authentication disabled for base port: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for BFA_AEN_CAT_ETHPORT Module */ -{BFA_AEN_ETHPORT_LINKUP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ETHPORT_LINKUP", - "Base port ethernet linkup: mac = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_ETHPORT_LINKDOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ETHPORT_LINKDOWN", - "Base port ethernet linkdown: mac = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_ETHPORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ETHPORT_ENABLE", - "Base port ethernet interface enabled: mac = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_ETHPORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ETHPORT_DISABLE", - "Base port ethernet interface disabled: mac = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for BFA_AEN_CAT_IOC Module */ -{BFA_AEN_IOC_HBGOOD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_IOC_HBGOOD", - "Heart Beat of IOC %d is good.", - ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_IOC_HBFAIL, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_CRITICAL, - "BFA_AEN_IOC_HBFAIL", - "Heart Beat of IOC %d has failed.", - ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_IOC_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_IOC_ENABLE", - "IOC %d is enabled.", - ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_IOC_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_IOC_DISABLE", - "IOC %d is disabled.", - ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_IOC_FWMISMATCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWMISMATCH", - "Running firmware version is incompatible with the driver version.", - (0), 0}, - -{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR", - "Link initialization failed due to firmware configuration read error:" - " WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR", - "Unsupported switch vendor. Link initialization failed: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN", - "Invalid NWWN. 
Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.", - (0), 0}, - -{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN", - "Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.", - (0), 0}, - - - - -/* messages define for BFA_AEN_CAT_ITNIM Module */ -{BFA_AEN_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ITNIM_ONLINE", - "Target (WWN = %s) is online for initiator (WWN = %s).", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_ITNIM_OFFLINE", - "Target (WWN = %s) offlined by initiator (WWN = %s).", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_ITNIM_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "BFA_AEN_ITNIM_DISCONNECT", - "Target (WWN = %s) connectivity lost for initiator (WWN = %s).", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - - - - -/* messages define for BFA_AEN_CAT_LPORT Module */ -{BFA_AEN_LPORT_NEW, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_LPORT_NEW", - "New logical port created: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_DELETE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_LPORT_DELETE", - "Logical port deleted: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_LPORT_ONLINE", - "Logical port online: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_LPORT_OFFLINE", - "Logical port taken offline: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "BFA_AEN_LPORT_DISCONNECT", - "Logical port lost fabric connectivity: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_NEW_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_LPORT_NEW_PROP", - "New virtual port created using proprietary interface: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_DELETE_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_PROP", - "Virtual port deleted using proprietary interface: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_NEW_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "BFA_AEN_LPORT_NEW_STANDARD", - "New virtual port created using standard interface: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_DELETE_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_STANDARD", - "Virtual port deleted using standard interface: WWN = %s, Role = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_LPORT_NPIV_DUP_WWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_DUP_WWN", - "Virtual port login failed. 
Duplicate WWN = %s reported by fabric.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_LPORT_NPIV_FABRIC_MAX, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_FABRIC_MAX", - "Virtual port (WWN = %s) login failed. Max NPIV ports already exist in" - " fabric/fport.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_LPORT_NPIV_UNKNOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_UNKNOWN", - "Virtual port (WWN = %s) login failed.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for BFA_AEN_CAT_PORT Module */ -{BFA_AEN_PORT_ONLINE, BFA_LOG_ATTR_NONE, BFA_LOG_INFO, "BFA_AEN_PORT_ONLINE", - "Base port online: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING, - "BFA_AEN_PORT_OFFLINE", - "Base port offline: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_RLIR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_PORT_RLIR", - "RLIR event not supported.", - (0), 0}, - -{BFA_AEN_PORT_SFP_INSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_PORT_SFP_INSERT", - "New SFP found: WWN/MAC = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_SFP_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_REMOVE", - "SFP removed: WWN/MAC = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_SFP_POM, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING, - "BFA_AEN_PORT_SFP_POM", - "SFP POM level to %s: WWN/MAC = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_PORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_PORT_ENABLE", - "Base port enabled: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_PORT_DISABLE", - "Base port disabled: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_AUTH_ON, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_PORT_AUTH_ON", - "Authentication successful for base port: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_AUTH_OFF, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR, - "BFA_AEN_PORT_AUTH_OFF", - "Authentication unsuccessful for base port: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR, - "BFA_AEN_PORT_DISCONNECT", - "Base port (WWN = %s) lost fabric connectivity.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_QOS_NEG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING, - "BFA_AEN_PORT_QOS_NEG", - "QOS negotiation failed for base port: WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_FABRIC_NAME_CHANGE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_PORT_FABRIC_NAME_CHANGE", - "Base port WWN = %s, Fabric WWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_PORT_SFP_ACCESS_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_ACCESS_ERROR", - "SFP access error: WWN/MAC = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_AEN_PORT_SFP_UNSUPPORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_UNSUPPORT", - "Unsupported SFP found: WWN/MAC = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for BFA_AEN_CAT_RPORT Module */ 
-{BFA_AEN_RPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_RPORT_ONLINE", - "Remote port (WWN = %s) online for logical port (WWN = %s).", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_RPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_RPORT_OFFLINE", - "Remote port (WWN = %s) offlined by logical port (WWN = %s).", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_RPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "BFA_AEN_RPORT_DISCONNECT", - "Remote port (WWN = %s) connectivity lost for logical port (WWN = %s).", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2}, - -{BFA_AEN_RPORT_QOS_PRIO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_RPORT_QOS_PRIO", - "QOS priority changed to %s: RPWWN = %s and LPWWN = %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | - (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3}, - -{BFA_AEN_RPORT_QOS_FLOWID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "BFA_AEN_RPORT_QOS_FLOWID", - "QOS flow ID changed to %d: RPWWN = %s and LPWWN = %s.", - ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | - (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3}, - - - - -/* messages define for FCS Module */ -{BFA_LOG_FCS_FABRIC_NOSWITCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "FCS_FABRIC_NOSWITCH", - "No switched fabric presence is detected.", - (0), 0}, - -{BFA_LOG_FCS_FABRIC_ISOLATED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "FCS_FABRIC_ISOLATED", - "Port is isolated due to VF_ID mismatch. PWWN: %s, Port VF_ID: %04x and" - " switch port VF_ID: %04x.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_X << BFA_LOG_ARG1) | - (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3}, - - - - -/* messages define for HAL Module */ -{BFA_LOG_HAL_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR, - "HAL_ASSERT", - "Assertion failure: %s:%d: %s", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) | - (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3}, - -{BFA_LOG_HAL_HEARTBEAT_FAILURE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_CRITICAL, "HAL_HEARTBEAT_FAILURE", - "Firmware heartbeat failure at %d", - ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_HAL_FCPIM_PARM_INVALID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "HAL_FCPIM_PARM_INVALID", - "Driver configuration %s value %d is invalid. 
Value should be within" - " %d and %d.", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) | - (BFA_LOG_D << BFA_LOG_ARG2) | (BFA_LOG_D << BFA_LOG_ARG3) | 0), 4}, - -{BFA_LOG_HAL_SM_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR, - "HAL_SM_ASSERT", - "SM Assertion failure: %s:%d: event = %d", - ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) | - (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3}, - -{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "HAL_DRIVER_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_HAL_DRIVER_CONFIG_ERROR, - BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "HAL_DRIVER_CONFIG_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "HAL_MBOX_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for LINUX Module */ -{BFA_LOG_LINUX_DEVICE_CLAIMED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_DEVICE_CLAIMED", - "bfa device at %s claimed.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_HASH_INIT_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_HASH_INIT_FAILED", - "Hash table initialization failure for the port %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_SYSFS_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_SYSFS_FAILED", - "sysfs file creation failure for the port %s.", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_MEM_ALLOC_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_MEM_ALLOC_FAILED", - "Memory allocation failed: %s. ", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED, - BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_DRIVER_REGISTRATION_FAILED", - "%s. 
", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_ITNIM_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_ITNIM_FREE", - "scsi%d: FCID: %s WWPN: %s", - ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | - (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3}, - -{BFA_LOG_LINUX_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_ITNIM_ONLINE", - "Target: %d:0:%d FCID: %s WWPN: %s", - ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) | - (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4}, - -{BFA_LOG_LINUX_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_ITNIM_OFFLINE", - "Target: %d:0:%d FCID: %s WWPN: %s", - ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) | - (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4}, - -{BFA_LOG_LINUX_SCSI_HOST_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_SCSI_HOST_FREE", - "Free scsi%d", - ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_SCSI_ABORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_SCSI_ABORT", - "scsi%d: abort cmnd %p, iotag %x", - ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) | - (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3}, - -{BFA_LOG_LINUX_SCSI_ABORT_COMP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_SCSI_ABORT_COMP", - "scsi%d: complete abort 0x%p, iotag 0x%x", - ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) | - (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3}, - -{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR, - BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_DRIVER_CONFIG_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_BNA_STATE_MACHINE, - BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_BNA_STATE_MACHINE", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_IOC_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR, - BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_RESOURCE_ALLOC_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_RING_BUFFER_ERROR, - BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO, - "LINUX_RING_BUFFER_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_ERROR, "LINUX_DRIVER_ERROR", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_DRIVER_INFO", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_DRIVER_DIAG", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - -{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "LINUX_DRIVER_AEN", - "%s", - ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1}, - - - - -/* messages define for WDRV Module */ -{BFA_LOG_WDRV_IOC_INIT_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "WDRV_IOC_INIT_ERROR", - "IOC initialization has failed.", - (0), 0}, - -{BFA_LOG_WDRV_IOC_INTERNAL_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "WDRV_IOC_INTERNAL_ERROR", - "IOC internal error. ", - (0), 0}, - -{BFA_LOG_WDRV_IOC_START_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "WDRV_IOC_START_ERROR", - "IOC could not be started. 
", - (0), 0}, - -{BFA_LOG_WDRV_IOC_STOP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "WDRV_IOC_STOP_ERROR", - "IOC could not be stopped. ", - (0), 0}, - -{BFA_LOG_WDRV_INSUFFICIENT_RESOURCES, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "WDRV_INSUFFICIENT_RESOURCES", - "Insufficient memory. ", - (0), 0}, - -{BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, - BFA_LOG_INFO, "WDRV_BASE_ADDRESS_MAP_ERROR", - "Unable to map the IOC onto the system address space. ", - (0), 0}, - - -{0, 0, 0, "", "", 0, 0}, -}; diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c deleted file mode 100644 index acabb44f092f..000000000000 --- a/drivers/scsi/bfa/bfa_lps.c +++ /dev/null @@ -1,892 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include - -BFA_TRC_FILE(HAL, LPS); -BFA_MODULE(lps); - -#define BFA_LPS_MIN_LPORTS (1) -#define BFA_LPS_MAX_LPORTS (256) - -/* - * Maximum Vports supported per physical port or vf. - */ -#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 -#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 - -/** - * forward declarations - */ -static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, - u32 *dm_len); -static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, - struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, - struct bfa_pcidev_s *pcidev); -static void bfa_lps_detach(struct bfa_s *bfa); -static void bfa_lps_start(struct bfa_s *bfa); -static void bfa_lps_stop(struct bfa_s *bfa); -static void bfa_lps_iocdisable(struct bfa_s *bfa); -static void bfa_lps_login_rsp(struct bfa_s *bfa, - struct bfi_lps_login_rsp_s *rsp); -static void bfa_lps_logout_rsp(struct bfa_s *bfa, - struct bfi_lps_logout_rsp_s *rsp); -static void bfa_lps_reqq_resume(void *lps_arg); -static void bfa_lps_free(struct bfa_lps_s *lps); -static void bfa_lps_send_login(struct bfa_lps_s *lps); -static void bfa_lps_send_logout(struct bfa_lps_s *lps); -static void bfa_lps_login_comp(struct bfa_lps_s *lps); -static void bfa_lps_logout_comp(struct bfa_lps_s *lps); -static void bfa_lps_cvl_event(struct bfa_lps_s *lps); - -/** - * lps_pvt BFA LPS private functions - */ - -enum bfa_lps_event { - BFA_LPS_SM_LOGIN = 1, /* login request from user */ - BFA_LPS_SM_LOGOUT = 2, /* logout request from user */ - BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */ - BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ - BFA_LPS_SM_DELETE = 5, /* lps delete from user */ - BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ - BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ -}; - -static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); -static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); -static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, - enum bfa_lps_event event); -static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); -static void bfa_lps_sm_logout(struct bfa_lps_s 
*lps, enum bfa_lps_event event); -static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, - enum bfa_lps_event event); - -/** - * Init state -- no login - */ -static void -bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) -{ - bfa_trc(lps->bfa, lps->lp_tag); - bfa_trc(lps->bfa, event); - - switch (event) { - case BFA_LPS_SM_LOGIN: - if (bfa_reqq_full(lps->bfa, lps->reqq)) { - bfa_sm_set_state(lps, bfa_lps_sm_loginwait); - bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); - } else { - bfa_sm_set_state(lps, bfa_lps_sm_login); - bfa_lps_send_login(lps); - } - if (lps->fdisc) - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGIN, 0, "FDISC Request"); - else - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGIN, 0, "FLOGI Request"); - break; - - case BFA_LPS_SM_LOGOUT: - bfa_lps_logout_comp(lps); - break; - - case BFA_LPS_SM_DELETE: - bfa_lps_free(lps); - break; - - case BFA_LPS_SM_RX_CVL: - case BFA_LPS_SM_OFFLINE: - break; - - case BFA_LPS_SM_FWRSP: - /* Could happen when fabric detects loopback and discards - * the lps request. Fw will eventually sent out the timeout - * Just ignore - */ - break; - - default: - bfa_sm_fault(lps->bfa, event); - } -} - -/** - * login is in progress -- awaiting response from firmware - */ -static void -bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) -{ - bfa_trc(lps->bfa, lps->lp_tag); - bfa_trc(lps->bfa, event); - - switch (event) { - case BFA_LPS_SM_FWRSP: - if (lps->status == BFA_STATUS_OK) { - bfa_sm_set_state(lps, bfa_lps_sm_online); - if (lps->fdisc) - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGIN, 0, "FDISC Accept"); - else - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); - } else { - bfa_sm_set_state(lps, bfa_lps_sm_init); - if (lps->fdisc) - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGIN, 0, - "FDISC Fail (RJT or timeout)"); - else - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGIN, 0, - "FLOGI Fail (RJT or timeout)"); - } - bfa_lps_login_comp(lps); - break; - - case BFA_LPS_SM_OFFLINE: - bfa_sm_set_state(lps, bfa_lps_sm_init); - break; - - default: - bfa_sm_fault(lps->bfa, event); - } -} - -/** - * login pending - awaiting space in request queue - */ -static void -bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) -{ - bfa_trc(lps->bfa, lps->lp_tag); - bfa_trc(lps->bfa, event); - - switch (event) { - case BFA_LPS_SM_RESUME: - bfa_sm_set_state(lps, bfa_lps_sm_login); - break; - - case BFA_LPS_SM_OFFLINE: - bfa_sm_set_state(lps, bfa_lps_sm_init); - bfa_reqq_wcancel(&lps->wqe); - break; - - case BFA_LPS_SM_RX_CVL: - /* - * Login was not even sent out; so when getting out - * of this state, it will appear like a login retry - * after Clear virtual link - */ - break; - - default: - bfa_sm_fault(lps->bfa, event); - } -} - -/** - * login complete - */ -static void -bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) -{ - bfa_trc(lps->bfa, lps->lp_tag); - bfa_trc(lps->bfa, event); - - switch (event) { - case BFA_LPS_SM_LOGOUT: - if (bfa_reqq_full(lps->bfa, lps->reqq)) { - bfa_sm_set_state(lps, bfa_lps_sm_logowait); - bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); - } else { - bfa_sm_set_state(lps, bfa_lps_sm_logout); - bfa_lps_send_logout(lps); - } - bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_LOGO, 0, "Logout"); - break; - - case BFA_LPS_SM_RX_CVL: - bfa_sm_set_state(lps, bfa_lps_sm_init); - - /* Let the vport module know about this event */ - bfa_lps_cvl_event(lps); - 
bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, - BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); - break; - - case BFA_LPS_SM_OFFLINE: - case BFA_LPS_SM_DELETE: - bfa_sm_set_state(lps, bfa_lps_sm_init); - break; - - default: - bfa_sm_fault(lps->bfa, event); - } -} - -/** - * logout in progress - awaiting firmware response - */ -static void -bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) -{ - bfa_trc(lps->bfa, lps->lp_tag); - bfa_trc(lps->bfa, event); - - switch (event) { - case BFA_LPS_SM_FWRSP: - bfa_sm_set_state(lps, bfa_lps_sm_init); - bfa_lps_logout_comp(lps); - break; - - case BFA_LPS_SM_OFFLINE: - bfa_sm_set_state(lps, bfa_lps_sm_init); - break; - - default: - bfa_sm_fault(lps->bfa, event); - } -} - -/** - * logout pending -- awaiting space in request queue - */ -static void -bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) -{ - bfa_trc(lps->bfa, lps->lp_tag); - bfa_trc(lps->bfa, event); - - switch (event) { - case BFA_LPS_SM_RESUME: - bfa_sm_set_state(lps, bfa_lps_sm_logout); - bfa_lps_send_logout(lps); - break; - - case BFA_LPS_SM_OFFLINE: - bfa_sm_set_state(lps, bfa_lps_sm_init); - bfa_reqq_wcancel(&lps->wqe); - break; - - default: - bfa_sm_fault(lps->bfa, event); - } -} - - - -/** - * lps_pvt BFA LPS private functions - */ - -/** - * return memory requirement - */ -static void -bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) -{ - if (cfg->drvcfg.min_cfg) - *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS; - else - *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; -} - -/** - * bfa module attach at initialization time - */ -static void -bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps; - int i; - - bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s)); - mod->num_lps = BFA_LPS_MAX_LPORTS; - if (cfg->drvcfg.min_cfg) - mod->num_lps = BFA_LPS_MIN_LPORTS; - else - mod->num_lps = BFA_LPS_MAX_LPORTS; - mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo); - - bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s); - - INIT_LIST_HEAD(&mod->lps_free_q); - INIT_LIST_HEAD(&mod->lps_active_q); - - for (i = 0; i < mod->num_lps; i++, lps++) { - lps->bfa = bfa; - lps->lp_tag = (u8) i; - lps->reqq = BFA_REQQ_LPS; - bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); - list_add_tail(&lps->qe, &mod->lps_free_q); - } -} - -static void -bfa_lps_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_lps_start(struct bfa_s *bfa) -{ -} - -static void -bfa_lps_stop(struct bfa_s *bfa) -{ -} - -/** - * IOC in disabled state -- consider all lps offline - */ -static void -bfa_lps_iocdisable(struct bfa_s *bfa) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &mod->lps_active_q) { - lps = (struct bfa_lps_s *) qe; - bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); - } -} - -/** - * Firmware login response - */ -static void -bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps; - - bfa_assert(rsp->lp_tag < mod->num_lps); - lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); - - lps->status = rsp->status; - switch (rsp->status) { - case BFA_STATUS_OK: - lps->fport = rsp->f_port; - lps->npiv_en = rsp->npiv_en; - lps->lp_pid = rsp->lp_pid; - lps->pr_bbcred = 
bfa_os_ntohs(rsp->bb_credit); - lps->pr_pwwn = rsp->port_name; - lps->pr_nwwn = rsp->node_name; - lps->auth_req = rsp->auth_req; - lps->lp_mac = rsp->lp_mac; - lps->brcd_switch = rsp->brcd_switch; - lps->fcf_mac = rsp->fcf_mac; - - break; - - case BFA_STATUS_FABRIC_RJT: - lps->lsrjt_rsn = rsp->lsrjt_rsn; - lps->lsrjt_expl = rsp->lsrjt_expl; - - break; - - case BFA_STATUS_EPROTOCOL: - lps->ext_status = rsp->ext_status; - - break; - - default: - /* Nothing to do with other status */ - break; - } - - bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); -} - -/** - * Firmware logout response - */ -static void -bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps; - - bfa_assert(rsp->lp_tag < mod->num_lps); - lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); - - bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); -} - -/** - * Firmware received a Clear virtual link request (for FCoE) - */ -static void -bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps; - - lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); - - bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); -} - -/** - * Space is available in request queue, resume queueing request to firmware. - */ -static void -bfa_lps_reqq_resume(void *lps_arg) -{ - struct bfa_lps_s *lps = lps_arg; - - bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); -} - -/** - * lps is freed -- triggered by vport delete - */ -static void -bfa_lps_free(struct bfa_lps_s *lps) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); - - list_del(&lps->qe); - list_add_tail(&lps->qe, &mod->lps_free_q); -} - -/** - * send login request to firmware - */ -static void -bfa_lps_send_login(struct bfa_lps_s *lps) -{ - struct bfi_lps_login_req_s *m; - - m = bfa_reqq_next(lps->bfa, lps->reqq); - bfa_assert(m); - - bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, - bfa_lpuid(lps->bfa)); - - m->lp_tag = lps->lp_tag; - m->alpa = lps->alpa; - m->pdu_size = bfa_os_htons(lps->pdusz); - m->pwwn = lps->pwwn; - m->nwwn = lps->nwwn; - m->fdisc = lps->fdisc; - m->auth_en = lps->auth_en; - - bfa_reqq_produce(lps->bfa, lps->reqq); -} - -/** - * send logout request to firmware - */ -static void -bfa_lps_send_logout(struct bfa_lps_s *lps) -{ - struct bfi_lps_logout_req_s *m; - - m = bfa_reqq_next(lps->bfa, lps->reqq); - bfa_assert(m); - - bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, - bfa_lpuid(lps->bfa)); - - m->lp_tag = lps->lp_tag; - m->port_name = lps->pwwn; - bfa_reqq_produce(lps->bfa, lps->reqq); -} - -/** - * Indirect login completion handler for non-fcs - */ -static void -bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete) -{ - struct bfa_lps_s *lps = arg; - - if (!complete) - return; - - if (lps->fdisc) - bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); - else - bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); -} - -/** - * Login completion handler -- direct call for fcs, queue for others - */ -static void -bfa_lps_login_comp(struct bfa_lps_s *lps) -{ - if (!lps->bfa->fcs) { - bfa_cb_queue(lps->bfa, &lps->hcb_qe, - bfa_lps_login_comp_cb, lps); - return; - } - - if (lps->fdisc) - bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); - else - bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); -} - -/** - * Indirect logout completion handler for non-fcs - */ -static void -bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete) -{ - struct bfa_lps_s *lps = arg; - - if (!complete) - 
return; - - if (lps->fdisc) - bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); - else - bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); -} - -/** - * Logout completion handler -- direct call for fcs, queue for others - */ -static void -bfa_lps_logout_comp(struct bfa_lps_s *lps) -{ - if (!lps->bfa->fcs) { - bfa_cb_queue(lps->bfa, &lps->hcb_qe, - bfa_lps_logout_comp_cb, lps); - return; - } - if (lps->fdisc) - bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); - else - bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); -} - -/** - * Clear virtual link completion handler for non-fcs - */ -static void -bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) -{ - struct bfa_lps_s *lps = arg; - - if (!complete) - return; - - /* Clear virtual link to base port will result in link down */ - if (lps->fdisc) - bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); -} - -/** - * Received Clear virtual link event --direct call for fcs, - * queue for others - */ -static void -bfa_lps_cvl_event(struct bfa_lps_s *lps) -{ - if (!lps->bfa->fcs) { - bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, - lps); - return; - } - - /* Clear virtual link to base port will result in link down */ - if (lps->fdisc) - bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); -} - -u32 -bfa_lps_get_max_vport(struct bfa_s *bfa) -{ - if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) - return BFA_LPS_MAX_VPORTS_SUPP_CT; - else - return BFA_LPS_MAX_VPORTS_SUPP_CB; -} - -/** - * lps_public BFA LPS public functions - */ - -/** - * Allocate a lport srvice tag. - */ -struct bfa_lps_s * -bfa_lps_alloc(struct bfa_s *bfa) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps = NULL; - - bfa_q_deq(&mod->lps_free_q, &lps); - - if (lps == NULL) - return NULL; - - list_add_tail(&lps->qe, &mod->lps_active_q); - - bfa_sm_set_state(lps, bfa_lps_sm_init); - return lps; -} - -/** - * Free lport service tag. This can be called anytime after an alloc. - * No need to wait for any pending login/logout completions. - */ -void -bfa_lps_delete(struct bfa_lps_s *lps) -{ - bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); -} - -/** - * Initiate a lport login. - */ -void -bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, - wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) -{ - lps->uarg = uarg; - lps->alpa = alpa; - lps->pdusz = pdusz; - lps->pwwn = pwwn; - lps->nwwn = nwwn; - lps->fdisc = BFA_FALSE; - lps->auth_en = auth_en; - bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); -} - -/** - * Initiate a lport fdisc login. - */ -void -bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, - wwn_t nwwn) -{ - lps->uarg = uarg; - lps->alpa = 0; - lps->pdusz = pdusz; - lps->pwwn = pwwn; - lps->nwwn = nwwn; - lps->fdisc = BFA_TRUE; - lps->auth_en = BFA_FALSE; - bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); -} - -/** - * Initiate a lport logout (flogi). - */ -void -bfa_lps_flogo(struct bfa_lps_s *lps) -{ - bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); -} - -/** - * Initiate a lport FDSIC logout. - */ -void -bfa_lps_fdisclogo(struct bfa_lps_s *lps) -{ - bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); -} - -/** - * Discard a pending login request -- should be called only for - * link down handling. 
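/*
 * Editorial sketch, not part of the patch: a minimal illustration of how a
 * caller might drive the LPS login API above.  example_fabric_login() and its
 * argument values are hypothetical; the real callers live in the FCS fabric
 * code, and completion is reported through bfa_cb_lps_flogi_comp().
 */
static void
example_fabric_login(struct bfa_s *bfa, void *fabric_arg, wwn_t pwwn,
		     wwn_t nwwn)
{
	struct bfa_lps_s *lps;

	/* grab a lport service tag; NULL means all tags are in use */
	lps = bfa_lps_alloc(bfa);
	if (lps == NULL)
		return;

	/* FLOGI to the fabric: alpa 0, 2048-byte PDU, no authentication */
	bfa_lps_flogi(lps, fabric_arg, 0, 2048, pwwn, nwwn, BFA_FALSE);
}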
- */ -void -bfa_lps_discard(struct bfa_lps_s *lps) -{ - bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); -} - -/** - * Return lport services tag - */ -u8 -bfa_lps_get_tag(struct bfa_lps_s *lps) -{ - return lps->lp_tag; -} - -/** - * Return lport services tag given the pid - */ -u8 -bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) -{ - struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); - struct bfa_lps_s *lps; - int i; - - for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { - if (lps->lp_pid == pid) - return lps->lp_tag; - } - - /* Return base port tag anyway */ - return 0; -} - -/** - * return if fabric login indicates support for NPIV - */ -bfa_boolean_t -bfa_lps_is_npiv_en(struct bfa_lps_s *lps) -{ - return lps->npiv_en; -} - -/** - * Return TRUE if attached to F-Port, else return FALSE - */ -bfa_boolean_t -bfa_lps_is_fport(struct bfa_lps_s *lps) -{ - return lps->fport; -} - -/** - * Return TRUE if attached to a Brocade Fabric - */ -bfa_boolean_t -bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps) -{ - return lps->brcd_switch; -} -/** - * return TRUE if authentication is required - */ -bfa_boolean_t -bfa_lps_is_authreq(struct bfa_lps_s *lps) -{ - return lps->auth_req; -} - -bfa_eproto_status_t -bfa_lps_get_extstatus(struct bfa_lps_s *lps) -{ - return lps->ext_status; -} - -/** - * return port id assigned to the lport - */ -u32 -bfa_lps_get_pid(struct bfa_lps_s *lps) -{ - return lps->lp_pid; -} - -/** - * Return bb_credit assigned in FLOGI response - */ -u16 -bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps) -{ - return lps->pr_bbcred; -} - -/** - * Return peer port name - */ -wwn_t -bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps) -{ - return lps->pr_pwwn; -} - -/** - * Return peer node name - */ -wwn_t -bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps) -{ - return lps->pr_nwwn; -} - -/** - * return reason code if login request is rejected - */ -u8 -bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps) -{ - return lps->lsrjt_rsn; -} - -/** - * return explanation code if login request is rejected - */ -u8 -bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps) -{ - return lps->lsrjt_expl; -} - -/** - * Return fpma/spma MAC for lport - */ -struct mac_s -bfa_lps_get_lp_mac(struct bfa_lps_s *lps) -{ - return lps->lp_mac; -} - -/** - * LPS firmware message class handler. - */ -void -bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - union bfi_lps_i2h_msg_u msg; - - bfa_trc(bfa, m->mhdr.msg_id); - msg.msg = m; - - switch (m->mhdr.msg_id) { - case BFI_LPS_H2I_LOGIN_RSP: - bfa_lps_login_rsp(bfa, msg.login_rsp); - break; - - case BFI_LPS_H2I_LOGOUT_RSP: - bfa_lps_logout_rsp(bfa, msg.logout_rsp); - break; - - case BFI_LPS_H2I_CVL_EVENT: - bfa_lps_rx_cvl_event(bfa, msg.cvl_event); - break; - - default: - bfa_trc(bfa, m->mhdr.msg_id); - bfa_assert(0); - } -} - - diff --git a/drivers/scsi/bfa/bfa_lps_priv.h b/drivers/scsi/bfa/bfa_lps_priv.h deleted file mode 100644 index d16c6ce995df..000000000000 --- a/drivers/scsi/bfa/bfa_lps_priv.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_LPS_PRIV_H__ -#define __BFA_LPS_PRIV_H__ - -#include - -struct bfa_lps_mod_s { - struct list_head lps_free_q; - struct list_head lps_active_q; - struct bfa_lps_s *lps_arr; - int num_lps; -}; - -#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod) -#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag]) - -/* - * external functions - */ -void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); - -#endif /* __BFA_LPS_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c deleted file mode 100644 index a7fcc80c177e..000000000000 --- a/drivers/scsi/bfa/bfa_module.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include -#include -#include -#include - -/** - * BFA module list terminated by NULL - */ -struct bfa_module_s *hal_mods[] = { - &hal_mod_sgpg, - &hal_mod_fcport, - &hal_mod_fcxp, - &hal_mod_lps, - &hal_mod_uf, - &hal_mod_rport, - &hal_mod_fcpim, -#ifdef BFA_CFG_PBIND - &hal_mod_pbind, -#endif - NULL -}; - -/** - * Message handlers for various modules. 
- */ -bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { - bfa_isr_unhandled, /* NONE */ - bfa_isr_unhandled, /* BFI_MC_IOC */ - bfa_isr_unhandled, /* BFI_MC_DIAG */ - bfa_isr_unhandled, /* BFI_MC_FLASH */ - bfa_isr_unhandled, /* BFI_MC_CEE */ - bfa_fcport_isr, /* BFI_MC_FCPORT */ - bfa_isr_unhandled, /* BFI_MC_IOCFC */ - bfa_isr_unhandled, /* BFI_MC_LL */ - bfa_uf_isr, /* BFI_MC_UF */ - bfa_fcxp_isr, /* BFI_MC_FCXP */ - bfa_lps_isr, /* BFI_MC_LPS */ - bfa_rport_isr, /* BFI_MC_RPORT */ - bfa_itnim_isr, /* BFI_MC_ITNIM */ - bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ - bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ - bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ - bfa_ioim_isr, /* BFI_MC_IOIM */ - bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ - bfa_tskim_isr, /* BFI_MC_TSKIM */ - bfa_isr_unhandled, /* BFI_MC_SBOOT */ - bfa_isr_unhandled, /* BFI_MC_IPFC */ - bfa_isr_unhandled, /* BFI_MC_PORT */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ - bfa_isr_unhandled, /* --------- */ -}; - -/** - * Message handlers for mailbox command classes - */ -bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { - NULL, - NULL, /* BFI_MC_IOC */ - NULL, /* BFI_MC_DIAG */ - NULL, /* BFI_MC_FLASH */ - NULL, /* BFI_MC_CEE */ - NULL, /* BFI_MC_PORT */ - bfa_iocfc_isr, /* BFI_MC_IOCFC */ - NULL, -}; - diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h new file mode 100644 index 000000000000..2cd527338677 --- /dev/null +++ b/drivers/scsi/bfa/bfa_modules.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/** + * bfa_modules.h BFA modules + */ + +#ifndef __BFA_MODULES_H__ +#define __BFA_MODULES_H__ + +#include "bfa_cs.h" +#include "bfa.h" +#include "bfa_svc.h" +#include "bfa_fcpim.h" +#include "bfa_port.h" + +struct bfa_modules_s { + struct bfa_fcport_s fcport; /* fc port module */ + struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ + struct bfa_lps_mod_s lps_mod; /* fcxp module */ + struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ + struct bfa_rport_mod_s rport_mod; /* remote port module */ + struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */ + struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */ + struct bfa_port_s port; /* Physical port module */ +}; + +/* + * !!! Only append to the enums defined here to avoid any versioning + * !!! 
needed between trace utility and driver version + */ +enum { + BFA_TRC_HAL_CORE = 1, + BFA_TRC_HAL_FCXP = 2, + BFA_TRC_HAL_FCPIM = 3, + BFA_TRC_HAL_IOCFC_CT = 4, + BFA_TRC_HAL_IOCFC_CB = 5, +}; + + +/** + * Macro to define a new BFA module + */ +#define BFA_MODULE(__mod) \ + static void bfa_ ## __mod ## _meminfo( \ + struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \ + u32 *dm_len); \ + static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ + void *bfad, struct bfa_iocfc_cfg_s *cfg, \ + struct bfa_meminfo_s *meminfo, \ + struct bfa_pcidev_s *pcidev); \ + static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ + static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \ + static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \ + static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \ + \ + extern struct bfa_module_s hal_mod_ ## __mod; \ + struct bfa_module_s hal_mod_ ## __mod = { \ + bfa_ ## __mod ## _meminfo, \ + bfa_ ## __mod ## _attach, \ + bfa_ ## __mod ## _detach, \ + bfa_ ## __mod ## _start, \ + bfa_ ## __mod ## _stop, \ + bfa_ ## __mod ## _iocdisable, \ + } + +#define BFA_CACHELINE_SZ (256) + +/** + * Structure used to interact between different BFA sub modules + * + * Each sub module needs to implement only the entry points relevant to it (and + * can leave entry points as NULL) + */ +struct bfa_module_s { + void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len, + u32 *dm_len); + void (*attach) (struct bfa_s *bfa, void *bfad, + struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_pcidev_s *pcidev); + void (*detach) (struct bfa_s *bfa); + void (*start) (struct bfa_s *bfa); + void (*stop) (struct bfa_s *bfa); + void (*iocdisable) (struct bfa_s *bfa); +}; + +extern struct bfa_module_s *hal_mods[]; + +struct bfa_s { + void *bfad; /* BFA driver instance */ + struct bfa_plog_s *plog; /* portlog buffer */ + struct bfa_trc_mod_s *trcmod; /* driver tracing */ + struct bfa_ioc_s ioc; /* IOC module */ + struct bfa_iocfc_s iocfc; /* IOCFC module */ + struct bfa_timer_mod_s timer_mod; /* timer module */ + struct bfa_modules_s modules; /* BFA modules */ + struct list_head comp_q; /* pending completions */ + bfa_boolean_t rme_process; /* RME processing enabled */ + struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; + bfa_boolean_t fcs; /* FCS is attached to BFA */ + struct bfa_msix_s msix; +}; + +extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX]; +extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[]; +extern bfa_boolean_t bfa_auto_recover; +extern struct bfa_module_s hal_mod_sgpg; +extern struct bfa_module_s hal_mod_fcport; +extern struct bfa_module_s hal_mod_fcxp; +extern struct bfa_module_s hal_mod_lps; +extern struct bfa_module_s hal_mod_uf; +extern struct bfa_module_s hal_mod_rport; +extern struct bfa_module_s hal_mod_fcpim; + +#endif /* __BFA_MODULES_H__ */ diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h deleted file mode 100644 index f554c2fad6a9..000000000000 --- a/drivers/scsi/bfa/bfa_modules_priv.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
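/*
 * Editorial note, not part of the patch: BFA_MODULE(lps) in bfa_lps.c expands
 * (roughly) to forward declarations of the six per-module entry points plus a
 * hal_mod_lps descriptor that hal_mods[] walks at attach/detach/start/stop
 * time:
 *
 *	static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
 *				    u32 *ndm_len, u32 *dm_len);
 *	static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
 *				   struct bfa_iocfc_cfg_s *cfg,
 *				   struct bfa_meminfo_s *meminfo,
 *				   struct bfa_pcidev_s *pcidev);
 *	...
 *	struct bfa_module_s hal_mod_lps = {
 *		bfa_lps_meminfo, bfa_lps_attach, bfa_lps_detach,
 *		bfa_lps_start, bfa_lps_stop, bfa_lps_iocdisable,
 *	};
 */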
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_MODULES_PRIV_H__ -#define __BFA_MODULES_PRIV_H__ - -#include "bfa_uf_priv.h" -#include "bfa_port_priv.h" -#include "bfa_rport_priv.h" -#include "bfa_fcxp_priv.h" -#include "bfa_lps_priv.h" -#include "bfa_fcpim_priv.h" -#include -#include - - -struct bfa_modules_s { - struct bfa_fcport_s fcport; /* fc port module */ - struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ - struct bfa_lps_mod_s lps_mod; /* fcxp module */ - struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ - struct bfa_rport_mod_s rport_mod; /* remote port module */ - struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */ - struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */ - struct bfa_cee_s cee; /* CEE Module */ - struct bfa_port_s port; /* Physical port module */ -}; - -#endif /* __BFA_MODULES_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h index bd1cd3ee3022..788a250ffb8a 100644 --- a/drivers/scsi/bfa/bfa_os_inc.h +++ b/drivers/scsi/bfa/bfa_os_inc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -22,30 +22,20 @@ #ifndef __BFA_OS_INC_H__ #define __BFA_OS_INC_H__ -#ifndef __KERNEL__ -#include -#else #include - #include #include - #include -#define SET_MODULE_VERSION(VER) - #include - #include #include #include #include #include - #include - +#include #include #include - #include #include #include @@ -54,97 +44,75 @@ #define __BIGENDIAN #endif -#define BFA_ERR KERN_ERR -#define BFA_WARNING KERN_WARNING -#define BFA_NOTICE KERN_NOTICE -#define BFA_INFO KERN_INFO -#define BFA_DEBUG KERN_DEBUG - -#define LOG_BFAD_INIT 0x00000001 -#define LOG_FCP_IO 0x00000002 - -#ifdef DEBUG -#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) \ - BFA_LOG(bfad, level, mask, fmt, ## arg) -#define BFA_DEV_TRACE(bfad, level, fmt, arg...) \ - BFA_DEV_PRINTF(bfad, level, fmt, ## arg) -#define BFA_TRACE(level, fmt, arg...) \ - BFA_PRINTF(level, fmt, ## arg) -#else -#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...) -#define BFA_DEV_TRACE(bfad, level, fmt, arg...) -#define BFA_TRACE(level, fmt, arg...) -#endif +static inline u64 bfa_os_get_clock(void) +{ + return jiffies; +} + +static inline u64 bfa_os_get_log_time(void) +{ + u64 system_time = 0; + struct timeval tv; + do_gettimeofday(&tv); + + /* We are interested in seconds only. */ + system_time = tv.tv_sec; + return system_time; +} + +#define bfa_io_lat_clock_res_div HZ +#define bfa_io_lat_clock_res_mul 1000 #define BFA_ASSERT(p) do { \ if (!(p)) { \ printk(KERN_ERR "assert(%s) failed at %s:%d\n", \ #p, __FILE__, __LINE__); \ - BUG(); \ } \ } while (0) - -#define BFA_LOG(bfad, level, mask, fmt, arg...) \ -do { \ - if (((mask) & (((struct bfad_s *)(bfad))-> \ - cfg_data[cfg_log_mask])) || (level[1] <= '3')) \ - dev_printk(level, &(((struct bfad_s *) \ - (bfad))->pcidev->dev), fmt, ##arg); \ +#define BFA_LOG(level, bfad, mask, fmt, arg...) 
\ +do { \ + if (((mask) == 4) || (level[1] <= '4')) \ + dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \ } while (0) -#ifndef BFA_DEV_PRINTF -#define BFA_DEV_PRINTF(bfad, level, fmt, arg...) \ - dev_printk(level, &(((struct bfad_s *) \ - (bfad))->pcidev->dev), fmt, ##arg); -#endif - -#define BFA_PRINTF(level, fmt, arg...) \ - printk(level fmt, ##arg); - -int bfa_os_MWB(void *); - -#define bfa_os_mmiowb() mmiowb() - #define bfa_swap_3b(_x) \ ((((_x) & 0xff) << 16) | \ ((_x) & 0x00ff00) | \ (((_x) & 0xff0000) >> 16)) -#define bfa_swap_8b(_x) \ - ((((_x) & 0xff00000000000000ull) >> 56) \ - | (((_x) & 0x00ff000000000000ull) >> 40) \ - | (((_x) & 0x0000ff0000000000ull) >> 24) \ - | (((_x) & 0x000000ff00000000ull) >> 8) \ - | (((_x) & 0x00000000ff000000ull) << 8) \ - | (((_x) & 0x0000000000ff0000ull) << 24) \ - | (((_x) & 0x000000000000ff00ull) << 40) \ - | (((_x) & 0x00000000000000ffull) << 56)) - -#define bfa_os_swap32(_x) \ - ((((_x) & 0xff) << 24) | \ +#define bfa_swap_8b(_x) \ + ((((_x) & 0xff00000000000000ull) >> 56) \ + | (((_x) & 0x00ff000000000000ull) >> 40) \ + | (((_x) & 0x0000ff0000000000ull) >> 24) \ + | (((_x) & 0x000000ff00000000ull) >> 8) \ + | (((_x) & 0x00000000ff000000ull) << 8) \ + | (((_x) & 0x0000000000ff0000ull) << 24) \ + | (((_x) & 0x000000000000ff00ull) << 40) \ + | (((_x) & 0x00000000000000ffull) << 56)) + +#define bfa_os_swap32(_x) \ + ((((_x) & 0xff) << 24) | \ (((_x) & 0x0000ff00) << 8) | \ (((_x) & 0x00ff0000) >> 8) | \ (((_x) & 0xff000000) >> 24)) -#define bfa_os_swap_sgaddr(_x) ((u64)( \ - (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ - (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ - (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \ - (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \ - (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \ - (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \ - (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \ +#define bfa_os_swap_sgaddr(_x) ((u64)( \ + (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ + (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ + (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \ + (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \ + (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \ + (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \ + (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \ (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) #ifndef __BIGENDIAN #define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ (((_x) & 0x00ff) << 8))) - #define bfa_os_htonl(_x) bfa_os_swap32(_x) #define bfa_os_htonll(_x) bfa_swap_8b(_x) #define bfa_os_hton3b(_x) bfa_swap_3b(_x) - #define bfa_os_wtole(_x) (_x) #define bfa_os_sgaddr(_x) (_x) @@ -170,17 +138,16 @@ int bfa_os_MWB(void *); #define bfa_os_memcpy memcpy #define bfa_os_udelay udelay #define bfa_os_vsprintf vsprintf +#define bfa_os_snprintf snprintf #define bfa_os_assign(__t, __s) __t = __s - -#define bfa_os_addr_t char __iomem * -#define bfa_os_panic() +#define bfa_os_addr_t void __iomem * #define bfa_os_reg_read(_raddr) readl(_raddr) #define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr)) -#define bfa_os_mem_read(_raddr, _off) \ +#define bfa_os_mem_read(_raddr, _off) \ bfa_os_swap32(readl(((_raddr) + (_off)))) -#define bfa_os_mem_write(_raddr, _off, _val) \ +#define bfa_os_mem_write(_raddr, _off, _val) \ writel(bfa_os_swap32((_val)), ((_raddr) + (_off))) #define BFA_TRC_TS(_trcm) \ @@ -191,11 +158,6 @@ int bfa_os_MWB(void *); (tv.tv_sec*1000000+tv.tv_usec); \ }) -struct bfa_log_mod_s; -void 
bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id, - const char *fmt, ...); -#endif - #define boolean_t int /** @@ -206,7 +168,15 @@ struct bfa_timeval_s { u32 tv_usec; /* microseconds */ }; -void bfa_os_gettimeofday(struct bfa_timeval_s *tv); +static inline void +bfa_os_gettimeofday(struct bfa_timeval_s *tv) +{ + struct timeval tmp_tv; + + do_gettimeofday(&tmp_tv); + tv->tv_sec = (u32) tmp_tv.tv_sec; + tv->tv_usec = (u32) tmp_tv.tv_usec; +} static inline void wwn2str(char *wwn_str, u64 wwn) diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h new file mode 100644 index 000000000000..501f0ed35cf0 --- /dev/null +++ b/drivers/scsi/bfa/bfa_plog.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __BFA_PORTLOG_H__ +#define __BFA_PORTLOG_H__ + +#include "bfa_fc.h" +#include "bfa_defs.h" + +#define BFA_PL_NLOG_ENTS 256 +#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS) + +#define BFA_PL_STRING_LOG_SZ 32 /* number of chars in string log */ +#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */ + +enum bfa_plog_log_type { + BFA_PL_LOG_TYPE_INVALID = 0, + BFA_PL_LOG_TYPE_INT = 1, + BFA_PL_LOG_TYPE_STRING = 2, +}; + +/* + * the (fixed size) record format for each entry in the portlog + */ +struct bfa_plog_rec_s { + u64 tv; /* timestamp */ + u8 port; /* Source port that logged this entry */ + u8 mid; /* module id */ + u8 eid; /* indicates Rx, Tx, IOCTL, etc. bfa_plog_eid */ + u8 log_type; /* string/integer log, bfa_plog_log_type_t */ + u8 log_num_ints; + /* + * interpreted only if log_type is INT_LOG. indicates number of + * integers in the int_log[] (0-PL_INT_LOG_SZ). + */ + u8 rsvd; + u16 misc; /* can be used to indicate fc frame length */ + union { + char string_log[BFA_PL_STRING_LOG_SZ]; + u32 int_log[BFA_PL_INT_LOG_SZ]; + } log_entry; + +}; + +/* + * the following #defines will be used by the logging entities to indicate + * their module id. BFAL will convert the integer value to string format + * +* process to be used while changing the following #defines: + * - Always add new entries at the end + * - define corresponding string in BFAL + * - Do not remove any entry or rearrange the order. + */ +enum bfa_plog_mid { + BFA_PL_MID_INVALID = 0, + BFA_PL_MID_DEBUG = 1, + BFA_PL_MID_DRVR = 2, + BFA_PL_MID_HAL = 3, + BFA_PL_MID_HAL_FCXP = 4, + BFA_PL_MID_HAL_UF = 5, + BFA_PL_MID_FCS = 6, + BFA_PL_MID_LPS = 7, + BFA_PL_MID_MAX = 8 +}; + +#define BFA_PL_MID_STRLEN 8 +struct bfa_plog_mid_strings_s { + char m_str[BFA_PL_MID_STRLEN]; +}; + +/* + * the following #defines will be used by the logging entities to indicate + * their event type. BFAL will convert the integer value to string format + * +* process to be used while changing the following #defines: + * - Always add new entries at the end + * - define corresponding string in BFAL + * - Do not remove any entry or rearrange the order. 
+ */ +enum bfa_plog_eid { + BFA_PL_EID_INVALID = 0, + BFA_PL_EID_IOC_DISABLE = 1, + BFA_PL_EID_IOC_ENABLE = 2, + BFA_PL_EID_PORT_DISABLE = 3, + BFA_PL_EID_PORT_ENABLE = 4, + BFA_PL_EID_PORT_ST_CHANGE = 5, + BFA_PL_EID_TX = 6, + BFA_PL_EID_TX_ACK1 = 7, + BFA_PL_EID_TX_RJT = 8, + BFA_PL_EID_TX_BSY = 9, + BFA_PL_EID_RX = 10, + BFA_PL_EID_RX_ACK1 = 11, + BFA_PL_EID_RX_RJT = 12, + BFA_PL_EID_RX_BSY = 13, + BFA_PL_EID_CT_IN = 14, + BFA_PL_EID_CT_OUT = 15, + BFA_PL_EID_DRIVER_START = 16, + BFA_PL_EID_RSCN = 17, + BFA_PL_EID_DEBUG = 18, + BFA_PL_EID_MISC = 19, + BFA_PL_EID_FIP_FCF_DISC = 20, + BFA_PL_EID_FIP_FCF_CVL = 21, + BFA_PL_EID_LOGIN = 22, + BFA_PL_EID_LOGO = 23, + BFA_PL_EID_TRUNK_SCN = 24, + BFA_PL_EID_MAX +}; + +#define BFA_PL_ENAME_STRLEN 8 +struct bfa_plog_eid_strings_s { + char e_str[BFA_PL_ENAME_STRLEN]; +}; + +#define BFA_PL_SIG_LEN 8 +#define BFA_PL_SIG_STR "12pl123" + +/* + * per port circular log buffer + */ +struct bfa_plog_s { + char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */ + u8 plog_enabled; + u8 rsvd[7]; + u32 ticks; + u16 head; + u16 tail; + struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS]; +}; + +void bfa_plog_init(struct bfa_plog_s *plog); +void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, char *log_str); +void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, + u32 *intarr, u32 num_ints); +void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr); +void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, + struct fchs_s *fchdr, u32 pld_w0); +void bfa_plog_clear(struct bfa_plog_s *plog); +void bfa_plog_enable(struct bfa_plog_s *plog); +void bfa_plog_disable(struct bfa_plog_s *plog); +bfa_boolean_t bfa_plog_get_setting(struct bfa_plog_s *plog); + +#endif /* __BFA_PORTLOG_H__ */ diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index c7e69f1e56e3..b6d170a13bea 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -15,30 +15,25 @@ * General Public License for more details. 
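/*
 * Editorial sketch, not part of the patch: bfa_plog_str() fills in one
 * string-type record of the portlog format declared above.
 * example_log_login_attempt() is a hypothetical caller; the LPS code earlier
 * in this series logs FLOGI/FDISC attempts the same way.
 */
static void
example_log_login_attempt(struct bfa_plog_s *plog)
{
	/* module id, event id, misc word (e.g. frame length), log string */
	bfa_plog_str(plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0,
		     "FLOGI Request");
}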
*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include "bfa_defs_svc.h" +#include "bfa_port.h" +#include "bfi.h" +#include "bfa_ioc.h" + BFA_TRC_FILE(CNA, PORT); #define bfa_ioc_portid(__ioc) ((__ioc)->port_id) -#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc) static void -bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats) +bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) { - u32 *dip = (u32 *) stats; - u32 t0, t1; - int i; + u32 *dip = (u32 *) stats; + u32 t0, t1; + int i; - for (i = 0; i < sizeof(union bfa_pport_stats_u) / sizeof(u32); - i += 2) { + for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32); + i += 2) { t0 = dip[i]; t1 = dip[i + 1]; #ifdef __BIGENDIAN @@ -49,11 +44,6 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats) dip[i + 1] = bfa_os_ntohl(t0); #endif } - - /** todo - * QoS stats r also swapped as 64bit; that structure also - * has to use 64 bit counters - */ } /** @@ -68,7 +58,9 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats) static void bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) { - bfa_assert(0); + bfa_trc(port, status); + port->endis_pending = BFA_FALSE; + port->endis_cbfn(port->endis_cbarg, status); } /** @@ -83,7 +75,9 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) static void bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status) { - bfa_assert(0); + bfa_trc(port, status); + port->endis_pending = BFA_FALSE; + port->endis_cbfn(port->endis_cbarg, status); } /** @@ -105,7 +99,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status) struct bfa_timeval_s tv; memcpy(port->stats, port->stats_dma.kva, - sizeof(union bfa_pport_stats_u)); + sizeof(union bfa_port_stats_u)); bfa_port_stats_swap(port, port->stats); bfa_os_gettimeofday(&tv); @@ -133,11 +127,11 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) struct bfa_timeval_s tv; port->stats_status = status; - port->stats_busy = BFA_FALSE; + port->stats_busy = BFA_FALSE; /** - * re-initialize time stamp for stats reset - */ + * re-initialize time stamp for stats reset + */ bfa_os_gettimeofday(&tv); port->stats_reset_time = tv.tv_sec; @@ -158,10 +152,10 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) static void bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) { - struct bfa_port_s *port = (struct bfa_port_s *)cbarg; + struct bfa_port_s *port = (struct bfa_port_s *) cbarg; union bfi_port_i2h_msg_u *i2hmsg; - i2hmsg = (union bfi_port_i2h_msg_u *)m; + i2hmsg = (union bfi_port_i2h_msg_u *) m; bfa_trc(port, m->mh.msg_id); switch (m->mh.msg_id) { @@ -178,9 +172,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) break; case BFI_PORT_I2H_GET_STATS_RSP: - /* - * Stats busy flag is still set? (may be cmd timed out) - */ + /* Stats busy flag is still set? 
(may be cmd timed out) */ if (port->stats_busy == BFA_FALSE) break; bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status); @@ -208,7 +200,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) u32 bfa_port_meminfo(void) { - return BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), BFA_DMA_ALIGN_SZ); + return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); } /** @@ -216,8 +208,8 @@ bfa_port_meminfo(void) * * * @param[in] port Port module pointer - * dma_kva Kernel Virtual Address of Port DMA Memory - * dma_pa Physical Address of Port DMA Memory + * dma_kva Kernel Virtual Address of Port DMA Memory + * dma_pa Physical Address of Port DMA Memory * * @return void */ @@ -225,7 +217,7 @@ void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) { port->stats_dma.kva = dma_kva; - port->stats_dma.pa = dma_pa; + port->stats_dma.pa = dma_pa; } /** @@ -239,12 +231,14 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) */ bfa_status_t bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, - void *cbarg) + void *cbarg) { struct bfi_port_generic_req_s *m; - /** todo Not implemented */ - bfa_assert(0); + if (bfa_ioc_is_disabled(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_DISABLED); + return BFA_STATUS_IOC_DISABLED; + } if (!bfa_ioc_is_operational(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_FAILURE); @@ -256,11 +250,11 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, return BFA_STATUS_DEVBUSY; } - m = (struct bfi_port_generic_req_s *)port->endis_mb.msg; + m = (struct bfi_port_generic_req_s *) port->endis_mb.msg; port->msgtag++; - port->endis_cbfn = cbfn; - port->endis_cbarg = cbarg; + port->endis_cbfn = cbfn; + port->endis_cbarg = cbarg; port->endis_pending = BFA_TRUE; bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ, @@ -281,12 +275,14 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, */ bfa_status_t bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, - void *cbarg) + void *cbarg) { struct bfi_port_generic_req_s *m; - /** todo Not implemented */ - bfa_assert(0); + if (bfa_ioc_is_disabled(port->ioc)) { + bfa_trc(port, BFA_STATUS_IOC_DISABLED); + return BFA_STATUS_IOC_DISABLED; + } if (!bfa_ioc_is_operational(port->ioc)) { bfa_trc(port, BFA_STATUS_IOC_FAILURE); @@ -298,11 +294,11 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, return BFA_STATUS_DEVBUSY; } - m = (struct bfi_port_generic_req_s *)port->endis_mb.msg; + m = (struct bfi_port_generic_req_s *) port->endis_mb.msg; port->msgtag++; - port->endis_cbfn = cbfn; - port->endis_cbarg = cbarg; + port->endis_cbfn = cbfn; + port->endis_cbarg = cbarg; port->endis_pending = BFA_TRUE; bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ, @@ -322,8 +318,8 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, * @return Status */ bfa_status_t -bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats, - bfa_port_stats_cbfn_t cbfn, void *cbarg) +bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats, + bfa_port_stats_cbfn_t cbfn, void *cbarg) { struct bfi_port_get_stats_req_s *m; @@ -337,12 +333,12 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats, return BFA_STATUS_DEVBUSY; } - m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg; + m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg; - port->stats = stats; - port->stats_cbfn = cbfn; + port->stats = stats; + port->stats_cbfn = cbfn; port->stats_cbarg = cbarg; - 
port->stats_busy = BFA_TRUE; + port->stats_busy = BFA_TRUE; bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa); bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ, @@ -362,7 +358,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats, */ bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, - void *cbarg) + void *cbarg) { struct bfi_port_generic_req_s *m; @@ -376,11 +372,11 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, return BFA_STATUS_DEVBUSY; } - m = (struct bfi_port_generic_req_s *)port->stats_mb.msg; + m = (struct bfi_port_generic_req_s *) port->stats_mb.msg; - port->stats_cbfn = cbfn; + port->stats_cbfn = cbfn; port->stats_cbarg = cbarg; - port->stats_busy = BFA_TRUE; + port->stats_busy = BFA_TRUE; bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ, bfa_ioc_portid(port->ioc)); @@ -400,11 +396,9 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, void bfa_port_hbfail(void *arg) { - struct bfa_port_s *port = (struct bfa_port_s *)arg; + struct bfa_port_s *port = (struct bfa_port_s *) arg; - /* - * Fail any pending get_stats/clear_stats requests - */ + /* Fail any pending get_stats/clear_stats requests */ if (port->stats_busy) { if (port->stats_cbfn) port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED); @@ -412,9 +406,7 @@ bfa_port_hbfail(void *arg) port->stats_busy = BFA_FALSE; } - /* - * Clear any enable/disable is pending - */ + /* Clear any enable/disable is pending */ if (port->endis_pending) { if (port->endis_cbfn) port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED); @@ -433,22 +425,20 @@ bfa_port_hbfail(void *arg) * The device driver specific mbox ISR functions have * this pointer as one of the parameters. * trcmod - - * logmod - * * @return void */ void -bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev, - struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod) +bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod) { struct bfa_timeval_s tv; bfa_assert(port); - port->dev = dev; - port->ioc = ioc; + port->dev = dev; + port->ioc = ioc; port->trcmod = trcmod; - port->logmod = logmod; port->stats_busy = BFA_FALSE; port->endis_pending = BFA_FALSE; diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h new file mode 100644 index 000000000000..dbce9dfd056b --- /dev/null +++ b/drivers/scsi/bfa/bfa_port.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef __BFA_PORT_H__ +#define __BFA_PORT_H__ + +#include "bfa_defs_svc.h" +#include "bfa_ioc.h" +#include "bfa_cs.h" + +typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status); +typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status); + +struct bfa_port_s { + void *dev; + struct bfa_ioc_s *ioc; + struct bfa_trc_mod_s *trcmod; + u32 msgtag; + bfa_boolean_t stats_busy; + struct bfa_mbox_cmd_s stats_mb; + bfa_port_stats_cbfn_t stats_cbfn; + void *stats_cbarg; + bfa_status_t stats_status; + u32 stats_reset_time; + union bfa_port_stats_u *stats; + struct bfa_dma_s stats_dma; + bfa_boolean_t endis_pending; + struct bfa_mbox_cmd_s endis_mb; + bfa_port_endis_cbfn_t endis_cbfn; + void *endis_cbarg; + bfa_status_t endis_status; + struct bfa_ioc_hbfail_notify_s hbfail; +}; + +void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, + void *dev, struct bfa_trc_mod_s *trcmod); +void bfa_port_detach(struct bfa_port_s *port); +void bfa_port_hbfail(void *arg); + +bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, + union bfa_port_stats_u *stats, + bfa_port_stats_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port, + bfa_port_stats_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_port_enable(struct bfa_port_s *port, + bfa_port_endis_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_port_disable(struct bfa_port_s *port, + bfa_port_endis_cbfn_t cbfn, void *cbarg); +u32 bfa_port_meminfo(void); +void bfa_port_mem_claim(struct bfa_port_s *port, + u8 *dma_kva, u64 dma_pa); +#endif /* __BFA_PORT_H__ */ diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h deleted file mode 100644 index c9ebe0426fa6..000000000000 --- a/drivers/scsi/bfa/bfa_port_priv.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
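/*
 * Editorial sketch, not part of the patch: a minimal caller of the port
 * statistics API declared above.  The example_* names are hypothetical; the
 * caller owns the union bfa_port_stats_u buffer and receives the result
 * through the callback.
 */
static void
example_port_stats_cb(void *dev, bfa_status_t status)
{
	/* stats are valid in the caller-supplied buffer when status is OK */
}

static void
example_get_port_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats)
{
	bfa_status_t rc;

	rc = bfa_port_get_stats(port, stats, example_port_stats_cb, NULL);
	if (rc != BFA_STATUS_OK) {
		/* IOC not operational, or a stats request already pending */
	}
}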
- */ - -#ifndef __BFA_PORT_PRIV_H__ -#define __BFA_PORT_PRIV_H__ - -#include -#include -#include "bfa_intr_priv.h" - -/** - * Link notification data structure - */ -struct bfa_fcport_ln_s { - struct bfa_fcport_s *fcport; - bfa_sm_t sm; - struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ - enum bfa_pport_linkstate ln_event; /* ln event for callback */ -}; - -/** - * BFA FC port data structure - */ -struct bfa_fcport_s { - struct bfa_s *bfa; /* parent BFA instance */ - bfa_sm_t sm; /* port state machine */ - wwn_t nwwn; /* node wwn of physical port */ - wwn_t pwwn; /* port wwn of physical oprt */ - enum bfa_pport_speed speed_sup; - /* supported speeds */ - enum bfa_pport_speed speed; /* current speed */ - enum bfa_pport_topology topology; /* current topology */ - u8 myalpa; /* my ALPA in LOOP topology */ - u8 rsvd[3]; - u32 mypid:24; - u32 rsvd_b:8; - struct bfa_pport_cfg_s cfg; /* current port configuration */ - struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ - struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ - struct bfa_reqq_wait_s reqq_wait; - /* to wait for room in reqq */ - struct bfa_reqq_wait_s svcreq_wait; - /* to wait for room in reqq */ - struct bfa_reqq_wait_s stats_reqq_wait; - /* to wait for room in reqq (stats) */ - void *event_cbarg; - void (*event_cbfn) (void *cbarg, - bfa_pport_event_t event); - union { - union bfi_fcport_i2h_msg_u i2hmsg; - } event_arg; - void *bfad; /* BFA driver handle */ - struct bfa_fcport_ln_s ln; /* Link Notification */ - struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ - struct bfa_timer_s timer; /* timer */ - u32 msgtag; /* fimrware msg tag for reply */ - u8 *stats_kva; - u64 stats_pa; - union bfa_fcport_stats_u *stats; - union bfa_fcport_stats_u *stats_ret; /* driver stats location */ - bfa_status_t stats_status; /* stats/statsclr status */ - bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ - bfa_boolean_t stats_qfull; - u32 stats_reset_time; /* stats reset time stamp */ - bfa_cb_pport_t stats_cbfn; /* driver callback function */ - void *stats_cbarg; /* user callback arg */ - bfa_boolean_t diag_busy; /* diag busy status */ - bfa_boolean_t beacon; /* port beacon status */ - bfa_boolean_t link_e2e_beacon; /* link beacon status */ -}; - -#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) - -/* - * public functions - */ -void bfa_fcport_init(struct bfa_s *bfa); -void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); - -#endif /* __BFA_PORT_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h deleted file mode 100644 index bf4939b1676c..000000000000 --- a/drivers/scsi/bfa/bfa_priv.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_PRIV_H__ -#define __BFA_PRIV_H__ - -#include "bfa_iocfc.h" -#include "bfa_intr_priv.h" -#include "bfa_trcmod_priv.h" -#include "bfa_modules_priv.h" -#include "bfa_fwimg_priv.h" -#include -#include - -/** - * Macro to define a new BFA module - */ -#define BFA_MODULE(__mod) \ - static void bfa_ ## __mod ## _meminfo( \ - struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \ - u32 *dm_len); \ - static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ - void *bfad, struct bfa_iocfc_cfg_s *cfg, \ - struct bfa_meminfo_s *meminfo, \ - struct bfa_pcidev_s *pcidev); \ - static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \ - \ - extern struct bfa_module_s hal_mod_ ## __mod; \ - struct bfa_module_s hal_mod_ ## __mod = { \ - bfa_ ## __mod ## _meminfo, \ - bfa_ ## __mod ## _attach, \ - bfa_ ## __mod ## _detach, \ - bfa_ ## __mod ## _start, \ - bfa_ ## __mod ## _stop, \ - bfa_ ## __mod ## _iocdisable, \ - } - -#define BFA_CACHELINE_SZ (256) - -/** - * Structure used to interact between different BFA sub modules - * - * Each sub module needs to implement only the entry points relevant to it (and - * can leave entry points as NULL) - */ -struct bfa_module_s { - void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len); - void (*attach) (struct bfa_s *bfa, void *bfad, - struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, - struct bfa_pcidev_s *pcidev); - void (*detach) (struct bfa_s *bfa); - void (*start) (struct bfa_s *bfa); - void (*stop) (struct bfa_s *bfa); - void (*iocdisable) (struct bfa_s *bfa); -}; - -extern struct bfa_module_s *hal_mods[]; - -struct bfa_s { - void *bfad; /* BFA driver instance */ - struct bfa_aen_s *aen; /* AEN module */ - struct bfa_plog_s *plog; /* portlog buffer */ - struct bfa_log_mod_s *logm; /* driver logging modulen */ - struct bfa_trc_mod_s *trcmod; /* driver tracing */ - struct bfa_ioc_s ioc; /* IOC module */ - struct bfa_iocfc_s iocfc; /* IOCFC module */ - struct bfa_timer_mod_s timer_mod; /* timer module */ - struct bfa_modules_s modules; /* BFA modules */ - struct list_head comp_q; /* pending completions */ - bfa_boolean_t rme_process; /* RME processing enabled */ - struct list_head reqq_waitq[BFI_IOC_MAX_CQS]; - bfa_boolean_t fcs; /* FCS is attached to BFA */ - struct bfa_msix_s msix; -}; - -extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX]; -extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[]; -extern bfa_boolean_t bfa_auto_recover; -extern struct bfa_module_s hal_mod_flash; -extern struct bfa_module_s hal_mod_fcdiag; -extern struct bfa_module_s hal_mod_sgpg; -extern struct bfa_module_s hal_mod_fcport; -extern struct bfa_module_s hal_mod_fcxp; -extern struct bfa_module_s hal_mod_lps; -extern struct bfa_module_s hal_mod_uf; -extern struct bfa_module_s hal_mod_rport; -extern struct bfa_module_s hal_mod_fcpim; -extern struct bfa_module_s hal_mod_pbind; - -#endif /* __BFA_PRIV_H__ */ - diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c deleted file mode 100644 index ccd0680f6f16..000000000000 --- a/drivers/scsi/bfa/bfa_rport.c +++ /dev/null @@ -1,906 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include "bfa_intr_priv.h" - -BFA_TRC_FILE(HAL, RPORT); -BFA_MODULE(rport); - -#define bfa_rport_offline_cb(__rp) do { \ - if ((__rp)->bfa->fcs) \ - bfa_cb_rport_offline((__rp)->rport_drv); \ - else { \ - bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ - __bfa_cb_rport_offline, (__rp)); \ - } \ -} while (0) - -#define bfa_rport_online_cb(__rp) do { \ - if ((__rp)->bfa->fcs) \ - bfa_cb_rport_online((__rp)->rport_drv); \ - else { \ - bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ - __bfa_cb_rport_online, (__rp)); \ - } \ -} while (0) - -/* - * forward declarations - */ -static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); -static void bfa_rport_free(struct bfa_rport_s *rport); -static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp); -static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp); -static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp); -static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete); - -/** - * bfa_rport_sm BFA rport state machine - */ - - -enum bfa_rport_event { - BFA_RPORT_SM_CREATE = 1, /* rport create event */ - BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */ - BFA_RPORT_SM_ONLINE = 3, /* rport is online */ - BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */ - BFA_RPORT_SM_FWRSP = 5, /* firmware response */ - BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */ - BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */ - BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */ - BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ -}; - -static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_created(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_online(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_offline(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_deleting(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, - enum bfa_rport_event event); -static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, - enum bfa_rport_event event); - -/** - * Beginning state, only online event expected. 
- */ -static void -bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_CREATE: - bfa_stats(rp, sm_un_cr); - bfa_sm_set_state(rp, bfa_rport_sm_created); - break; - - default: - bfa_stats(rp, sm_un_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -static void -bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_ONLINE: - bfa_stats(rp, sm_cr_on); - if (bfa_rport_send_fwcreate(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); - else - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_cr_del); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_rport_free(rp); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_cr_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - break; - - default: - bfa_stats(rp, sm_cr_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Waiting for rport create response from firmware. - */ -static void -bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_FWRSP: - bfa_stats(rp, sm_fwc_rsp); - bfa_sm_set_state(rp, bfa_rport_sm_online); - bfa_rport_online_cb(rp); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_fwc_del); - bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); - break; - - case BFA_RPORT_SM_OFFLINE: - bfa_stats(rp, sm_fwc_off); - bfa_sm_set_state(rp, bfa_rport_sm_offline_pending); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_fwc_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - break; - - default: - bfa_stats(rp, sm_fwc_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Request queue is full, awaiting queue resume to send create request. - */ -static void -bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_QRESUME: - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); - bfa_rport_send_fwcreate(rp); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_fwc_del); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_reqq_wcancel(&rp->reqq_wait); - bfa_rport_free(rp); - break; - - case BFA_RPORT_SM_OFFLINE: - bfa_stats(rp, sm_fwc_off); - bfa_sm_set_state(rp, bfa_rport_sm_offline); - bfa_reqq_wcancel(&rp->reqq_wait); - bfa_rport_offline_cb(rp); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_fwc_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - bfa_reqq_wcancel(&rp->reqq_wait); - break; - - default: - bfa_stats(rp, sm_fwc_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Online state - normal parking state. 
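 * Handles offline/delete requests, speed-set requests and QoS SCN
 * notifications from firmware; an IOC failure moves the rport to the
 * iocdisable state (descriptive note added here, derived from the handler below).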
- */ -static void -bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - struct bfi_rport_qos_scn_s *qos_scn; - - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_OFFLINE: - bfa_stats(rp, sm_on_off); - if (bfa_rport_send_fwdelete(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); - else - bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_on_del); - if (bfa_rport_send_fwdelete(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_deleting); - else - bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_on_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - break; - - case BFA_RPORT_SM_SET_SPEED: - bfa_rport_send_fwspeed(rp); - break; - - case BFA_RPORT_SM_QOS_SCN: - qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg; - rp->qos_attr = qos_scn->new_qos_attr; - bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id); - bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id); - bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority); - bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); - - qos_scn->old_qos_attr.qos_flow_id = - bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id); - qos_scn->new_qos_attr.qos_flow_id = - bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id); - qos_scn->old_qos_attr.qos_priority = - bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority); - qos_scn->new_qos_attr.qos_priority = - bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority); - - if (qos_scn->old_qos_attr.qos_flow_id != - qos_scn->new_qos_attr.qos_flow_id) - bfa_cb_rport_qos_scn_flowid(rp->rport_drv, - qos_scn->old_qos_attr, - qos_scn->new_qos_attr); - if (qos_scn->old_qos_attr.qos_priority != - qos_scn->new_qos_attr.qos_priority) - bfa_cb_rport_qos_scn_prio(rp->rport_drv, - qos_scn->old_qos_attr, - qos_scn->new_qos_attr); - break; - - default: - bfa_stats(rp, sm_on_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Firmware rport is being deleted - awaiting f/w response. - */ -static void -bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_FWRSP: - bfa_stats(rp, sm_fwd_rsp); - bfa_sm_set_state(rp, bfa_rport_sm_offline); - bfa_rport_offline_cb(rp); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_fwd_del); - bfa_sm_set_state(rp, bfa_rport_sm_deleting); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_fwd_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - bfa_rport_offline_cb(rp); - break; - - default: - bfa_stats(rp, sm_fwd_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -static void -bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_QRESUME: - bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); - bfa_rport_send_fwdelete(rp); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_fwd_del); - bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_fwd_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - bfa_reqq_wcancel(&rp->reqq_wait); - bfa_rport_offline_cb(rp); - break; - - default: - bfa_stats(rp, sm_fwd_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Offline state. 
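 * Waits for a new online request, a delete, or an IOC failure
 * (descriptive note added here, derived from the handler below).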
- */ -static void -bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_off_del); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_rport_free(rp); - break; - - case BFA_RPORT_SM_ONLINE: - bfa_stats(rp, sm_off_on); - if (bfa_rport_send_fwcreate(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); - else - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_off_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - break; - - default: - bfa_stats(rp, sm_off_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Rport is deleted, waiting for firmware response to delete. - */ -static void -bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_FWRSP: - bfa_stats(rp, sm_del_fwrsp); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_rport_free(rp); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_del_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_rport_free(rp); - break; - - default: - bfa_sm_fault(rp->bfa, event); - } -} - -static void -bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_QRESUME: - bfa_stats(rp, sm_del_fwrsp); - bfa_sm_set_state(rp, bfa_rport_sm_deleting); - bfa_rport_send_fwdelete(rp); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_del_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_reqq_wcancel(&rp->reqq_wait); - bfa_rport_free(rp); - break; - - default: - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Waiting for rport create response from firmware. A delete is pending. - */ -static void -bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, - enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_FWRSP: - bfa_stats(rp, sm_delp_fwrsp); - if (bfa_rport_send_fwdelete(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_deleting); - else - bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_delp_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_rport_free(rp); - break; - - default: - bfa_stats(rp, sm_delp_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * Waiting for rport create response from firmware. Rport offline is pending. - */ -static void -bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, - enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_FWRSP: - bfa_stats(rp, sm_offp_fwrsp); - if (bfa_rport_send_fwdelete(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); - else - bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_offp_del); - bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); - break; - - case BFA_RPORT_SM_HWFAIL: - bfa_stats(rp, sm_offp_hwf); - bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); - break; - - default: - bfa_stats(rp, sm_offp_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - -/** - * IOC h/w failed. 
- */ -static void -bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) -{ - bfa_trc(rp->bfa, rp->rport_tag); - bfa_trc(rp->bfa, event); - - switch (event) { - case BFA_RPORT_SM_OFFLINE: - bfa_stats(rp, sm_iocd_off); - bfa_rport_offline_cb(rp); - break; - - case BFA_RPORT_SM_DELETE: - bfa_stats(rp, sm_iocd_del); - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - bfa_rport_free(rp); - break; - - case BFA_RPORT_SM_ONLINE: - bfa_stats(rp, sm_iocd_on); - if (bfa_rport_send_fwcreate(rp)) - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); - else - bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); - break; - - case BFA_RPORT_SM_HWFAIL: - break; - - default: - bfa_stats(rp, sm_iocd_unexp); - bfa_sm_fault(rp->bfa, event); - } -} - - - -/** - * bfa_rport_private BFA rport private functions - */ - -static void -__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_rport_s *rp = cbarg; - - if (complete) - bfa_cb_rport_online(rp->rport_drv); -} - -static void -__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_rport_s *rp = cbarg; - - if (complete) - bfa_cb_rport_offline(rp->rport_drv); -} - -static void -bfa_rport_qresume(void *cbarg) -{ - struct bfa_rport_s *rp = cbarg; - - bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME); -} - -static void -bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) -{ - if (cfg->fwcfg.num_rports < BFA_RPORT_MIN) - cfg->fwcfg.num_rports = BFA_RPORT_MIN; - - *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s); -} - -static void -bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) -{ - struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); - struct bfa_rport_s *rp; - u16 i; - - INIT_LIST_HEAD(&mod->rp_free_q); - INIT_LIST_HEAD(&mod->rp_active_q); - - rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo); - mod->rps_list = rp; - mod->num_rports = cfg->fwcfg.num_rports; - - bfa_assert(mod->num_rports - && !(mod->num_rports & (mod->num_rports - 1))); - - for (i = 0; i < mod->num_rports; i++, rp++) { - bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s)); - rp->bfa = bfa; - rp->rport_tag = i; - bfa_sm_set_state(rp, bfa_rport_sm_uninit); - - /** - * - is unused - */ - if (i) - list_add_tail(&rp->qe, &mod->rp_free_q); - - bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); - } - - /** - * consume memory - */ - bfa_meminfo_kva(meminfo) = (u8 *) rp; -} - -static void -bfa_rport_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_rport_start(struct bfa_s *bfa) -{ -} - -static void -bfa_rport_stop(struct bfa_s *bfa) -{ -} - -static void -bfa_rport_iocdisable(struct bfa_s *bfa) -{ - struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); - struct bfa_rport_s *rport; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &mod->rp_active_q) { - rport = (struct bfa_rport_s *) qe; - bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); - } -} - -static struct bfa_rport_s * -bfa_rport_alloc(struct bfa_rport_mod_s *mod) -{ - struct bfa_rport_s *rport; - - bfa_q_deq(&mod->rp_free_q, &rport); - if (rport) - list_add_tail(&rport->qe, &mod->rp_active_q); - - return rport; -} - -static void -bfa_rport_free(struct bfa_rport_s *rport) -{ - struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); - - bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport)); - list_del(&rport->qe); - list_add_tail(&rport->qe, &mod->rp_free_q); -} - -static bfa_boolean_t -bfa_rport_send_fwcreate(struct bfa_rport_s *rp) -{ - struct bfi_rport_create_req_s *m; - - 
/** - * check for room in queue to send request now - */ - m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); - if (!m) { - bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, - bfa_lpuid(rp->bfa)); - m->bfa_handle = rp->rport_tag; - m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz); - m->pid = rp->rport_info.pid; - m->lp_tag = rp->rport_info.lp_tag; - m->local_pid = rp->rport_info.local_pid; - m->fc_class = rp->rport_info.fc_class; - m->vf_en = rp->rport_info.vf_en; - m->vf_id = rp->rport_info.vf_id; - m->cisc = rp->rport_info.cisc; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); - return BFA_TRUE; -} - -static bfa_boolean_t -bfa_rport_send_fwdelete(struct bfa_rport_s *rp) -{ - struct bfi_rport_delete_req_s *m; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); - if (!m) { - bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ, - bfa_lpuid(rp->bfa)); - m->fw_handle = rp->fw_handle; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); - return BFA_TRUE; -} - -static bfa_boolean_t -bfa_rport_send_fwspeed(struct bfa_rport_s *rp) -{ - struct bfa_rport_speed_req_s *m; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); - if (!m) { - bfa_trc(rp->bfa, rp->rport_info.speed); - return BFA_FALSE; - } - - bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ, - bfa_lpuid(rp->bfa)); - m->fw_handle = rp->fw_handle; - m->speed = (u8)rp->rport_info.speed; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); - return BFA_TRUE; -} - - - -/** - * bfa_rport_public - */ - -/** - * Rport interrupt processing. 
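 * Dispatches firmware create/delete responses and QoS SCN events to the
 * owning rport's state machine (descriptive note added here, derived from
 * the handler below).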
- */ -void -bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - union bfi_rport_i2h_msg_u msg; - struct bfa_rport_s *rp; - - bfa_trc(bfa, m->mhdr.msg_id); - - msg.msg = m; - - switch (m->mhdr.msg_id) { - case BFI_RPORT_I2H_CREATE_RSP: - rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); - rp->fw_handle = msg.create_rsp->fw_handle; - rp->qos_attr = msg.create_rsp->qos_attr; - bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); - bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); - break; - - case BFI_RPORT_I2H_DELETE_RSP: - rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); - bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); - bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); - break; - - case BFI_RPORT_I2H_QOS_SCN: - rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle); - rp->event_arg.fw_msg = msg.qos_scn_evt; - bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); - break; - - default: - bfa_trc(bfa, m->mhdr.msg_id); - bfa_assert(0); - } -} - - - -/** - * bfa_rport_api - */ - -struct bfa_rport_s * -bfa_rport_create(struct bfa_s *bfa, void *rport_drv) -{ - struct bfa_rport_s *rp; - - rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); - - if (rp == NULL) - return NULL; - - rp->bfa = bfa; - rp->rport_drv = rport_drv; - bfa_rport_clear_stats(rp); - - bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); - bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); - - return rp; -} - -void -bfa_rport_delete(struct bfa_rport_s *rport) -{ - bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE); -} - -void -bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) -{ - bfa_assert(rport_info->max_frmsz != 0); - - /** - * Some JBODs are seen to be not setting PDU size correctly in PLOGI - * responses. Default to minimum size. - */ - if (rport_info->max_frmsz == 0) { - bfa_trc(rport->bfa, rport->rport_tag); - rport_info->max_frmsz = FC_MIN_PDUSZ; - } - - bfa_os_assign(rport->rport_info, *rport_info); - bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); -} - -void -bfa_rport_offline(struct bfa_rport_s *rport) -{ - bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE); -} - -void -bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed) -{ - bfa_assert(speed != 0); - bfa_assert(speed != BFA_PPORT_SPEED_AUTO); - - rport->rport_info.speed = speed; - bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); -} - -void -bfa_rport_get_stats(struct bfa_rport_s *rport, - struct bfa_rport_hal_stats_s *stats) -{ - *stats = rport->stats; -} - -void -bfa_rport_get_qos_attr(struct bfa_rport_s *rport, - struct bfa_rport_qos_attr_s *qos_attr) -{ - qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority); - qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id); - -} - -void -bfa_rport_clear_stats(struct bfa_rport_s *rport) -{ - bfa_os_memset(&rport->stats, 0, sizeof(rport->stats)); -} - - diff --git a/drivers/scsi/bfa/bfa_rport_priv.h b/drivers/scsi/bfa/bfa_rport_priv.h deleted file mode 100644 index 6490ce2e990d..000000000000 --- a/drivers/scsi/bfa/bfa_rport_priv.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_RPORT_PRIV_H__ -#define __BFA_RPORT_PRIV_H__ - -#include - -#define BFA_RPORT_MIN 4 - -struct bfa_rport_mod_s { - struct bfa_rport_s *rps_list; /* list of rports */ - struct list_head rp_free_q; /* free bfa_rports */ - struct list_head rp_active_q; /* free bfa_rports */ - u16 num_rports; /* number of rports */ -}; - -#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) - -/** - * Convert rport tag to RPORT - */ -#define BFA_RPORT_FROM_TAG(__bfa, _tag) \ - (BFA_RPORT_MOD(__bfa)->rps_list + \ - ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1))) - -/* - * external functions - */ -void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -#endif /* __BFA_RPORT_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_sgpg.c b/drivers/scsi/bfa/bfa_sgpg.c deleted file mode 100644 index ae452c42e40e..000000000000 --- a/drivers/scsi/bfa/bfa_sgpg.c +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include - -BFA_TRC_FILE(HAL, SGPG); -BFA_MODULE(sgpg); - -/** - * bfa_sgpg_mod BFA SGPG Mode module - */ - -/** - * Compute and return memory needed by FCP(im) module. 
- */ -static void -bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, - u32 *dm_len) -{ - if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) - cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; - - *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s); - *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s); -} - - -static void -bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev) -{ - struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); - int i; - struct bfa_sgpg_s *hsgpg; - struct bfi_sgpg_s *sgpg; - u64 align_len; - - union { - u64 pa; - union bfi_addr_u addr; - } sgpg_pa; - - INIT_LIST_HEAD(&mod->sgpg_q); - INIT_LIST_HEAD(&mod->sgpg_wait_q); - - bfa_trc(bfa, cfg->drvcfg.num_sgpgs); - - mod->num_sgpgs = cfg->drvcfg.num_sgpgs; - mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo); - align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa); - mod->sgpg_arr_pa += align_len; - mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) + - align_len); - mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) + - align_len); - - hsgpg = mod->hsgpg_arr; - sgpg = mod->sgpg_arr; - sgpg_pa.pa = mod->sgpg_arr_pa; - mod->free_sgpgs = mod->num_sgpgs; - - bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); - - for (i = 0; i < mod->num_sgpgs; i++) { - bfa_os_memset(hsgpg, 0, sizeof(*hsgpg)); - bfa_os_memset(sgpg, 0, sizeof(*sgpg)); - - hsgpg->sgpg = sgpg; - hsgpg->sgpg_pa = sgpg_pa.addr; - list_add_tail(&hsgpg->qe, &mod->sgpg_q); - - hsgpg++; - sgpg++; - sgpg_pa.pa += sizeof(struct bfi_sgpg_s); - } - - bfa_meminfo_kva(minfo) = (u8 *) hsgpg; - bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg; - bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa; -} - -static void -bfa_sgpg_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_sgpg_start(struct bfa_s *bfa) -{ -} - -static void -bfa_sgpg_stop(struct bfa_s *bfa) -{ -} - -static void -bfa_sgpg_iocdisable(struct bfa_s *bfa) -{ -} - - - -/** - * bfa_sgpg_public BFA SGPG public functions - */ - -bfa_status_t -bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) -{ - struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); - struct bfa_sgpg_s *hsgpg; - int i; - - bfa_trc_fp(bfa, nsgpgs); - - if (mod->free_sgpgs < nsgpgs) - return BFA_STATUS_ENOMEM; - - for (i = 0; i < nsgpgs; i++) { - bfa_q_deq(&mod->sgpg_q, &hsgpg); - bfa_assert(hsgpg); - list_add_tail(&hsgpg->qe, sgpg_q); - } - - mod->free_sgpgs -= nsgpgs; - return BFA_STATUS_OK; -} - -void -bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) -{ - struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); - struct bfa_sgpg_wqe_s *wqe; - - bfa_trc_fp(bfa, nsgpg); - - mod->free_sgpgs += nsgpg; - bfa_assert(mod->free_sgpgs <= mod->num_sgpgs); - - list_splice_tail_init(sgpg_q, &mod->sgpg_q); - - if (list_empty(&mod->sgpg_wait_q)) - return; - - /** - * satisfy as many waiting requests as possible - */ - do { - wqe = bfa_q_first(&mod->sgpg_wait_q); - if (mod->free_sgpgs < wqe->nsgpg) - nsgpg = mod->free_sgpgs; - else - nsgpg = wqe->nsgpg; - bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); - wqe->nsgpg -= nsgpg; - if (wqe->nsgpg == 0) { - list_del(&wqe->qe); - wqe->cbfn(wqe->cbarg); - } - } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q)); -} - -void -bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) -{ - struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); - - bfa_assert(nsgpg > 0); - bfa_assert(nsgpg > mod->free_sgpgs); - - wqe->nsgpg_total = wqe->nsgpg = nsgpg; - - /** - * 
allocate any left to this one first - */ - if (mod->free_sgpgs) { - /** - * no one else is waiting for SGPG - */ - bfa_assert(list_empty(&mod->sgpg_wait_q)); - list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); - wqe->nsgpg -= mod->free_sgpgs; - mod->free_sgpgs = 0; - } - - list_add_tail(&wqe->qe, &mod->sgpg_wait_q); -} - -void -bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) -{ - struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); - - bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); - list_del(&wqe->qe); - - if (wqe->nsgpg_total != wqe->nsgpg) - bfa_sgpg_mfree(bfa, &wqe->sgpg_q, - wqe->nsgpg_total - wqe->nsgpg); -} - -void -bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), - void *cbarg) -{ - INIT_LIST_HEAD(&wqe->sgpg_q); - wqe->cbfn = cbfn; - wqe->cbarg = cbarg; -} - - diff --git a/drivers/scsi/bfa/bfa_sgpg_priv.h b/drivers/scsi/bfa/bfa_sgpg_priv.h deleted file mode 100644 index 9c2a8cbe7522..000000000000 --- a/drivers/scsi/bfa/bfa_sgpg_priv.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * hal_sgpg.h BFA SG page module - */ - -#ifndef __BFA_SGPG_PRIV_H__ -#define __BFA_SGPG_PRIV_H__ - -#include - -#define BFA_SGPG_MIN (16) - -/** - * Alignment macro for SG page allocation - */ -#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \ - & ~(sizeof(struct bfi_sgpg_s) - 1)) - -struct bfa_sgpg_wqe_s { - struct list_head qe; /* queue sg page element */ - int nsgpg; /* pages to be allocated */ - int nsgpg_total; /* total pages required */ - void (*cbfn) (void *cbarg); - /* callback function */ - void *cbarg; /* callback arg */ - struct list_head sgpg_q; /* queue of alloced sgpgs */ -}; - -struct bfa_sgpg_s { - struct list_head qe; /* queue sg page element */ - struct bfi_sgpg_s *sgpg; /* va of SG page */ - union bfi_addr_u sgpg_pa;/* pa of SG page */ -}; - -/** - * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of - * SG pages required. 
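 * For example, assuming seven data SGEs per SG page (the real divisor is
 * BFI_SGPG_DATA_SGES, which is not shown here), a request with 10 SGEs
 * maps to 10/7 + 1 = 2 SG pages.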
- */ -#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1) - -struct bfa_sgpg_mod_s { - struct bfa_s *bfa; - int num_sgpgs; /* number of SG pages */ - int free_sgpgs; /* number of free SG pages */ - struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */ - struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */ - u64 sgpg_arr_pa; /* SG page array DMA addr */ - struct list_head sgpg_q; /* queue of free SG pages */ - struct list_head sgpg_wait_q; /* wait queue for SG pages */ -}; -#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod) - -bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, - int nsgpgs); -void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, - int nsgpgs); -void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, - void (*cbfn) (void *cbarg), void *cbarg); -void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, - int nsgpgs); -void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); - -#endif /* __BFA_SGPG_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_sm.c b/drivers/scsi/bfa/bfa_sm.c deleted file mode 100644 index 5420f4f45e58..000000000000 --- a/drivers/scsi/bfa/bfa_sm.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfasm.c BFA State machine utility functions - */ - -#include - -/** - * cs_sm_api - */ - -int -bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm) -{ - int i = 0; - - while (smt[i].sm && smt[i].sm != sm) - i++; - return smt[i].state; -} - - diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c new file mode 100644 index 000000000000..aa1dc749b281 --- /dev/null +++ b/drivers/scsi/bfa/bfa_svc.c @@ -0,0 +1,5423 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include "bfa_os_inc.h" +#include "bfa_plog.h" +#include "bfa_cs.h" +#include "bfa_modules.h" +#include "bfad_drv.h" + +BFA_TRC_FILE(HAL, FCXP); +BFA_MODULE(fcxp); +BFA_MODULE(sgpg); +BFA_MODULE(lps); +BFA_MODULE(fcport); +BFA_MODULE(rport); +BFA_MODULE(uf); + +/** + * LPS related definitions + */ +#define BFA_LPS_MIN_LPORTS (1) +#define BFA_LPS_MAX_LPORTS (256) + +/* + * Maximum Vports supported per physical port or vf. 
+ */ +#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 +#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 + +/** + * lps_pvt BFA LPS private functions + */ + +enum bfa_lps_event { + BFA_LPS_SM_LOGIN = 1, /* login request from user */ + BFA_LPS_SM_LOGOUT = 2, /* logout request from user */ + BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */ + BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ + BFA_LPS_SM_DELETE = 5, /* lps delete from user */ + BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ + BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ +}; + +/** + * FC PORT related definitions + */ +/* + * The port is considered disabled if corresponding physical port or IOC are + * disabled explicitly + */ +#define BFA_PORT_IS_DISABLED(bfa) \ + ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ + (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) + + +/** + * BFA port state machine events + */ +enum bfa_fcport_sm_event { + BFA_FCPORT_SM_START = 1, /* start port state machine */ + BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ + BFA_FCPORT_SM_ENABLE = 3, /* enable port */ + BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ + BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ + BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ + BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ + BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ + BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ +}; + +/** + * BFA port link notification state machine events + */ + +enum bfa_fcport_ln_sm_event { + BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ + BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ + BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ +}; + +/** + * RPORT related definitions + */ +#define bfa_rport_offline_cb(__rp) do { \ + if ((__rp)->bfa->fcs) \ + bfa_cb_rport_offline((__rp)->rport_drv); \ + else { \ + bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ + __bfa_cb_rport_offline, (__rp)); \ + } \ +} while (0) + +#define bfa_rport_online_cb(__rp) do { \ + if ((__rp)->bfa->fcs) \ + bfa_cb_rport_online((__rp)->rport_drv); \ + else { \ + bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \ + __bfa_cb_rport_online, (__rp)); \ + } \ +} while (0) + + +enum bfa_rport_event { + BFA_RPORT_SM_CREATE = 1, /* rport create event */ + BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */ + BFA_RPORT_SM_ONLINE = 3, /* rport is online */ + BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */ + BFA_RPORT_SM_FWRSP = 5, /* firmware response */ + BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */ + BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */ + BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */ + BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ +}; + +/** + * forward declarations FCXP related functions + */ +static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); +static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, + struct bfi_fcxp_send_rsp_s *fcxp_rsp); +static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, + struct bfa_fcxp_s *fcxp, struct fchs_s *fchs); +static void bfa_fcxp_qresume(void *cbarg); +static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, + struct bfi_fcxp_send_req_s *send_req); + +/** + * forward declarations for LPS functions + */ +static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, + u32 *dm_len); +static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, + struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, + struct bfa_pcidev_s *pcidev); +static void bfa_lps_detach(struct bfa_s *bfa); +static void 
bfa_lps_start(struct bfa_s *bfa); +static void bfa_lps_stop(struct bfa_s *bfa); +static void bfa_lps_iocdisable(struct bfa_s *bfa); +static void bfa_lps_login_rsp(struct bfa_s *bfa, + struct bfi_lps_login_rsp_s *rsp); +static void bfa_lps_logout_rsp(struct bfa_s *bfa, + struct bfi_lps_logout_rsp_s *rsp); +static void bfa_lps_reqq_resume(void *lps_arg); +static void bfa_lps_free(struct bfa_lps_s *lps); +static void bfa_lps_send_login(struct bfa_lps_s *lps); +static void bfa_lps_send_logout(struct bfa_lps_s *lps); +static void bfa_lps_login_comp(struct bfa_lps_s *lps); +static void bfa_lps_logout_comp(struct bfa_lps_s *lps); +static void bfa_lps_cvl_event(struct bfa_lps_s *lps); + +/** + * forward declaration for LPS state machine + */ +static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event + event); +static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); +static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event + event); + +/** + * forward declaration for FC Port functions + */ +static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); +static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); +static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); +static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); +static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); +static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); +static void bfa_fcport_scn(struct bfa_fcport_s *fcport, + enum bfa_port_linkstate event, bfa_boolean_t trunk); +static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, + enum bfa_port_linkstate event); +static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); +static void bfa_fcport_stats_get_timeout(void *cbarg); +static void bfa_fcport_stats_clr_timeout(void *cbarg); +static void bfa_trunk_iocdisable(struct bfa_s *bfa); + +/** + * forward declaration for FC PORT state machine + */ +static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); + +static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static 
void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); + +static struct bfa_sm_table_s hal_port_sm_table[] = { + {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT}, + {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING}, + {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN}, + {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP}, + {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING}, + {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED}, + {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED}, + {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, + {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, +}; + + +/** + * forward declaration for RPORT related functions + */ +static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); +static void bfa_rport_free(struct bfa_rport_s *rport); +static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp); +static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp); +static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp); +static void __bfa_cb_rport_online(void *cbarg, + bfa_boolean_t complete); +static void __bfa_cb_rport_offline(void *cbarg, + bfa_boolean_t complete); + +/** + * forward declaration for RPORT state machine + */ +static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_created(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_online(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_offline(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_deleting(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, + enum bfa_rport_event event); +static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, + enum bfa_rport_event event); + +/** + * PLOG related definitions + */ +static int +plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec) +{ + if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && + (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING)) + return 1; + + if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) && + (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ)) + return 1; + + 
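	/* record passed the log-type and int-count checks above */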
return 0; +} + +static void +bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) +{ + u16 tail; + struct bfa_plog_rec_s *pl_recp; + + if (plog->plog_enabled == 0) + return; + + if (plkd_validate_logrec(pl_rec)) { + bfa_assert(0); + return; + } + + tail = plog->tail; + + pl_recp = &(plog->plog_recs[tail]); + + bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); + + pl_recp->tv = bfa_os_get_log_time(); + BFA_PL_LOG_REC_INCR(plog->tail); + + if (plog->head == plog->tail) + BFA_PL_LOG_REC_INCR(plog->head); +} + +void +bfa_plog_init(struct bfa_plog_s *plog) +{ + bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s)); + + bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); + plog->head = plog->tail = 0; + plog->plog_enabled = 1; +} + +void +bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, + u16 misc, char *log_str) +{ + struct bfa_plog_rec_s lp; + + if (plog->plog_enabled) { + bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + lp.mid = mid; + lp.eid = event; + lp.log_type = BFA_PL_LOG_TYPE_STRING; + lp.misc = misc; + strncpy(lp.log_entry.string_log, log_str, + BFA_PL_STRING_LOG_SZ - 1); + lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; + bfa_plog_add(plog, &lp); + } +} + +void +bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, + u16 misc, u32 *intarr, u32 num_ints) +{ + struct bfa_plog_rec_s lp; + u32 i; + + if (num_ints > BFA_PL_INT_LOG_SZ) + num_ints = BFA_PL_INT_LOG_SZ; + + if (plog->plog_enabled) { + bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + lp.mid = mid; + lp.eid = event; + lp.log_type = BFA_PL_LOG_TYPE_INT; + lp.misc = misc; + + for (i = 0; i < num_ints; i++) + bfa_os_assign(lp.log_entry.int_log[i], + intarr[i]); + + lp.log_num_ints = (u8) num_ints; + + bfa_plog_add(plog, &lp); + } +} + +void +bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, + u16 misc, struct fchs_s *fchdr) +{ + struct bfa_plog_rec_s lp; + u32 *tmp_int = (u32 *) fchdr; + u32 ints[BFA_PL_INT_LOG_SZ]; + + if (plog->plog_enabled) { + bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + + ints[0] = tmp_int[0]; + ints[1] = tmp_int[1]; + ints[2] = tmp_int[4]; + + bfa_plog_intarr(plog, mid, event, misc, ints, 3); + } +} + +void +bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, + enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr, + u32 pld_w0) +{ + struct bfa_plog_rec_s lp; + u32 *tmp_int = (u32 *) fchdr; + u32 ints[BFA_PL_INT_LOG_SZ]; + + if (plog->plog_enabled) { + bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + + ints[0] = tmp_int[0]; + ints[1] = tmp_int[1]; + ints[2] = tmp_int[4]; + ints[3] = pld_w0; + + bfa_plog_intarr(plog, mid, event, misc, ints, 4); + } +} + +void +bfa_plog_clear(struct bfa_plog_s *plog) +{ + plog->head = plog->tail = 0; +} + +void +bfa_plog_enable(struct bfa_plog_s *plog) +{ + plog->plog_enabled = 1; +} + +void +bfa_plog_disable(struct bfa_plog_s *plog) +{ + plog->plog_enabled = 0; +} + +bfa_boolean_t +bfa_plog_get_setting(struct bfa_plog_s *plog) +{ + return (bfa_boolean_t)plog->plog_enabled; +} + +/** + * fcxp_pvt BFA FCXP private functions + */ + +static void +claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) +{ + u8 *dm_kva = NULL; + u64 dm_pa; + u32 buf_pool_sz; + + dm_kva = bfa_meminfo_dma_virt(mi); + dm_pa = bfa_meminfo_dma_phys(mi); + + buf_pool_sz = mod->req_pld_sz * mod->num_fcxps; + + /* + * Initialize the fcxp req payload list + */ + 
mod->req_pld_list_kva = dm_kva; + mod->req_pld_list_pa = dm_pa; + dm_kva += buf_pool_sz; + dm_pa += buf_pool_sz; + bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz); + + /* + * Initialize the fcxp rsp payload list + */ + buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps; + mod->rsp_pld_list_kva = dm_kva; + mod->rsp_pld_list_pa = dm_pa; + dm_kva += buf_pool_sz; + dm_pa += buf_pool_sz; + bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); + + bfa_meminfo_dma_virt(mi) = dm_kva; + bfa_meminfo_dma_phys(mi) = dm_pa; +} + +static void +claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) +{ + u16 i; + struct bfa_fcxp_s *fcxp; + + fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); + bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); + + INIT_LIST_HEAD(&mod->fcxp_free_q); + INIT_LIST_HEAD(&mod->fcxp_active_q); + + mod->fcxp_list = fcxp; + + for (i = 0; i < mod->num_fcxps; i++) { + fcxp->fcxp_mod = mod; + fcxp->fcxp_tag = i; + + list_add_tail(&fcxp->qe, &mod->fcxp_free_q); + bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp); + fcxp->reqq_waiting = BFA_FALSE; + + fcxp = fcxp + 1; + } + + bfa_meminfo_kva(mi) = (void *)fcxp; +} + +static void +bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, + u32 *dm_len) +{ + u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs; + + if (num_fcxp_reqs == 0) + return; + + /* + * Account for req/rsp payload + */ + *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; + if (cfg->drvcfg.min_cfg) + *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs; + else + *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs; + + /* + * Account for fcxp structs + */ + *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs; +} + +static void +bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); + mod->bfa = bfa; + mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; + + /** + * Initialize FCXP request and response payload sizes. 
+ */ + mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; + if (!cfg->drvcfg.min_cfg) + mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ; + + INIT_LIST_HEAD(&mod->wait_q); + + claim_fcxp_req_rsp_mem(mod, meminfo); + claim_fcxps_mem(mod, meminfo); +} + +static void +bfa_fcxp_detach(struct bfa_s *bfa) +{ +} + +static void +bfa_fcxp_start(struct bfa_s *bfa) +{ +} + +static void +bfa_fcxp_stop(struct bfa_s *bfa) +{ +} + +static void +bfa_fcxp_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + struct bfa_fcxp_s *fcxp; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &mod->fcxp_active_q) { + fcxp = (struct bfa_fcxp_s *) qe; + if (fcxp->caller == NULL) { + fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, + BFA_STATUS_IOC_FAILURE, 0, 0, NULL); + bfa_fcxp_free(fcxp); + } else { + fcxp->rsp_status = BFA_STATUS_IOC_FAILURE; + bfa_cb_queue(bfa, &fcxp->hcb_qe, + __bfa_fcxp_send_cbfn, fcxp); + } + } +} + +static struct bfa_fcxp_s * +bfa_fcxp_get(struct bfa_fcxp_mod_s *fm) +{ + struct bfa_fcxp_s *fcxp; + + bfa_q_deq(&fm->fcxp_free_q, &fcxp); + + if (fcxp) + list_add_tail(&fcxp->qe, &fm->fcxp_active_q); + + return fcxp; +} + +static void +bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp, + struct bfa_s *bfa, + u8 *use_ibuf, + u32 *nr_sgles, + bfa_fcxp_get_sgaddr_t *r_sga_cbfn, + bfa_fcxp_get_sglen_t *r_sglen_cbfn, + struct list_head *r_sgpg_q, + int n_sgles, + bfa_fcxp_get_sgaddr_t sga_cbfn, + bfa_fcxp_get_sglen_t sglen_cbfn) +{ + + bfa_assert(bfa != NULL); + + bfa_trc(bfa, fcxp->fcxp_tag); + + if (n_sgles == 0) { + *use_ibuf = 1; + } else { + bfa_assert(*sga_cbfn != NULL); + bfa_assert(*sglen_cbfn != NULL); + + *use_ibuf = 0; + *r_sga_cbfn = sga_cbfn; + *r_sglen_cbfn = sglen_cbfn; + + *nr_sgles = n_sgles; + + /* + * alloc required sgpgs + */ + if (n_sgles > BFI_SGE_INLINE) + bfa_assert(0); + } + +} + +static void +bfa_fcxp_init(struct bfa_fcxp_s *fcxp, + void *caller, struct bfa_s *bfa, int nreq_sgles, + int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, + bfa_fcxp_get_sglen_t req_sglen_cbfn, + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, + bfa_fcxp_get_sglen_t rsp_sglen_cbfn) +{ + + bfa_assert(bfa != NULL); + + bfa_trc(bfa, fcxp->fcxp_tag); + + fcxp->caller = caller; + + bfa_fcxp_init_reqrsp(fcxp, bfa, + &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn, + &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q, + nreq_sgles, req_sga_cbfn, req_sglen_cbfn); + + bfa_fcxp_init_reqrsp(fcxp, bfa, + &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn, + &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q, + nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn); + +} + +static void +bfa_fcxp_put(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + struct bfa_fcxp_wqe_s *wqe; + + bfa_q_deq(&mod->wait_q, &wqe); + if (wqe) { + bfa_trc(mod->bfa, fcxp->fcxp_tag); + + bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles, + wqe->nrsp_sgles, wqe->req_sga_cbfn, + wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn, + wqe->rsp_sglen_cbfn); + + wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); + return; + } + + bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); + list_del(&fcxp->qe); + list_add_tail(&fcxp->qe, &mod->fcxp_free_q); +} + +static void +bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, + bfa_status_t req_status, u32 rsp_len, + u32 resid_len, struct fchs_s *rsp_fchs) +{ + /* discarded fcxp completion */ +} + +static void +__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcxp_s *fcxp = cbarg; + + if (complete) { + 
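		/* deliver the saved response (status, length, residue, FC header) to the caller */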
fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, + fcxp->rsp_status, fcxp->rsp_len, + fcxp->residue_len, &fcxp->rsp_fchs); + } else { + bfa_fcxp_free(fcxp); + } +} + +static void +hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + struct bfa_fcxp_s *fcxp; + u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag); + + bfa_trc(bfa, fcxp_tag); + + fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len); + + /** + * @todo f/w should not set residue to non-0 when everything + * is received. + */ + if (fcxp_rsp->req_status == BFA_STATUS_OK) + fcxp_rsp->residue_len = 0; + else + fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len); + + fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); + + bfa_assert(fcxp->send_cbfn != NULL); + + hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); + + if (fcxp->send_cbfn != NULL) { + bfa_trc(mod->bfa, (NULL == fcxp->caller)); + if (fcxp->caller == NULL) { + fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg, + fcxp_rsp->req_status, fcxp_rsp->rsp_len, + fcxp_rsp->residue_len, &fcxp_rsp->fchs); + /* + * fcxp automatically freed on return from the callback + */ + bfa_fcxp_free(fcxp); + } else { + fcxp->rsp_status = fcxp_rsp->req_status; + fcxp->rsp_len = fcxp_rsp->rsp_len; + fcxp->residue_len = fcxp_rsp->residue_len; + fcxp->rsp_fchs = fcxp_rsp->fchs; + + bfa_cb_queue(bfa, &fcxp->hcb_qe, + __bfa_fcxp_send_cbfn, fcxp); + } + } else { + bfa_trc(bfa, (NULL == fcxp->send_cbfn)); + } +} + +static void +hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa) +{ + union bfi_addr_u sga_zero = { {0} }; + + sge->sg_len = reqlen; + sge->flags = BFI_SGE_DATA_LAST; + bfa_dma_addr_set(sge[0].sga, req_pa); + bfa_sge_to_be(sge); + sge++; + + sge->sga = sga_zero; + sge->sg_len = reqlen; + sge->flags = BFI_SGE_PGDLEN; + bfa_sge_to_be(sge); +} + +static void +hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp, + struct fchs_s *fchs) +{ + /* + * TODO: TX ox_id + */ + if (reqlen > 0) { + if (fcxp->use_ireqbuf) { + u32 pld_w0 = + *((u32 *) BFA_FCXP_REQ_PLD(fcxp)); + + bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_TX, + reqlen + sizeof(struct fchs_s), fchs, + pld_w0); + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_TX, + reqlen + sizeof(struct fchs_s), + fchs); + } + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX, + reqlen + sizeof(struct fchs_s), fchs); + } +} + +static void +hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, + struct bfi_fcxp_send_rsp_s *fcxp_rsp) +{ + if (fcxp_rsp->rsp_len > 0) { + if (fcxp->use_irspbuf) { + u32 pld_w0 = + *((u32 *) BFA_FCXP_RSP_PLD(fcxp)); + + bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_RX, + (u16) fcxp_rsp->rsp_len, + &fcxp_rsp->fchs, pld_w0); + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, + BFA_PL_EID_RX, + (u16) fcxp_rsp->rsp_len, + &fcxp_rsp->fchs); + } + } else { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX, + (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs); + } +} + +/** + * Handler to resume sending fcxp when space in available in cpe queue. + */ +static void +bfa_fcxp_qresume(void *cbarg) +{ + struct bfa_fcxp_s *fcxp = cbarg; + struct bfa_s *bfa = fcxp->fcxp_mod->bfa; + struct bfi_fcxp_send_req_s *send_req; + + fcxp->reqq_waiting = BFA_FALSE; + send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); + bfa_fcxp_queue(fcxp, send_req); +} + +/** + * Queue fcxp send request to foimrware. 
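 * Builds the bfi_fcxp_send_req_s message, sets up the request and response
 * scatter-gather entries and produces the message on the FCXP request queue
 * (descriptive note added here, derived from the function below).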
+ */ +static void +bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) +{ + struct bfa_s *bfa = fcxp->fcxp_mod->bfa; + struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; + struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; + struct bfa_rport_s *rport = reqi->bfa_rport; + + bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, + bfa_lpuid(bfa)); + + send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag); + if (rport) { + send_req->rport_fw_hndl = rport->fw_handle; + send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz); + if (send_req->max_frmsz == 0) + send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); + } else { + send_req->rport_fw_hndl = 0; + send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); + } + + send_req->vf_id = bfa_os_htons(reqi->vf_id); + send_req->lp_tag = reqi->lp_tag; + send_req->class = reqi->class; + send_req->rsp_timeout = rspi->rsp_timeout; + send_req->cts = reqi->cts; + send_req->fchs = reqi->fchs; + + send_req->req_len = bfa_os_htonl(reqi->req_tot_len); + send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen); + + /* + * setup req sgles + */ + if (fcxp->use_ireqbuf == 1) { + hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len, + BFA_FCXP_REQ_PLD_PA(fcxp)); + } else { + if (fcxp->nreq_sgles > 0) { + bfa_assert(fcxp->nreq_sgles == 1); + hal_fcxp_set_local_sges(send_req->req_sge, + reqi->req_tot_len, + fcxp->req_sga_cbfn(fcxp->caller, + 0)); + } else { + bfa_assert(reqi->req_tot_len == 0); + hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); + } + } + + /* + * setup rsp sgles + */ + if (fcxp->use_irspbuf == 1) { + bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ); + + hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, + BFA_FCXP_RSP_PLD_PA(fcxp)); + + } else { + if (fcxp->nrsp_sgles > 0) { + bfa_assert(fcxp->nrsp_sgles == 1); + hal_fcxp_set_local_sges(send_req->rsp_sge, + rspi->rsp_maxlen, + fcxp->rsp_sga_cbfn(fcxp->caller, + 0)); + } else { + bfa_assert(rspi->rsp_maxlen == 0); + hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); + } + } + + hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs); + + bfa_reqq_produce(bfa, BFA_REQQ_FCXP); + + bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP)); + bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); +} + +/** + * hal_fcxp_api BFA FCXP API + */ + +/** + * Allocate an FCXP instance to send a response or to send a request + * that has a response. Request/response buffers are allocated by caller. + * + * @param[in] bfa BFA bfa instance + * @param[in] nreq_sgles Number of SG elements required for request + * buffer. 0, if fcxp internal buffers are used. + * Use bfa_fcxp_get_reqbuf() to get the + * internal req buffer. + * @param[in] req_sgles SG elements describing request buffer. Will be + * copied in by BFA and hence can be freed on + * return from this function. + * @param[in] get_req_sga function ptr to be called to get a request SG + * Address (given the sge index). + * @param[in] get_req_sglen function ptr to be called to get a request SG + * len (given the sge index). + * @param[in] get_rsp_sga function ptr to be called to get a response SG + * Address (given the sge index). + * @param[in] get_rsp_sglen function ptr to be called to get a response SG + * len (given the sge index). + * + * @return FCXP instance. NULL on failure. 
+ */ +struct bfa_fcxp_s * +bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, + int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, + bfa_fcxp_get_sglen_t req_sglen_cbfn, + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, + bfa_fcxp_get_sglen_t rsp_sglen_cbfn) +{ + struct bfa_fcxp_s *fcxp = NULL; + + bfa_assert(bfa != NULL); + + fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); + if (fcxp == NULL) + return NULL; + + bfa_trc(bfa, fcxp->fcxp_tag); + + bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn, + req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn); + + return fcxp; +} + +/** + * Get the internal request buffer pointer + * + * @param[in] fcxp BFA fcxp pointer + * + * @return pointer to the internal request buffer + */ +void * +bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + void *reqbuf; + + bfa_assert(fcxp->use_ireqbuf == 1); + reqbuf = ((u8 *)mod->req_pld_list_kva) + + fcxp->fcxp_tag * mod->req_pld_sz; + return reqbuf; +} + +u32 +bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + + return mod->req_pld_sz; +} + +/** + * Get the internal response buffer pointer + * + * @param[in] fcxp BFA fcxp pointer + * + * @return pointer to the internal request buffer + */ +void * +bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + void *rspbuf; + + bfa_assert(fcxp->use_irspbuf == 1); + + rspbuf = ((u8 *)mod->rsp_pld_list_kva) + + fcxp->fcxp_tag * mod->rsp_pld_sz; + return rspbuf; +} + +/** + * Free the BFA FCXP + * + * @param[in] fcxp BFA fcxp pointer + * + * @return void + */ +void +bfa_fcxp_free(struct bfa_fcxp_s *fcxp) +{ + struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; + + bfa_assert(fcxp != NULL); + bfa_trc(mod->bfa, fcxp->fcxp_tag); + bfa_fcxp_put(fcxp); +} + +/** + * Send a FCXP request + * + * @param[in] fcxp BFA fcxp pointer + * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports + * @param[in] vf_id virtual Fabric ID + * @param[in] lp_tag lport tag + * @param[in] cts use Continous sequence + * @param[in] cos fc Class of Service + * @param[in] reqlen request length, does not include FCHS length + * @param[in] fchs fc Header Pointer. The header content will be copied + * in by BFA. + * + * @param[in] cbfn call back function to be called on receiving + * the response + * @param[in] cbarg arg for cbfn + * @param[in] rsp_timeout + * response timeout + * + * @return bfa_status_t + */ +void +bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, + u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos, + u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn, + void *cbarg, u32 rsp_maxlen, u8 rsp_timeout) +{ + struct bfa_s *bfa = fcxp->fcxp_mod->bfa; + struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info; + struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info; + struct bfi_fcxp_send_req_s *send_req; + + bfa_trc(bfa, fcxp->fcxp_tag); + + /** + * setup request/response info + */ + reqi->bfa_rport = rport; + reqi->vf_id = vf_id; + reqi->lp_tag = lp_tag; + reqi->class = cos; + rspi->rsp_timeout = rsp_timeout; + reqi->cts = cts; + reqi->fchs = *fchs; + reqi->req_tot_len = reqlen; + rspi->rsp_maxlen = rsp_maxlen; + fcxp->send_cbfn = cbfn ? 
cbfn : bfa_fcxp_null_comp; + fcxp->send_cbarg = cbarg; + + /** + * If no room in CPE queue, wait for space in request queue + */ + send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); + if (!send_req) { + bfa_trc(bfa, fcxp->fcxp_tag); + fcxp->reqq_waiting = BFA_TRUE; + bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe); + return; + } + + bfa_fcxp_queue(fcxp, send_req); +} + +/** + * Abort a BFA FCXP + * + * @param[in] fcxp BFA fcxp pointer + * + * @return void + */ +bfa_status_t +bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) +{ + bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); + bfa_assert(0); + return BFA_STATUS_OK; +} + +void +bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, + bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, + void *caller, int nreq_sgles, + int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, + bfa_fcxp_get_sglen_t req_sglen_cbfn, + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, + bfa_fcxp_get_sglen_t rsp_sglen_cbfn) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + bfa_assert(list_empty(&mod->fcxp_free_q)); + + wqe->alloc_cbfn = alloc_cbfn; + wqe->alloc_cbarg = alloc_cbarg; + wqe->caller = caller; + wqe->bfa = bfa; + wqe->nreq_sgles = nreq_sgles; + wqe->nrsp_sgles = nrsp_sgles; + wqe->req_sga_cbfn = req_sga_cbfn; + wqe->req_sglen_cbfn = req_sglen_cbfn; + wqe->rsp_sga_cbfn = rsp_sga_cbfn; + wqe->rsp_sglen_cbfn = rsp_sglen_cbfn; + + list_add_tail(&wqe->qe, &mod->wait_q); +} + +void +bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe)); + list_del(&wqe->qe); +} + +void +bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) +{ + /** + * If waiting for room in request queue, cancel reqq wait + * and free fcxp. + */ + if (fcxp->reqq_waiting) { + fcxp->reqq_waiting = BFA_FALSE; + bfa_reqq_wcancel(&fcxp->reqq_wqe); + bfa_fcxp_free(fcxp); + return; + } + + fcxp->send_cbfn = bfa_fcxp_null_comp; +} + + + +/** + * hal_fcxp_public BFA FCXP public functions + */ + +void +bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + switch (msg->mhdr.msg_id) { + case BFI_FCXP_I2H_SEND_RSP: + hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg); + break; + + default: + bfa_trc(bfa, msg->mhdr.msg_id); + bfa_assert(0); + } +} + +u32 +bfa_fcxp_get_maxrsp(struct bfa_s *bfa) +{ + struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); + + return mod->rsp_pld_sz; +} + + +/** + * BFA LPS state machine functions + */ + +/** + * Init state -- no login + */ +static void +bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_LOGIN: + if (bfa_reqq_full(lps->bfa, lps->reqq)) { + bfa_sm_set_state(lps, bfa_lps_sm_loginwait); + bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); + } else { + bfa_sm_set_state(lps, bfa_lps_sm_login); + bfa_lps_send_login(lps); + } + + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FDISC Request"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FLOGI Request"); + break; + + case BFA_LPS_SM_LOGOUT: + bfa_lps_logout_comp(lps); + break; + + case BFA_LPS_SM_DELETE: + bfa_lps_free(lps); + break; + + case BFA_LPS_SM_RX_CVL: + case BFA_LPS_SM_OFFLINE: + break; + + case BFA_LPS_SM_FWRSP: + /* + * Could happen when fabric detects loopback and discards + * the lps request. 
Fw will eventually sent out the timeout + * Just ignore + */ + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/** + * login is in progress -- awaiting response from firmware + */ +static void +bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_FWRSP: + if (lps->status == BFA_STATUS_OK) { + bfa_sm_set_state(lps, bfa_lps_sm_online); + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FDISC Accept"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); + } else { + bfa_sm_set_state(lps, bfa_lps_sm_init); + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, + "FDISC Fail (RJT or timeout)"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, + "FLOGI Fail (RJT or timeout)"); + } + bfa_lps_login_comp(lps); + break; + + case BFA_LPS_SM_OFFLINE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/** + * login pending - awaiting space in request queue + */ +static void +bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_RESUME: + bfa_sm_set_state(lps, bfa_lps_sm_login); + break; + + case BFA_LPS_SM_OFFLINE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_reqq_wcancel(&lps->wqe); + break; + + case BFA_LPS_SM_RX_CVL: + /* + * Login was not even sent out; so when getting out + * of this state, it will appear like a login retry + * after Clear virtual link + */ + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/** + * login complete + */ +static void +bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_LOGOUT: + if (bfa_reqq_full(lps->bfa, lps->reqq)) { + bfa_sm_set_state(lps, bfa_lps_sm_logowait); + bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); + } else { + bfa_sm_set_state(lps, bfa_lps_sm_logout); + bfa_lps_send_logout(lps); + } + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGO, 0, "Logout"); + break; + + case BFA_LPS_SM_RX_CVL: + bfa_sm_set_state(lps, bfa_lps_sm_init); + + /* Let the vport module know about this event */ + bfa_lps_cvl_event(lps); + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. 
Link Rx"); + break; + + case BFA_LPS_SM_OFFLINE: + case BFA_LPS_SM_DELETE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/** + * logout in progress - awaiting firmware response + */ +static void +bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_FWRSP: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_lps_logout_comp(lps); + break; + + case BFA_LPS_SM_OFFLINE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + +/** + * logout pending -- awaiting space in request queue + */ +static void +bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) +{ + bfa_trc(lps->bfa, lps->lp_tag); + bfa_trc(lps->bfa, event); + + switch (event) { + case BFA_LPS_SM_RESUME: + bfa_sm_set_state(lps, bfa_lps_sm_logout); + bfa_lps_send_logout(lps); + break; + + case BFA_LPS_SM_OFFLINE: + bfa_sm_set_state(lps, bfa_lps_sm_init); + bfa_reqq_wcancel(&lps->wqe); + break; + + default: + bfa_sm_fault(lps->bfa, event); + } +} + + + +/** + * lps_pvt BFA LPS private functions + */ + +/** + * return memory requirement + */ +static void +bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, + u32 *dm_len) +{ + if (cfg->drvcfg.min_cfg) + *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS; + else + *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; +} + +/** + * bfa module attach at initialization time + */ +static void +bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + int i; + + bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s)); + mod->num_lps = BFA_LPS_MAX_LPORTS; + if (cfg->drvcfg.min_cfg) + mod->num_lps = BFA_LPS_MIN_LPORTS; + else + mod->num_lps = BFA_LPS_MAX_LPORTS; + mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo); + + bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s); + + INIT_LIST_HEAD(&mod->lps_free_q); + INIT_LIST_HEAD(&mod->lps_active_q); + + for (i = 0; i < mod->num_lps; i++, lps++) { + lps->bfa = bfa; + lps->lp_tag = (u8) i; + lps->reqq = BFA_REQQ_LPS; + bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); + list_add_tail(&lps->qe, &mod->lps_free_q); + } +} + +static void +bfa_lps_detach(struct bfa_s *bfa) +{ +} + +static void +bfa_lps_start(struct bfa_s *bfa) +{ +} + +static void +bfa_lps_stop(struct bfa_s *bfa) +{ +} + +/** + * IOC in disabled state -- consider all lps offline + */ +static void +bfa_lps_iocdisable(struct bfa_s *bfa) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &mod->lps_active_q) { + lps = (struct bfa_lps_s *) qe; + bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); + } +} + +/** + * Firmware login response + */ +static void +bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + bfa_assert(rsp->lp_tag < mod->num_lps); + lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); + + lps->status = rsp->status; + switch (rsp->status) { + case BFA_STATUS_OK: + lps->fport = rsp->f_port; + lps->npiv_en = rsp->npiv_en; + lps->lp_pid = rsp->lp_pid; + lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit); + lps->pr_pwwn = rsp->port_name; + lps->pr_nwwn = rsp->node_name; + lps->auth_req = 
rsp->auth_req; + lps->lp_mac = rsp->lp_mac; + lps->brcd_switch = rsp->brcd_switch; + lps->fcf_mac = rsp->fcf_mac; + + break; + + case BFA_STATUS_FABRIC_RJT: + lps->lsrjt_rsn = rsp->lsrjt_rsn; + lps->lsrjt_expl = rsp->lsrjt_expl; + + break; + + case BFA_STATUS_EPROTOCOL: + lps->ext_status = rsp->ext_status; + + break; + + default: + /* Nothing to do with other status */ + break; + } + + bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); +} + +/** + * Firmware logout response + */ +static void +bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + bfa_assert(rsp->lp_tag < mod->num_lps); + lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); + + bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); +} + +/** + * Firmware received a Clear virtual link request (for FCoE) + */ +static void +bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); + + bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); +} + +/** + * Space is available in request queue, resume queueing request to firmware. + */ +static void +bfa_lps_reqq_resume(void *lps_arg) +{ + struct bfa_lps_s *lps = lps_arg; + + bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); +} + +/** + * lps is freed -- triggered by vport delete + */ +static void +bfa_lps_free(struct bfa_lps_s *lps) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa); + + lps->lp_pid = 0; + list_del(&lps->qe); + list_add_tail(&lps->qe, &mod->lps_free_q); +} + +/** + * send login request to firmware + */ +static void +bfa_lps_send_login(struct bfa_lps_s *lps) +{ + struct bfi_lps_login_req_s *m; + + m = bfa_reqq_next(lps->bfa, lps->reqq); + bfa_assert(m); + + bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, + bfa_lpuid(lps->bfa)); + + m->lp_tag = lps->lp_tag; + m->alpa = lps->alpa; + m->pdu_size = bfa_os_htons(lps->pdusz); + m->pwwn = lps->pwwn; + m->nwwn = lps->nwwn; + m->fdisc = lps->fdisc; + m->auth_en = lps->auth_en; + + bfa_reqq_produce(lps->bfa, lps->reqq); +} + +/** + * send logout request to firmware + */ +static void +bfa_lps_send_logout(struct bfa_lps_s *lps) +{ + struct bfi_lps_logout_req_s *m; + + m = bfa_reqq_next(lps->bfa, lps->reqq); + bfa_assert(m); + + bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, + bfa_lpuid(lps->bfa)); + + m->lp_tag = lps->lp_tag; + m->port_name = lps->pwwn; + bfa_reqq_produce(lps->bfa, lps->reqq); +} + +/** + * Indirect login completion handler for non-fcs + */ +static void +bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + if (lps->fdisc) + bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); + else + bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); +} + +/** + * Login completion handler -- direct call for fcs, queue for others + */ +static void +bfa_lps_login_comp(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb, + lps); + return; + } + + if (lps->fdisc) + bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status); + else + bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); +} + +/** + * Indirect logout completion handler for non-fcs + */ +static void +bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + if (lps->fdisc) + bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); +} + +/** + * 
Logout completion handler -- direct call for fcs, queue for others + */ +static void +bfa_lps_logout_comp(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb, + lps); + return; + } + if (lps->fdisc) + bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); +} + +/** + * Clear virtual link completion handler for non-fcs + */ +static void +bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + /* Clear virtual link to base port will result in link down */ + if (lps->fdisc) + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); +} + +/** + * Received Clear virtual link event --direct call for fcs, + * queue for others + */ +static void +bfa_lps_cvl_event(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, + lps); + return; + } + + /* Clear virtual link to base port will result in link down */ + if (lps->fdisc) + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); +} + + + +/** + * lps_public BFA LPS public functions + */ + +u32 +bfa_lps_get_max_vport(struct bfa_s *bfa) +{ + if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) + return BFA_LPS_MAX_VPORTS_SUPP_CT; + else + return BFA_LPS_MAX_VPORTS_SUPP_CB; +} + +/** + * Allocate a lport srvice tag. + */ +struct bfa_lps_s * +bfa_lps_alloc(struct bfa_s *bfa) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps = NULL; + + bfa_q_deq(&mod->lps_free_q, &lps); + + if (lps == NULL) + return NULL; + + list_add_tail(&lps->qe, &mod->lps_active_q); + + bfa_sm_set_state(lps, bfa_lps_sm_init); + return lps; +} + +/** + * Free lport service tag. This can be called anytime after an alloc. + * No need to wait for any pending login/logout completions. + */ +void +bfa_lps_delete(struct bfa_lps_s *lps) +{ + bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); +} + +/** + * Initiate a lport login. + */ +void +bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, + wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) +{ + lps->uarg = uarg; + lps->alpa = alpa; + lps->pdusz = pdusz; + lps->pwwn = pwwn; + lps->nwwn = nwwn; + lps->fdisc = BFA_FALSE; + lps->auth_en = auth_en; + bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); +} + +/** + * Initiate a lport fdisc login. + */ +void +bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, + wwn_t nwwn) +{ + lps->uarg = uarg; + lps->alpa = 0; + lps->pdusz = pdusz; + lps->pwwn = pwwn; + lps->nwwn = nwwn; + lps->fdisc = BFA_TRUE; + lps->auth_en = BFA_FALSE; + bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); +} + +/** + * Initiate a lport logout (flogi). + */ +void +bfa_lps_flogo(struct bfa_lps_s *lps) +{ + bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); +} + +/** + * Initiate a lport FDSIC logout. + */ +void +bfa_lps_fdisclogo(struct bfa_lps_s *lps) +{ + bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); +} + +/** + * Discard a pending login request -- should be called only for + * link down handling. 
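+ *
+ * For context, a typical lport service sequence over the API above
+ * (illustrative sketch; my_vport, pdusz and the vport_* WWNs are
+ * placeholders):
+ *
+ *	lps = bfa_lps_alloc(bfa);
+ *	bfa_lps_fdisc(lps, my_vport, pdusz, vport_pwwn, vport_nwwn);
+ *	... login result is delivered via bfa_cb_lps_fdisc_comp(); on
+ *	    success bfa_lps_get_pid(lps) returns the assigned port id ...
+ *	bfa_lps_fdisclogo(lps);
+ *	... logout completion arrives via bfa_cb_lps_fdisclogo_comp() ...
+ *	bfa_lps_delete(lps);
+ *
+ * A base port login uses bfa_lps_flogi()/bfa_lps_flogo() instead, with
+ * completion reported through bfa_cb_lps_flogi_comp().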
+ */ +void +bfa_lps_discard(struct bfa_lps_s *lps) +{ + bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); +} + +/** + * Return lport services tag + */ +u8 +bfa_lps_get_tag(struct bfa_lps_s *lps) +{ + return lps->lp_tag; +} + +/** + * Return lport services tag given the pid + */ +u8 +bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + int i; + + for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) { + if (lps->lp_pid == pid) + return lps->lp_tag; + } + + /* Return base port tag anyway */ + return 0; +} + +/** + * return if fabric login indicates support for NPIV + */ +bfa_boolean_t +bfa_lps_is_npiv_en(struct bfa_lps_s *lps) +{ + return lps->npiv_en; +} + +/** + * Return TRUE if attached to F-Port, else return FALSE + */ +bfa_boolean_t +bfa_lps_is_fport(struct bfa_lps_s *lps) +{ + return lps->fport; +} + +/** + * Return TRUE if attached to a Brocade Fabric + */ +bfa_boolean_t +bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps) +{ + return lps->brcd_switch; +} +/** + * return TRUE if authentication is required + */ +bfa_boolean_t +bfa_lps_is_authreq(struct bfa_lps_s *lps) +{ + return lps->auth_req; +} + +bfa_eproto_status_t +bfa_lps_get_extstatus(struct bfa_lps_s *lps) +{ + return lps->ext_status; +} + +/** + * return port id assigned to the lport + */ +u32 +bfa_lps_get_pid(struct bfa_lps_s *lps) +{ + return lps->lp_pid; +} + +/** + * return port id assigned to the base lport + */ +u32 +bfa_lps_get_base_pid(struct bfa_s *bfa) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + + return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; +} + +/** + * Return bb_credit assigned in FLOGI response + */ +u16 +bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps) +{ + return lps->pr_bbcred; +} + +/** + * Return peer port name + */ +wwn_t +bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps) +{ + return lps->pr_pwwn; +} + +/** + * Return peer node name + */ +wwn_t +bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps) +{ + return lps->pr_nwwn; +} + +/** + * return reason code if login request is rejected + */ +u8 +bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps) +{ + return lps->lsrjt_rsn; +} + +/** + * return explanation code if login request is rejected + */ +u8 +bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps) +{ + return lps->lsrjt_expl; +} + +/** + * Return fpma/spma MAC for lport + */ +mac_t +bfa_lps_get_lp_mac(struct bfa_lps_s *lps) +{ + return lps->lp_mac; +} + +/** + * LPS firmware message class handler. + */ +void +bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + union bfi_lps_i2h_msg_u msg; + + bfa_trc(bfa, m->mhdr.msg_id); + msg.msg = m; + + switch (m->mhdr.msg_id) { + case BFI_LPS_H2I_LOGIN_RSP: + bfa_lps_login_rsp(bfa, msg.login_rsp); + break; + + case BFI_LPS_H2I_LOGOUT_RSP: + bfa_lps_logout_rsp(bfa, msg.logout_rsp); + break; + + case BFI_LPS_H2I_CVL_EVENT: + bfa_lps_rx_cvl_event(bfa, msg.cvl_event); + break; + + default: + bfa_trc(bfa, m->mhdr.msg_id); + bfa_assert(0); + } +} + +/** + * FC PORT state machine functions + */ +static void +bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + /** + * Start event after IOC is configured and BFA is started. 
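+ *
+ * bfa_fcport_send_enable() returns BFA_TRUE when the enable request could
+ * be queued to the firmware right away and BFA_FALSE when it had to park
+ * on the request queue via bfa_reqq_wait(); the branches below therefore
+ * pick bfa_fcport_sm_enabling or bfa_fcport_sm_enabling_qwait, and the
+ * qwait state leaves on BFA_FCPORT_SM_QRESUME once queue space frees up.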
+ */ + if (bfa_fcport_send_enable(fcport)) { + bfa_trc(fcport->bfa, BFA_TRUE); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + } else { + bfa_trc(fcport->bfa, BFA_FALSE); + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + } + break; + + case BFA_FCPORT_SM_ENABLE: + /** + * Port is persistently configured to be in enabled state. Do + * not change state. Port enabling is done when START event is + * received. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + /** + * If a port is persistently configured to be disabled, the + * first event will a port disable request. + */ + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + bfa_fcport_send_enable(fcport); + break; + + case BFA_FCPORT_SM_STOP: + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_ENABLE: + /** + * Already enable is in progress. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + /** + * Just send disable request to firmware when room becomes + * available in request queue. + */ + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /** + * Possible to get link events when doing back-to-back + * enable/disables. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_FWRSP: + case BFA_FCPORT_SM_LINKDOWN: + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); + break; + + case BFA_FCPORT_SM_LINKUP: + bfa_fcport_update_linkinfo(fcport); + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); + + bfa_assert(fcport->event_cbfn); + bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); + break; + + case BFA_FCPORT_SM_ENABLE: + /** + * Already being enabled. 
+ */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_disabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_LINKUP: + bfa_fcport_update_linkinfo(fcport); + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); + bfa_assert(fcport->event_cbfn); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); + if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { + + bfa_trc(fcport->bfa, + pevent->link_state.vc_fcf.fcf.fipenabled); + bfa_trc(fcport->bfa, + pevent->link_state.vc_fcf.fcf.fipfailed); + + if (pevent->link_state.vc_fcf.fcf.fipfailed) + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_FIP_FCF_DISC, 0, + "FIP FCF Discovery Failed"); + else + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_FIP_FCF_DISC, 0, + "FIP FCF Discovered"); + } + + bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port online: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_LINKDOWN: + /** + * Possible to get link down event. + */ + break; + + case BFA_FCPORT_SM_ENABLE: + /** + * Already enabled. + */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_disabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_ENABLE: + /** + * Already enabled. 
+ */ + break; + + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_disabling_qwait); + + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port disabled: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_LINKDOWN: + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); + wwn2str(pwwn_buf, fcport->pwwn); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + else + BFA_LOG(KERN_ERR, bfad, log_level, + "Base port (WWN = %s) " + "lost fabric connectivity\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_fcport_reset_linkinfo(fcport); + wwn2str(pwwn_buf, fcport->pwwn); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + else + BFA_LOG(KERN_ERR, bfad, log_level, + "Base port (WWN = %s) " + "lost fabric connectivity\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE); + wwn2str(pwwn_buf, fcport->pwwn); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port offline: WWN = %s\n", pwwn_buf); + else + BFA_LOG(KERN_ERR, bfad, log_level, + "Base port (WWN = %s) " + "lost fabric connectivity\n", pwwn_buf); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + bfa_fcport_send_disable(fcport); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + case BFA_FCPORT_SM_ENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait); + break; + + case BFA_FCPORT_SM_DISABLE: + /** + * Already being disabled. + */ + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /** + * Possible to get link events when doing back-to-back + * enable/disables. 
+ */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + bfa_fcport_send_disable(fcport); + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + case BFA_FCPORT_SM_ENABLE: + break; + + case BFA_FCPORT_SM_DISABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /** + * Possible to get link events when doing back-to-back + * enable/disables. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + bfa_reqq_wcancel(&fcport->reqq_wait); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_FWRSP: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_DISABLE: + /** + * Already being disabled. + */ + break; + + case BFA_FCPORT_SM_ENABLE: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port enabled: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: + /** + * Possible to get link events when doing back-to-back + * enable/disables. + */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + char pwwn_buf[BFA_STRING_32]; + struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad; + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + /** + * Ignore start event for a port that is disabled. + */ + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_ENABLE: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); + wwn2str(pwwn_buf, fcport->pwwn); + BFA_LOG(KERN_INFO, bfad, log_level, + "Base port enabled: WWN = %s\n", pwwn_buf); + break; + + case BFA_FCPORT_SM_DISABLE: + /** + * Already disabled. 
+ */ + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void +bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + break; + + default: + /** + * Ignore all other events. + */ + ; + } +} + +/** + * Port is enabled. IOC is down/failed. + */ +static void +bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + else + bfa_sm_set_state(fcport, + bfa_fcport_sm_enabling_qwait); + break; + + default: + /** + * Ignore all events. + */ + ; + } +} + +/** + * Port is disabled. IOC is down/failed. + */ +static void +bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_START: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_ENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + break; + + default: + /** + * Ignore all events. + */ + ; + } +} + +/** + * Link state is down + */ +static void +bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/** + * Link state is waiting for down notification + */ +static void +bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/** + * Link state is waiting for down notification and there is a pending up + */ +static void +bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/** + * Link state is up + */ +static void +bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/** + * Link state is waiting for up notification + */ +static void +bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, 
bfa_fcport_ln_sm_up_dn_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/** + * Link state is waiting for up notification and there is a pending down + */ +static void +bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + +/** + * Link state is waiting for up notification and there are pending down and up + */ +static void +bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) +{ + bfa_trc(ln->fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); + break; + + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); + bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->fcport->bfa, event); + } +} + + + +/** + * hal_port_private + */ + +static void +__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_ln_s *ln = cbarg; + + if (complete) + ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); + else + bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); +} + +/** + * Send SCN notification to upper layers. + * trunk - false if caller is fcport to ignore fcport event in trunked mode + */ +static void +bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, + bfa_boolean_t trunk) +{ + if (fcport->cfg.trunked && !trunk) + return; + + switch (event) { + case BFA_PORT_LINKUP: + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); + break; + case BFA_PORT_LINKDOWN: + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); + break; + default: + bfa_assert(0); + } +} + +static void +bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) +{ + struct bfa_fcport_s *fcport = ln->fcport; + + if (fcport->bfa->fcs) { + fcport->event_cbfn(fcport->event_cbarg, event); + bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); + } else { + ln->ln_event = event; + bfa_cb_queue(fcport->bfa, &ln->ln_qe, + __bfa_cb_fcport_event, ln); + } +} + +#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ + BFA_CACHELINE_SZ)) + +static void +bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, + u32 *dm_len) +{ + *dm_len += FCPORT_STATS_DMA_SZ; +} + +static void +bfa_fcport_qresume(void *cbarg) +{ + struct bfa_fcport_s *fcport = cbarg; + + bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); +} + +static void +bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) +{ + u8 *dm_kva; + u64 dm_pa; + + dm_kva = bfa_meminfo_dma_virt(meminfo); + dm_pa = bfa_meminfo_dma_phys(meminfo); + + fcport->stats_kva = dm_kva; + fcport->stats_pa = dm_pa; + fcport->stats = (union bfa_fcport_stats_u *) dm_kva; + + dm_kva += FCPORT_STATS_DMA_SZ; + dm_pa += FCPORT_STATS_DMA_SZ; + + bfa_meminfo_dma_virt(meminfo) = dm_kva; + bfa_meminfo_dma_phys(meminfo) = dm_pa; +} + +/** + * Memory initialization. 
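+ *
+ * The DMA memory this module asks for is just the statistics buffer:
+ * bfa_fcport_meminfo() above reserves FCPORT_STATS_DMA_SZ bytes (the size
+ * of union bfa_fcport_stats_u rounded up to BFA_CACHELINE_SZ), and
+ * bfa_fcport_mem_claim() carves that region out of the meminfo cursors,
+ * roughly (sketch of the claim step):
+ *
+ *	fcport->stats_kva = bfa_meminfo_dma_virt(meminfo);
+ *	fcport->stats_pa  = bfa_meminfo_dma_phys(meminfo);
+ *	fcport->stats = (union bfa_fcport_stats_u *) fcport->stats_kva;
+ *	... advance both cursors by FCPORT_STATS_DMA_SZ ...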
+ */ +static void +bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_port_cfg_s *port_cfg = &fcport->cfg; + struct bfa_fcport_ln_s *ln = &fcport->ln; + struct bfa_timeval_s tv; + + bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); + fcport->bfa = bfa; + ln->fcport = fcport; + + bfa_fcport_mem_claim(fcport, meminfo); + + bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); + + /** + * initialize time stamp for stats reset + */ + bfa_os_gettimeofday(&tv); + fcport->stats_reset_time = tv.tv_sec; + + /** + * initialize and set default configuration + */ + port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; + port_cfg->speed = BFA_PORT_SPEED_AUTO; + port_cfg->trunked = BFA_FALSE; + port_cfg->maxfrsize = 0; + + port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS; + + bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); +} + +static void +bfa_fcport_detach(struct bfa_s *bfa) +{ +} + +/** + * Called when IOC is ready. + */ +static void +bfa_fcport_start(struct bfa_s *bfa) +{ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); +} + +/** + * Called before IOC is stopped. + */ +static void +bfa_fcport_stop(struct bfa_s *bfa) +{ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP); + bfa_trunk_iocdisable(bfa); +} + +/** + * Called when IOC failure is detected. + */ +static void +bfa_fcport_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL); + bfa_trunk_iocdisable(bfa); +} + +static void +bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) +{ + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + + fcport->speed = pevent->link_state.speed; + fcport->topology = pevent->link_state.topology; + + if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) + fcport->myalpa = 0; + + /* QoS Details */ + bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); + bfa_os_assign(fcport->qos_vc_attr, + pevent->link_state.vc_fcf.qos_vc_attr); + + /** + * update trunk state if applicable + */ + if (!fcport->cfg.trunked) + trunk->attr.state = BFA_TRUNK_DISABLED; + + /* update FCoE specific */ + fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan); + + bfa_trc(fcport->bfa, fcport->speed); + bfa_trc(fcport->bfa, fcport->topology); +} + +static void +bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) +{ + fcport->speed = BFA_PORT_SPEED_UNKNOWN; + fcport->topology = BFA_PORT_TOPOLOGY_NONE; +} + +/** + * Send port enable message to firmware. + */ +static bfa_boolean_t +bfa_fcport_send_enable(struct bfa_fcport_s *fcport) +{ + struct bfi_fcport_enable_req_s *m; + + /** + * Increment message tag before queue check, so that responses to old + * requests are discarded. 
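+ *
+ * The matching check is in bfa_fcport_isr(): an enable/disable response is
+ * only turned into a BFA_FCPORT_SM_FWRSP event when it carries the current
+ * tag, i.e.
+ *
+ *	if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+ *		bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+ *
+ * so a response to a request issued before a back-to-back disable/enable
+ * toggle is silently dropped.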
+ */ + fcport->msgtag++; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + if (!m) { + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, + bfa_lpuid(fcport->bfa)); + m->nwwn = fcport->nwwn; + m->pwwn = fcport->pwwn; + m->port_cfg = fcport->cfg; + m->msgtag = fcport->msgtag; + m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); + bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + return BFA_TRUE; +} + +/** + * Send port disable message to firmware. + */ +static bfa_boolean_t +bfa_fcport_send_disable(struct bfa_fcport_s *fcport) +{ + struct bfi_fcport_req_s *m; + + /** + * Increment message tag before queue check, so that responses to old + * requests are discarded. + */ + fcport->msgtag++; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + if (!m) { + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, + bfa_lpuid(fcport->bfa)); + m->msgtag = fcport->msgtag; + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); + + return BFA_TRUE; +} + +static void +bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) +{ + fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc); + fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc); + + bfa_trc(fcport->bfa, fcport->pwwn); + bfa_trc(fcport->bfa, fcport->nwwn); +} + +static void +bfa_fcport_send_txcredit(void *port_cbarg) +{ + + struct bfa_fcport_s *fcport = port_cbarg; + struct bfi_fcport_set_svc_params_req_s *m; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + if (!m) { + bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit); + return; + } + + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, + bfa_lpuid(fcport->bfa)); + m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit); + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} + +static void +bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, + struct bfa_qos_stats_s *s) +{ + u32 *dip = (u32 *) d; + u32 *sip = (u32 *) s; + int i; + + /* Now swap the 32 bit fields */ + for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) + dip[i] = bfa_os_ntohl(sip[i]); +} + +static void +bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, + struct bfa_fcoe_stats_s *s) +{ + u32 *dip = (u32 *) d; + u32 *sip = (u32 *) s; + int i; + + for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); + i = i + 2) { +#ifdef __BIGENDIAN + dip[i] = bfa_os_ntohl(sip[i]); + dip[i + 1] = bfa_os_ntohl(sip[i + 1]); +#else + dip[i] = bfa_os_ntohl(sip[i + 1]); + dip[i + 1] = bfa_os_ntohl(sip[i]); +#endif + } +} + +static void +__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = cbarg; + + if (complete) { + if (fcport->stats_status == BFA_STATUS_OK) { + struct bfa_timeval_s tv; + + /* Swap FC QoS or FCoE stats */ + if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { + bfa_fcport_qos_stats_swap( + &fcport->stats_ret->fcqos, + &fcport->stats->fcqos); + } else { + 
bfa_fcport_fcoe_stats_swap( + &fcport->stats_ret->fcoe, + &fcport->stats->fcoe); + + bfa_os_gettimeofday(&tv); + fcport->stats_ret->fcoe.secs_reset = + tv.tv_sec - fcport->stats_reset_time; + } + } + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + } else { + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_get_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, + fcport); +} + +static void +bfa_fcport_send_stats_get(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfi_fcport_req_s *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_send_stats_get, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} + +static void +__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = cbarg; + + if (complete) { + struct bfa_timeval_s tv; + + /** + * re-initialize time stamp for stats reset + */ + bfa_os_gettimeofday(&tv); + fcport->stats_reset_time = tv.tv_sec; + + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + } else { + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_clr_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_fcport_stats_clr, fcport); +} + +static void +bfa_fcport_send_stats_clear(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfi_fcport_req_s *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_send_stats_clear, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} + +/** + * Handle trunk SCN event from firmware. 
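+ *
+ * While copying the per-link attributes the handler also builds link_bm,
+ * a bitmap with bit i set for every trunk link reported as
+ * BFA_TRUNK_LINK_STATE_UP; the switch on link_bm then logs which lanes of
+ * the two-port trunk came up (3 = "Trunk up(0,1)", 2 = "Trunk up(-,1)",
+ * 1 = "Trunk up(0,-)", anything else = "Trunk down"). Upper layers are
+ * notified through bfa_fcport_scn() only when the trunk state changed or
+ * the trunk went offline.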
+ */ +static void +bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) +{ + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + struct bfi_fcport_trunk_link_s *tlink; + struct bfa_trunk_link_attr_s *lattr; + enum bfa_trunk_state state_prev; + int i; + int link_bm = 0; + + bfa_trc(fcport->bfa, fcport->cfg.trunked); + bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE || + scn->trunk_state == BFA_TRUNK_OFFLINE); + + bfa_trc(fcport->bfa, trunk->attr.state); + bfa_trc(fcport->bfa, scn->trunk_state); + bfa_trc(fcport->bfa, scn->trunk_speed); + + /** + * Save off new state for trunk attribute query + */ + state_prev = trunk->attr.state; + if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED)) + trunk->attr.state = scn->trunk_state; + trunk->attr.speed = scn->trunk_speed; + for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { + lattr = &trunk->attr.link_attr[i]; + tlink = &scn->tlink[i]; + + lattr->link_state = tlink->state; + lattr->trunk_wwn = tlink->trunk_wwn; + lattr->fctl = tlink->fctl; + lattr->speed = tlink->speed; + lattr->deskew = bfa_os_ntohl(tlink->deskew); + + if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { + fcport->speed = tlink->speed; + fcport->topology = BFA_PORT_TOPOLOGY_P2P; + link_bm |= 1 << i; + } + + bfa_trc(fcport->bfa, lattr->link_state); + bfa_trc(fcport->bfa, lattr->trunk_wwn); + bfa_trc(fcport->bfa, lattr->fctl); + bfa_trc(fcport->bfa, lattr->speed); + bfa_trc(fcport->bfa, lattr->deskew); + } + + switch (link_bm) { + case 3: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)"); + break; + case 2: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)"); + break; + case 1: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)"); + break; + default: + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); + } + + /** + * Notify upper layers if trunk state changed. + */ + if ((state_prev != trunk->attr.state) || + (scn->trunk_state == BFA_TRUNK_OFFLINE)) { + bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ? + BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE); + } +} + +static void +bfa_trunk_iocdisable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + int i = 0; + + /** + * In trunked mode, notify upper layers that link is down + */ + if (fcport->cfg.trunked) { + if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE) + bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE); + + fcport->trunk.attr.state = BFA_TRUNK_OFFLINE; + fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN; + for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) { + fcport->trunk.attr.link_attr[i].trunk_wwn = 0; + fcport->trunk.attr.link_attr[i].fctl = + BFA_TRUNK_LINK_FCTL_NORMAL; + fcport->trunk.attr.link_attr[i].link_state = + BFA_TRUNK_LINK_STATE_DN_LINKDN; + fcport->trunk.attr.link_attr[i].speed = + BFA_PORT_SPEED_UNKNOWN; + fcport->trunk.attr.link_attr[i].deskew = 0; + } + } +} + + + +/** + * hal_port_public + */ + +/** + * Called to initialize port attributes + */ +void +bfa_fcport_init(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + /** + * Initialize port attributes from IOC hardware data. 
+ */ + bfa_fcport_set_wwns(fcport); + if (fcport->cfg.maxfrsize == 0) + fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); + fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); + fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); + + bfa_assert(fcport->cfg.maxfrsize); + bfa_assert(fcport->cfg.rx_bbcredit); + bfa_assert(fcport->speed_sup); +} + +/** + * Firmware message handler. + */ +void +bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + union bfi_fcport_i2h_msg_u i2hmsg; + + i2hmsg.msg = msg; + fcport->event_arg.i2hmsg = i2hmsg; + + bfa_trc(bfa, msg->mhdr.msg_id); + bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm)); + + switch (msg->mhdr.msg_id) { + case BFI_FCPORT_I2H_ENABLE_RSP: + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); + break; + + case BFI_FCPORT_I2H_DISABLE_RSP: + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); + break; + + case BFI_FCPORT_I2H_EVENT: + if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); + else + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); + break; + + case BFI_FCPORT_I2H_TRUNK_SCN: + bfa_trunk_scn(fcport, i2hmsg.trunk_scn); + break; + + case BFI_FCPORT_I2H_STATS_GET_RSP: + /* + * check for timer pop before processing the rsp + */ + if (fcport->stats_busy == BFA_FALSE || + fcport->stats_status == BFA_STATUS_ETIMER) + break; + + bfa_timer_stop(&fcport->timer); + fcport->stats_status = i2hmsg.pstatsget_rsp->status; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_fcport_stats_get, fcport); + break; + + case BFI_FCPORT_I2H_STATS_CLEAR_RSP: + /* + * check for timer pop before processing the rsp + */ + if (fcport->stats_busy == BFA_FALSE || + fcport->stats_status == BFA_STATUS_ETIMER) + break; + + bfa_timer_stop(&fcport->timer); + fcport->stats_status = BFA_STATUS_OK; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_fcport_stats_clr, fcport); + break; + + case BFI_FCPORT_I2H_ENABLE_AEN: + bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE); + break; + + case BFI_FCPORT_I2H_DISABLE_AEN: + bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE); + break; + + default: + bfa_assert(0); + break; + } +} + + + +/** + * hal_port_api + */ + +/** + * Registered callback for port events. + */ +void +bfa_fcport_event_register(struct bfa_s *bfa, + void (*cbfn) (void *cbarg, + enum bfa_port_linkstate event), + void *cbarg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + fcport->event_cbfn = cbfn; + fcport->event_cbarg = cbarg; +} + +bfa_status_t +bfa_fcport_enable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; + + if (fcport->diag_busy) + return BFA_STATUS_DIAG_BUSY; + + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_disable(struct bfa_s *bfa) +{ + + if (bfa_ioc_is_disabled(&bfa->ioc)) + return BFA_STATUS_IOC_DISABLED; + + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); + return BFA_STATUS_OK; +} + +/** + * Configure port speed. 
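+ *
+ * The request is rejected with BFA_STATUS_TRUNK_ENABLED while trunking is
+ * configured and with BFA_STATUS_UNSUPP_SPEED when a fixed speed above the
+ * adapter limit is asked for; otherwise the value is recorded in
+ * fcport->cfg, which is shipped to the firmware with the next port enable
+ * request (see bfa_fcport_send_enable()). Illustrative call:
+ *
+ *	if (bfa_fcport_cfg_speed(bfa, BFA_PORT_SPEED_AUTO) != BFA_STATUS_OK)
+ *		... reject the speed change ...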
+ */ +bfa_status_t +bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, speed); + + if (fcport->cfg.trunked == BFA_TRUE) + return BFA_STATUS_TRUNK_ENABLED; + if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { + bfa_trc(bfa, fcport->speed_sup); + return BFA_STATUS_UNSUPP_SPEED; + } + + fcport->cfg.speed = speed; + + return BFA_STATUS_OK; +} + +/** + * Get current speed. + */ +enum bfa_port_speed +bfa_fcport_get_speed(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->speed; +} + +/** + * Configure port topology. + */ +bfa_status_t +bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, topology); + bfa_trc(bfa, fcport->cfg.topology); + + switch (topology) { + case BFA_PORT_TOPOLOGY_P2P: + case BFA_PORT_TOPOLOGY_LOOP: + case BFA_PORT_TOPOLOGY_AUTO: + break; + + default: + return BFA_STATUS_EINVAL; + } + + fcport->cfg.topology = topology; + return BFA_STATUS_OK; +} + +/** + * Get current topology. + */ +enum bfa_port_topology +bfa_fcport_get_topology(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->topology; +} + +bfa_status_t +bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, alpa); + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); + bfa_trc(bfa, fcport->cfg.hardalpa); + + fcport->cfg.cfg_hardalpa = BFA_TRUE; + fcport->cfg.hardalpa = alpa; + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_clr_hardalpa(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); + bfa_trc(bfa, fcport->cfg.hardalpa); + + fcport->cfg.cfg_hardalpa = BFA_FALSE; + return BFA_STATUS_OK; +} + +bfa_boolean_t +bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + *alpa = fcport->cfg.hardalpa; + return fcport->cfg.cfg_hardalpa; +} + +u8 +bfa_fcport_get_myalpa(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->myalpa; +} + +bfa_status_t +bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, maxfrsize); + bfa_trc(bfa, fcport->cfg.maxfrsize); + + /* with in range */ + if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ)) + return BFA_STATUS_INVLD_DFSZ; + + /* power of 2, if not the max frame size of 2112 */ + if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) + return BFA_STATUS_INVLD_DFSZ; + + fcport->cfg.maxfrsize = maxfrsize; + return BFA_STATUS_OK; +} + +u16 +bfa_fcport_get_maxfrsize(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.maxfrsize; +} + +u8 +bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.rx_bbcredit; +} + +void +bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; + bfa_fcport_send_txcredit(fcport); +} + +/** + * Get port attributes. 
+ */ + +wwn_t +bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + if (node) + return fcport->nwwn; + else + return fcport->pwwn; +} + +void +bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s)); + + attr->nwwn = fcport->nwwn; + attr->pwwn = fcport->pwwn; + + attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); + attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); + + bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, + sizeof(struct bfa_port_cfg_s)); + /* speed attributes */ + attr->pport_cfg.speed = fcport->cfg.speed; + attr->speed_supported = fcport->speed_sup; + attr->speed = fcport->speed; + attr->cos_supported = FC_CLASS_3; + + /* topology attributes */ + attr->pport_cfg.topology = fcport->cfg.topology; + attr->topology = fcport->topology; + attr->pport_cfg.trunked = fcport->cfg.trunked; + + /* beacon attributes */ + attr->beacon = fcport->beacon; + attr->link_e2e_beacon = fcport->link_e2e_beacon; + attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog); + attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa); + + attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); + attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); + attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); + if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_IOCDIS; + else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) + attr->port_state = BFA_PORT_ST_FWMISMATCH; + + /* FCoE vlan */ + attr->fcoe_vlan = fcport->fcoe_vlan; +} + +#define BFA_FCPORT_STATS_TOV 1000 + +/** + * Fetch port statistics (FCQoS or FCoE). + */ +bfa_status_t +bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, + bfa_cb_port_t cbfn, void *cbarg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (fcport->stats_busy) { + bfa_trc(bfa, fcport->stats_busy); + return BFA_STATUS_DEVBUSY; + } + + fcport->stats_busy = BFA_TRUE; + fcport->stats_ret = stats; + fcport->stats_cbfn = cbfn; + fcport->stats_cbarg = cbarg; + + bfa_fcport_send_stats_get(fcport); + + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, + fcport, BFA_FCPORT_STATS_TOV); + return BFA_STATUS_OK; +} + +/** + * Reset port statistics (FCQoS or FCoE). 
+ */
+bfa_status_t
+bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (fcport->stats_busy) {
+		bfa_trc(bfa, fcport->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fcport->stats_busy = BFA_TRUE;
+	fcport->stats_cbfn = cbfn;
+	fcport->stats_cbarg = cbarg;
+
+	bfa_fcport_send_stats_clear(fcport);
+
+	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
+			fcport, BFA_FCPORT_STATS_TOV);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Fetch FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FC mode */
+	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
+}
+
+/**
+ * Reset FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FC mode */
+	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
+}
+
+/**
+ * Fetch FCoE port statistics
+ */
+bfa_status_t
+bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FCoE mode */
+	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
+}
+
+/**
+ * Reset FCoE port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FCoE mode */
+	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
+}
+
+void
+bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	qos_attr->state = fcport->qos_attr.state;
+	qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
+}
+
+void
+bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
+	struct bfa_qos_vc_attr_s *qos_vc_attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+	u32 i = 0;
+
+	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
+	qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
+	qos_vc_attr->elp_opmode_flags =
+		bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
+
+	/* Individual VC info */
+	while (i < qos_vc_attr->total_vc_count) {
+		qos_vc_attr->vc_info[i].vc_credit =
+			bfa_vc_attr->vc_info[i].vc_credit;
+		qos_vc_attr->vc_info[i].borrow_credit =
+			bfa_vc_attr->vc_info[i].borrow_credit;
+		qos_vc_attr->vc_info[i].priority =
+			bfa_vc_attr->vc_info[i].priority;
+		++i;
+	}
+}
+
+/**
+ * Return whether the port is currently disabled.
+ */
+bfa_boolean_t
+bfa_fcport_is_disabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DISABLED;
+
+}
+
+bfa_boolean_t
+bfa_fcport_is_ratelim(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.ratelimit ?
BFA_TRUE : BFA_FALSE; + +} + +void +bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); + + bfa_trc(bfa, on_off); + bfa_trc(bfa, fcport->cfg.qos_enabled); + + bfa_trc(bfa, ioc_type); + + if (ioc_type == BFA_IOC_TYPE_FC) { + fcport->cfg.qos_enabled = on_off; + /** + * Notify fcpim of the change in QoS state + */ + bfa_fcpim_update_ioredirect(bfa); + } +} + +void +bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, on_off); + bfa_trc(bfa, fcport->cfg.ratelimit); + + fcport->cfg.ratelimit = on_off; + if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) + fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; +} + +/** + * Configure default minimum ratelim speed + */ +bfa_status_t +bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, speed); + + /* Auto and speeds greater than the supported speed, are invalid */ + if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) { + bfa_trc(bfa, fcport->speed_sup); + return BFA_STATUS_UNSUPP_SPEED; + } + + fcport->cfg.trl_def_speed = speed; + + return BFA_STATUS_OK; +} + +/** + * Get default minimum ratelim speed + */ +enum bfa_port_speed +bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, fcport->cfg.trl_def_speed); + return fcport->cfg.trl_def_speed; + +} +void +bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, status); + bfa_trc(bfa, fcport->diag_busy); + + fcport->diag_busy = status; +} + +void +bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon) +{ + struct bfa_s *bfa = dev; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, beacon); + bfa_trc(bfa, link_e2e_beacon); + bfa_trc(bfa, fcport->beacon); + bfa_trc(bfa, fcport->link_e2e_beacon); + + fcport->beacon = beacon; + fcport->link_e2e_beacon = link_e2e_beacon; +} + +bfa_boolean_t +bfa_fcport_is_linkup(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return (!fcport->cfg.trunked && + bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) || + (fcport->cfg.trunked && + fcport->trunk.attr.state == BFA_TRUNK_ONLINE); +} + +bfa_boolean_t +bfa_fcport_is_qos_enabled(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return fcport->cfg.qos_enabled; +} + +bfa_status_t +bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr) + +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + + bfa_trc(bfa, fcport->cfg.trunked); + bfa_trc(bfa, trunk->attr.state); + *attr = trunk->attr; + attr->port_id = bfa_lps_get_base_pid(bfa); + + return BFA_STATUS_OK; +} + +void +bfa_trunk_enable_cfg(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + + bfa_trc(bfa, 1); + trunk->attr.state = BFA_TRUNK_OFFLINE; + fcport->cfg.trunked = BFA_TRUE; +} + +bfa_status_t +bfa_trunk_enable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + + bfa_trc(bfa, 1); + + trunk->attr.state = BFA_TRUNK_OFFLINE; + bfa_fcport_disable(bfa); + fcport->cfg.trunked = BFA_TRUE; + 
bfa_fcport_enable(bfa); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_trunk_disable(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_fcport_trunk_s *trunk = &fcport->trunk; + + bfa_trc(bfa, 0); + trunk->attr.state = BFA_TRUNK_DISABLED; + bfa_fcport_disable(bfa); + fcport->cfg.trunked = BFA_FALSE; + bfa_fcport_enable(bfa); + return BFA_STATUS_OK; +} + + +/** + * Rport State machine functions + */ +/** + * Beginning state, only online event expected. + */ +static void +bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_CREATE: + bfa_stats(rp, sm_un_cr); + bfa_sm_set_state(rp, bfa_rport_sm_created); + break; + + default: + bfa_stats(rp, sm_un_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +static void +bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_ONLINE: + bfa_stats(rp, sm_cr_on); + if (bfa_rport_send_fwcreate(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_cr_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_cr_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + default: + bfa_stats(rp, sm_cr_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Waiting for rport create response from firmware. + */ +static void +bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_fwc_rsp); + bfa_sm_set_state(rp, bfa_rport_sm_online); + bfa_rport_online_cb(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwc_del); + bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); + break; + + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_fwc_off); + bfa_sm_set_state(rp, bfa_rport_sm_offline_pending); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwc_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + default: + bfa_stats(rp, sm_fwc_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Request queue is full, awaiting queue resume to send create request. + */ +static void +bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_QRESUME: + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + bfa_rport_send_fwcreate(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwc_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_fwc_off); + bfa_sm_set_state(rp, bfa_rport_sm_offline); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_offline_cb(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwc_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_reqq_wcancel(&rp->reqq_wait); + break; + + default: + bfa_stats(rp, sm_fwc_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Online state - normal parking state. 
+ */ +static void +bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + struct bfi_rport_qos_scn_s *qos_scn; + + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_on_off); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_on_del); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + else + bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_on_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + case BFA_RPORT_SM_SET_SPEED: + bfa_rport_send_fwspeed(rp); + break; + + case BFA_RPORT_SM_QOS_SCN: + qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg; + rp->qos_attr = qos_scn->new_qos_attr; + bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id); + bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id); + bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority); + bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); + + qos_scn->old_qos_attr.qos_flow_id = + bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id); + qos_scn->new_qos_attr.qos_flow_id = + bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id); + + if (qos_scn->old_qos_attr.qos_flow_id != + qos_scn->new_qos_attr.qos_flow_id) + bfa_cb_rport_qos_scn_flowid(rp->rport_drv, + qos_scn->old_qos_attr, + qos_scn->new_qos_attr); + if (qos_scn->old_qos_attr.qos_priority != + qos_scn->new_qos_attr.qos_priority) + bfa_cb_rport_qos_scn_prio(rp->rport_drv, + qos_scn->old_qos_attr, + qos_scn->new_qos_attr); + break; + + default: + bfa_stats(rp, sm_on_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Firmware rport is being deleted - awaiting f/w response. + */ +static void +bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_fwd_rsp); + bfa_sm_set_state(rp, bfa_rport_sm_offline); + bfa_rport_offline_cb(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwd_del); + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwd_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_rport_offline_cb(rp); + break; + + default: + bfa_stats(rp, sm_fwd_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +static void +bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_QRESUME: + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); + bfa_rport_send_fwdelete(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_fwd_del); + bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_fwd_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_offline_cb(rp); + break; + + default: + bfa_stats(rp, sm_fwd_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Offline state. 
+ */ +static void +bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_off_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_ONLINE: + bfa_stats(rp, sm_off_on); + if (bfa_rport_send_fwcreate(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_off_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + default: + bfa_stats(rp, sm_off_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Rport is deleted, waiting for firmware response to delete. + */ +static void +bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_del_fwrsp); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_del_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + default: + bfa_sm_fault(rp->bfa, event); + } +} + +static void +bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_QRESUME: + bfa_stats(rp, sm_del_fwrsp); + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + bfa_rport_send_fwdelete(rp); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_del_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_reqq_wcancel(&rp->reqq_wait); + bfa_rport_free(rp); + break; + + default: + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Waiting for rport create response from firmware. A delete is pending. + */ +static void +bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_delp_fwrsp); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_deleting); + else + bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_delp_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + default: + bfa_stats(rp, sm_delp_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * Waiting for rport create response from firmware. Rport offline is pending. + */ +static void +bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, + enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_FWRSP: + bfa_stats(rp, sm_offp_fwrsp); + if (bfa_rport_send_fwdelete(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_offp_del); + bfa_sm_set_state(rp, bfa_rport_sm_delete_pending); + break; + + case BFA_RPORT_SM_HWFAIL: + bfa_stats(rp, sm_offp_hwf); + bfa_sm_set_state(rp, bfa_rport_sm_iocdisable); + break; + + default: + bfa_stats(rp, sm_offp_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + +/** + * IOC h/w failed. 
+ */ +static void +bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) +{ + bfa_trc(rp->bfa, rp->rport_tag); + bfa_trc(rp->bfa, event); + + switch (event) { + case BFA_RPORT_SM_OFFLINE: + bfa_stats(rp, sm_iocd_off); + bfa_rport_offline_cb(rp); + break; + + case BFA_RPORT_SM_DELETE: + bfa_stats(rp, sm_iocd_del); + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + bfa_rport_free(rp); + break; + + case BFA_RPORT_SM_ONLINE: + bfa_stats(rp, sm_iocd_on); + if (bfa_rport_send_fwcreate(rp)) + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate); + else + bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull); + break; + + case BFA_RPORT_SM_HWFAIL: + break; + + default: + bfa_stats(rp, sm_iocd_unexp); + bfa_sm_fault(rp->bfa, event); + } +} + + + +/** + * bfa_rport_private BFA rport private functions + */ + +static void +__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_rport_s *rp = cbarg; + + if (complete) + bfa_cb_rport_online(rp->rport_drv); +} + +static void +__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_rport_s *rp = cbarg; + + if (complete) + bfa_cb_rport_offline(rp->rport_drv); +} + +static void +bfa_rport_qresume(void *cbarg) +{ + struct bfa_rport_s *rp = cbarg; + + bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME); +} + +static void +bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, + u32 *dm_len) +{ + if (cfg->fwcfg.num_rports < BFA_RPORT_MIN) + cfg->fwcfg.num_rports = BFA_RPORT_MIN; + + *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s); +} + +static void +bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); + struct bfa_rport_s *rp; + u16 i; + + INIT_LIST_HEAD(&mod->rp_free_q); + INIT_LIST_HEAD(&mod->rp_active_q); + + rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo); + mod->rps_list = rp; + mod->num_rports = cfg->fwcfg.num_rports; + + bfa_assert(mod->num_rports && + !(mod->num_rports & (mod->num_rports - 1))); + + for (i = 0; i < mod->num_rports; i++, rp++) { + bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s)); + rp->bfa = bfa; + rp->rport_tag = i; + bfa_sm_set_state(rp, bfa_rport_sm_uninit); + + /** + * - is unused + */ + if (i) + list_add_tail(&rp->qe, &mod->rp_free_q); + + bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); + } + + /** + * consume memory + */ + bfa_meminfo_kva(meminfo) = (u8 *) rp; +} + +static void +bfa_rport_detach(struct bfa_s *bfa) +{ +} + +static void +bfa_rport_start(struct bfa_s *bfa) +{ +} + +static void +bfa_rport_stop(struct bfa_s *bfa) +{ +} + +static void +bfa_rport_iocdisable(struct bfa_s *bfa) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); + struct bfa_rport_s *rport; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &mod->rp_active_q) { + rport = (struct bfa_rport_s *) qe; + bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL); + } +} + +static struct bfa_rport_s * +bfa_rport_alloc(struct bfa_rport_mod_s *mod) +{ + struct bfa_rport_s *rport; + + bfa_q_deq(&mod->rp_free_q, &rport); + if (rport) + list_add_tail(&rport->qe, &mod->rp_active_q); + + return rport; +} + +static void +bfa_rport_free(struct bfa_rport_s *rport) +{ + struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); + + bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport)); + list_del(&rport->qe); + list_add_tail(&rport->qe, &mod->rp_free_q); +} + +static bfa_boolean_t +bfa_rport_send_fwcreate(struct bfa_rport_s *rp) +{ + struct bfi_rport_create_req_s *m; + + 
/** + * check for room in queue to send request now + */ + m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); + if (!m) { + bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, + bfa_lpuid(rp->bfa)); + m->bfa_handle = rp->rport_tag; + m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz); + m->pid = rp->rport_info.pid; + m->lp_tag = rp->rport_info.lp_tag; + m->local_pid = rp->rport_info.local_pid; + m->fc_class = rp->rport_info.fc_class; + m->vf_en = rp->rport_info.vf_en; + m->vf_id = rp->rport_info.vf_id; + m->cisc = rp->rport_info.cisc; + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_rport_send_fwdelete(struct bfa_rport_s *rp) +{ + struct bfi_rport_delete_req_s *m; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); + if (!m) { + bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ, + bfa_lpuid(rp->bfa)); + m->fw_handle = rp->fw_handle; + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_rport_send_fwspeed(struct bfa_rport_s *rp) +{ + struct bfa_rport_speed_req_s *m; + + /** + * check for room in queue to send request now + */ + m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); + if (!m) { + bfa_trc(rp->bfa, rp->rport_info.speed); + return BFA_FALSE; + } + + bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ, + bfa_lpuid(rp->bfa)); + m->fw_handle = rp->fw_handle; + m->speed = (u8)rp->rport_info.speed; + + /** + * queue I/O message to firmware + */ + bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); + return BFA_TRUE; +} + + + +/** + * bfa_rport_public + */ + +/** + * Rport interrupt processing. 
+ */ +void +bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) +{ + union bfi_rport_i2h_msg_u msg; + struct bfa_rport_s *rp; + + bfa_trc(bfa, m->mhdr.msg_id); + + msg.msg = m; + + switch (m->mhdr.msg_id) { + case BFI_RPORT_I2H_CREATE_RSP: + rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); + rp->fw_handle = msg.create_rsp->fw_handle; + rp->qos_attr = msg.create_rsp->qos_attr; + bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); + bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); + break; + + case BFI_RPORT_I2H_DELETE_RSP: + rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); + bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); + bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); + break; + + case BFI_RPORT_I2H_QOS_SCN: + rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle); + rp->event_arg.fw_msg = msg.qos_scn_evt; + bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN); + break; + + default: + bfa_trc(bfa, m->mhdr.msg_id); + bfa_assert(0); + } +} + + + +/** + * bfa_rport_api + */ + +struct bfa_rport_s * +bfa_rport_create(struct bfa_s *bfa, void *rport_drv) +{ + struct bfa_rport_s *rp; + + rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa)); + + if (rp == NULL) + return NULL; + + rp->bfa = bfa; + rp->rport_drv = rport_drv; + bfa_rport_clear_stats(rp); + + bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); + bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); + + return rp; +} + +void +bfa_rport_delete(struct bfa_rport_s *rport) +{ + bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE); +} + +void +bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) +{ + bfa_assert(rport_info->max_frmsz != 0); + + /** + * Some JBODs are seen to be not setting PDU size correctly in PLOGI + * responses. Default to minimum size. + */ + if (rport_info->max_frmsz == 0) { + bfa_trc(rport->bfa, rport->rport_tag); + rport_info->max_frmsz = FC_MIN_PDUSZ; + } + + bfa_os_assign(rport->rport_info, *rport_info); + bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); +} + +void +bfa_rport_offline(struct bfa_rport_s *rport) +{ + bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE); +} + +void +bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) +{ + bfa_assert(speed != 0); + bfa_assert(speed != BFA_PORT_SPEED_AUTO); + + rport->rport_info.speed = speed; + bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); +} + +void +bfa_rport_get_stats(struct bfa_rport_s *rport, + struct bfa_rport_hal_stats_s *stats) +{ + *stats = rport->stats; +} + +void +bfa_rport_get_qos_attr(struct bfa_rport_s *rport, + struct bfa_rport_qos_attr_s *qos_attr) +{ + qos_attr->qos_priority = rport->qos_attr.qos_priority; + qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id); + +} + +void +bfa_rport_clear_stats(struct bfa_rport_s *rport) +{ + bfa_os_memset(&rport->stats, 0, sizeof(rport->stats)); +} + + +/** + * SGPG related functions + */ + +/** + * Compute and return memory needed by FCP(im) module. 
+ */ +static void +bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, + u32 *dm_len) +{ + if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN) + cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN; + + *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s); + *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s); +} + + +static void +bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + int i; + struct bfa_sgpg_s *hsgpg; + struct bfi_sgpg_s *sgpg; + u64 align_len; + + union { + u64 pa; + union bfi_addr_u addr; + } sgpg_pa, sgpg_pa_tmp; + + INIT_LIST_HEAD(&mod->sgpg_q); + INIT_LIST_HEAD(&mod->sgpg_wait_q); + + bfa_trc(bfa, cfg->drvcfg.num_sgpgs); + + mod->num_sgpgs = cfg->drvcfg.num_sgpgs; + mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo); + align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa); + mod->sgpg_arr_pa += align_len; + mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) + + align_len); + mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) + + align_len); + + hsgpg = mod->hsgpg_arr; + sgpg = mod->sgpg_arr; + sgpg_pa.pa = mod->sgpg_arr_pa; + mod->free_sgpgs = mod->num_sgpgs; + + bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); + + for (i = 0; i < mod->num_sgpgs; i++) { + bfa_os_memset(hsgpg, 0, sizeof(*hsgpg)); + bfa_os_memset(sgpg, 0, sizeof(*sgpg)); + + hsgpg->sgpg = sgpg; + sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); + hsgpg->sgpg_pa = sgpg_pa_tmp.addr; + list_add_tail(&hsgpg->qe, &mod->sgpg_q); + + hsgpg++; + sgpg++; + sgpg_pa.pa += sizeof(struct bfi_sgpg_s); + } + + bfa_meminfo_kva(minfo) = (u8 *) hsgpg; + bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg; + bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa; +} + +static void +bfa_sgpg_detach(struct bfa_s *bfa) +{ +} + +static void +bfa_sgpg_start(struct bfa_s *bfa) +{ +} + +static void +bfa_sgpg_stop(struct bfa_s *bfa) +{ +} + +static void +bfa_sgpg_iocdisable(struct bfa_s *bfa) +{ +} + + + +/** + * hal_sgpg_public BFA SGPG public functions + */ + +bfa_status_t +bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + struct bfa_sgpg_s *hsgpg; + int i; + + bfa_trc_fp(bfa, nsgpgs); + + if (mod->free_sgpgs < nsgpgs) + return BFA_STATUS_ENOMEM; + + for (i = 0; i < nsgpgs; i++) { + bfa_q_deq(&mod->sgpg_q, &hsgpg); + bfa_assert(hsgpg); + list_add_tail(&hsgpg->qe, sgpg_q); + } + + mod->free_sgpgs -= nsgpgs; + return BFA_STATUS_OK; +} + +void +bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + struct bfa_sgpg_wqe_s *wqe; + + bfa_trc_fp(bfa, nsgpg); + + mod->free_sgpgs += nsgpg; + bfa_assert(mod->free_sgpgs <= mod->num_sgpgs); + + list_splice_tail_init(sgpg_q, &mod->sgpg_q); + + if (list_empty(&mod->sgpg_wait_q)) + return; + + /** + * satisfy as many waiting requests as possible + */ + do { + wqe = bfa_q_first(&mod->sgpg_wait_q); + if (mod->free_sgpgs < wqe->nsgpg) + nsgpg = mod->free_sgpgs; + else + nsgpg = wqe->nsgpg; + bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); + wqe->nsgpg -= nsgpg; + if (wqe->nsgpg == 0) { + list_del(&wqe->qe); + wqe->cbfn(wqe->cbarg); + } + } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q)); +} + +void +bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + + bfa_assert(nsgpg > 0); + bfa_assert(nsgpg > 
mod->free_sgpgs); + + wqe->nsgpg_total = wqe->nsgpg = nsgpg; + + /** + * allocate any left to this one first + */ + if (mod->free_sgpgs) { + /** + * no one else is waiting for SGPG + */ + bfa_assert(list_empty(&mod->sgpg_wait_q)); + list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); + wqe->nsgpg -= mod->free_sgpgs; + mod->free_sgpgs = 0; + } + + list_add_tail(&wqe->qe, &mod->sgpg_wait_q); +} + +void +bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) +{ + struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); + + bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); + list_del(&wqe->qe); + + if (wqe->nsgpg_total != wqe->nsgpg) + bfa_sgpg_mfree(bfa, &wqe->sgpg_q, + wqe->nsgpg_total - wqe->nsgpg); +} + +void +bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), + void *cbarg) +{ + INIT_LIST_HEAD(&wqe->sgpg_q); + wqe->cbfn = cbfn; + wqe->cbarg = cbarg; +} + +/** + * UF related functions + */ +/* + ***************************************************************************** + * Internal functions + ***************************************************************************** + */ +static void +__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_uf_s *uf = cbarg; + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa); + + if (complete) + ufm->ufrecv(ufm->cbarg, uf); +} + +static void +claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +{ + u32 uf_pb_tot_sz; + + ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi); + ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi); + uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs), + BFA_DMA_ALIGN_SZ); + + bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; + bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; + + bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); +} + +static void +claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +{ + struct bfi_uf_buf_post_s *uf_bp_msg; + struct bfi_sge_s *sge; + union bfi_addr_u sga_zero = { {0} }; + u16 i; + u16 buf_len; + + ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi); + uf_bp_msg = ufm->uf_buf_posts; + + for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; + i++, uf_bp_msg++) { + bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); + + uf_bp_msg->buf_tag = i; + buf_len = sizeof(struct bfa_uf_buf_s); + uf_bp_msg->buf_len = bfa_os_htons(buf_len); + bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, + bfa_lpuid(ufm->bfa)); + + sge = uf_bp_msg->sge; + sge[0].sg_len = buf_len; + sge[0].flags = BFI_SGE_DATA_LAST; + bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i)); + bfa_sge_to_be(sge); + + sge[1].sg_len = buf_len; + sge[1].flags = BFI_SGE_PGDLEN; + sge[1].sga = sga_zero; + bfa_sge_to_be(&sge[1]); + } + + /** + * advance pointer beyond consumed memory + */ + bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; +} + +static void +claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +{ + u16 i; + struct bfa_uf_s *uf; + + /* + * Claim block of memory for UF list + */ + ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi); + + /* + * Initialize UFs and queue it in UF free queue + */ + for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { + bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s)); + uf->bfa = ufm->bfa; + uf->uf_tag = i; + uf->pb_len = sizeof(struct bfa_uf_buf_s); + uf->buf_kva = (void *)&ufm->uf_pbs_kva[i]; + uf->buf_pa = ufm_pbs_pa(ufm, i); + list_add_tail(&uf->qe, &ufm->uf_free_q); + } + + /** + * advance memory pointer + */ + bfa_meminfo_kva(mi) = (u8 *) uf; +} + +static void 
+uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) +{ + claim_uf_pbs(ufm, mi); + claim_ufs(ufm, mi); + claim_uf_post_msgs(ufm, mi); +} + +static void +bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) +{ + u32 num_ufs = cfg->fwcfg.num_uf_bufs; + + /* + * dma-able memory for UF posted bufs + */ + *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs), + BFA_DMA_ALIGN_SZ); + + /* + * kernel Virtual memory for UFs and UF buf post msg copies + */ + *ndm_len += sizeof(struct bfa_uf_s) * num_ufs; + *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs; +} + +static void +bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, + struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + + bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); + ufm->bfa = bfa; + ufm->num_ufs = cfg->fwcfg.num_uf_bufs; + INIT_LIST_HEAD(&ufm->uf_free_q); + INIT_LIST_HEAD(&ufm->uf_posted_q); + + uf_mem_claim(ufm, meminfo); +} + +static void +bfa_uf_detach(struct bfa_s *bfa) +{ +} + +static struct bfa_uf_s * +bfa_uf_get(struct bfa_uf_mod_s *uf_mod) +{ + struct bfa_uf_s *uf; + + bfa_q_deq(&uf_mod->uf_free_q, &uf); + return uf; +} + +static void +bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf) +{ + list_add_tail(&uf->qe, &uf_mod->uf_free_q); +} + +static bfa_status_t +bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) +{ + struct bfi_uf_buf_post_s *uf_post_msg; + + uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP); + if (!uf_post_msg) + return BFA_STATUS_FAILED; + + bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], + sizeof(struct bfi_uf_buf_post_s)); + bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); + + bfa_trc(ufm->bfa, uf->uf_tag); + + list_add_tail(&uf->qe, &ufm->uf_posted_q); + return BFA_STATUS_OK; +} + +static void +bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod) +{ + struct bfa_uf_s *uf; + + while ((uf = bfa_uf_get(uf_mod)) != NULL) { + if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK) + break; + } +} + +static void +uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + u16 uf_tag = m->buf_tag; + struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag]; + struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; + u8 *buf = &uf_buf->d[0]; + struct fchs_s *fchs; + + m->frm_len = bfa_os_ntohs(m->frm_len); + m->xfr_len = bfa_os_ntohs(m->xfr_len); + + fchs = (struct fchs_s *)uf_buf; + + list_del(&uf->qe); /* dequeue from posted queue */ + + uf->data_ptr = buf; + uf->data_len = m->xfr_len; + + bfa_assert(uf->data_len >= sizeof(struct fchs_s)); + + if (uf->data_len == sizeof(struct fchs_s)) { + bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, + uf->data_len, (struct fchs_s *)buf); + } else { + u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s))); + bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF, + BFA_PL_EID_RX, uf->data_len, + (struct fchs_s *)buf, pld_w0); + } + + if (bfa->fcs) + __bfa_cb_uf_recv(uf, BFA_TRUE); + else + bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); +} + +static void +bfa_uf_stop(struct bfa_s *bfa) +{ +} + +static void +bfa_uf_iocdisable(struct bfa_s *bfa) +{ + struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); + struct bfa_uf_s *uf; + struct list_head *qe, *qen; + + list_for_each_safe(qe, qen, &ufm->uf_posted_q) { + uf = (struct bfa_uf_s *) qe; + list_del(&uf->qe); + bfa_uf_put(ufm, uf); + } +} + +static void +bfa_uf_start(struct bfa_s *bfa) +{ + bfa_uf_post_all(BFA_UF_MOD(bfa)); +} + + + +/** + * hal_uf_api + */ + 
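Before the hal_uf_api entry points that follow, a minimal illustrative sketch of how an upper layer is expected to consume the unsolicited-frame machinery above: register a receive handler, inspect the frame through the bfa_uf_get_frmbuf()/bfa_uf_get_frmlen() accessors declared in bfa_svc.h, and return the buffer with bfa_uf_free(), which frees and re-posts it to firmware. my_uf_recv() and my_attach() are hypothetical names used only for illustration.

static void
my_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	/* cbarg is the value passed to bfa_uf_recv_register() */
	struct fchs_s *fchs = bfa_uf_get_frmbuf(uf);
	u16 frmlen = bfa_uf_get_frmlen(uf);

	bfa_trc(uf->bfa, frmlen);

	/* ... parse the frame at fchs (FC header first), frmlen bytes ... */
	(void) fchs;

	/* return the buffer; bfa_uf_free() frees and re-posts it to firmware */
	bfa_uf_free(uf);
}

static void
my_attach(struct bfa_s *bfa, void *my_drv)
{
	bfa_uf_recv_register(bfa, my_uf_recv, my_drv);
}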
+/**
+ * Register handler for all unsolicited receive frames.
+ *
+ * @param[in]	bfa	BFA instance
+ * @param[in]	ufrecv	receive handler function
+ * @param[in]	cbarg	receive handler arg
+ */
+void
+bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+	ufm->ufrecv = ufrecv;
+	ufm->cbarg = cbarg;
+}
+
+/**
+ * Free an unsolicited frame back to BFA.
+ *
+ * @param[in]	uf	unsolicited frame to be freed
+ *
+ * @return None
+ */
+void
+bfa_uf_free(struct bfa_uf_s *uf)
+{
+	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
+	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
+}
+
+
+
+/**
+ * uf_pub BFA uf module public functions
+ */
+void
+bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	bfa_trc(bfa, msg->mhdr.msg_id);
+
+	switch (msg->mhdr.msg_id) {
+	case BFI_UF_I2H_FRM_RCVD:
+		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
+		break;
+
+	default:
+		bfa_trc(bfa, msg->mhdr.msg_id);
+		bfa_assert(0);
+	}
+}
+
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
new file mode 100644
index 000000000000..9921dad0d039
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_SVC_H__
+#define __BFA_SVC_H__
+
+#include "bfa_cs.h"
+#include "bfi_ms.h"
+
+
+/**
+ * Scatter-gather DMA related defines
+ */
+#define BFA_SGPG_MIN	(16)
+
+/**
+ * Alignment macro for SG page allocation
+ */
+#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1))	\
+			& ~(sizeof(struct bfi_sgpg_s) - 1))
+
+struct bfa_sgpg_wqe_s {
+	struct list_head qe;	/* queue sg page element	*/
+	int	nsgpg;		/* pages to be allocated	*/
+	int	nsgpg_total;	/* total pages required		*/
+	void	(*cbfn) (void *cbarg);	/* callback function	*/
+	void	*cbarg;		/* callback arg			*/
+	struct list_head sgpg_q;	/* queue of alloced sgpgs */
+};
+
+struct bfa_sgpg_s {
+	struct list_head qe;	/* queue sg page element	*/
+	struct bfi_sgpg_s *sgpg;	/* va of SG page	*/
+	union bfi_addr_u sgpg_pa;	/* pa of SG page	*/
+};
+
+/**
+ * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
+ * SG pages required.
+ */ +#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1) + +struct bfa_sgpg_mod_s { + struct bfa_s *bfa; + int num_sgpgs; /* number of SG pages */ + int free_sgpgs; /* number of free SG pages */ + struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */ + struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */ + u64 sgpg_arr_pa; /* SG page array DMA addr */ + struct list_head sgpg_q; /* queue of free SG pages */ + struct list_head sgpg_wait_q; /* wait queue for SG pages */ +}; +#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod) + +bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, + int nsgpgs); +void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs); +void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, + void (*cbfn) (void *cbarg), void *cbarg); +void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs); +void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); + + +/** + * FCXP related defines + */ +#define BFA_FCXP_MIN (1) +#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256) +#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256) + +struct bfa_fcxp_mod_s { + struct bfa_s *bfa; /* backpointer to BFA */ + struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */ + u16 num_fcxps; /* max num FCXP requests */ + struct list_head fcxp_free_q; /* free FCXPs */ + struct list_head fcxp_active_q; /* active FCXPs */ + void *req_pld_list_kva; /* list of FCXP req pld */ + u64 req_pld_list_pa; /* list of FCXP req pld */ + void *rsp_pld_list_kva; /* list of FCXP resp pld */ + u64 rsp_pld_list_pa; /* list of FCXP resp pld */ + struct list_head wait_q; /* wait queue for free fcxp */ + u32 req_pld_sz; + u32 rsp_pld_sz; +}; + +#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod) +#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag]) + +typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp, + void *cb_arg, bfa_status_t req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs); + +typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid); +typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid); +typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp, + void *cbarg, enum bfa_status req_status, + u32 rsp_len, u32 resid_len, + struct fchs_s *rsp_fchs); +typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp); + + + +/** + * Information needed for a FCXP request + */ +struct bfa_fcxp_req_info_s { + struct bfa_rport_s *bfa_rport; + /** Pointer to the bfa rport that was + * returned from bfa_rport_create(). 
+ * This could be left NULL for WKA or + * for FCXP interactions before the + * rport nexus is established + */ + struct fchs_s fchs; /* request FC header structure */ + u8 cts; /* continous sequence */ + u8 class; /* FC class for the request/response */ + u16 max_frmsz; /* max send frame size */ + u16 vf_id; /* vsan tag if applicable */ + u8 lp_tag; /* lport tag */ + u32 req_tot_len; /* request payload total length */ +}; + +struct bfa_fcxp_rsp_info_s { + struct fchs_s rsp_fchs; + /** !< Response frame's FC header will + * be sent back in this field */ + u8 rsp_timeout; + /** !< timeout in seconds, 0-no response + */ + u8 rsvd2[3]; + u32 rsp_maxlen; /* max response length expected */ +}; + +struct bfa_fcxp_s { + struct list_head qe; /* fcxp queue element */ + bfa_sm_t sm; /* state machine */ + void *caller; /* driver or fcs */ + struct bfa_fcxp_mod_s *fcxp_mod; + /* back pointer to fcxp mod */ + u16 fcxp_tag; /* internal tag */ + struct bfa_fcxp_req_info_s req_info; + /* request info */ + struct bfa_fcxp_rsp_info_s rsp_info; + /* response info */ + u8 use_ireqbuf; /* use internal req buf */ + u8 use_irspbuf; /* use internal rsp buf */ + u32 nreq_sgles; /* num request SGLEs */ + u32 nrsp_sgles; /* num response SGLEs */ + struct list_head req_sgpg_q; /* SG pages for request buf */ + struct list_head req_sgpg_wqe; /* wait queue for req SG page */ + struct list_head rsp_sgpg_q; /* SG pages for response buf */ + struct list_head rsp_sgpg_wqe; /* wait queue for rsp SG page */ + + bfa_fcxp_get_sgaddr_t req_sga_cbfn; + /* SG elem addr user function */ + bfa_fcxp_get_sglen_t req_sglen_cbfn; + /* SG elem len user function */ + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn; + /* SG elem addr user function */ + bfa_fcxp_get_sglen_t rsp_sglen_cbfn; + /* SG elem len user function */ + bfa_cb_fcxp_send_t send_cbfn; /* send completion callback */ + void *send_cbarg; /* callback arg */ + struct bfa_sge_s req_sge[BFA_FCXP_MAX_SGES]; + /* req SG elems */ + struct bfa_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; + /* rsp SG elems */ + u8 rsp_status; /* comp: rsp status */ + u32 rsp_len; /* comp: actual response len */ + u32 residue_len; /* comp: residual rsp length */ + struct fchs_s rsp_fchs; /* comp: response fchs */ + struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */ + struct bfa_reqq_wait_s reqq_wqe; + bfa_boolean_t reqq_waiting; +}; + +struct bfa_fcxp_wqe_s { + struct list_head qe; + bfa_fcxp_alloc_cbfn_t alloc_cbfn; + void *alloc_cbarg; + void *caller; + struct bfa_s *bfa; + int nreq_sgles; + int nrsp_sgles; + bfa_fcxp_get_sgaddr_t req_sga_cbfn; + bfa_fcxp_get_sglen_t req_sglen_cbfn; + bfa_fcxp_get_sgaddr_t rsp_sga_cbfn; + bfa_fcxp_get_sglen_t rsp_sglen_cbfn; +}; + +#define BFA_FCXP_REQ_PLD(_fcxp) (bfa_fcxp_get_reqbuf(_fcxp)) +#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs)) +#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp)) + +#define BFA_FCXP_REQ_PLD_PA(_fcxp) \ + ((_fcxp)->fcxp_mod->req_pld_list_pa + \ + ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag)) + +#define BFA_FCXP_RSP_PLD_PA(_fcxp) \ + ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \ + ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag)) + +void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + + +/** + * RPORT related defines + */ +#define BFA_RPORT_MIN 4 + +struct bfa_rport_mod_s { + struct bfa_rport_s *rps_list; /* list of rports */ + struct list_head rp_free_q; /* free bfa_rports */ + struct list_head rp_active_q; /* free bfa_rports */ + u16 num_rports; /* number of rports */ +}; + +#define BFA_RPORT_MOD(__bfa) 
(&(__bfa)->modules.rport_mod) + +/** + * Convert rport tag to RPORT + */ +#define BFA_RPORT_FROM_TAG(__bfa, _tag) \ + (BFA_RPORT_MOD(__bfa)->rps_list + \ + ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1))) + +/* + * protected functions + */ +void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + +/** + * BFA rport information. + */ +struct bfa_rport_info_s { + u16 max_frmsz; /* max rcv pdu size */ + u32 pid:24, /* remote port ID */ + lp_tag:8; /* tag */ + u32 local_pid:24, /* local port ID */ + cisc:8; /* CIRO supported */ + u8 fc_class; /* supported FC classes. enum fc_cos */ + u8 vf_en; /* virtual fabric enable */ + u16 vf_id; /* virtual fabric ID */ + enum bfa_port_speed speed; /* Rport's current speed */ +}; + +/** + * BFA rport data structure + */ +struct bfa_rport_s { + struct list_head qe; /* queue element */ + bfa_sm_t sm; /* state machine */ + struct bfa_s *bfa; /* backpointer to BFA */ + void *rport_drv; /* fcs/driver rport object */ + u16 fw_handle; /* firmware rport handle */ + u16 rport_tag; /* BFA rport tag */ + struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ + struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ + struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ + struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ + struct bfa_rport_qos_attr_s qos_attr; + union a { + bfa_status_t status; /* f/w status */ + void *fw_msg; /* QoS scn event */ + } event_arg; +}; +#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class) + + +/** + * UF - unsolicited receive related defines + */ + +#define BFA_UF_MIN (4) + + +struct bfa_uf_s { + struct list_head qe; /* queue element */ + struct bfa_s *bfa; /* bfa instance */ + u16 uf_tag; /* identifying tag fw msgs */ + u16 vf_id; + u16 src_rport_handle; + u16 rsvd; + u8 *data_ptr; + u16 data_len; /* actual receive length */ + u16 pb_len; /* posted buffer length */ + void *buf_kva; /* buffer virtual address */ + u64 buf_pa; /* buffer physical address */ + struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */ + struct bfa_sge_s sges[BFI_SGE_INLINE_MAX]; +}; + +/** + * Callback prototype for unsolicited frame receive handler. 
+ * + * @param[in] cbarg callback arg for receive handler + * @param[in] uf unsolicited frame descriptor + * + * @return None + */ +typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf); + +struct bfa_uf_mod_s { + struct bfa_s *bfa; /* back pointer to BFA */ + struct bfa_uf_s *uf_list; /* array of UFs */ + u16 num_ufs; /* num unsolicited rx frames */ + struct list_head uf_free_q; /* free UFs */ + struct list_head uf_posted_q; /* UFs posted to IOC */ + struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */ + u64 uf_pbs_pa; /* phy addr for UF bufs */ + struct bfi_uf_buf_post_s *uf_buf_posts; + /* pre-built UF post msgs */ + bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */ + void *cbarg; /* uf receive handler arg */ +}; + +#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod) + +#define ufm_pbs_pa(_ufmod, _uftag) \ + ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag)) + +void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + +#define BFA_UF_BUFSZ (2 * 1024 + 256) + +/** + * @todo private + */ +struct bfa_uf_buf_s { + u8 d[BFA_UF_BUFSZ]; +}; + + +/** + * LPS - bfa lport login/logout service interface + */ +struct bfa_lps_s { + struct list_head qe; /* queue element */ + struct bfa_s *bfa; /* parent bfa instance */ + bfa_sm_t sm; /* finite state machine */ + u8 lp_tag; /* lport tag */ + u8 reqq; /* lport request queue */ + u8 alpa; /* ALPA for loop topologies */ + u32 lp_pid; /* lport port ID */ + bfa_boolean_t fdisc; /* snd FDISC instead of FLOGI */ + bfa_boolean_t auth_en; /* enable authentication */ + bfa_boolean_t auth_req; /* authentication required */ + bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ + bfa_boolean_t fport; /* attached peer is F_PORT */ + bfa_boolean_t brcd_switch; /* attached peer is brcd sw */ + bfa_status_t status; /* login status */ + u16 pdusz; /* max receive PDU size */ + u16 pr_bbcred; /* BB_CREDIT from peer */ + u8 lsrjt_rsn; /* LSRJT reason */ + u8 lsrjt_expl; /* LSRJT explanation */ + wwn_t pwwn; /* port wwn of lport */ + wwn_t nwwn; /* node wwn of lport */ + wwn_t pr_pwwn; /* port wwn of lport peer */ + wwn_t pr_nwwn; /* node wwn of lport peer */ + mac_t lp_mac; /* fpma/spma MAC for lport */ + mac_t fcf_mac; /* FCF MAC of lport */ + struct bfa_reqq_wait_s wqe; /* request wait queue element */ + void *uarg; /* user callback arg */ + struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */ + struct bfi_lps_login_rsp_s *loginrsp; + bfa_eproto_status_t ext_status; +}; + +struct bfa_lps_mod_s { + struct list_head lps_free_q; + struct list_head lps_active_q; + struct bfa_lps_s *lps_arr; + int num_lps; +}; + +#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod) +#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag]) + +/* + * external functions + */ +void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); + + +/** + * FCPORT related defines + */ + +#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) +typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status); + +/** + * Link notification data structure + */ +struct bfa_fcport_ln_s { + struct bfa_fcport_s *fcport; + bfa_sm_t sm; + struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ + enum bfa_port_linkstate ln_event; /* ln event for callback */ +}; + +struct bfa_fcport_trunk_s { + struct bfa_trunk_attr_s attr; +}; + +/** + * BFA FC port data structure + */ +struct bfa_fcport_s { + struct bfa_s *bfa; /* parent BFA instance */ + bfa_sm_t sm; /* port state machine */ + wwn_t nwwn; /* node wwn of physical port */ + wwn_t pwwn; /* port wwn of 
physical port */
+	enum bfa_port_speed speed_sup;
+				/*  supported speeds */
+	enum bfa_port_speed speed;	/*  current speed */
+	enum bfa_port_topology topology;	/*  current topology */
+	u8		myalpa;		/*  my ALPA in LOOP topology */
+	u8		rsvd[3];
+	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
+	struct bfa_qos_attr_s	qos_attr;	/* QoS Attributes */
+	struct bfa_qos_vc_attr_s qos_vc_attr;	/*  VC info from ELP */
+	struct bfa_reqq_wait_s	reqq_wait;
+				/*  to wait for room in reqq */
+	struct bfa_reqq_wait_s	svcreq_wait;
+				/*  to wait for room in reqq */
+	struct bfa_reqq_wait_s	stats_reqq_wait;
+				/*  to wait for room in reqq (stats) */
+	void		*event_cbarg;
+	void		(*event_cbfn) (void *cbarg,
+				enum bfa_port_linkstate event);
+	union {
+		union bfi_fcport_i2h_msg_u i2hmsg;
+	} event_arg;
+	void		*bfad;	/*  BFA driver handle */
+	struct bfa_fcport_ln_s	ln;	/* Link Notification */
+	struct bfa_cb_qe_s	hcb_qe;	/*  BFA callback queue elem */
+	struct bfa_timer_s	timer;	/*  timer */
+	u32		msgtag;	/*  firmware msg tag for reply */
+	u8		*stats_kva;
+	u64		stats_pa;
+	union bfa_fcport_stats_u *stats;
+	union bfa_fcport_stats_u *stats_ret;	/*  driver stats location */
+	bfa_status_t	stats_status;	/*  stats/statsclr status */
+	bfa_boolean_t	stats_busy;	/*  outstanding stats/statsclr */
+	bfa_boolean_t	stats_qfull;
+	u32		stats_reset_time;	/*  stats reset time stamp */
+	bfa_cb_port_t	stats_cbfn;	/*  driver callback function */
+	void		*stats_cbarg;	/*  user callback arg */
+	bfa_boolean_t	diag_busy;	/*  diag busy status */
+	bfa_boolean_t	beacon;		/*  port beacon status */
+	bfa_boolean_t	link_e2e_beacon;	/*  link beacon status */
+	struct bfa_fcport_trunk_s trunk;
+	u16		fcoe_vlan;
+};
+
+#define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
+
+/*
+ * protected functions
+ */
+void bfa_fcport_init(struct bfa_s *bfa);
+void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+/*
+ * bfa fcport API functions
+ */
+bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
+				  enum bfa_port_speed speed);
+enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
+				     enum bfa_port_topology topo);
+enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
+bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
+u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
+u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
+u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
+void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
+void bfa_fcport_event_register(struct bfa_s *bfa,
+			void (*event_cbfn) (void *cbarg,
+			enum bfa_port_linkstate event), void *event_cbarg);
+bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
+void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
+bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
+					  enum bfa_port_speed speed);
+enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
+
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
+void bfa_fcport_beacon(void
*dev, bfa_boolean_t beacon, + bfa_boolean_t link_e2e_beacon); +void bfa_fcport_qos_get_attr(struct bfa_s *bfa, + struct bfa_qos_attr_s *qos_attr); +void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, + struct bfa_qos_vc_attr_s *qos_vc_attr); +bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa, + union bfa_fcport_stats_u *stats, + bfa_cb_port_t cbfn, void *cbarg); +bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, + void *cbarg); +bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, + union bfa_fcport_stats_u *stats, + bfa_cb_port_t cbfn, void *cbarg); +bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, + void *cbarg); +bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); +bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, + union bfa_fcport_stats_u *stats, + bfa_cb_port_t cbfn, void *cbarg); +bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, + void *cbarg); +bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); + +/* + * bfa rport API functions + */ +struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv); +void bfa_rport_delete(struct bfa_rport_s *rport); +void bfa_rport_online(struct bfa_rport_s *rport, + struct bfa_rport_info_s *rport_info); +void bfa_rport_offline(struct bfa_rport_s *rport); +void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed); +void bfa_rport_get_stats(struct bfa_rport_s *rport, + struct bfa_rport_hal_stats_s *stats); +void bfa_rport_clear_stats(struct bfa_rport_s *rport); +void bfa_cb_rport_online(void *rport); +void bfa_cb_rport_offline(void *rport); +void bfa_cb_rport_qos_scn_flowid(void *rport, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr); +void bfa_cb_rport_qos_scn_prio(void *rport, + struct bfa_rport_qos_attr_s old_qos_attr, + struct bfa_rport_qos_attr_s new_qos_attr); +void bfa_rport_get_qos_attr(struct bfa_rport_s *rport, + struct bfa_rport_qos_attr_s *qos_attr); + +/* + * bfa fcxp API functions + */ +struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa, + int nreq_sgles, int nrsp_sgles, + bfa_fcxp_get_sgaddr_t get_req_sga, + bfa_fcxp_get_sglen_t get_req_sglen, + bfa_fcxp_get_sgaddr_t get_rsp_sga, + bfa_fcxp_get_sglen_t get_rsp_sglen); +void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, + bfa_fcxp_alloc_cbfn_t alloc_cbfn, + void *cbarg, void *bfad_fcxp, + int nreq_sgles, int nrsp_sgles, + bfa_fcxp_get_sgaddr_t get_req_sga, + bfa_fcxp_get_sglen_t get_req_sglen, + bfa_fcxp_get_sgaddr_t get_rsp_sga, + bfa_fcxp_get_sglen_t get_rsp_sglen); +void bfa_fcxp_walloc_cancel(struct bfa_s *bfa, + struct bfa_fcxp_wqe_s *wqe); +void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp); + +void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp); +void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp); + +void bfa_fcxp_free(struct bfa_fcxp_s *fcxp); + +void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, + u16 vf_id, u8 lp_tag, + bfa_boolean_t cts, enum fc_cos cos, + u32 reqlen, struct fchs_s *fchs, + bfa_cb_fcxp_send_t cbfn, + void *cbarg, + u32 rsp_maxlen, u8 rsp_timeout); +bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); +u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); +u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); + +static inline void * +bfa_uf_get_frmbuf(struct bfa_uf_s *uf) +{ + return uf->data_ptr; +} + +static inline u16 +bfa_uf_get_frmlen(struct bfa_uf_s *uf) +{ + return uf->data_len; +} 
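/*
 * Editor's sketch -- not part of the patch: the two inline accessors above,
 * together with bfa_uf_recv_register() and bfa_uf_free() declared just
 * below, make up the whole unsolicited-frame contract.  A consumer such as
 * the FCS layer registers one receive handler, reads the FC header at the
 * start of the frame buffer, and returns the buffer when done so it can be
 * reposted to firmware.  The handler name and its void return type here are
 * illustrative assumptions only.
 */
static void
example_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct fchs_s *fchs = (struct fchs_s *) bfa_uf_get_frmbuf(uf);
	u16 frmlen = bfa_uf_get_frmlen(uf);

	/* inspect fchs/frmlen; the payload follows the FC header */
	(void) fchs;
	(void) frmlen;

	/* hand the buffer back so BFA can repost it to firmware */
	bfa_uf_free(uf);
}

/* registration, typically done once at attach time:
 *	bfa_uf_recv_register(bfa, example_uf_recv, cbarg);
 */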
+ +/* + * bfa uf API functions + */ +void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, + void *cbarg); +void bfa_uf_free(struct bfa_uf_s *uf); + +/** + * bfa lport service api + */ + +u32 bfa_lps_get_max_vport(struct bfa_s *bfa); +struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); +void bfa_lps_delete(struct bfa_lps_s *lps); +void bfa_lps_discard(struct bfa_lps_s *lps); +void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, + u16 pdusz, wwn_t pwwn, wwn_t nwwn, + bfa_boolean_t auth_en); +void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, + wwn_t pwwn, wwn_t nwwn); +void bfa_lps_flogo(struct bfa_lps_s *lps); +void bfa_lps_fdisclogo(struct bfa_lps_s *lps); +u8 bfa_lps_get_tag(struct bfa_lps_s *lps); +bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps); +bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps); +bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps); +bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps); +bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps); +u32 bfa_lps_get_pid(struct bfa_lps_s *lps); +u32 bfa_lps_get_base_pid(struct bfa_s *bfa); +u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); +u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps); +wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps); +wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); +u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); +u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); +mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps); +void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); +void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); +void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); +void bfa_cb_lps_cvl_event(void *bfad, void *uarg); + +void bfa_trunk_enable_cfg(struct bfa_s *bfa); +bfa_status_t bfa_trunk_enable(struct bfa_s *bfa); +bfa_status_t bfa_trunk_disable(struct bfa_s *bfa); +bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa, + struct bfa_trunk_attr_s *attr); + +#endif /* __BFA_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfa_timer.c b/drivers/scsi/bfa/bfa_timer.c deleted file mode 100644 index cb76481f5cb1..000000000000 --- a/drivers/scsi/bfa/bfa_timer.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include - -void -bfa_timer_init(struct bfa_timer_mod_s *mod) -{ - INIT_LIST_HEAD(&mod->timer_q); -} - -void -bfa_timer_beat(struct bfa_timer_mod_s *mod) -{ - struct list_head *qh = &mod->timer_q; - struct list_head *qe, *qe_next; - struct bfa_timer_s *elem; - struct list_head timedout_q; - - INIT_LIST_HEAD(&timedout_q); - - qe = bfa_q_next(qh); - - while (qe != qh) { - qe_next = bfa_q_next(qe); - - elem = (struct bfa_timer_s *) qe; - if (elem->timeout <= BFA_TIMER_FREQ) { - elem->timeout = 0; - list_del(&elem->qe); - list_add_tail(&elem->qe, &timedout_q); - } else { - elem->timeout -= BFA_TIMER_FREQ; - } - - qe = qe_next; /* go to next elem */ - } - - /* - * Pop all the timeout entries - */ - while (!list_empty(&timedout_q)) { - bfa_q_deq(&timedout_q, &elem); - elem->timercb(elem->arg); - } -} - -/** - * Should be called with lock protection - */ -void -bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, - void (*timercb) (void *), void *arg, unsigned int timeout) -{ - - bfa_assert(timercb != NULL); - bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer)); - - timer->timeout = timeout; - timer->timercb = timercb; - timer->arg = arg; - - list_add_tail(&timer->qe, &mod->timer_q); -} - -/** - * Should be called with lock protection - */ -void -bfa_timer_stop(struct bfa_timer_s *timer) -{ - bfa_assert(!list_empty(&timer->qe)); - - list_del(&timer->qe); -} diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h deleted file mode 100644 index a7a82610db85..000000000000 --- a/drivers/scsi/bfa/bfa_trcmod_priv.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * hal_trcmod.h BFA trace modules - */ - -#ifndef __BFA_TRCMOD_PRIV_H__ -#define __BFA_TRCMOD_PRIV_H__ - -#include - -/* - * !!! Only append to the enums defined here to avoid any versioning - * !!! 
needed between trace utility and driver version - */ -enum { - BFA_TRC_HAL_INTR = 1, - BFA_TRC_HAL_FCXP = 2, - BFA_TRC_HAL_UF = 3, - BFA_TRC_HAL_RPORT = 4, - BFA_TRC_HAL_FCPIM = 5, - BFA_TRC_HAL_IOIM = 6, - BFA_TRC_HAL_TSKIM = 7, - BFA_TRC_HAL_ITNIM = 8, - BFA_TRC_HAL_FCPORT = 9, - BFA_TRC_HAL_SGPG = 10, - BFA_TRC_HAL_FLASH = 11, - BFA_TRC_HAL_DEBUG = 12, - BFA_TRC_HAL_WWN = 13, - BFA_TRC_HAL_FLASH_RAW = 14, - BFA_TRC_HAL_SBOOT = 15, - BFA_TRC_HAL_SBOOT_IO = 16, - BFA_TRC_HAL_SBOOT_INTR = 17, - BFA_TRC_HAL_SBTEST = 18, - BFA_TRC_HAL_IPFC = 19, - BFA_TRC_HAL_IOCFC = 20, - BFA_TRC_HAL_FCPTM = 21, - BFA_TRC_HAL_IOTM = 22, - BFA_TRC_HAL_TSKTM = 23, - BFA_TRC_HAL_TIN = 24, - BFA_TRC_HAL_LPS = 25, - BFA_TRC_HAL_FCDIAG = 26, - BFA_TRC_HAL_PBIND = 27, - BFA_TRC_HAL_IOCFC_CT = 28, - BFA_TRC_HAL_IOCFC_CB = 29, - BFA_TRC_HAL_IOCFC_Q = 30, -}; - -#endif /* __BFA_TRCMOD_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c deleted file mode 100644 index ad9aaaedd3f1..000000000000 --- a/drivers/scsi/bfa/bfa_tskim.c +++ /dev/null @@ -1,690 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include - -BFA_TRC_FILE(HAL, TSKIM); - -/** - * task management completion handling - */ -#define bfa_tskim_qcomp(__tskim, __cbfn) do { \ - bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, \ - __cbfn, (__tskim)); \ - bfa_tskim_notify_comp(__tskim); \ -} while (0) - -#define bfa_tskim_notify_comp(__tskim) do { \ - if ((__tskim)->notify) \ - bfa_itnim_tskdone((__tskim)->itnim); \ -} while (0) - -/* - * forward declarations - */ -static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); -static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, - lun_t lun); -static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); -static void bfa_tskim_cleanp_comp(void *tskim_cbarg); -static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); -static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); -static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); -static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); - -/** - * bfa_tskim_sm - */ - -enum bfa_tskim_event { - BFA_TSKIM_SM_START = 1, /* TM command start */ - BFA_TSKIM_SM_DONE = 2, /* TM completion */ - BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */ - BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */ - BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */ - BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */ - BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */ - BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ -}; - -static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); -static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); -static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); -static void 
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); -static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); -static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); -static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event); - -/** - * Task management command beginning state. - */ -static void -bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_START: - bfa_sm_set_state(tskim, bfa_tskim_sm_active); - bfa_tskim_gather_ios(tskim); - - /** - * If device is offline, do not send TM on wire. Just cleanup - * any pending IO requests and complete TM request. - */ - if (!bfa_itnim_is_online(tskim->itnim)) { - bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); - tskim->tsk_status = BFI_TSKIM_STS_OK; - bfa_tskim_cleanup_ios(tskim); - return; - } - - if (!bfa_tskim_send(tskim)) { - bfa_sm_set_state(tskim, bfa_tskim_sm_qfull); - bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, - &tskim->reqq_wait); - } - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - -/** - * brief - * TM command is active, awaiting completion from firmware to - * cleanup IO requests in TM scope. - */ -static void -bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_DONE: - bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); - bfa_tskim_cleanup_ios(tskim); - break; - - case BFA_TSKIM_SM_CLEANUP: - bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); - if (!bfa_tskim_send_abort(tskim)) { - bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull); - bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq, - &tskim->reqq_wait); - } - break; - - case BFA_TSKIM_SM_HWFAIL: - bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); - bfa_tskim_iocdisable_ios(tskim); - bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - -/** - * An active TM is being cleaned up since ITN is offline. Awaiting cleanup - * completion event from firmware. - */ -static void -bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_DONE: - /** - * Ignore and wait for ABORT completion from firmware. - */ - break; - - case BFA_TSKIM_SM_CLEANUP_DONE: - bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); - bfa_tskim_cleanup_ios(tskim); - break; - - case BFA_TSKIM_SM_HWFAIL: - bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); - bfa_tskim_iocdisable_ios(tskim); - bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - -static void -bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_IOS_DONE: - bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); - bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done); - break; - - case BFA_TSKIM_SM_CLEANUP: - /** - * Ignore, TM command completed on wire. - * Notify TM conmpletion on IO cleanup completion. 
- */ - break; - - case BFA_TSKIM_SM_HWFAIL: - bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); - bfa_tskim_iocdisable_ios(tskim); - bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - -/** - * Task management command is waiting for room in request CQ - */ -static void -bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_QRESUME: - bfa_sm_set_state(tskim, bfa_tskim_sm_active); - bfa_tskim_send(tskim); - break; - - case BFA_TSKIM_SM_CLEANUP: - /** - * No need to send TM on wire since ITN is offline. - */ - bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); - bfa_reqq_wcancel(&tskim->reqq_wait); - bfa_tskim_cleanup_ios(tskim); - break; - - case BFA_TSKIM_SM_HWFAIL: - bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); - bfa_reqq_wcancel(&tskim->reqq_wait); - bfa_tskim_iocdisable_ios(tskim); - bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - -/** - * Task management command is active, awaiting for room in request CQ - * to send clean up request. - */ -static void -bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, - enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_DONE: - bfa_reqq_wcancel(&tskim->reqq_wait); - /** - * - * Fall through !!! - */ - - case BFA_TSKIM_SM_QRESUME: - bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); - bfa_tskim_send_abort(tskim); - break; - - case BFA_TSKIM_SM_HWFAIL: - bfa_sm_set_state(tskim, bfa_tskim_sm_hcb); - bfa_reqq_wcancel(&tskim->reqq_wait); - bfa_tskim_iocdisable_ios(tskim); - bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed); - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - -/** - * BFA callback is pending - */ -static void -bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -{ - bfa_trc(tskim->bfa, event); - - switch (event) { - case BFA_TSKIM_SM_HCB: - bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); - bfa_tskim_free(tskim); - break; - - case BFA_TSKIM_SM_CLEANUP: - bfa_tskim_notify_comp(tskim); - break; - - case BFA_TSKIM_SM_HWFAIL: - break; - - default: - bfa_sm_fault(tskim->bfa, event); - } -} - - - -/** - * bfa_tskim_private - */ - -static void -__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_tskim_s *tskim = cbarg; - - if (!complete) { - bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); - return; - } - - bfa_stats(tskim->itnim, tm_success); - bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status); -} - -static void -__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_tskim_s *tskim = cbarg; - - if (!complete) { - bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB); - return; - } - - bfa_stats(tskim->itnim, tm_failures); - bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, - BFI_TSKIM_STS_FAILED); -} - -static bfa_boolean_t -bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun) -{ - switch (tskim->tm_cmnd) { - case FCP_TM_TARGET_RESET: - return BFA_TRUE; - - case FCP_TM_ABORT_TASK_SET: - case FCP_TM_CLEAR_TASK_SET: - case FCP_TM_LUN_RESET: - case FCP_TM_CLEAR_ACA: - return (tskim->lun == lun); - - default: - bfa_assert(0); - } - - return BFA_FALSE; -} - -/** - * Gather affected IO requests and task management commands. 
- */ -static void -bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) -{ - struct bfa_itnim_s *itnim = tskim->itnim; - struct bfa_ioim_s *ioim; - struct list_head *qe, *qen; - - INIT_LIST_HEAD(&tskim->io_q); - - /** - * Gather any active IO requests first. - */ - list_for_each_safe(qe, qen, &itnim->io_q) { - ioim = (struct bfa_ioim_s *) qe; - if (bfa_tskim_match_scope - (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &tskim->io_q); - } - } - - /** - * Failback any pending IO requests immediately. - */ - list_for_each_safe(qe, qen, &itnim->pending_q) { - ioim = (struct bfa_ioim_s *) qe; - if (bfa_tskim_match_scope - (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { - list_del(&ioim->qe); - list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); - bfa_ioim_tov(ioim); - } - } -} - -/** - * IO cleanup completion - */ -static void -bfa_tskim_cleanp_comp(void *tskim_cbarg) -{ - struct bfa_tskim_s *tskim = tskim_cbarg; - - bfa_stats(tskim->itnim, tm_io_comps); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); -} - -/** - * Gather affected IO requests and task management commands. - */ -static void -bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) -{ - struct bfa_ioim_s *ioim; - struct list_head *qe, *qen; - - bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim); - - list_for_each_safe(qe, qen, &tskim->io_q) { - ioim = (struct bfa_ioim_s *) qe; - bfa_wc_up(&tskim->wc); - bfa_ioim_cleanup_tm(ioim, tskim); - } - - bfa_wc_wait(&tskim->wc); -} - -/** - * Send task management request to firmware. - */ -static bfa_boolean_t -bfa_tskim_send(struct bfa_tskim_s *tskim) -{ - struct bfa_itnim_s *itnim = tskim->itnim; - struct bfi_tskim_req_s *m; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(tskim->bfa, itnim->reqq); - if (!m) - return BFA_FALSE; - - /** - * build i/o request message next - */ - bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, - bfa_lpuid(tskim->bfa)); - - m->tsk_tag = bfa_os_htons(tskim->tsk_tag); - m->itn_fhdl = tskim->itnim->rport->fw_handle; - m->t_secs = tskim->tsecs; - m->lun = tskim->lun; - m->tm_flags = tskim->tm_cmnd; - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(tskim->bfa, itnim->reqq); - return BFA_TRUE; -} - -/** - * Send abort request to cleanup an active TM to firmware. - */ -static bfa_boolean_t -bfa_tskim_send_abort(struct bfa_tskim_s *tskim) -{ - struct bfa_itnim_s *itnim = tskim->itnim; - struct bfi_tskim_abortreq_s *m; - - /** - * check for room in queue to send request now - */ - m = bfa_reqq_next(tskim->bfa, itnim->reqq); - if (!m) - return BFA_FALSE; - - /** - * build i/o request message next - */ - bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, - bfa_lpuid(tskim->bfa)); - - m->tsk_tag = bfa_os_htons(tskim->tsk_tag); - - /** - * queue I/O message to firmware - */ - bfa_reqq_produce(tskim->bfa, itnim->reqq); - return BFA_TRUE; -} - -/** - * Call to resume task management cmnd waiting for room in request queue. - */ -static void -bfa_tskim_qresume(void *cbarg) -{ - struct bfa_tskim_s *tskim = cbarg; - - bfa_fcpim_stats(tskim->fcpim, qresumes); - bfa_stats(tskim->itnim, tm_qresumes); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); -} - -/** - * Cleanup IOs associated with a task mangement command on IOC failures. 
- */ -static void -bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) -{ - struct bfa_ioim_s *ioim; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &tskim->io_q) { - ioim = (struct bfa_ioim_s *) qe; - bfa_ioim_iocdisable(ioim); - } -} - - - -/** - * bfa_tskim_friend - */ - -/** - * Notification on completions from related ioim. - */ -void -bfa_tskim_iodone(struct bfa_tskim_s *tskim) -{ - bfa_wc_down(&tskim->wc); -} - -/** - * Handle IOC h/w failure notification from itnim. - */ -void -bfa_tskim_iocdisable(struct bfa_tskim_s *tskim) -{ - tskim->notify = BFA_FALSE; - bfa_stats(tskim->itnim, tm_iocdowns); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); -} - -/** - * Cleanup TM command and associated IOs as part of ITNIM offline. - */ -void -bfa_tskim_cleanup(struct bfa_tskim_s *tskim) -{ - tskim->notify = BFA_TRUE; - bfa_stats(tskim->itnim, tm_cleanups); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); -} - -/** - * Memory allocation and initialization. - */ -void -bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) -{ - struct bfa_tskim_s *tskim; - u16 i; - - INIT_LIST_HEAD(&fcpim->tskim_free_q); - - tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo); - fcpim->tskim_arr = tskim; - - for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) { - /* - * initialize TSKIM - */ - bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s)); - tskim->tsk_tag = i; - tskim->bfa = fcpim->bfa; - tskim->fcpim = fcpim; - tskim->notify = BFA_FALSE; - bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume, - tskim); - bfa_sm_set_state(tskim, bfa_tskim_sm_uninit); - - list_add_tail(&tskim->qe, &fcpim->tskim_free_q); - } - - bfa_meminfo_kva(minfo) = (u8 *) tskim; -} - -void -bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) -{ - /** - * @todo - */ -} - -void -bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; - struct bfa_tskim_s *tskim; - u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag); - - tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); - bfa_assert(tskim->tsk_tag == tsk_tag); - - tskim->tsk_status = rsp->tsk_status; - - /** - * Firmware sends BFI_TSKIM_STS_ABORTED status for abort - * requests. All other statuses are for normal completions. - */ - if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) { - bfa_stats(tskim->itnim, tm_cleanup_comps); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE); - } else { - bfa_stats(tskim->itnim, tm_fw_rsps); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE); - } -} - - - -/** - * bfa_tskim_api - */ - - -struct bfa_tskim_s * -bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) -{ - struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); - struct bfa_tskim_s *tskim; - - bfa_q_deq(&fcpim->tskim_free_q, &tskim); - - if (!tskim) - bfa_fcpim_stats(fcpim, no_tskims); - else - tskim->dtsk = dtsk; - - return tskim; -} - -void -bfa_tskim_free(struct bfa_tskim_s *tskim) -{ - bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); - list_del(&tskim->qe); - list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); -} - -/** - * Start a task management command. - * - * @param[in] tskim BFA task management command instance - * @param[in] itnim i-t nexus for the task management command - * @param[in] lun lun, if applicable - * @param[in] tm_cmnd Task management command code. - * @param[in] t_secs Timeout in seconds - * - * @return None. 
- */ -void -bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun, - enum fcp_tm_cmnd tm_cmnd, u8 tsecs) -{ - tskim->itnim = itnim; - tskim->lun = lun; - tskim->tm_cmnd = tm_cmnd; - tskim->tsecs = tsecs; - tskim->notify = BFA_FALSE; - bfa_stats(itnim, tm_cmnds); - - list_add_tail(&tskim->qe, &itnim->tsk_q); - bfa_sm_send_event(tskim, BFA_TSKIM_SM_START); -} - - diff --git a/drivers/scsi/bfa/bfa_uf.c b/drivers/scsi/bfa/bfa_uf.c deleted file mode 100644 index b9a9a686ef6a..000000000000 --- a/drivers/scsi/bfa/bfa_uf.c +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_uf.c BFA unsolicited frame receive implementation - */ - -#include -#include -#include -#include - -BFA_TRC_FILE(HAL, UF); -BFA_MODULE(uf); - -/* - ***************************************************************************** - * Internal functions - ***************************************************************************** - */ -static void -__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_uf_s *uf = cbarg; - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa); - - if (complete) - ufm->ufrecv(ufm->cbarg, uf); -} - -static void -claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) -{ - u32 uf_pb_tot_sz; - - ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi); - ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi); - uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs), - BFA_DMA_ALIGN_SZ); - - bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; - bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; - - bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); -} - -static void -claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) -{ - struct bfi_uf_buf_post_s *uf_bp_msg; - struct bfi_sge_s *sge; - union bfi_addr_u sga_zero = { {0} }; - u16 i; - u16 buf_len; - - ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi); - uf_bp_msg = ufm->uf_buf_posts; - - for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; - i++, uf_bp_msg++) { - bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); - - uf_bp_msg->buf_tag = i; - buf_len = sizeof(struct bfa_uf_buf_s); - uf_bp_msg->buf_len = bfa_os_htons(buf_len); - bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, - bfa_lpuid(ufm->bfa)); - - sge = uf_bp_msg->sge; - sge[0].sg_len = buf_len; - sge[0].flags = BFI_SGE_DATA_LAST; - bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i)); - bfa_sge_to_be(sge); - - sge[1].sg_len = buf_len; - sge[1].flags = BFI_SGE_PGDLEN; - sge[1].sga = sga_zero; - bfa_sge_to_be(&sge[1]); - } - - /** - * advance pointer beyond consumed memory - */ - bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; -} - -static void -claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) -{ - u16 i; - struct bfa_uf_s *uf; - - /* - * Claim block of memory for UF list - */ - ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi); - - /* - * Initialize UFs and queue it 
in UF free queue - */ - for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { - bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s)); - uf->bfa = ufm->bfa; - uf->uf_tag = i; - uf->pb_len = sizeof(struct bfa_uf_buf_s); - uf->buf_kva = (void *)&ufm->uf_pbs_kva[i]; - uf->buf_pa = ufm_pbs_pa(ufm, i); - list_add_tail(&uf->qe, &ufm->uf_free_q); - } - - /** - * advance memory pointer - */ - bfa_meminfo_kva(mi) = (u8 *) uf; -} - -static void -uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) -{ - claim_uf_pbs(ufm, mi); - claim_ufs(ufm, mi); - claim_uf_post_msgs(ufm, mi); -} - -static void -bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) -{ - u32 num_ufs = cfg->fwcfg.num_uf_bufs; - - /* - * dma-able memory for UF posted bufs - */ - *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs), - BFA_DMA_ALIGN_SZ); - - /* - * kernel Virtual memory for UFs and UF buf post msg copies - */ - *ndm_len += sizeof(struct bfa_uf_s) * num_ufs; - *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs; -} - -static void -bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - - bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); - ufm->bfa = bfa; - ufm->num_ufs = cfg->fwcfg.num_uf_bufs; - INIT_LIST_HEAD(&ufm->uf_free_q); - INIT_LIST_HEAD(&ufm->uf_posted_q); - - uf_mem_claim(ufm, meminfo); -} - -static void -bfa_uf_detach(struct bfa_s *bfa) -{ -} - -static struct bfa_uf_s * -bfa_uf_get(struct bfa_uf_mod_s *uf_mod) -{ - struct bfa_uf_s *uf; - - bfa_q_deq(&uf_mod->uf_free_q, &uf); - return uf; -} - -static void -bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf) -{ - list_add_tail(&uf->qe, &uf_mod->uf_free_q); -} - -static bfa_status_t -bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) -{ - struct bfi_uf_buf_post_s *uf_post_msg; - - uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP); - if (!uf_post_msg) - return BFA_STATUS_FAILED; - - bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], - sizeof(struct bfi_uf_buf_post_s)); - bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); - - bfa_trc(ufm->bfa, uf->uf_tag); - - list_add_tail(&uf->qe, &ufm->uf_posted_q); - return BFA_STATUS_OK; -} - -static void -bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod) -{ - struct bfa_uf_s *uf; - - while ((uf = bfa_uf_get(uf_mod)) != NULL) { - if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK) - break; - } -} - -static void -uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - u16 uf_tag = m->buf_tag; - struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag]; - struct bfa_uf_s *uf = &ufm->uf_list[uf_tag]; - u8 *buf = &uf_buf->d[0]; - struct fchs_s *fchs; - - m->frm_len = bfa_os_ntohs(m->frm_len); - m->xfr_len = bfa_os_ntohs(m->xfr_len); - - fchs = (struct fchs_s *) uf_buf; - - list_del(&uf->qe); /* dequeue from posted queue */ - - uf->data_ptr = buf; - uf->data_len = m->xfr_len; - - bfa_assert(uf->data_len >= sizeof(struct fchs_s)); - - if (uf->data_len == sizeof(struct fchs_s)) { - bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, - uf->data_len, (struct fchs_s *) buf); - } else { - u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s))); - bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF, - BFA_PL_EID_RX, uf->data_len, - (struct fchs_s *) buf, pld_w0); - } - - if (bfa->fcs) - __bfa_cb_uf_recv(uf, BFA_TRUE); - else - bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); -} - -static void 
-bfa_uf_stop(struct bfa_s *bfa) -{ -} - -static void -bfa_uf_iocdisable(struct bfa_s *bfa) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - struct bfa_uf_s *uf; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &ufm->uf_posted_q) { - uf = (struct bfa_uf_s *) qe; - list_del(&uf->qe); - bfa_uf_put(ufm, uf); - } -} - -static void -bfa_uf_start(struct bfa_s *bfa) -{ - bfa_uf_post_all(BFA_UF_MOD(bfa)); -} - - - -/** - * bfa_uf_api - */ - -/** - * Register handler for all unsolicted recieve frames. - * - * @param[in] bfa BFA instance - * @param[in] ufrecv receive handler function - * @param[in] cbarg receive handler arg - */ -void -bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg) -{ - struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - - ufm->ufrecv = ufrecv; - ufm->cbarg = cbarg; -} - -/** - * Free an unsolicited frame back to BFA. - * - * @param[in] uf unsolicited frame to be freed - * - * @return None - */ -void -bfa_uf_free(struct bfa_uf_s *uf) -{ - bfa_uf_put(BFA_UF_MOD(uf->bfa), uf); - bfa_uf_post_all(BFA_UF_MOD(uf->bfa)); -} - - - -/** - * uf_pub BFA uf module public functions - */ - -void -bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) -{ - bfa_trc(bfa, msg->mhdr.msg_id); - - switch (msg->mhdr.msg_id) { - case BFI_UF_I2H_FRM_RCVD: - uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg); - break; - - default: - bfa_trc(bfa, msg->mhdr.msg_id); - bfa_assert(0); - } -} - - diff --git a/drivers/scsi/bfa/bfa_uf_priv.h b/drivers/scsi/bfa/bfa_uf_priv.h deleted file mode 100644 index bcb490f834f3..000000000000 --- a/drivers/scsi/bfa/bfa_uf_priv.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_UF_PRIV_H__ -#define __BFA_UF_PRIV_H__ - -#include -#include -#include - -#define BFA_UF_MIN (4) - -struct bfa_uf_mod_s { - struct bfa_s *bfa; /* back pointer to BFA */ - struct bfa_uf_s *uf_list; /* array of UFs */ - u16 num_ufs; /* num unsolicited rx frames */ - struct list_head uf_free_q; /* free UFs */ - struct list_head uf_posted_q; /* UFs posted to IOC */ - struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */ - u64 uf_pbs_pa; /* phy addr for UF bufs */ - struct bfi_uf_buf_post_s *uf_buf_posts; - /* pre-built UF post msgs */ - bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */ - void *cbarg; /* uf receive handler arg */ -}; - -#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod) - -#define ufm_pbs_pa(_ufmod, _uftag) \ - ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag)) - -void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); - -#endif /* __BFA_UF_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index ca04cc9d332f..4d8784e06e14 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
* All rights reserved * www.brocade.com * @@ -18,46 +18,62 @@ /** * bfad.c Linux driver PCI interface module. */ - -#include #include #include +#include +#include +#include +#include +#include +#include +#include +#include + #include "bfad_drv.h" #include "bfad_im.h" -#include "bfad_tm.h" -#include "bfad_ipfc.h" -#include "bfad_trcmod.h" -#include -#include -#include -#include +#include "bfa_fcs.h" +#include "bfa_os_inc.h" +#include "bfa_defs.h" +#include "bfa.h" BFA_TRC_FILE(LDRV, BFAD); DEFINE_MUTEX(bfad_mutex); LIST_HEAD(bfad_list); -static int bfad_inst; -int bfad_supported_fc4s; - -static char *host_name; -static char *os_name; -static char *os_patch; -static int num_rports; -static int num_ios; -static int num_tms; -static int num_fcxps; -static int num_ufbufs; -static int reqq_size; -static int rspq_size; -static int num_sgpgs; -static int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; -static int bfa_io_max_sge = BFAD_IO_MAX_SGE; -static int log_level = BFA_LOG_WARNING; -static int ioc_auto_recover = BFA_TRUE; -static int ipfc_enable = BFA_FALSE; -static int fdmi_enable = BFA_TRUE; -int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; -int bfa_linkup_delay = -1; + +static int bfad_inst; +static int num_sgpgs_parm; +int supported_fc4s; +char *host_name, *os_name, *os_patch; +int num_rports, num_ios, num_tms; +int num_fcxps, num_ufbufs; +int reqq_size, rspq_size, num_sgpgs; +int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT; +int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; +int bfa_io_max_sge = BFAD_IO_MAX_SGE; +int log_level = 3; /* WARNING log level */ +int ioc_auto_recover = BFA_TRUE; +int bfa_linkup_delay = -1; +int fdmi_enable = BFA_TRUE; +int pcie_max_read_reqsz; int bfa_debugfs_enable = 1; +int msix_disable_cb = 0, msix_disable_ct = 0; + +u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; +u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; + +const char *msix_name_ct[] = { + "cpe0", "cpe1", "cpe2", "cpe3", + "rme0", "rme1", "rme2", "rme3", + "ctrl" }; + +const char *msix_name_cb[] = { + "cpe0", "cpe1", "cpe2", "cpe3", + "rme0", "rme1", "rme2", "rme3", + "eemc", "elpu0", "elpu1", "epss", "mlpu" }; + +MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC); +MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA); +MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC); module_param(os_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(os_name, "OS name of the hba host machine"); @@ -66,8 +82,8 @@ MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine"); module_param(host_name, charp, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(host_name, "Hostname of the hba host machine"); module_param(num_rports, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(num_rports, "Max number of rports supported per port" - " (physical/logical), default=1024"); +MODULE_PARM_DESC(num_rports, "Max number of rports supported per port " + "(physical/logical), default=1024"); module_param(num_ios, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000"); module_param(num_tms, int, S_IRUGO | S_IWUSR); @@ -75,120 +91,277 @@ MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128"); module_param(num_fcxps, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64"); module_param(num_ufbufs, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame buffers," - " default=64"); +MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame " + "buffers, default=64"); module_param(reqq_size, int, S_IRUGO | S_IWUSR); 
-MODULE_PARM_DESC(reqq_size, "Max number of request queue elements," - " default=256"); +MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, " + "default=256"); module_param(rspq_size, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(rspq_size, "Max number of response queue elements," - " default=64"); +MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, " + "default=64"); module_param(num_sgpgs, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048"); module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs," - " Range[>0]"); +MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, " + "Range[>0]"); module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32," - " Range[>0]"); +MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]"); module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255"); module_param(log_level, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(log_level, "Driver log level, default=3," - " Range[Critical:1|Error:2|Warning:3|Info:4]"); +MODULE_PARM_DESC(log_level, "Driver log level, default=3, " + "Range[Critical:1|Error:2|Warning:3|Info:4]"); module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1," - " Range[off:0|on:1]"); -module_param(ipfc_enable, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(ipfc_enable, "Enable IPoFC, default=0, Range[off:0|on:1]"); +MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, " + "Range[off:0|on:1]"); module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for boot" - " port. Otherwise Range[>0]"); +MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for " + "boot port. Otherwise 10 secs in RHEL4 & 0 for " + "[RHEL5, SLES10, ESX40] Range[>0]"); +module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts " + "for Brocade-415/425/815/825 cards, default=0, " + " Range[false:0|true:1]"); +module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts " + "if possible for Brocade-1010/1020/804/1007/902/1741 " + "cards, default=0, Range[false:0|true:1]"); module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1," - " Range[false:0|true:1]"); +MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, " + "Range[false:0|true:1]"); +module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 " + "(use system setting), Range[128|256|512|1024|2048|4096]"); module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1," " Range[false:0|true:1]"); -/* - * Stores the module parm num_sgpgs value; - * used to reset for bfad next instance. 
+static void +bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); +static void +bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); + +/** + * Beginning state for the driver instance, awaiting the pci_probe event */ -static int num_sgpgs_parm; +static void +bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) +{ + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_CREATE: + bfa_sm_set_state(bfad, bfad_sm_created); + bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, + "%s", "bfad_worker"); + if (IS_ERR(bfad->bfad_tsk)) { + printk(KERN_INFO "bfad[%d]: Kernel thread " + "creation failed!\n", bfad->inst_no); + bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED); + } + bfa_sm_send_event(bfad, BFAD_E_INIT); + break; + + case BFAD_E_STOP: + /* Ignore stop; already in uninit */ + break; + + default: + bfa_sm_fault(bfad, event); + } +} -static bfa_status_t -bfad_fc4_probe(struct bfad_s *bfad) +/** + * Driver Instance is created, awaiting event INIT to initialize the bfad + */ +static void +bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) { - int rc; + unsigned long flags; - rc = bfad_im_probe(bfad); - if (rc != BFA_STATUS_OK) - goto ext; + bfa_trc(bfad, event); - bfad_tm_probe(bfad); + switch (event) { + case BFAD_E_INIT: + bfa_sm_set_state(bfad, bfad_sm_initializing); - if (ipfc_enable) - bfad_ipfc_probe(bfad); + init_completion(&bfad->comp); - bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; -ext: - return rc; + /* Enable Interrupt and wait bfa_init completion */ + if (bfad_setup_intr(bfad)) { + printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", + bfad->inst_no); + bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); + break; + } + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_init(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + /* Set up interrupt handler for each vectors */ + if ((bfad->bfad_flags & BFAD_MSIX_ON) && + bfad_install_msix_handler(bfad)) { + printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", + __func__, bfad->inst_no); + } + + bfad_init_timer(bfad); + + wait_for_completion(&bfad->comp); + + if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { + bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); + } else { + bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); + } + + break; + + case BFAD_E_KTHREAD_CREATE_FAILED: + bfa_sm_set_state(bfad, bfad_sm_uninit); + break; + + default: + bfa_sm_fault(bfad, event); + } } static void -bfad_fc4_probe_undo(struct bfad_s *bfad) +bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) { - bfad_im_probe_undo(bfad); - bfad_tm_probe_undo(bfad); - if (ipfc_enable) - bfad_ipfc_probe_undo(bfad); - bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; + int retval; + unsigned long flags; + + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_INIT_SUCCESS: + kthread_stop(bfad->bfad_tsk); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_tsk = NULL; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + retval = bfad_start_ops(bfad); + if (retval != BFA_STATUS_OK) + break; + bfa_sm_set_state(bfad, bfad_sm_operational); + break; + + case 
BFAD_E_INTR_INIT_FAILED: + bfa_sm_set_state(bfad, bfad_sm_uninit); + kthread_stop(bfad->bfad_tsk); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_tsk = NULL; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + break; + + case BFAD_E_INIT_FAILED: + bfa_sm_set_state(bfad, bfad_sm_failed); + break; + default: + bfa_sm_fault(bfad, event); + } } static void -bfad_fc4_probe_post(struct bfad_s *bfad) +bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event) { - if (bfad->im) - bfad_im_probe_post(bfad->im); + int retval; - bfad_tm_probe_post(bfad); - if (ipfc_enable) - bfad_ipfc_probe_post(bfad); + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_INIT_SUCCESS: + retval = bfad_start_ops(bfad); + if (retval != BFA_STATUS_OK) + break; + bfa_sm_set_state(bfad, bfad_sm_operational); + break; + + case BFAD_E_STOP: + if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) + bfad_uncfg_pport(bfad); + if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) { + bfad_im_probe_undo(bfad); + bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; + } + bfad_stop(bfad); + break; + + case BFAD_E_EXIT_COMP: + bfa_sm_set_state(bfad, bfad_sm_uninit); + bfad_remove_intr(bfad); + del_timer_sync(&bfad->hal_tmo); + break; + + default: + bfa_sm_fault(bfad, event); + } } -static bfa_status_t -bfad_fc4_port_new(struct bfad_s *bfad, struct bfad_port_s *port, int roles) +static void +bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event) { - int rc = BFA_STATUS_FAILED; + bfa_trc(bfad, event); - if (roles & BFA_PORT_ROLE_FCP_IM) - rc = bfad_im_port_new(bfad, port); - if (rc != BFA_STATUS_OK) - goto ext; + switch (event) { + case BFAD_E_STOP: + bfa_sm_set_state(bfad, bfad_sm_fcs_exit); + bfad_fcs_stop(bfad); + break; - if (roles & BFA_PORT_ROLE_FCP_TM) - rc = bfad_tm_port_new(bfad, port); - if (rc != BFA_STATUS_OK) - goto ext; + default: + bfa_sm_fault(bfad, event); + } +} - if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) - rc = bfad_ipfc_port_new(bfad, port, port->pvb_type); -ext: - return rc; +static void +bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event) +{ + bfa_trc(bfad, event); + + switch (event) { + case BFAD_E_FCS_EXIT_COMP: + bfa_sm_set_state(bfad, bfad_sm_stopping); + bfad_stop(bfad); + break; + + default: + bfa_sm_fault(bfad, event); + } } static void -bfad_fc4_port_delete(struct bfad_s *bfad, struct bfad_port_s *port, int roles) +bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) { - if (roles & BFA_PORT_ROLE_FCP_IM) - bfad_im_port_delete(bfad, port); + bfa_trc(bfad, event); - if (roles & BFA_PORT_ROLE_FCP_TM) - bfad_tm_port_delete(bfad, port); + switch (event) { + case BFAD_E_EXIT_COMP: + bfa_sm_set_state(bfad, bfad_sm_uninit); + bfad_remove_intr(bfad); + del_timer_sync(&bfad->hal_tmo); + bfad_im_probe_undo(bfad); + bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; + bfad_uncfg_pport(bfad); + break; - if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) - bfad_ipfc_port_delete(bfad, port); + default: + bfa_sm_fault(bfad, event); + break; + } } /** @@ -209,12 +382,13 @@ bfad_hcb_comp(void *arg, bfa_status_t status) void bfa_cb_init(void *drv, bfa_status_t init_status) { - struct bfad_s *bfad = drv; + struct bfad_s *bfad = drv; if (init_status == BFA_STATUS_OK) { bfad->bfad_flags |= BFAD_HAL_INIT_DONE; - /* If BFAD_HAL_INIT_FAIL flag is set: + /* + * If BFAD_HAL_INIT_FAIL flag is set: * Wake up the kernel thread to start * the bfad operations after HAL init done */ @@ -227,26 +401,16 @@ bfa_cb_init(void *drv, bfa_status_t init_status) complete(&bfad->comp); } - - /** * BFA_FCS callbacks */ 
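/*
 * Editor's note -- illustrative only, not part of the patch: the bfad_sm_*()
 * handlers in the hunk above follow the driver-wide state-machine convention
 * in which each state is a function and events are dispatched through the
 * current state pointer.  The dispatch macros live in the driver's common
 * services header (not shown in this hunk); a minimal sketch of that
 * pattern, written from memory and therefore an assumption, looks like:
 *
 *	typedef void (*bfa_sm_t)(void *sm, int event);
 *
 *	// store the new state (a function pointer) in the object
 *	#define bfa_sm_set_state(_sm, _state) \
 *		((_sm)->sm = (bfa_sm_t)(_state))
 *
 *	// deliver an event by calling whatever state is current
 *	#define bfa_sm_send_event(_sm, _event) \
 *		((_sm)->sm((_sm), (_event)))
 *
 * Under that scheme bfa_sm_send_event(bfad, BFAD_E_CREATE) simply invokes
 * bfad_sm_uninit(bfad, BFAD_E_CREATE), and each handler advances the
 * instance to its next state with bfa_sm_set_state().
 */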
-static struct bfad_port_s * -bfad_get_drv_port(struct bfad_s *bfad, struct bfad_vf_s *vf_drv, - struct bfad_vport_s *vp_drv) -{ - return (vp_drv) ? (&(vp_drv)->drv_port) - : ((vf_drv) ? (&(vf_drv)->base_port) : (&(bfad)->pport)); -} - struct bfad_port_s * -bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port, - enum bfa_port_role roles, struct bfad_vf_s *vf_drv, +bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port, + enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { - bfa_status_t rc; - struct bfad_port_s *port_drv; + bfa_status_t rc; + struct bfad_port_s *port_drv; if (!vp_drv && !vf_drv) { port_drv = &bfad->pport; @@ -264,71 +428,32 @@ bfa_fcb_port_new(struct bfad_s *bfad, struct bfa_fcs_port_s *port, port_drv->fcs_port = port; port_drv->roles = roles; - rc = bfad_fc4_port_new(bfad, port_drv, roles); - if (rc != BFA_STATUS_OK) { - bfad_fc4_port_delete(bfad, port_drv, roles); - port_drv = NULL; + + if (roles & BFA_LPORT_ROLE_FCP_IM) { + rc = bfad_im_port_new(bfad, port_drv); + if (rc != BFA_STATUS_OK) { + bfad_im_port_delete(bfad, port_drv); + port_drv = NULL; + } } return port_drv; } void -bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles, +bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) { - struct bfad_port_s *port_drv; + struct bfad_port_s *port_drv; - /* - * this will be only called from rmmod context - */ + /* this will be only called from rmmod context */ if (vp_drv && !vp_drv->comp_del) { - port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); + port_drv = (vp_drv) ? (&(vp_drv)->drv_port) : + ((vf_drv) ? (&(vf_drv)->base_port) : + (&(bfad)->pport)); bfa_trc(bfad, roles); - bfad_fc4_port_delete(bfad, port_drv, roles); - } -} - -void -bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles, - struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) -{ - struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); - - if (roles & BFA_PORT_ROLE_FCP_IM) - bfad_im_port_online(bfad, port_drv); - - if (roles & BFA_PORT_ROLE_FCP_TM) - bfad_tm_port_online(bfad, port_drv); - - if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) - bfad_ipfc_port_online(bfad, port_drv); - - bfad->bfad_flags |= BFAD_PORT_ONLINE; -} - -void -bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles, - struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv) -{ - struct bfad_port_s *port_drv = bfad_get_drv_port(bfad, vf_drv, vp_drv); - - if (roles & BFA_PORT_ROLE_FCP_IM) - bfad_im_port_offline(bfad, port_drv); - - if (roles & BFA_PORT_ROLE_FCP_TM) - bfad_tm_port_offline(bfad, port_drv); - - if ((roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) - bfad_ipfc_port_offline(bfad, port_drv); -} - -void -bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv) -{ - if (vport_drv->comp_del) { - complete(vport_drv->comp_del); - return; + if (roles & BFA_LPORT_ROLE_FCP_IM) + bfad_im_port_delete(bfad, port_drv); } } @@ -339,7 +464,7 @@ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, struct bfad_rport_s **rport_drv) { - bfa_status_t rc = BFA_STATUS_OK; + bfa_status_t rc = BFA_STATUS_OK; *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC); if (*rport_drv == NULL) { @@ -354,35 +479,43 @@ ext: } /** - * @brief * FCS PBC VPORT Create */ void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) { - struct bfad_pcfg_s *pcfg; + struct bfa_lport_cfg_s port_cfg = {0}; + struct bfad_vport_s 
*vport; + int rc; - pcfg = kzalloc(sizeof(struct bfad_pcfg_s), GFP_ATOMIC); - if (!pcfg) { + vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); + if (!vport) { bfa_trc(bfad, 0); return; } - pcfg->port_cfg.roles = BFA_PORT_ROLE_FCP_IM; - pcfg->port_cfg.pwwn = pbc_vport.vp_pwwn; - pcfg->port_cfg.nwwn = pbc_vport.vp_nwwn; - pcfg->port_cfg.preboot_vp = BFA_TRUE; + vport->drv_port.bfad = bfad; + port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; + port_cfg.pwwn = pbc_vport.vp_pwwn; + port_cfg.nwwn = pbc_vport.vp_nwwn; + port_cfg.preboot_vp = BFA_TRUE; + + rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0, + &port_cfg, vport); - list_add_tail(&pcfg->list_entry, &bfad->pbc_pcfg_list); + if (rc != BFA_STATUS_OK) { + bfa_trc(bfad, 0); + return; + } - return; + list_add_tail(&vport->list_entry, &bfad->pbc_vport_list); } void bfad_hal_mem_release(struct bfad_s *bfad) { - int i; + int i; struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_elem_s *meminfo_elem; @@ -395,9 +528,9 @@ bfad_hal_mem_release(struct bfad_s *bfad) break; case BFA_MEM_TYPE_DMA: dma_free_coherent(&bfad->pcidev->dev, - meminfo_elem->mem_len, - meminfo_elem->kva, - (dma_addr_t) meminfo_elem->dma); + meminfo_elem->mem_len, + meminfo_elem->kva, + (dma_addr_t) meminfo_elem->dma); break; default: bfa_assert(0); @@ -434,27 +567,27 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg) * otherwise, the default values will be shown as 0 in sysfs */ num_rports = bfa_cfg->fwcfg.num_rports; - num_ios = bfa_cfg->fwcfg.num_ioim_reqs; - num_tms = bfa_cfg->fwcfg.num_tskim_reqs; - num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; + num_ios = bfa_cfg->fwcfg.num_ioim_reqs; + num_tms = bfa_cfg->fwcfg.num_tskim_reqs; + num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs; num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs; - reqq_size = bfa_cfg->drvcfg.num_reqq_elems; - rspq_size = bfa_cfg->drvcfg.num_rspq_elems; - num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; + reqq_size = bfa_cfg->drvcfg.num_reqq_elems; + rspq_size = bfa_cfg->drvcfg.num_rspq_elems; + num_sgpgs = bfa_cfg->drvcfg.num_sgpgs; } bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad) { + int i; struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo; struct bfa_mem_elem_s *meminfo_elem; - bfa_status_t rc = BFA_STATUS_OK; - dma_addr_t phys_addr; - int retry_count = 0; - int reset_value = 1; - int min_num_sgpgs = 512; - void *kva; - int i; + dma_addr_t phys_addr; + void *kva; + bfa_status_t rc = BFA_STATUS_OK; + int retry_count = 0; + int reset_value = 1; + int min_num_sgpgs = 512; bfa_cfg_get_default(&bfad->ioc_cfg); @@ -478,8 +611,7 @@ retry: break; case BFA_MEM_TYPE_DMA: kva = dma_alloc_coherent(&bfad->pcidev->dev, - meminfo_elem->mem_len, - &phys_addr, GFP_KERNEL); + meminfo_elem->mem_len, &phys_addr, GFP_KERNEL); if (kva == NULL) { bfad_hal_mem_release(bfad); /* @@ -487,14 +619,14 @@ retry: * num_sgpages try with half the value. 
*/ if (num_sgpgs > min_num_sgpgs) { - printk(KERN_INFO "bfad[%d]: memory" - " allocation failed with" - " num_sgpgs: %d\n", + printk(KERN_INFO + "bfad[%d]: memory allocation failed" + " with num_sgpgs: %d\n", bfad->inst_no, num_sgpgs); nextLowerInt(&num_sgpgs); - printk(KERN_INFO "bfad[%d]: trying to" - " allocate memory with" - " num_sgpgs: %d\n", + printk(KERN_INFO + "bfad[%d]: trying to allocate memory" + " with num_sgpgs: %d\n", bfad->inst_no, num_sgpgs); retry_count++; goto retry; @@ -536,11 +668,11 @@ ext: */ bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, - struct bfa_port_cfg_s *port_cfg, struct device *dev) + struct bfa_lport_cfg_s *port_cfg, struct device *dev) { - struct bfad_vport_s *vport; - int rc = BFA_STATUS_OK; - unsigned long flags; + struct bfad_vport_s *vport; + int rc = BFA_STATUS_OK; + unsigned long flags; struct completion fcomp; vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); @@ -551,18 +683,14 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id, vport->drv_port.bfad = bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); - if (port_cfg->preboot_vp == BFA_TRUE) - rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, - &bfad->bfa_fcs, vf_id, port_cfg, vport); - else - rc = bfa_fcs_vport_create(&vport->fcs_vport, - &bfad->bfa_fcs, vf_id, port_cfg, vport); + rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id, + port_cfg, vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) goto ext_free_vport; - if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) { + if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) { rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port, dev); if (rc != BFA_STATUS_OK) @@ -593,10 +721,10 @@ ext: */ bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, - struct bfa_port_cfg_s *port_cfg) + struct bfa_lport_cfg_s *port_cfg) { - struct bfad_vf_s *vf; - int rc = BFA_STATUS_OK; + struct bfad_vf_s *vf; + int rc = BFA_STATUS_OK; vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL); if (!vf) { @@ -615,9 +743,9 @@ ext: void bfad_bfa_tmo(unsigned long data) { - struct bfad_s *bfad = (struct bfad_s *)data; - unsigned long flags; - struct list_head doneq; + struct bfad_s *bfad = (struct bfad_s *) data; + unsigned long flags; + struct list_head doneq; spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -633,7 +761,8 @@ bfad_bfa_tmo(unsigned long data) spin_unlock_irqrestore(&bfad->bfad_lock, flags); } - mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); + mod_timer(&bfad->hal_tmo, + jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } void @@ -643,16 +772,17 @@ bfad_init_timer(struct bfad_s *bfad) bfad->hal_tmo.function = bfad_bfa_tmo; bfad->hal_tmo.data = (unsigned long)bfad; - mod_timer(&bfad->hal_tmo, jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); + mod_timer(&bfad->hal_tmo, + jiffies + msecs_to_jiffies(BFA_TIMER_FREQ)); } int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { - int rc = -ENODEV; + int rc = -ENODEV; if (pci_enable_device(pdev)) { - BFA_PRINTF(BFA_ERR, "pci_enable_device fail %p\n", pdev); + printk(KERN_ERR "pci_enable_device fail %p\n", pdev); goto out; } @@ -664,14 +794,14 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - BFA_PRINTF(BFA_ERR, "pci_set_dma_mask fail %p\n", pdev); + printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev); goto out_release_region; } bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (bfad->pci_bar0_kva == NULL) { - 
BFA_PRINTF(BFA_ERR, "Fail to map bar0\n"); + printk(KERN_ERR "Fail to map bar0\n"); goto out_release_region; } @@ -688,6 +818,54 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn); bfad->pcidev = pdev; + + /* Adjust PCIe Maximum Read Request Size */ + if (pcie_max_read_reqsz > 0) { + int pcie_cap_reg; + u16 pcie_dev_ctl; + u16 mask = 0xffff; + + switch (pcie_max_read_reqsz) { + case 128: + mask = 0x0; + break; + case 256: + mask = 0x1000; + break; + case 512: + mask = 0x2000; + break; + case 1024: + mask = 0x3000; + break; + case 2048: + mask = 0x4000; + break; + case 4096: + mask = 0x5000; + break; + default: + break; + } + + pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (mask != 0xffff && pcie_cap_reg) { + pcie_cap_reg += 0x08; + pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); + if ((pcie_dev_ctl & 0x7000) != mask) { + printk(KERN_WARNING "BFA[%s]: " + "pcie_max_read_request_size is %d, " + "reset to %d\n", bfad->pci_name, + (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, + pcie_max_read_reqsz); + + pcie_dev_ctl &= ~0x7000; + pci_write_config_word(pdev, pcie_cap_reg, + pcie_dev_ctl | mask); + } + } + } + return 0; out_release_region: @@ -710,25 +888,22 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) void bfad_fcs_port_cfg(struct bfad_s *bfad) { - struct bfa_port_cfg_s port_cfg; - struct bfa_pport_attr_s attr; - char symname[BFA_SYMNAME_MAXLEN]; + struct bfa_lport_cfg_s port_cfg; + struct bfa_port_attr_s attr; + char symname[BFA_SYMNAME_MAXLEN]; sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); bfa_fcport_get_attr(&bfad->bfa, &attr); port_cfg.nwwn = attr.nwwn; port_cfg.pwwn = attr.pwwn; - - bfa_fcs_cfg_base_port(&bfad->bfa_fcs, &port_cfg); } bfa_status_t bfad_drv_init(struct bfad_s *bfad) { - bfa_status_t rc; - unsigned long flags; - struct bfa_fcs_driver_info_s driver_info; + bfa_status_t rc; + unsigned long flags; bfad->cfg_data.rport_del_timeout = rport_del_timeout; bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; @@ -740,15 +915,12 @@ bfad_drv_init(struct bfad_s *bfad) printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n", bfad->inst_no); printk(KERN_WARNING - "Not enough memory to attach all Brocade HBA ports," - " System may need more memory.\n"); + "Not enough memory to attach all Brocade HBA ports, %s", + "System may need more memory.\n"); goto out_hal_mem_alloc_failure; } - bfa_init_log(&bfad->bfa, bfad->logmod); bfa_init_trc(&bfad->bfa, bfad->trcmod); - bfa_init_aen(&bfad->bfa, bfad->aen); - memset(bfad->file_map, 0, sizeof(bfad->file_map)); bfa_init_plog(&bfad->bfa, &bfad->plog_buf); bfa_plog_init(&bfad->plog_buf); bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, @@ -757,77 +929,17 @@ bfad_drv_init(struct bfad_s *bfad) bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo, &bfad->hal_pcidev); - init_completion(&bfad->comp); - - /* - * Enable Interrupt and wait bfa_init completion - */ - if (bfad_setup_intr(bfad)) { - printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", - bfad->inst_no); - goto out_setup_intr_failure; - } - + /* FCS INIT */ spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_init(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - - /* - * Set up interrupt handler for each vectors - */ - if ((bfad->bfad_flags & BFAD_MSIX_ON) - && bfad_install_msix_handler(bfad)) { - printk(KERN_WARNING "%s: install_msix failed, bfad%d\n", - __func__, bfad->inst_no); - 
} - - bfad_init_timer(bfad); - - wait_for_completion(&bfad->comp); - - memset(&driver_info, 0, sizeof(driver_info)); - strncpy(driver_info.version, BFAD_DRIVER_VERSION, - sizeof(driver_info.version) - 1); - __kernel_param_lock(); - if (host_name) - strncpy(driver_info.host_machine_name, host_name, - sizeof(driver_info.host_machine_name) - 1); - if (os_name) - strncpy(driver_info.host_os_name, os_name, - sizeof(driver_info.host_os_name) - 1); - if (os_patch) - strncpy(driver_info.host_os_patch, os_patch, - sizeof(driver_info.host_os_patch) - 1); - __kernel_param_unlock(); - - strncpy(driver_info.os_device_name, bfad->pci_name, - sizeof(driver_info.os_device_name - 1)); - - /* - * FCS INIT - */ - spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod); bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); - bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); - - /* Do FCS init only when HAL init is done */ - if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { - bfa_fcs_init(&bfad->bfa_fcs); - bfad->bfad_flags |= BFAD_FCS_INIT_DONE; - } - - bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; + return BFA_STATUS_OK; -out_setup_intr_failure: - bfa_detach(&bfad->bfa); - bfad_hal_mem_release(bfad); out_hal_mem_alloc_failure: return BFA_STATUS_FAILED; } @@ -855,7 +967,7 @@ bfad_drv_uninit(struct bfad_s *bfad) void bfad_drv_start(struct bfad_s *bfad) { - unsigned long flags; + unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_start(&bfad->bfa); @@ -863,13 +975,14 @@ bfad_drv_start(struct bfad_s *bfad) bfad->bfad_flags |= BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); - bfad_fc4_probe_post(bfad); + if (bfad->im) + flush_workqueue(bfad->im->drv_workq); } void -bfad_drv_stop(struct bfad_s *bfad) +bfad_fcs_stop(struct bfad_s *bfad) { - unsigned long flags; + unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); @@ -878,24 +991,32 @@ bfad_drv_stop(struct bfad_s *bfad) spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); + bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); +} + +void +bfad_stop(struct bfad_s *bfad) +{ + unsigned long flags; + spin_lock_irqsave(&bfad->bfad_lock, flags); init_completion(&bfad->comp); bfa_stop(&bfad->bfa); bfad->bfad_flags &= ~BFAD_HAL_START_DONE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->comp); + + bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP); } bfa_status_t -bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role) +bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role) { - int rc = BFA_STATUS_OK; + int rc = BFA_STATUS_OK; - /* - * Allocate scsi_host for the physical port - */ - if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM) - && (role & BFA_PORT_ROLE_FCP_IM)) { + /* Allocate scsi_host for the physical port */ + if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && + (role & BFA_LPORT_ROLE_FCP_IM)) { if (bfad->pport.im_port == NULL) { rc = BFA_STATUS_FAILED; goto out; @@ -906,7 +1027,7 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role) if (rc != BFA_STATUS_OK) goto out; - bfad->pport.roles |= BFA_PORT_ROLE_FCP_IM; + bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM; } /* Setup the debugfs node for this scsi_host */ @@ -922,74 +1043,102 @@ out: void bfad_uncfg_pport(struct bfad_s *bfad) { - /* Remove 
the debugfs node for this scsi_host */ + /* Remove the debugfs node for this scsi_host */ kfree(bfad->regdata); bfad_debugfs_exit(&bfad->pport); - if ((bfad->pport.roles & BFA_PORT_ROLE_FCP_IPFC) && ipfc_enable) { - bfad_ipfc_port_delete(bfad, &bfad->pport); - bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IPFC; - } - - if ((bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IM) - && (bfad->pport.roles & BFA_PORT_ROLE_FCP_IM)) { + if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) && + (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) { bfad_im_scsi_host_free(bfad, bfad->pport.im_port); bfad_im_port_clean(bfad->pport.im_port); kfree(bfad->pport.im_port); - bfad->pport.roles &= ~BFA_PORT_ROLE_FCP_IM; + bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM; } bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE; } -void -bfad_drv_log_level_set(struct bfad_s *bfad) -{ - if (log_level > BFA_LOG_INVALID && log_level <= BFA_LOG_LEVEL_MAX) - bfa_log_set_level_all(&bfad->log_data, log_level); -} - bfa_status_t -bfad_start_ops(struct bfad_s *bfad) -{ - int retval; - struct bfad_pcfg_s *pcfg, *pcfg_new; +bfad_start_ops(struct bfad_s *bfad) { + + int retval; + unsigned long flags; + struct bfad_vport_s *vport, *vport_new; + struct bfa_fcs_driver_info_s driver_info; + + /* Fill the driver_info info to fcs*/ + memset(&driver_info, 0, sizeof(driver_info)); + strncpy(driver_info.version, BFAD_DRIVER_VERSION, + sizeof(driver_info.version) - 1); + if (host_name) + strncpy(driver_info.host_machine_name, host_name, + sizeof(driver_info.host_machine_name) - 1); + if (os_name) + strncpy(driver_info.host_os_name, os_name, + sizeof(driver_info.host_os_name) - 1); + if (os_patch) + strncpy(driver_info.host_os_patch, os_patch, + sizeof(driver_info.host_os_patch) - 1); + + strncpy(driver_info.os_device_name, bfad->pci_name, + sizeof(driver_info.os_device_name - 1)); + + /* FCS INIT */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); + bfa_fcs_init(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* PPORT FCS config */ bfad_fcs_port_cfg(bfad); - retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); - if (retval != BFA_STATUS_OK) - goto out_cfg_pport_failure; - - /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */ - retval = bfad_fc4_probe(bfad); + retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); if (retval != BFA_STATUS_OK) { - printk(KERN_WARNING "bfad_fc4_probe failed\n"); - goto out_fc4_probe_failure; + if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) + bfa_sm_set_state(bfad, bfad_sm_failed); + bfad_stop(bfad); + return BFA_STATUS_FAILED; } + /* BFAD level FC4 IM specific resource allocation */ + retval = bfad_im_probe(bfad); + if (retval != BFA_STATUS_OK) { + printk(KERN_WARNING "bfad_im_probe failed\n"); + if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) + bfa_sm_set_state(bfad, bfad_sm_failed); + bfad_im_probe_undo(bfad); + bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; + bfad_uncfg_pport(bfad); + bfad_stop(bfad); + return BFA_STATUS_FAILED; + } else + bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; + bfad_drv_start(bfad); - /* pbc vport creation */ - list_for_each_entry_safe(pcfg, pcfg_new, &bfad->pbc_pcfg_list, - list_entry) { + /* Complete pbc vport create */ + list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list, + list_entry) { struct fc_vport_identifiers vid; struct fc_vport *fc_vport; + char pwwn_buf[BFA_STRING_32]; memset(&vid, 0, sizeof(vid)); vid.roles = FC_PORT_ROLE_FCP_INITIATOR; vid.vport_type = FC_PORTTYPE_NPIV; vid.disable = false; - vid.node_name = 
wwn_to_u64((u8 *)&pcfg->port_cfg.nwwn); - vid.port_name = wwn_to_u64((u8 *)&pcfg->port_cfg.pwwn); + vid.node_name = wwn_to_u64((u8 *) + (&((vport->fcs_vport).lport.port_cfg.nwwn))); + vid.port_name = wwn_to_u64((u8 *) + (&((vport->fcs_vport).lport.port_cfg.pwwn))); fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid); - if (!fc_vport) + if (!fc_vport) { + wwn2str(pwwn_buf, vid.port_name); printk(KERN_WARNING "bfad%d: failed to create pbc vport" - " %llx\n", bfad->inst_no, vid.port_name); - list_del(&pcfg->list_entry); - kfree(pcfg); - + " %s\n", bfad->inst_no, pwwn_buf); + } + list_del(&vport->list_entry); + kfree(vport); } /* @@ -998,24 +1147,15 @@ bfad_start_ops(struct bfad_s *bfad) * passed in module param value as the bfa_linkup_delay. */ if (bfa_linkup_delay < 0) { - bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); bfad_os_rport_online_wait(bfad); bfa_linkup_delay = -1; - - } else { + } else bfad_os_rport_online_wait(bfad); - } - bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); + BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n"); return BFA_STATUS_OK; - -out_fc4_probe_failure: - bfad_fc4_probe_undo(bfad); - bfad_uncfg_pport(bfad); -out_cfg_pport_failure: - return BFA_STATUS_FAILED; } int @@ -1028,18 +1168,8 @@ bfad_worker(void *ptr) while (!kthread_should_stop()) { - /* Check if the FCS init is done from bfad_drv_init; - * if not done do FCS init and set the flag. - */ - if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) { - spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_fcs_init(&bfad->bfa_fcs); - bfad->bfad_flags |= BFAD_FCS_INIT_DONE; - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - } - - /* Start the bfad operations after HAL init done */ - bfad_start_ops(bfad); + /* Send event BFAD_E_INIT_SUCCESS */ + bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS); spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfad_tsk = NULL; @@ -1051,9 +1181,198 @@ bfad_worker(void *ptr) return 0; } - /* - * PCI_entry PCI driver entries * { - */ +/** + * BFA driver interrupt functions + */ +irqreturn_t +bfad_intx(int irq, void *dev_id) +{ + struct bfad_s *bfad = dev_id; + struct list_head doneq; + unsigned long flags; + bfa_boolean_t rc; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + rc = bfa_intx(&bfad->bfa); + if (!rc) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return IRQ_NONE; + } + + bfa_comp_deq(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (!list_empty(&doneq)) { + bfa_comp_process(&bfad->bfa, &doneq); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_comp_free(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + bfa_trc_fp(bfad, irq); + } + + return IRQ_HANDLED; + +} + +static irqreturn_t +bfad_msix(int irq, void *dev_id) +{ + struct bfad_msix_s *vec = dev_id; + struct bfad_s *bfad = vec->bfad; + struct list_head doneq; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + + bfa_msix(&bfad->bfa, vec->msix.entry); + bfa_comp_deq(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (!list_empty(&doneq)) { + bfa_comp_process(&bfad->bfa, &doneq); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_comp_free(&bfad->bfa, &doneq); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + return IRQ_HANDLED; +} + +/** + * Initialize the MSIX entry table. 
+ */ +static void +bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, + int mask, int max_bit) +{ + int i; + int match = 0x00000001; + + for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { + if (mask & match) { + bfad->msix_tab[bfad->nvec].msix.entry = i; + bfad->msix_tab[bfad->nvec].bfad = bfad; + msix_entries[bfad->nvec].entry = i; + bfad->nvec++; + } + + match <<= 1; + } + +} + +int +bfad_install_msix_handler(struct bfad_s *bfad) +{ + int i, error = 0; + + for (i = 0; i < bfad->nvec; i++) { + sprintf(bfad->msix_tab[i].name, "bfa-%s-%s", + bfad->pci_name, + ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ? + msix_name_ct[i] : msix_name_cb[i])); + + error = request_irq(bfad->msix_tab[i].msix.vector, + (irq_handler_t) bfad_msix, 0, + bfad->msix_tab[i].name, &bfad->msix_tab[i]); + bfa_trc(bfad, i); + bfa_trc(bfad, bfad->msix_tab[i].msix.vector); + if (error) { + int j; + + for (j = 0; j < i; j++) + free_irq(bfad->msix_tab[j].msix.vector, + &bfad->msix_tab[j]); + + return 1; + } + } + + return 0; +} + +/** + * Setup MSIX based interrupt. + */ +int +bfad_setup_intr(struct bfad_s *bfad) +{ + int error = 0; + u32 mask = 0, i, num_bit = 0, max_bit = 0; + struct msix_entry msix_entries[MAX_MSIX_ENTRY]; + struct pci_dev *pdev = bfad->pcidev; + + /* Call BFA to get the msix map for this PCI function. */ + bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); + + /* Set up the msix entry table */ + bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); + + if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) || + (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) { + + error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); + if (error) { + /* + * Only error number of vector is available. + * We don't have a mechanism to map multiple + * interrupts into one vector, so even if we + * can try to request less vectors, we don't + * know how to associate interrupt events to + * vectors. Linux doesn't dupicate vectors + * in the MSIX table for this case. + */ + + printk(KERN_WARNING "bfad%d: " + "pci_enable_msix failed (%d)," + " use line based.\n", bfad->inst_no, error); + + goto line_based; + } + + /* Save the vectors */ + for (i = 0; i < bfad->nvec; i++) { + bfa_trc(bfad, msix_entries[i].vector); + bfad->msix_tab[i].msix.vector = msix_entries[i].vector; + } + + bfa_msix_init(&bfad->bfa, bfad->nvec); + + bfad->bfad_flags |= BFAD_MSIX_ON; + + return error; + } + +line_based: + error = 0; + if (request_irq + (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS, + BFAD_DRIVER_NAME, bfad) != 0) { + /* Enable interrupt handler failed */ + return 1; + } + + return error; +} + +void +bfad_remove_intr(struct bfad_s *bfad) +{ + int i; + + if (bfad->bfad_flags & BFAD_MSIX_ON) { + for (i = 0; i < bfad->nvec; i++) + free_irq(bfad->msix_tab[i].msix.vector, + &bfad->msix_tab[i]); + + pci_disable_msix(bfad->pcidev); + bfad->bfad_flags &= ~BFAD_MSIX_ON; + } else { + free_irq(bfad->pcidev->irq, bfad); + } +} /** * PCI probe entry. 
@@ -1061,18 +1380,14 @@ bfad_worker(void *ptr) int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { - struct bfad_s *bfad; - int error = -ENODEV, retval; + struct bfad_s *bfad; + int error = -ENODEV, retval; - /* - * For single port cards - only claim function 0 - */ - if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) - && (PCI_FUNC(pdev->devfn) != 0)) + /* For single port cards - only claim function 0 */ + if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) && + (PCI_FUNC(pdev->devfn) != 0)) return -ENODEV; - BFA_TRACE(BFA_INFO, "bfad_pci_probe entry"); - bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL); if (!bfad) { error = -ENOMEM; @@ -1086,21 +1401,11 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_alloc_trace_failure; } - /* - * LOG/TRACE INIT - */ + /* TRACE INIT */ bfa_trc_init(bfad->trcmod); bfa_trc(bfad, bfad_inst); - bfad->logmod = &bfad->log_data; - bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf); - - bfad_drv_log_level_set(bfad); - - bfad->aen = &bfad->aen_buf; - if (!(bfad_load_fwimg(pdev))) { - printk(KERN_WARNING "bfad_load_fwimg failure!\n"); kfree(bfad->trcmod); goto out_alloc_trace_failure; } @@ -1117,46 +1422,31 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) list_add_tail(&bfad->list_entry, &bfad_list); mutex_unlock(&bfad_mutex); + /* Initializing the state machine: State set to uninit */ + bfa_sm_set_state(bfad, bfad_sm_uninit); + spin_lock_init(&bfad->bfad_lock); pci_set_drvdata(pdev, bfad); bfad->ref_count = 0; bfad->pport.bfad = bfad; - INIT_LIST_HEAD(&bfad->pbc_pcfg_list); - - bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", - "bfad_worker"); - if (IS_ERR(bfad->bfad_tsk)) { - printk(KERN_INFO "bfad[%d]: Kernel thread" - " creation failed!\n", - bfad->inst_no); - goto out_kthread_create_failure; - } + INIT_LIST_HEAD(&bfad->pbc_vport_list); retval = bfad_drv_init(bfad); if (retval != BFA_STATUS_OK) goto out_drv_init_failure; - if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { - bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; - printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no); - goto ok; - } - retval = bfad_start_ops(bfad); - if (retval != BFA_STATUS_OK) - goto out_start_ops_failure; + bfa_sm_send_event(bfad, BFAD_E_CREATE); - kthread_stop(bfad->bfad_tsk); - bfad->bfad_tsk = NULL; + if (bfa_sm_cmp_state(bfad, bfad_sm_uninit)) + goto out_bfad_sm_failure; -ok: return 0; -out_start_ops_failure: - bfad_drv_uninit(bfad); +out_bfad_sm_failure: + bfa_detach(&bfad->bfa); + bfad_hal_mem_release(bfad); out_drv_init_failure: - kthread_stop(bfad->bfad_tsk); -out_kthread_create_failure: mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); @@ -1176,62 +1466,29 @@ out: void bfad_pci_remove(struct pci_dev *pdev) { - struct bfad_s *bfad = pci_get_drvdata(pdev); - unsigned long flags; + struct bfad_s *bfad = pci_get_drvdata(pdev); + unsigned long flags; bfa_trc(bfad, bfad->inst_no); spin_lock_irqsave(&bfad->bfad_lock, flags); - if (bfad->bfad_tsk != NULL) - kthread_stop(bfad->bfad_tsk); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - - if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE) - && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { - - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfa_stop(&bfad->bfa); + if (bfad->bfad_tsk != NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); - - bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); - goto hal_detach; - } else if 
(!(bfad->bfad_flags & BFAD_DRV_INIT_DONE)) { - goto remove_sysfs; - } - - if (bfad->bfad_flags & BFAD_HAL_START_DONE) { - bfad_drv_stop(bfad); - } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) { - /* Invoking bfa_stop() before bfa_detach - * when HAL and DRV init are success - * but HAL start did not occur. - */ - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfa_stop(&bfad->bfa); + kthread_stop(bfad->bfad_tsk); + } else { spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); } - bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); + /* Send Event BFAD_E_STOP */ + bfa_sm_send_event(bfad, BFAD_E_STOP); - if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) - bfad_fc4_probe_undo(bfad); - - if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) - bfad_uncfg_pport(bfad); - -hal_detach: + /* Driver detach and dealloc mem */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_detach(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad_hal_mem_release(bfad); -remove_sysfs: + /* Cleaning the BFAD instance */ mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); @@ -1242,35 +1499,34 @@ remove_sysfs: kfree(bfad); } - -static struct pci_device_id bfad_id_table[] = { +struct pci_device_id bfad_id_table[] = { { - .vendor = BFA_PCI_VENDOR_ID_BROCADE, - .device = BFA_PCI_DEVICE_ID_FC_8G2P, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_FC_8G2P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, { - .vendor = BFA_PCI_VENDOR_ID_BROCADE, - .device = BFA_PCI_DEVICE_ID_FC_8G1P, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_FC_8G1P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, { - .vendor = BFA_PCI_VENDOR_ID_BROCADE, - .device = BFA_PCI_DEVICE_ID_CT, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_SERIAL_FIBER << 8), - .class_mask = ~0, - }, + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, { - .vendor = BFA_PCI_VENDOR_ID_BROCADE, - .device = BFA_PCI_DEVICE_ID_CT_FC, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_SERIAL_FIBER << 8), - .class_mask = ~0, + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT_FC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, }, {0, 0}, @@ -1285,90 +1541,105 @@ static struct pci_driver bfad_pci_driver = { .remove = __devexit_p(bfad_pci_remove), }; -/** - * Linux driver module functions - */ -bfa_status_t -bfad_fc4_module_init(void) -{ - int rc; - - rc = bfad_im_module_init(); - if (rc != BFA_STATUS_OK) - goto ext; - - bfad_tm_module_init(); - if (ipfc_enable) - bfad_ipfc_module_init(); -ext: - return rc; -} - -void -bfad_fc4_module_exit(void) -{ - if (ipfc_enable) - bfad_ipfc_module_exit(); - bfad_tm_module_exit(); - bfad_im_module_exit(); -} - /** * Driver module init. 
*/ -static int __init +static int __init bfad_init(void) { - int error = 0; + int error = 0; printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n", - BFAD_DRIVER_VERSION); + BFAD_DRIVER_VERSION); if (num_sgpgs > 0) num_sgpgs_parm = num_sgpgs; - error = bfad_fc4_module_init(); + error = bfad_im_module_init(); if (error) { error = -ENOMEM; - printk(KERN_WARNING "bfad_fc4_module_init failure\n"); + printk(KERN_WARNING "bfad_im_module_init failure\n"); goto ext; } - if (!strcmp(FCPI_NAME, " fcpim")) - bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IM; - if (!strcmp(FCPT_NAME, " fcptm")) - bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_TM; - if (!strcmp(IPFC_NAME, " ipfc")) - bfad_supported_fc4s |= BFA_PORT_ROLE_FCP_IPFC; + if (strcmp(FCPI_NAME, " fcpim") == 0) + supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; bfa_ioc_auto_recover(ioc_auto_recover); bfa_fcs_rport_set_del_timeout(rport_del_timeout); - error = pci_register_driver(&bfad_pci_driver); + error = pci_register_driver(&bfad_pci_driver); if (error) { - printk(KERN_WARNING "bfad pci_register_driver failure\n"); + printk(KERN_WARNING "pci_register_driver failure\n"); goto ext; } return 0; ext: - bfad_fc4_module_exit(); + bfad_im_module_exit(); return error; } /** * Driver module exit. */ -static void __exit +static void __exit bfad_exit(void) { pci_unregister_driver(&bfad_pci_driver); - bfad_fc4_module_exit(); + bfad_im_module_exit(); bfad_free_fwimg(); } -#define BFAD_PROTO_NAME FCPI_NAME FCPT_NAME IPFC_NAME +/* Firmware handling */ +u32 * +bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name) +{ + const struct firmware *fw; + + if (request_firmware(&fw, fw_name, &pdev->dev)) { + printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); + goto error; + } + + *bfi_image = vmalloc(fw->size); + if (NULL == *bfi_image) { + printk(KERN_ALERT "Fail to allocate buffer for fw image " + "size=%x!\n", (u32) fw->size); + goto error; + } + + memcpy(*bfi_image, fw->data, fw->size); + *bfi_image_size = fw->size/sizeof(u32); + + return *bfi_image; + +error: + return NULL; +} + +u32 * +bfad_get_firmware_buf(struct pci_dev *pdev) +{ + if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { + if (bfi_image_ct_fc_size == 0) + bfad_read_firmware(pdev, &bfi_image_ct_fc, + &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC); + return bfi_image_ct_fc; + } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) { + if (bfi_image_ct_cna_size == 0) + bfad_read_firmware(pdev, &bfi_image_ct_cna, + &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA); + return bfi_image_ct_cna; + } else { + if (bfi_image_cb_fc_size == 0) + bfad_read_firmware(pdev, &bfi_image_cb_fc, + &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC); + return bfi_image_cb_fc; + } +} module_init(bfad_init); module_exit(bfad_exit); @@ -1376,5 +1647,3 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME); MODULE_AUTHOR("Brocade Communications Systems, Inc."); MODULE_VERSION(BFAD_DRIVER_VERSION); - - diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 0818eb07ef88..d8843720eac1 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -19,15 +19,8 @@ * bfa_attr.c Linux driver configuration interface module. 
*/ -#include #include "bfad_drv.h" #include "bfad_im.h" -#include "bfad_trcmod.h" -#include "bfad_attr.h" - -/** - * FC_transport_template FC transport template - */ /** * FC transport template entry, get SCSI target port ID. @@ -42,7 +35,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget) u32 fc_id = -1; unsigned long flags; - shost = bfad_os_starget_to_shost(starget); + shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -68,7 +61,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget) u64 node_name = 0; unsigned long flags; - shost = bfad_os_starget_to_shost(starget); + shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -94,7 +87,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget) u64 port_name = 0; unsigned long flags; - shost = bfad_os_starget_to_shost(starget); + shost = dev_to_shost(starget->dev.parent); im_port = (struct bfad_im_port_s *) shost->hostdata[0]; bfad = im_port->bfad; spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -118,17 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost) struct bfad_port_s *port = im_port->port; fc_host_port_id(shost) = - bfa_os_hton3b(bfa_fcs_port_get_fcid(port->fcs_port)); -} - - - - - -struct Scsi_Host * -bfad_os_starget_to_shost(struct scsi_target *starget) -{ - return dev_to_shost(starget->dev.parent); + bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); } /** @@ -140,21 +123,21 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost) struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_pport_attr_s attr; + struct bfa_lport_attr_s port_attr; - bfa_fcport_get_attr(&bfad->bfa, &attr); + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); - switch (attr.port_type) { - case BFA_PPORT_TYPE_NPORT: + switch (port_attr.port_type) { + case BFA_PORT_TYPE_NPORT: fc_host_port_type(shost) = FC_PORTTYPE_NPORT; break; - case BFA_PPORT_TYPE_NLPORT: + case BFA_PORT_TYPE_NLPORT: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; break; - case BFA_PPORT_TYPE_P2P: + case BFA_PORT_TYPE_P2P: fc_host_port_type(shost) = FC_PORTTYPE_PTP; break; - case BFA_PPORT_TYPE_LPORT: + case BFA_PORT_TYPE_LPORT: fc_host_port_type(shost) = FC_PORTTYPE_LPORT; break; default: @@ -172,25 +155,28 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost) struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_pport_attr_s attr; + struct bfa_port_attr_s attr; bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.port_state) { - case BFA_PPORT_ST_LINKDOWN: + case BFA_PORT_ST_LINKDOWN: fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; break; - case BFA_PPORT_ST_LINKUP: + case BFA_PORT_ST_LINKUP: fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; break; - case BFA_PPORT_ST_UNINIT: - case BFA_PPORT_ST_ENABLING_QWAIT: - case BFA_PPORT_ST_ENABLING: - case BFA_PPORT_ST_DISABLING_QWAIT: - case BFA_PPORT_ST_DISABLING: - case BFA_PPORT_ST_DISABLED: - case BFA_PPORT_ST_STOPPED: - case BFA_PPORT_ST_IOCDOWN: + case BFA_PORT_ST_DISABLED: + case BFA_PORT_ST_STOPPED: + case BFA_PORT_ST_IOCDOWN: + case BFA_PORT_ST_IOCDIS: + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; + break; + case BFA_PORT_ST_UNINIT: + case BFA_PORT_ST_ENABLING_QWAIT: + case BFA_PORT_ST_ENABLING: + case 
BFA_PORT_ST_DISABLING_QWAIT: + case BFA_PORT_ST_DISABLING: default: fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; break; @@ -210,13 +196,9 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost) memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); - if (port->supported_fc4s & - (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM)) + if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_active_fc4s(shost)[2] = 1; - if (port->supported_fc4s & BFA_PORT_ROLE_FCP_IPFC) - fc_host_active_fc4s(shost)[3] = 0x20; - fc_host_active_fc4s(shost)[7] = 1; } @@ -229,29 +211,29 @@ bfad_im_get_host_speed(struct Scsi_Host *shost) struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_pport_attr_s attr; - unsigned long flags; + struct bfa_port_attr_s attr; - spin_lock_irqsave(shost->host_lock, flags); bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.speed) { - case BFA_PPORT_SPEED_8GBPS: + case BFA_PORT_SPEED_10GBPS: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; + case BFA_PORT_SPEED_8GBPS: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; - case BFA_PPORT_SPEED_4GBPS: + case BFA_PORT_SPEED_4GBPS: fc_host_speed(shost) = FC_PORTSPEED_4GBIT; break; - case BFA_PPORT_SPEED_2GBPS: + case BFA_PORT_SPEED_2GBPS: fc_host_speed(shost) = FC_PORTSPEED_2GBIT; break; - case BFA_PPORT_SPEED_1GBPS: + case BFA_PORT_SPEED_1GBPS: fc_host_speed(shost) = FC_PORTSPEED_1GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } - spin_unlock_irqrestore(shost->host_lock, flags); } /** @@ -265,7 +247,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost) struct bfad_port_s *port = im_port->port; wwn_t fabric_nwwn = 0; - fabric_nwwn = bfa_fcs_port_get_fabric_name(port->fcs_port); + fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn); @@ -281,23 +263,44 @@ bfad_im_get_stats(struct Scsi_Host *shost) (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfad_hal_comp fcomp; + union bfa_port_stats_u *fcstats; struct fc_host_statistics *hstats; bfa_status_t rc; unsigned long flags; + fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL); + if (fcstats == NULL) + return NULL; + hstats = &bfad->link_stats; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); memset(hstats, 0, sizeof(struct fc_host_statistics)); - rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), - (union bfa_pport_stats_u *) hstats, - bfad_hcb_comp, &fcomp); + rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), + fcstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) return NULL; wait_for_completion(&fcomp.comp); + /* Fill the fc_host_statistics structure */ + hstats->seconds_since_last_reset = fcstats->fc.secs_reset; + hstats->tx_frames = fcstats->fc.tx_frames; + hstats->tx_words = fcstats->fc.tx_words; + hstats->rx_frames = fcstats->fc.rx_frames; + hstats->rx_words = fcstats->fc.rx_words; + hstats->lip_count = fcstats->fc.lip_count; + hstats->nos_count = fcstats->fc.nos_count; + hstats->error_frames = fcstats->fc.error_frames; + hstats->dumped_frames = fcstats->fc.dropped_frames; + hstats->link_failure_count = fcstats->fc.link_failures; + hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs; + hstats->loss_of_signal_count = fcstats->fc.loss_of_signals; + hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs; + hstats->invalid_crc_count = 
fcstats->fc.invalid_crcs; + + kfree(fcstats); return hstats; } @@ -317,7 +320,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost) init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, - &fcomp); + &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) @@ -372,8 +375,8 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_port_cfg_s port_cfg; - struct bfad_pcfg_s *pcfg; + struct bfa_lport_cfg_s port_cfg; + struct bfad_vport_s *vp; int status = 0, rc; unsigned long flags; @@ -382,12 +385,14 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); if (strlen(vname) > 0) strcpy((char *)&port_cfg.sym_name, vname); - port_cfg.roles = BFA_PORT_ROLE_FCP_IM; + port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; spin_lock_irqsave(&bfad->bfad_lock, flags); - list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) { - if (port_cfg.pwwn == pcfg->port_cfg.pwwn) { - port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp; + list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) { + if (port_cfg.pwwn == + vp->fcs_vport.lport.port_cfg.pwwn) { + port_cfg.preboot_vp = + vp->fcs_vport.lport.port_cfg.preboot_vp; break; } } @@ -638,7 +643,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); @@ -652,7 +657,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); @@ -666,10 +671,54 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; - bfa_get_adapter_model(&bfad->bfa, model_descr); + bfa_get_adapter_model(&bfad->bfa, model); + if (!strcmp(model, "Brocade-425")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 4Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "Brocade-825")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 8Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "Brocade-42B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "HP 4Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "Brocade-82B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "HP 8Gbps PCIe dual port FC HBA"); + else if (!strcmp(model, "Brocade-1010")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps single port CNA"); + else if (!strcmp(model, "Brocade-1020")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps dual port CNA"); + else if (!strcmp(model, "Brocade-1007")) + snprintf(model_descr, 
BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps CNA"); + else if (!strcmp(model, "Brocade-415")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 4Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "Brocade-815")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 8Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "Brocade-41B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "HP 4Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "Brocade-81B")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "HP 8Gbps PCIe single port FC HBA"); + else if (!strcmp(model, "Brocade-804")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "HP Bladesystem C-class 8Gbps FC HBA"); + else if (!strcmp(model, "Brocade-902")) + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Brocade 10Gbps CNA"); + else + snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, + "Invalid Model"); + return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); } @@ -683,7 +732,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, struct bfad_port_s *port = im_port->port; u64 nwwn; - nwwn = bfa_fcs_port_get_nwwn(port->fcs_port); + nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn)); } @@ -694,14 +743,14 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; - char model[BFA_ADAPTER_MODEL_NAME_LEN]; - char fw_ver[BFA_VERSION_LEN]; + struct bfad_s *bfad = im_port->bfad; + struct bfa_lport_attr_s port_attr; + char symname[BFA_SYMNAME_MAXLEN]; - bfa_get_adapter_model(&bfad->bfa, model); - bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); - return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n", - model, fw_ver, BFAD_DRIVER_VERSION); + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); + strncpy(symname, port_attr.port_cfg.sym_name.symname, + BFA_SYMNAME_MAXLEN); + return snprintf(buf, PAGE_SIZE, "%s\n", symname); } static ssize_t @@ -711,7 +760,7 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); @@ -732,7 +781,7 @@ bfad_im_optionrom_version_show(struct device *dev, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); @@ -746,7 +795,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); @@ -760,10 +809,10 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = 
im_port->bfad; return snprintf(buf, PAGE_SIZE, "%d\n", - bfa_get_nports(&bfad->bfa)); + bfa_get_nports(&bfad->bfa)); } static ssize_t @@ -788,10 +837,10 @@ bfad_im_num_of_discovered_ports_show(struct device *dev, rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC); if (rports == NULL) - return -ENOMEM; + return snprintf(buf, PAGE_SIZE, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_fcs_port_get_rports(port->fcs_port, rports, &nrports); + bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); @@ -837,19 +886,19 @@ struct device_attribute *bfad_im_host_attrs[] = { }; struct device_attribute *bfad_im_vport_attrs[] = { - &dev_attr_serial_number, - &dev_attr_model, - &dev_attr_model_description, - &dev_attr_node_name, - &dev_attr_symbolic_name, - &dev_attr_hardware_version, - &dev_attr_driver_version, - &dev_attr_option_rom_version, - &dev_attr_firmware_version, - &dev_attr_number_of_ports, - &dev_attr_driver_name, - &dev_attr_number_of_discovered_ports, - NULL, + &dev_attr_serial_number, + &dev_attr_model, + &dev_attr_model_description, + &dev_attr_node_name, + &dev_attr_symbolic_name, + &dev_attr_hardware_version, + &dev_attr_driver_version, + &dev_attr_option_rom_version, + &dev_attr_firmware_version, + &dev_attr_number_of_ports, + &dev_attr_driver_name, + &dev_attr_number_of_discovered_ports, + NULL, }; diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h deleted file mode 100644 index bf0102076508..000000000000 --- a/drivers/scsi/bfa/bfad_attr.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFAD_ATTR_H__ -#define __BFAD_ATTR_H__ - -/** - * FC_transport_template FC transport template - */ - -struct Scsi_Host* -bfad_os_dev_to_shost(struct scsi_target *starget); - -/** - * FC transport template entry, get SCSI target port ID. - */ -void -bfad_im_get_starget_port_id(struct scsi_target *starget); - -/** - * FC transport template entry, get SCSI target nwwn. - */ -void -bfad_im_get_starget_node_name(struct scsi_target *starget); - -/** - * FC transport template entry, get SCSI target pwwn. - */ -void -bfad_im_get_starget_port_name(struct scsi_target *starget); - -/** - * FC transport template entry, get SCSI host port ID. 
- */ -void -bfad_im_get_host_port_id(struct Scsi_Host *shost); - -struct Scsi_Host* -bfad_os_starget_to_shost(struct scsi_target *starget); - - -#endif /* __BFAD_ATTR_H__ */ diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 4b82f12aad62..69ed1c4a903e 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -17,8 +17,8 @@ #include -#include -#include +#include "bfad_drv.h" +#include "bfad_im.h" /* * BFA debufs interface @@ -28,7 +28,7 @@ * mount -t debugfs none /sys/kernel/debug * * BFA Hierarchy: - * - bfa/host# + * - bfa/host# * where the host number corresponds to the one under /sys/class/scsi_host/host# * * Debugging service available per host: @@ -217,7 +217,7 @@ bfad_debugfs_read(struct file *file, char __user *buf, #define BFA_REG_ADDRSZ(__bfa) \ ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ) -#define BFA_REG_ADDRMSK(__bfa) ((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1)) +#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1)) static bfa_status_t bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len) @@ -359,7 +359,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, return -EINVAL; } - reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr); + reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_reg_write(reg_addr, val); spin_unlock_irqrestore(&bfad->bfad_lock, flags); diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 465b8b86ec9c..98420bbb4f3f 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
* All rights reserved * www.brocade.com * @@ -28,30 +28,27 @@ #include "bfa_os_inc.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "aen/bfa_aen.h" -#include - -#define BFAD_DRIVER_NAME "bfa" +#include "bfa_modules.h" +#include "bfa_fcs.h" +#include "bfa_defs_fcs.h" + +#include "bfa_plog.h" +#include "bfa_cs.h" + +#define BFAD_DRIVER_NAME "bfa" #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "2.2.2.1" +#define BFAD_DRIVER_VERSION "2.3.2.0" #endif - +#define BFAD_PROTO_NAME FCPI_NAME #define BFAD_IRQ_FLAGS IRQF_SHARED +#ifndef FC_PORTSPEED_8GBIT +#define FC_PORTSPEED_8GBIT 0x10 +#endif + /* * BFAD flags */ @@ -62,9 +59,9 @@ #define BFAD_HAL_START_DONE 0x00000010 #define BFAD_PORT_ONLINE 0x00000020 #define BFAD_RPORT_ONLINE 0x00000040 -#define BFAD_FCS_INIT_DONE 0x00000080 -#define BFAD_HAL_INIT_FAIL 0x00000100 -#define BFAD_FC4_PROBE_DONE 0x00000200 +#define BFAD_FCS_INIT_DONE 0x00000080 +#define BFAD_HAL_INIT_FAIL 0x00000100 +#define BFAD_FC4_PROBE_DONE 0x00000200 #define BFAD_PORT_DELETE 0x00000001 /* @@ -77,8 +74,8 @@ /* * BFAD configuration parameter default values */ -#define BFAD_LUN_QUEUE_DEPTH 32 -#define BFAD_IO_MAX_SGE SG_ALL +#define BFAD_LUN_QUEUE_DEPTH 32 +#define BFAD_IO_MAX_SGE SG_ALL #define bfad_isr_t irq_handler_t @@ -87,6 +84,16 @@ struct bfad_msix_s { struct bfad_s *bfad; struct msix_entry msix; + char name[32]; +}; + +/* + * Only append to the enums defined here to avoid any versioning + * needed between trace utility and driver version + */ +enum { + BFA_TRC_LDRV_BFAD = 1, + BFA_TRC_LDRV_IM = 2, }; enum bfad_port_pvb_type { @@ -101,17 +108,13 @@ enum bfad_port_pvb_type { */ struct bfad_port_s { struct list_head list_entry; - struct bfad_s *bfad; - struct bfa_fcs_port_s *fcs_port; - u32 roles; - s32 flags; - u32 supported_fc4s; - u8 ipfc_flags; + struct bfad_s *bfad; + struct bfa_fcs_lport_s *fcs_port; + u32 roles; + s32 flags; + u32 supported_fc4s; enum bfad_port_pvb_type pvb_type; struct bfad_im_port_s *im_port; /* IM specific data */ - struct bfad_tm_port_s *tm_port; /* TM specific data */ - struct bfad_ipfc_port_s *ipfc_port; /* IPFC specific data */ - /* port debugfs specific data */ struct dentry *port_debugfs_root; }; @@ -124,7 +127,6 @@ struct bfad_vport_s { struct bfa_fcs_vport_s fcs_vport; struct completion *comp_del; struct list_head list_entry; - struct bfa_port_cfg_s port_cfg; }; /* @@ -137,20 +139,35 @@ struct bfad_vf_s { }; struct bfad_cfg_param_s { - u32 rport_del_timeout; - u32 ioc_queue_depth; - u32 lun_queue_depth; - u32 io_max_sge; - u32 binding_method; + u32 rport_del_timeout; + u32 ioc_queue_depth; + u32 lun_queue_depth; + u32 io_max_sge; + u32 binding_method; +}; + +union bfad_tmp_buf { + /* From struct bfa_adapter_attr_s */ + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + + /* From struct bfa_ioc_pci_attr_s */ + u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ + + wwn_t wwn[BFA_FCS_MAX_LPORTS]; }; /* * BFAD (PCI function) data structure */ struct bfad_s { + bfa_sm_t sm; /* state machine */ struct list_head list_entry; - struct bfa_s bfa; - struct bfa_fcs_s bfa_fcs; + struct bfa_s bfa; + struct bfa_fcs_s bfa_fcs; struct pci_dev *pcidev; const char *pci_name; struct bfa_pcidev_s hal_pcidev; @@ -163,41 +180,41 @@ struct bfad_s { struct bfad_port_s pport; /* physical 
port of the BFAD */ struct bfa_meminfo_s meminfo; struct bfa_iocfc_cfg_s ioc_cfg; - u32 inst_no; /* BFAD instance number */ - u32 bfad_flags; + u32 inst_no; /* BFAD instance number */ + u32 bfad_flags; spinlock_t bfad_lock; struct task_struct *bfad_tsk; struct bfad_cfg_param_s cfg_data; struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; - int nvec; - char adapter_name[BFA_ADAPTER_SYM_NAME_LEN]; - char port_name[BFA_ADAPTER_SYM_NAME_LEN]; + int nvec; + char adapter_name[BFA_ADAPTER_SYM_NAME_LEN]; + char port_name[BFA_ADAPTER_SYM_NAME_LEN]; struct timer_list hal_tmo; unsigned long hs_start; struct bfad_im_s *im; /* IM specific data */ - struct bfad_tm_s *tm; /* TM specific data */ - struct bfad_ipfc_s *ipfc; /* IPFC specific data */ - struct bfa_log_mod_s log_data; struct bfa_trc_mod_s *trcmod; - struct bfa_log_mod_s *logmod; - struct bfa_aen_s *aen; - struct bfa_aen_s aen_buf; - void *file_map[BFA_AEN_MAX_APP]; struct bfa_plog_s plog_buf; - int ref_count; - bfa_boolean_t ipfc_enabled; + int ref_count; + union bfad_tmp_buf tmp_buf; struct fc_host_statistics link_stats; - struct list_head pbc_pcfg_list; - atomic_t wq_reqcnt; + struct list_head pbc_vport_list; /* debugfs specific data */ char *regdata; u32 reglen; struct dentry *bfad_dentry_files[5]; }; -struct bfad_pcfg_s { - struct list_head list_entry; - struct bfa_port_cfg_s port_cfg; +/* BFAD state machine events */ +enum bfad_sm_event { + BFAD_E_CREATE = 1, + BFAD_E_KTHREAD_CREATE_FAILED = 2, + BFAD_E_INIT = 3, + BFAD_E_INIT_SUCCESS = 4, + BFAD_E_INIT_FAILED = 5, + BFAD_E_INTR_INIT_FAILED = 6, + BFAD_E_FCS_EXIT_COMP = 7, + BFAD_E_EXIT_COMP = 8, + BFAD_E_STOP = 9 }; /* @@ -208,30 +225,30 @@ struct bfad_rport_s { }; struct bfad_buf_info { - void *virt; + void *virt; dma_addr_t phys; - u32 size; + u32 size; }; struct bfad_fcxp { struct bfad_port_s *port; struct bfa_rport_s *bfa_rport; bfa_status_t req_status; - u16 tag; - u16 rsp_len; - u16 rsp_maxlen; - u8 use_ireqbuf; - u8 use_irspbuf; - u32 num_req_sgles; - u32 num_rsp_sgles; - struct fchs_s fchs; - void *reqbuf_info; - void *rspbuf_info; + u16 tag; + u16 rsp_len; + u16 rsp_maxlen; + u8 use_ireqbuf; + u8 use_irspbuf; + u32 num_req_sgles; + u32 num_rsp_sgles; + struct fchs_s fchs; + void *reqbuf_info; + void *rspbuf_info; struct bfa_sge_s *req_sge; struct bfa_sge_s *rsp_sge; fcxp_send_cb_t send_cbfn; - void *send_cbarg; - void *bfa_fcxp; + void *send_cbarg; + void *bfa_fcxp; struct completion comp; }; @@ -244,34 +261,48 @@ struct bfad_hal_comp { * Macro to obtain the immediate lower power * of two for the integer. 
*/ -#define nextLowerInt(x) \ -do { \ - int j; \ - (*x)--; \ - for (j = 1; j < (sizeof(int) * 8); j <<= 1) \ - (*x) = (*x) | (*x) >> j; \ - (*x)++; \ - (*x) = (*x) >> 1; \ +#define nextLowerInt(x) \ +do { \ + int i; \ + (*x)--; \ + for (i = 1; i < (sizeof(int)*8); i <<= 1) \ + (*x) = (*x) | (*x) >> i; \ + (*x)++; \ + (*x) = (*x) >> 1; \ } while (0) -bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, - struct bfa_port_cfg_s *port_cfg, struct device *dev); -bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, - struct bfa_port_cfg_s *port_cfg); -bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); -bfa_status_t bfad_drv_init(struct bfad_s *bfad); -bfa_status_t bfad_start_ops(struct bfad_s *bfad); -void bfad_drv_start(struct bfad_s *bfad); -void bfad_uncfg_pport(struct bfad_s *bfad); -void bfad_drv_stop(struct bfad_s *bfad); -void bfad_remove_intr(struct bfad_s *bfad); -void bfad_hal_mem_release(struct bfad_s *bfad); -void bfad_hcb_comp(void *arg, bfa_status_t status); - -int bfad_setup_intr(struct bfad_s *bfad); -void bfad_remove_intr(struct bfad_s *bfad); +#define list_remove_head(list, entry, type, member) \ +do { \ + entry = NULL; \ + if (!list_empty(list)) { \ + entry = list_entry((list)->next, type, member); \ + list_del_init(&entry->member); \ + } \ +} while (0) +#define list_get_first(list, type, member) \ +((list_empty(list)) ? NULL : \ + list_entry((list)->next, type, member)) + +bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg, + struct device *dev); +bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, + struct bfa_lport_cfg_s *port_cfg); +bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role); +bfa_status_t bfad_drv_init(struct bfad_s *bfad); +bfa_status_t bfad_start_ops(struct bfad_s *bfad); +void bfad_drv_start(struct bfad_s *bfad); +void bfad_uncfg_pport(struct bfad_s *bfad); +void bfad_stop(struct bfad_s *bfad); +void bfad_fcs_stop(struct bfad_s *bfad); +void bfad_remove_intr(struct bfad_s *bfad); +void bfad_hal_mem_release(struct bfad_s *bfad); +void bfad_hcb_comp(void *arg, bfa_status_t status); + +int bfad_setup_intr(struct bfad_s *bfad); +void bfad_remove_intr(struct bfad_s *bfad); void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg); bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad); void bfad_bfa_tmo(unsigned long data); @@ -280,9 +311,6 @@ int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_fcs_port_cfg(struct bfad_s *bfad); void bfad_drv_uninit(struct bfad_s *bfad); -void bfad_drv_log_level_set(struct bfad_s *bfad); -bfa_status_t bfad_fc4_module_init(void); -void bfad_fc4_module_exit(void); int bfad_worker(void *ptr); void bfad_debugfs_init(struct bfad_port_s *port); void bfad_debugfs_exit(struct bfad_port_s *port); @@ -294,10 +322,30 @@ int bfad_os_get_linkup_delay(struct bfad_s *bfad); int bfad_install_msix_handler(struct bfad_s *bfad); extern struct idr bfad_im_port_index; +extern struct pci_device_id bfad_id_table[]; extern struct list_head bfad_list; -extern int bfa_lun_queue_depth; -extern int bfad_supported_fc4s; -extern int bfa_linkup_delay; +extern char *os_name; +extern char *os_patch; +extern char *host_name; +extern int num_rports; +extern int num_ios; +extern int num_tms; +extern int num_fcxps; +extern int num_ufbufs; +extern int reqq_size; +extern int rspq_size; +extern int num_sgpgs; +extern int rport_del_timeout; +extern int bfa_lun_queue_depth; 
+extern int bfa_io_max_sge; +extern int log_level; +extern int ioc_auto_recover; +extern int bfa_linkup_delay; +extern int msix_disable_cb; +extern int msix_disable_ct; +extern int fdmi_enable; +extern int supported_fc4s; +extern int pcie_max_read_reqsz; extern int bfa_debugfs_enable; extern struct mutex bfad_mutex; diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c deleted file mode 100644 index 1baca1a12085..000000000000 --- a/drivers/scsi/bfa/bfad_fwimg.c +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfad_fwimg.c Linux driver PCI interface module. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -u32 bfi_image_ct_fc_size; -u32 bfi_image_ct_cna_size; -u32 bfi_image_cb_fc_size; -u32 *bfi_image_ct_fc; -u32 *bfi_image_ct_cna; -u32 *bfi_image_cb_fc; - - -#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" -#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" -#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" -MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC); -MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA); -MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC); - -u32 * -bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, - u32 *bfi_image_size, char *fw_name) -{ - const struct firmware *fw; - - if (request_firmware(&fw, fw_name, &pdev->dev)) { - printk(KERN_ALERT "Can't locate firmware %s\n", fw_name); - goto error; - } - - *bfi_image = vmalloc(fw->size); - if (NULL == *bfi_image) { - printk(KERN_ALERT "Fail to allocate buffer for fw image " - "size=%x!\n", (u32) fw->size); - goto error; - } - - memcpy(*bfi_image, fw->data, fw->size); - *bfi_image_size = fw->size/sizeof(u32); - - return *bfi_image; - -error: - return NULL; -} - -u32 * -bfad_get_firmware_buf(struct pci_dev *pdev) -{ - if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) { - if (bfi_image_ct_fc_size == 0) - bfad_read_firmware(pdev, &bfi_image_ct_fc, - &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC); - return bfi_image_ct_fc; - } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) { - if (bfi_image_ct_cna_size == 0) - bfad_read_firmware(pdev, &bfi_image_ct_cna, - &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA); - return bfi_image_ct_cna; - } else { - if (bfi_image_cb_fc_size == 0) - bfad_read_firmware(pdev, &bfi_image_cb_fc, - &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC); - return bfi_image_cb_fc; - } -} - -u32 * -bfi_image_ct_fc_get_chunk(u32 off) -{ return (u32 *)(bfi_image_ct_fc + off); } - -u32 * -bfi_image_ct_cna_get_chunk(u32 off) -{ return (u32 *)(bfi_image_ct_cna + off); } - -u32 * -bfi_image_cb_fc_get_chunk(u32 off) -{ return (u32 *)(bfi_image_cb_fc + off); } - -uint32_t * -bfi_image_get_chunk(int type, uint32_t off) -{ - switch (type) { - case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off); break; - case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off); break; - case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off); break; - default: return 0; 
break; - } -} - -uint32_t -bfi_image_get_size(int type) -{ - switch (type) { - case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size; break; - case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size; break; - case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size; break; - default: return 0; break; - } -} diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 6ef87f6fcdbb..d950ee44016e 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * @@ -19,12 +19,10 @@ * bfad_im.c Linux driver IM module. */ -#include #include "bfad_drv.h" #include "bfad_im.h" -#include "bfad_trcmod.h" -#include "bfa_cb_ioim_macros.h" -#include +#include "bfa_cb_ioim.h" +#include "bfa_fcs.h" BFA_TRC_FILE(LDRV, IM); @@ -33,8 +31,10 @@ struct scsi_transport_template *bfad_im_scsi_transport_template; struct scsi_transport_template *bfad_im_scsi_vport_transport_template; static void bfad_im_itnim_work_handler(struct work_struct *work); static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, - void (*done)(struct scsi_cmnd *)); + void (*done)(struct scsi_cmnd *)); static int bfad_im_slave_alloc(struct scsi_device *sdev); +static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, + struct bfad_itnim_s *itnim); void bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, @@ -58,6 +58,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, sns_len = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, sns_info, sns_len); } + if (residue > 0) { bfa_trc(bfad, residue); scsi_set_resid(cmnd, residue); @@ -76,7 +77,8 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, case BFI_IOIM_STS_TIMEDOUT: case BFI_IOIM_STS_PATHTOV: default: - cmnd->result = ScsiResult(DID_ERROR, 0); + host_status = DID_ERROR; + cmnd->result = ScsiResult(host_status, 0); } /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ @@ -162,11 +164,6 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, wake_up(wq); } -void -bfa_cb_ioim_resfree(void *drv) -{ -} - /** * Scsi_Host_template SCSI host template */ @@ -179,15 +176,23 @@ bfad_im_info(struct Scsi_Host *shost) static char bfa_buf[256]; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfad_s *bfad = im_port->bfad; + struct bfad_s *bfad = im_port->bfad; + struct bfa_s *bfa = &bfad->bfa; + struct bfa_ioc_s *ioc = &bfa->ioc; char model[BFA_ADAPTER_MODEL_NAME_LEN]; - bfa_get_adapter_model(&bfad->bfa, model); + bfa_get_adapter_model(bfa, model); memset(bfa_buf, 0, sizeof(bfa_buf)); - snprintf(bfa_buf, sizeof(bfa_buf), - "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", + if (ioc->ctdev) + snprintf(bfa_buf, sizeof(bfa_buf), + "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", + model, bfad->pci_name, BFAD_DRIVER_VERSION); + else + snprintf(bfa_buf, sizeof(bfa_buf), + "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s", model, bfad->pci_name, BFAD_DRIVER_VERSION); + return bfa_buf; } @@ -221,9 +226,9 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd) } bfa_trc(bfad, hal_io->iotag); - bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT, + BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n", im_port->shost->host_no, cmnd, hal_io->iotag); - bfa_ioim_abort(hal_io); + (void) bfa_ioim_abort(hal_io); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* Need to wait until the command get aborted */ @@ -237,7 
+242,8 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd) cmnd->scsi_done(cmnd); bfa_trc(bfad, hal_io->iotag); - bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP, + BFA_LOG(KERN_INFO, bfad, log_level, + "scsi%d: complete abort 0x%p iotag 0x%x\n", im_port->shost->host_no, cmnd, hal_io->iotag); return SUCCESS; out: @@ -255,8 +261,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); if (!tskim) { - BFA_DEV_PRINTF(bfad, BFA_ERR, - "target reset, fail to allocate tskim\n"); + BFA_LOG(KERN_ERR, bfad, log_level, + "target reset, fail to allocate tskim\n"); rc = BFA_STATUS_FAILED; goto out; } @@ -306,7 +312,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); if (!tskim) { - BFA_DEV_PRINTF(bfad, BFA_ERR, + BFA_LOG(KERN_ERR, bfad, log_level, "LUN reset, fail to allocate tskim"); spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = FAILED; @@ -331,8 +337,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) task_status = cmnd->SCp.Status >> 1; if (task_status != BFI_TSKIM_STS_OK) { - BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n", - task_status); + BFA_LOG(KERN_ERR, bfad, log_level, + "LUN reset failure, status: %d\n", task_status); rc = FAILED; } @@ -375,7 +381,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd) task_status = cmnd->SCp.Status >> 1; if (task_status != BFI_TSKIM_STS_OK) { - BFA_DEV_PRINTF(bfad, BFA_ERR, + BFA_LOG(KERN_ERR, bfad, log_level, "target reset failure," " status: %d\n", task_status); err_cnt++; @@ -438,6 +444,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) wwn_t wwpn; u32 fcid; char wwpn_str[32], fcid_str[16]; + struct bfad_im_s *im = itnim_drv->im; /* online to free state transtion should not happen */ bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE); @@ -454,10 +461,14 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim); wwn2str(wwpn_str, wwpn); fcid2str(fcid_str, fcid); - bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE, + BFA_LOG(KERN_INFO, bfad, log_level, + "ITNIM FREE scsi%d: FCID: %s WWPN: %s\n", port->im_port->shost->host_no, fcid_str, wwpn_str); - bfad_os_itnim_process(itnim_drv); + + /* ITNIM processing */ + if (itnim_drv->queue_work) + queue_work(im->drv_workq, &itnim_drv->itnim_work); } /** @@ -468,13 +479,17 @@ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; + struct bfad_im_s *im = itnim_drv->im; itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim); port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); itnim_drv->state = ITNIM_STATE_ONLINE; itnim_drv->queue_work = 1; itnim_drv->im_port = port->im_port; - bfad_os_itnim_process(itnim_drv); + + /* ITNIM processing */ + if (itnim_drv->queue_work) + queue_work(im->drv_workq, &itnim_drv->itnim_work); } /** @@ -486,6 +501,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv) { struct bfad_port_s *port; struct bfad_s *bfad; + struct bfad_im_s *im = itnim_drv->im; port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim); bfad = port->bfad; @@ -497,16 +513,10 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv) itnim_drv->im_port = port->im_port; itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING; itnim_drv->queue_work = 1; - bfad_os_itnim_process(itnim_drv); -} -/** - * BFA FCS itnim timeout callback. - * Context: Interrupt. 
bfad_lock is held - */ -void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim) -{ - itnim->state = ITNIM_STATE_TIMEOUT; + /* ITNIM processing */ + if (itnim_drv->queue_work) + queue_work(im->drv_workq, &itnim_drv->itnim_work); } /** @@ -514,7 +524,7 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim) */ int bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, - struct device *dev) + struct device *dev) { int error = 1; @@ -580,7 +590,7 @@ void bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) { bfa_trc(bfad, bfad->inst_no); - bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE, + BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n", im_port->shost->host_no); fc_remove_host(im_port->shost); @@ -598,14 +608,11 @@ bfad_im_port_delete_handler(struct work_struct *work) { struct bfad_im_port_s *im_port = container_of(work, struct bfad_im_port_s, port_delete_work); - struct bfad_s *bfad = im_port->bfad; if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { im_port->flags |= BFAD_PORT_DELETE; fc_vport_terminate(im_port->fc_vport); - atomic_dec(&bfad->wq_reqcnt); } - } bfa_status_t @@ -636,11 +643,8 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port) { struct bfad_im_port_s *im_port = port->im_port; - if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) { - atomic_inc(&bfad->wq_reqcnt); - queue_work(bfad->im->drv_workq, + queue_work(bfad->im->drv_workq, &im_port->port_delete_work); - } } void @@ -663,16 +667,6 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port) spin_unlock_irqrestore(&bfad->bfad_lock, flags); } -void -bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port) -{ -} - -void -bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port) -{ -} - bfa_status_t bfad_im_probe(struct bfad_s *bfad) { @@ -701,27 +695,12 @@ void bfad_im_probe_undo(struct bfad_s *bfad) { if (bfad->im) { - while (atomic_read(&bfad->wq_reqcnt)) { - printk(KERN_INFO "bfa %s: waiting workq processing," - " wq_reqcnt:%x\n", bfad->pci_name, - atomic_read(&bfad->wq_reqcnt)); - schedule_timeout_uninterruptible(HZ); - } bfad_os_destroy_workq(bfad->im); kfree(bfad->im); bfad->im = NULL; } } -/** - * Call back function to handle IO redirection state change - */ -void -bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect) -{ - /* Do nothing */ -} - struct Scsi_Host * bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) { @@ -751,6 +730,7 @@ void bfad_os_destroy_workq(struct bfad_im_s *im) { if (im && im->drv_workq) { + flush_workqueue(im->drv_workq); destroy_workqueue(im->drv_workq); im->drv_workq = NULL; } @@ -762,7 +742,7 @@ bfad_os_thread_workq(struct bfad_s *bfad) struct bfad_im_s *im = bfad->im; bfa_trc(bfad, 0); - snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d", + snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d", bfad->inst_no); im->drv_workq = create_singlethread_workqueue(im->drv_workq_name); if (!im->drv_workq) @@ -832,12 +812,6 @@ struct scsi_host_template bfad_im_vport_template = { .max_sectors = 0xFFFF, }; -void -bfad_im_probe_post(struct bfad_im_s *im) -{ - flush_workqueue(im->drv_workq); -} - bfa_status_t bfad_im_module_init(void) { @@ -861,19 +835,11 @@ bfad_im_module_exit(void) { if (bfad_im_scsi_transport_template) fc_release_transport(bfad_im_scsi_transport_template); + if (bfad_im_scsi_vport_transport_template) fc_release_transport(bfad_im_scsi_vport_transport_template); } -void -bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv) -{ - struct bfad_im_s *im = 
itnim_drv->im; - - if (itnim_drv->queue_work) - queue_work(im->drv_workq, &itnim_drv->itnim_work); -} - void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) { @@ -916,9 +882,6 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) } } - - - struct bfad_itnim_s * bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id) { @@ -949,44 +912,64 @@ bfad_im_slave_alloc(struct scsi_device *sdev) return 0; } +static u32 +bfad_im_supported_speeds(struct bfa_s *bfa) +{ + struct bfa_ioc_attr_s ioc_attr; + u32 supported_speed = 0; + + bfa_get_attr(bfa, &ioc_attr); + if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { + if (ioc_attr.adapter_attr.is_mezz) { + supported_speed |= FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; + } else { + supported_speed |= FC_PORTSPEED_8GBIT | + FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT; + } + } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { + supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | + FC_PORTSPEED_1GBIT; + } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { + supported_speed |= FC_PORTSPEED_10GBIT; + } + return supported_speed; +} + void bfad_os_fc_host_init(struct bfad_im_port_s *im_port) { struct Scsi_Host *host = im_port->shost; struct bfad_s *bfad = im_port->bfad; struct bfad_port_s *port = im_port->port; - struct bfa_pport_attr_s pattr; - char model[BFA_ADAPTER_MODEL_NAME_LEN]; - char fw_ver[BFA_VERSION_LEN]; + struct bfa_port_attr_s pattr; + struct bfa_lport_attr_s port_attr; + char symname[BFA_SYMNAME_MAXLEN]; fc_host_node_name(host) = - bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); + bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port))); fc_host_port_name(host) = - bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port))); + bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port))); fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); fc_host_supported_classes(host) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(host), 0, sizeof(fc_host_supported_fc4s(host))); - if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM)) + if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) /* For FCP type 0x08 */ fc_host_supported_fc4s(host)[2] = 1; - if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC) - /* For LLC/SNAP type 0x05 */ - fc_host_supported_fc4s(host)[3] = 0x20; /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(host)[7] = 1; - bfa_get_adapter_model(&bfad->bfa, model); - bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); - sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s", - model, fw_ver, BFAD_DRIVER_VERSION); + bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); + strncpy(symname, port_attr.port_cfg.sym_name.symname, + BFA_SYMNAME_MAXLEN); + sprintf(fc_host_symbolic_name(host), "%s", symname); - fc_host_supported_speeds(host) = 0; - fc_host_supported_speeds(host) |= - FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | - FC_PORTSPEED_1GBIT; + fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); bfa_fcport_get_attr(&bfad->bfa, &pattr); fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; @@ -1065,7 +1048,9 @@ bfad_im_itnim_work_handler(struct work_struct *work) fcid2str(fcid_str, fcid); list_add_tail(&itnim->list_entry, &im_port->itnim_mapped_list); - bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE, + BFA_LOG(KERN_INFO, bfad, log_level, + "ITNIM ONLINE Target: %d:0:%d " + "FCID: %s WWPN: %s\n", im_port->shost->host_no, 
itnim->scsi_tgt_id, fcid_str, wwpn_str); @@ -1096,7 +1081,9 @@ bfad_im_itnim_work_handler(struct work_struct *work) wwn2str(wwpn_str, wwpn); fcid2str(fcid_str, fcid); list_del(&itnim->list_entry); - bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE, + BFA_LOG(KERN_INFO, bfad, log_level, + "ITNIM OFFLINE Target: %d:0:%d " + "FCID: %s WWPN: %s\n", im_port->shost->host_no, itnim->scsi_tgt_id, fcid_str, wwpn_str); @@ -1142,7 +1129,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) struct bfa_ioim_s *hal_io; unsigned long flags; int rc; - s16 sg_cnt = 0; + int sg_cnt = 0; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); rc = fc_remote_port_chkready(rport); @@ -1153,7 +1140,6 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) } sg_cnt = scsi_dma_map(cmnd); - if (sg_cnt < 0) return SCSI_MLQUEUE_HOST_BUSY; @@ -1168,6 +1154,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) goto out_fail_cmd; } + itnim = itnim_data->itnim; if (!itnim) { cmnd->result = ScsiResult(DID_IMM_RETRY, 0); @@ -1206,47 +1193,49 @@ bfad_os_rport_online_wait(struct bfad_s *bfad) int rport_delay = 10; for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE) - && i < bfa_linkup_delay; i++) - schedule_timeout_uninterruptible(HZ); + && i < bfa_linkup_delay; i++) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + } if (bfad->bfad_flags & BFAD_PORT_ONLINE) { rport_delay = rport_delay < bfa_linkup_delay ? - rport_delay : bfa_linkup_delay; + rport_delay : bfa_linkup_delay; for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE) - && i < rport_delay; i++) - schedule_timeout_uninterruptible(HZ); + && i < rport_delay; i++) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + } - if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) - schedule_timeout_uninterruptible(rport_delay * HZ); + if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(rport_delay * HZ); + } } } int bfad_os_get_linkup_delay(struct bfad_s *bfad) { - - u8 nwwns = 0; - wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; - int ldelay; + u8 nwwns = 0; + wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; + int linkup_delay; /* * Querying for the boot target port wwns * -- read from boot information in flash. - * If nwwns > 0 => boot over SAN and set bfa_linkup_delay = 30 - * else => local boot machine set bfa_linkup_delay = 10 + * If nwwns > 0 => boot over SAN and set linkup_delay = 30 + * else => local boot machine set linkup_delay = 0 */ bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns); - if (nwwns > 0) { - /* If boot over SAN; linkup_delay = 30sec */ - ldelay = 30; - } else { - /* If local boot; linkup_delay = 10sec */ - ldelay = 0; - } + if (nwwns > 0) + /* If Boot over SAN set linkup_delay = 30sec */ + linkup_delay = 30; + else + /* If local boot; no linkup_delay */ + linkup_delay = 0; - return ldelay; + return linkup_delay; } - - diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index 973cab4d09c7..b038c0e08921 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. 
* All rights reserved * www.brocade.com * @@ -18,20 +18,20 @@ #ifndef __BFAD_IM_H__ #define __BFAD_IM_H__ -#include "fcs/bfa_fcs_fcpim.h" -#include "bfad_im_compat.h" +#include "bfa_fcs.h" #define FCPI_NAME " fcpim" +#ifndef KOBJ_NAME_LEN +#define KOBJ_NAME_LEN 20 +#endif + bfa_status_t bfad_im_module_init(void); void bfad_im_module_exit(void); bfa_status_t bfad_im_probe(struct bfad_s *bfad); void bfad_im_probe_undo(struct bfad_s *bfad); -void bfad_im_probe_post(struct bfad_im_s *im); bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port); void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port); -void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port); -void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port); void bfad_im_port_clean(struct bfad_im_port_s *im_port); int bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, struct device *dev); @@ -44,14 +44,10 @@ void bfad_im_scsi_host_free(struct bfad_s *bfad, #define BFAD_LUN_RESET_TMO 60 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define BFA_QUEUE_FULL_RAMP_UP_TIME 120 -#define BFAD_KOBJ_NAME_LEN 20 /* * itnim flags */ -#define ITNIM_MAPPED 0x00000001 - -#define SCSI_TASK_MGMT 0x00000001 #define IO_DONE_BIT 0 struct bfad_itnim_data_s { @@ -64,7 +60,7 @@ struct bfad_im_port_s { struct work_struct port_delete_work; int idr_id; u16 cur_scsi_id; - u16 flags; + u16 flags; struct list_head binding_list; struct Scsi_Host *shost; struct list_head itnim_mapped_list; @@ -118,14 +114,13 @@ struct bfad_fcp_binding { struct bfad_im_s { struct bfad_s *bfad; struct workqueue_struct *drv_workq; - char drv_workq_name[BFAD_KOBJ_NAME_LEN]; + char drv_workq_name[KOBJ_NAME_LEN]; }; struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *); bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad); void bfad_os_destroy_workq(struct bfad_im_s *im); -void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv); void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); void bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port); @@ -133,11 +128,6 @@ void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev); void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id); -int bfad_os_scsi_add_host(struct Scsi_Host *shost, - struct bfad_im_port_s *im_port, struct bfad_s *bfad); - -void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port, - struct bfad_itnim_s *itnim); extern struct scsi_host_template bfad_im_scsi_host_template; extern struct scsi_host_template bfad_im_vport_template; @@ -146,4 +136,34 @@ extern struct fc_function_template bfad_im_vport_fc_function_template; extern struct scsi_transport_template *bfad_im_scsi_transport_template; extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template; +extern struct device_attribute *bfad_im_host_attrs[]; +extern struct device_attribute *bfad_im_vport_attrs[]; + +irqreturn_t bfad_intx(int irq, void *dev_id); + +/* Firmware releated */ +#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin" +#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin" +#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin" + +u32 *bfad_get_firmware_buf(struct pci_dev *pdev); +u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name); + +static inline u32 * +bfad_load_fwimg(struct pci_dev *pdev) +{ + return 
bfad_get_firmware_buf(pdev); +} + +static inline void +bfad_free_fwimg(void) +{ + if (bfi_image_ct_fc_size && bfi_image_ct_fc) + vfree(bfi_image_ct_fc); + if (bfi_image_ct_cna_size && bfi_image_ct_cna) + vfree(bfi_image_ct_cna); + if (bfi_image_cb_fc_size && bfi_image_cb_fc) + vfree(bfi_image_cb_fc); +} #endif diff --git a/drivers/scsi/bfa/bfad_im_compat.h b/drivers/scsi/bfa/bfad_im_compat.h deleted file mode 100644 index 0a122abbbe89..000000000000 --- a/drivers/scsi/bfa/bfad_im_compat.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFAD_IM_COMPAT_H__ -#define __BFAD_IM_COMPAT_H__ - -extern struct device_attribute *bfad_im_host_attrs[]; -extern struct device_attribute *bfad_im_vport_attrs[]; - -u32 *bfad_get_firmware_buf(struct pci_dev *pdev); -u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, - u32 *bfi_image_size, char *fw_name); - -static inline u32 * -bfad_load_fwimg(struct pci_dev *pdev) -{ - return bfad_get_firmware_buf(pdev); -} - -static inline void -bfad_free_fwimg(void) -{ - if (bfi_image_ct_fc_size && bfi_image_ct_fc) - vfree(bfi_image_ct_fc); - if (bfi_image_ct_cna_size && bfi_image_ct_cna) - vfree(bfi_image_ct_cna); - if (bfi_image_cb_fc_size && bfi_image_cb_fc) - vfree(bfi_image_cb_fc); -} - -#endif diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c deleted file mode 100644 index 56a351584f0c..000000000000 --- a/drivers/scsi/bfa/bfad_intr.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include "bfad_drv.h" -#include "bfad_trcmod.h" - -BFA_TRC_FILE(LDRV, INTR); - -/** - * bfa_isr BFA driver interrupt functions - */ -static int msix_disable_cb; -static int msix_disable_ct; -module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825" - " cards, default=0, Range[false:0|true:1]"); -module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804" - " cards, default=0, Range[false:0|true:1]"); -/** - * Line based interrupt handler. 
- */ -static irqreturn_t -bfad_intx(int irq, void *dev_id) -{ - struct bfad_s *bfad = dev_id; - struct list_head doneq; - unsigned long flags; - bfa_boolean_t rc; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - rc = bfa_intx(&bfad->bfa); - if (!rc) { - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - return IRQ_NONE; - } - - bfa_comp_deq(&bfad->bfa, &doneq); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - - if (!list_empty(&doneq)) { - bfa_comp_process(&bfad->bfa, &doneq); - - spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_comp_free(&bfad->bfa, &doneq); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - bfa_trc_fp(bfad, irq); - } - - return IRQ_HANDLED; - -} - -static irqreturn_t -bfad_msix(int irq, void *dev_id) -{ - struct bfad_msix_s *vec = dev_id; - struct bfad_s *bfad = vec->bfad; - struct list_head doneq; - unsigned long flags; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - - bfa_msix(&bfad->bfa, vec->msix.entry); - bfa_comp_deq(&bfad->bfa, &doneq); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - - if (!list_empty(&doneq)) { - bfa_comp_process(&bfad->bfa, &doneq); - - spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_comp_free(&bfad->bfa, &doneq); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - } - - return IRQ_HANDLED; -} - -/** - * Initialize the MSIX entry table. - */ -static void -bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries, - int mask, int max_bit) -{ - int i; - int match = 0x00000001; - - for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) { - if (mask & match) { - bfad->msix_tab[bfad->nvec].msix.entry = i; - bfad->msix_tab[bfad->nvec].bfad = bfad; - msix_entries[bfad->nvec].entry = i; - bfad->nvec++; - } - - match <<= 1; - } - -} - -int -bfad_install_msix_handler(struct bfad_s *bfad) -{ - int i, error = 0; - - for (i = 0; i < bfad->nvec; i++) { - error = request_irq(bfad->msix_tab[i].msix.vector, - (irq_handler_t) bfad_msix, 0, - BFAD_DRIVER_NAME, &bfad->msix_tab[i]); - bfa_trc(bfad, i); - bfa_trc(bfad, bfad->msix_tab[i].msix.vector); - if (error) { - int j; - - for (j = 0; j < i; j++) - free_irq(bfad->msix_tab[j].msix.vector, - &bfad->msix_tab[j]); - - return 1; - } - } - - return 0; -} - -/** - * Setup MSIX based interrupt. - */ -int -bfad_setup_intr(struct bfad_s *bfad) -{ - int error = 0; - u32 mask = 0, i, num_bit = 0, max_bit = 0; - struct msix_entry msix_entries[MAX_MSIX_ENTRY]; - struct pci_dev *pdev = bfad->pcidev; - - /* Call BFA to get the msix map for this PCI function. */ - bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); - - /* Set up the msix entry table */ - bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); - - if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) || - (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) { - - error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); - if (error) { - /* - * Only error number of vector is available. - * We don't have a mechanism to map multiple - * interrupts into one vector, so even if we - * can try to request less vectors, we don't - * know how to associate interrupt events to - * vectors. Linux doesn't dupicate vectors - * in the MSIX table for this case. 
- */ - - printk(KERN_WARNING "bfad%d: " - "pci_enable_msix failed (%d)," - " use line based.\n", bfad->inst_no, error); - - goto line_based; - } - - /* Save the vectors */ - for (i = 0; i < bfad->nvec; i++) { - bfa_trc(bfad, msix_entries[i].vector); - bfad->msix_tab[i].msix.vector = msix_entries[i].vector; - } - - bfa_msix_init(&bfad->bfa, bfad->nvec); - - bfad->bfad_flags |= BFAD_MSIX_ON; - - return error; - } - -line_based: - error = 0; - if (request_irq - (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS, - BFAD_DRIVER_NAME, bfad) != 0) { - /* Enable interrupt handler failed */ - return 1; - } - - return error; -} - -void -bfad_remove_intr(struct bfad_s *bfad) -{ - int i; - - if (bfad->bfad_flags & BFAD_MSIX_ON) { - for (i = 0; i < bfad->nvec; i++) - free_irq(bfad->msix_tab[i].msix.vector, - &bfad->msix_tab[i]); - - pci_disable_msix(bfad->pcidev); - bfad->bfad_flags &= ~BFAD_MSIX_ON; - } else { - free_irq(bfad->pcidev->irq, bfad); - } -} - - diff --git a/drivers/scsi/bfa/bfad_ipfc.h b/drivers/scsi/bfa/bfad_ipfc.h deleted file mode 100644 index 718bc5227671..000000000000 --- a/drivers/scsi/bfa/bfad_ipfc.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DRV_IPFC_H__ -#define __BFA_DRV_IPFC_H__ - - -#define IPFC_NAME "" - -#define bfad_ipfc_module_init(x) do {} while (0) -#define bfad_ipfc_module_exit(x) do {} while (0) -#define bfad_ipfc_probe(x) do {} while (0) -#define bfad_ipfc_probe_undo(x) do {} while (0) -#define bfad_ipfc_port_config(x, y) BFA_STATUS_OK -#define bfad_ipfc_port_unconfig(x, y) do {} while (0) - -#define bfad_ipfc_probe_post(x) do {} while (0) -#define bfad_ipfc_port_new(x, y, z) BFA_STATUS_OK -#define bfad_ipfc_port_delete(x, y) do {} while (0) -#define bfad_ipfc_port_online(x, y) do {} while (0) -#define bfad_ipfc_port_offline(x, y) do {} while (0) - -#define bfad_ip_get_attr(x) BFA_STATUS_FAILED -#define bfad_ip_reset_drv_stats(x) BFA_STATUS_FAILED -#define bfad_ip_get_drv_stats(x, y) BFA_STATUS_FAILED -#define bfad_ip_enable_ipfc(x, y, z) BFA_STATUS_FAILED - - -#endif diff --git a/drivers/scsi/bfa/bfad_os.c b/drivers/scsi/bfa/bfad_os.c deleted file mode 100644 index faf47b4f1a38..000000000000 --- a/drivers/scsi/bfa/bfad_os.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfad_os.c Linux driver OS specific calls. 
- */ - -#include "bfa_os_inc.h" -#include "bfad_drv.h" - -void -bfa_os_gettimeofday(struct bfa_timeval_s *tv) -{ - struct timeval tmp_tv; - - do_gettimeofday(&tmp_tv); - tv->tv_sec = (u32) tmp_tv.tv_sec; - tv->tv_usec = (u32) tmp_tv.tv_usec; -} - -void -bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id, - const char *fmt, ...) -{ - va_list ap; - #define BFA_STRING_256 256 - char tmp[BFA_STRING_256]; - - va_start(ap, fmt); - vsprintf(tmp, fmt, ap); - va_end(ap); - - printk(tmp); -} - - diff --git a/drivers/scsi/bfa/bfad_tm.h b/drivers/scsi/bfa/bfad_tm.h deleted file mode 100644 index 4901b1b7df02..000000000000 --- a/drivers/scsi/bfa/bfad_tm.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* - * Brocade Fibre Channel HBA Linux Target Mode Driver - */ - -/** - * tm/dummy/bfad_tm.h BFA callback dummy header file for BFA Linux target mode PCI interface module. - */ - -#ifndef __BFAD_TM_H__ -#define __BFAD_TM_H__ - -#include - -#define FCPT_NAME "" - -/* - * Called from base Linux driver on (De)Init events - */ - -/* attach tgt template with scst */ -#define bfad_tm_module_init() do {} while (0) - -/* detach/release tgt template */ -#define bfad_tm_module_exit() do {} while (0) - -#define bfad_tm_probe(x) do {} while (0) -#define bfad_tm_probe_undo(x) do {} while (0) -#define bfad_tm_probe_post(x) do {} while (0) - -/* - * Called by base Linux driver but triggered by BFA FCS on config events - */ -#define bfad_tm_port_new(x, y) BFA_STATUS_OK -#define bfad_tm_port_delete(x, y) do {} while (0) - -/* - * Called by base Linux driver but triggered by BFA FCS on PLOGI/O events - */ -#define bfad_tm_port_online(x, y) do {} while (0) -#define bfad_tm_port_offline(x, y) do {} while (0) - -#endif diff --git a/drivers/scsi/bfa/bfad_trcmod.h b/drivers/scsi/bfa/bfad_trcmod.h deleted file mode 100644 index 2827b2acd041..000000000000 --- a/drivers/scsi/bfa/bfad_trcmod.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfad_trcmod.h Linux driver trace modules - */ - - -#ifndef __BFAD_TRCMOD_H__ -#define __BFAD_TRCMOD_H__ - -#include - -/* - * !!! Only append to the enums defined here to avoid any versioning - * !!! 
needed between trace utility and driver version - */ -enum { - /* 2.6 Driver */ - BFA_TRC_LDRV_BFAD = 1, - BFA_TRC_LDRV_BFAD_2_6 = 2, - BFA_TRC_LDRV_BFAD_2_6_9 = 3, - BFA_TRC_LDRV_BFAD_2_6_10 = 4, - BFA_TRC_LDRV_INTR = 5, - BFA_TRC_LDRV_IOCTL = 6, - BFA_TRC_LDRV_OS = 7, - BFA_TRC_LDRV_IM = 8, - BFA_TRC_LDRV_IM_2_6 = 9, - BFA_TRC_LDRV_IM_2_6_9 = 10, - BFA_TRC_LDRV_IM_2_6_10 = 11, - BFA_TRC_LDRV_TM = 12, - BFA_TRC_LDRV_IPFC = 13, - BFA_TRC_LDRV_IM_2_4 = 14, - BFA_TRC_LDRV_IM_VMW = 15, - BFA_TRC_LDRV_IM_LT_2_6_10 = 16, -}; - -#endif /* __BFAD_TRCMOD_H__ */ diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h new file mode 100644 index 000000000000..85f2224a5733 --- /dev/null +++ b/drivers/scsi/bfa/bfi.h @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __BFI_H__ +#define __BFI_H__ + +#include "bfa_defs.h" +#include "bfa_defs_svc.h" + +#pragma pack(1) + +/** + * BFI FW image type + */ +#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ +#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) +enum { + BFI_IMAGE_CB_FC, + BFI_IMAGE_CT_FC, + BFI_IMAGE_CT_CNA, + BFI_IMAGE_MAX, +}; + +/** + * Msg header common to all msgs + */ +struct bfi_mhdr_s { + u8 msg_class; /* @ref bfi_mclass_t */ + u8 msg_id; /* msg opcode with in the class */ + union { + struct { + u8 rsvd; + u8 lpu_id; /* msg destination */ + } h2i; + u16 i2htok; /* token in msgs to host */ + } mtag; +}; + +#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.h2i.lpu_id = (_lpuid); \ +} while (0) + +#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.i2htok = (_i2htok); \ +} while (0) + +/* + * Message opcodes: 0-127 to firmware, 128-255 to host + */ +#define BFI_I2H_OPCODE_BASE 128 +#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) + +/** + **************************************************************************** + * + * Scatter Gather Element and Page definition + * + **************************************************************************** + */ + +#define BFI_SGE_INLINE 1 +#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) + +/** + * SG Flags + */ +enum { + BFI_SGE_DATA = 0, /* data address, not last */ + BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */ + BFI_SGE_DATA_LAST = 3, /* data address, last */ + BFI_SGE_LINK = 2, /* link address */ + BFI_SGE_PGDLEN = 2, /* cumulative data length for page */ +}; + +/** + * DMA addresses + */ +union bfi_addr_u { + struct { + u32 addr_lo; + u32 addr_hi; + } a32; +}; + +/** + * Scatter Gather Element + */ +struct bfi_sge_s { +#ifdef __BIGENDIAN + u32 flags:2, + rsvd:2, + sg_len:28; +#else + u32 sg_len:28, + rsvd:2, + flags:2; +#endif + union bfi_addr_u sga; +}; + +/** + * Scatter Gather Page + */ +#define BFI_SGPG_DATA_SGES 7 +#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1) +#define BFI_SGPG_RSVD_WD_LEN 8 +struct bfi_sgpg_s { + 
struct bfi_sge_s sges[BFI_SGPG_SGES_MAX]; + u32 rsvd[BFI_SGPG_RSVD_WD_LEN]; +}; + +/* + * Large Message structure - 128 Bytes size Msgs + */ +#define BFI_LMSG_SZ 128 +#define BFI_LMSG_PL_WSZ \ + ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4) + +struct bfi_msg_s { + struct bfi_mhdr_s mhdr; + u32 pl[BFI_LMSG_PL_WSZ]; +}; + +/** + * Mailbox message structure + */ +#define BFI_MBMSG_SZ 7 +struct bfi_mbmsg_s { + struct bfi_mhdr_s mh; + u32 pl[BFI_MBMSG_SZ]; +}; + +/** + * Message Classes + */ +enum bfi_mclass { + BFI_MC_IOC = 1, /* IO Controller (IOC) */ + BFI_MC_FCPORT = 5, /* FC port */ + BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ + BFI_MC_LL = 7, /* Link Layer */ + BFI_MC_UF = 8, /* Unsolicited frame receive */ + BFI_MC_FCXP = 9, /* FC Transport */ + BFI_MC_LPS = 10, /* lport fc login services */ + BFI_MC_RPORT = 11, /* Remote port */ + BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */ + BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */ + BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */ + BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */ + BFI_MC_IOIM = 16, /* IO (Initiator mode) */ + BFI_MC_IOIM_IOCOM = 17, /* good IO completion */ + BFI_MC_TSKIM = 18, /* Initiator Task management */ + BFI_MC_PORT = 21, /* Physical port */ + BFI_MC_MAX = 32 +}; + +#define BFI_IOC_MAX_CQS 4 +#define BFI_IOC_MAX_CQS_ASIC 8 +#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ + +#define BFI_BOOT_TYPE_OFF 8 +#define BFI_BOOT_LOADER_OFF 12 + +#define BFI_BOOT_TYPE_NORMAL 0 +#define BFI_BOOT_TYPE_FLASH 1 +#define BFI_BOOT_TYPE_MEMTEST 2 + +#define BFI_BOOT_LOADER_OS 0 +#define BFI_BOOT_LOADER_BIOS 1 +#define BFI_BOOT_LOADER_UEFI 2 + +/** + *---------------------------------------------------------------------- + * IOC + *---------------------------------------------------------------------- + */ + +enum bfi_ioc_h2i_msgs { + BFI_IOC_H2I_ENABLE_REQ = 1, + BFI_IOC_H2I_DISABLE_REQ = 2, + BFI_IOC_H2I_GETATTR_REQ = 3, + BFI_IOC_H2I_DBG_SYNC = 4, + BFI_IOC_H2I_DBG_DUMP = 5, +}; + +enum bfi_ioc_i2h_msgs { + BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), + BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), + BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), + BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4), + BFI_IOC_I2H_HBEAT = BFA_I2HM(5), +}; + +/** + * BFI_IOC_H2I_GETATTR_REQ message + */ +struct bfi_ioc_getattr_req_s { + struct bfi_mhdr_s mh; + union bfi_addr_u attr_addr; +}; + +struct bfi_ioc_attr_s { + wwn_t mfg_pwwn; /* Mfg port wwn */ + wwn_t mfg_nwwn; /* Mfg node wwn */ + mac_t mfg_mac; /* Mfg mac */ + u16 rsvd_a; + wwn_t pwwn; + wwn_t nwwn; + mac_t mac; /* PBC or Mfg mac */ + u16 rsvd_b; + mac_t fcoe_mac; + u16 rsvd_c; + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 rx_bbcredit; /* receive buffer credits */ + u32 adapter_prop; /* adapter properties */ + u16 maxfrsize; /* max receive frame size */ + char asic_rev; + u8 rsvd_d; + char fw_version[BFA_VERSION_LEN]; + char optrom_version[BFA_VERSION_LEN]; + struct bfa_mfg_vpd_s vpd; + u32 card_type; /* card type */ +}; + +/** + * BFI_IOC_I2H_GETATTR_REPLY message + */ +struct bfi_ioc_getattr_reply_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 status; /* cfg reply status */ + u8 rsvd[3]; +}; + +/** + * Firmware memory page offsets + */ +#define BFI_IOC_SMEM_PG0_CB (0x40) +#define BFI_IOC_SMEM_PG0_CT (0x180) + +/** + * Firmware statistic offset + */ +#define BFI_IOC_FWSTATS_OFF (0x6B40) +#define BFI_IOC_FWSTATS_SZ (4096) + +/** + * Firmware trace offset + */ +#define BFI_IOC_TRC_OFF (0x4b00) +#define BFI_IOC_TRC_ENTS 
256 + +#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFI_IOC_MD5SUM_SZ 4 +struct bfi_ioc_image_hdr_s { + u32 signature; /* constant signature */ + u32 rsvd_a; + u32 exec; /* exec vector */ + u32 param; /* parameters */ + u32 rsvd_b[4]; + u32 md5sum[BFI_IOC_MD5SUM_SZ]; +}; + +/** + * BFI_IOC_I2H_READY_EVENT message + */ +struct bfi_ioc_rdy_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 init_status; /* init event status */ + u8 rsvd[3]; +}; + +struct bfi_ioc_hbeat_s { + struct bfi_mhdr_s mh; /* common msg header */ + u32 hb_count; /* current heart beat count */ +}; + +/** + * IOC hardware/firmware state + */ +enum bfi_ioc_state { + BFI_IOC_UNINIT = 0, /* not initialized */ + BFI_IOC_INITING = 1, /* h/w is being initialized */ + BFI_IOC_HWINIT = 2, /* h/w is initialized */ + BFI_IOC_CFG = 3, /* IOC configuration in progress */ + BFI_IOC_OP = 4, /* IOC is operational */ + BFI_IOC_DISABLING = 5, /* IOC is being disabled */ + BFI_IOC_DISABLED = 6, /* IOC is disabled */ + BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ + BFI_IOC_FAIL = 8, /* IOC heart-beat failure */ + BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ +}; + +#define BFI_IOC_ENDIAN_SIG 0x12345678 + +enum { + BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */ + BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */ + BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */ + BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */ + BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */ + BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */ + BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */ + BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */ + BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */ + BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ +}; + +#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ + (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ + BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_SETP(__prop, __val) \ + ((__val) << BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_IS_PROTO(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_PROTO) +#define BFI_ADAPTER_IS_TTV(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_TTV) +#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \ + ((__adap_type) & BFI_ADAPTER_UNSUPP) +#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \ + ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ + BFI_ADAPTER_UNSUPP)) + +/** + * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages + */ +struct bfi_ioc_ctrl_req_s { + struct bfi_mhdr_s mh; + u8 ioc_class; + u8 rsvd[3]; + u32 tv_sec; +}; +#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; +#define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s; + +/** + * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages + */ +struct bfi_ioc_ctrl_reply_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 status; /* enable/disable status */ + u8 rsvd[3]; +}; +#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s; +#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; + +#define BFI_IOC_MSGSZ 8 +/** + * H2I Messages + */ +union bfi_ioc_h2i_msg_u { + struct bfi_mhdr_s mh; + struct bfi_ioc_ctrl_req_s enable_req; + struct bfi_ioc_ctrl_req_s disable_req; + struct bfi_ioc_getattr_req_s getattr_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + +/** + * I2H Messages + */ +union bfi_ioc_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_ioc_rdy_event_s rdy_event; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + + +/** + *---------------------------------------------------------------------- + * PBC + 
*---------------------------------------------------------------------- + */ + +#define BFI_PBC_MAX_BLUNS 8 +#define BFI_PBC_MAX_VPORTS 16 + +/** + * PBC boot lun configuration + */ +struct bfi_pbc_blun_s { + wwn_t tgt_pwwn; + lun_t tgt_lun; +}; + +/** + * PBC virtual port configuration + */ +struct bfi_pbc_vport_s { + wwn_t vp_pwwn; + wwn_t vp_nwwn; +}; + +/** + * BFI pre-boot configuration information + */ +struct bfi_pbc_s { + u8 port_enabled; + u8 boot_enabled; + u8 nbluns; + u8 nvports; + u8 port_speed; + u8 rsvd_a; + u16 hss; + wwn_t pbc_pwwn; + wwn_t pbc_nwwn; + struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS]; + struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS]; +}; + +/** + *---------------------------------------------------------------------- + * MSGQ + *---------------------------------------------------------------------- + */ +#define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci) +#define BFI_MSGQ_EMPTY(_q) (_q->pi == _q->ci) +#define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth) +#define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth) + +/* q_depth must be power of 2 */ +#define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1)) + +enum bfi_msgq_h2i_msgs_e { + BFI_MSGQ_H2I_INIT_REQ = 1, + BFI_MSGQ_H2I_DOORBELL = 2, + BFI_MSGQ_H2I_SHUTDOWN = 3, +}; + +enum bfi_msgq_i2h_msgs_e { + BFI_MSGQ_I2H_INIT_RSP = 1, + BFI_MSGQ_I2H_DOORBELL = 2, +}; + + +/* Messages(commands/responsed/AENS will have the following header */ +struct bfi_msgq_mhdr_s { + u8 msg_class; + u8 msg_id; + u16 msg_token; + u16 num_entries; + u8 enet_id; + u8 rsvd[1]; +}; + +#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_mid); \ + (_mh).msg_token = (_tok); \ + (_mh).enet_id = (_enet_id); \ +} while (0) + +/* + * Mailbox for messaging interface + * +*/ +#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_MSG_SIZE_MAX (2048) /* TBD */ + +struct bfi_msgq_s { + union bfi_addr_u addr; + u16 q_depth; /* Total num of entries in the queue */ + u8 rsvd[2]; +}; + +/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? 
*/ +struct bfi_msgq_cfg_req_s { + struct bfi_mhdr_s mh; + struct bfi_msgq_s cmdq; + struct bfi_msgq_s rspq; +}; + +/* BFI_ENET_MSGQ_CFG_RSP */ +struct bfi_msgq_cfg_rsp_s { + struct bfi_mhdr_s mh; + u8 cmd_status; + u8 rsvd[3]; +}; + + +/* BFI_MSGQ_H2I_DOORBELL */ +struct bfi_msgq_h2i_db_s { + struct bfi_mhdr_s mh; + u16 cmdq_pi; + u16 rspq_ci; +}; + +/* BFI_MSGQ_I2H_DOORBELL */ +struct bfi_msgq_i2h_db_s { + struct bfi_mhdr_s mh; + u16 rspq_pi; + u16 cmdq_ci; +}; + +#pragma pack() + +/* BFI port specific */ +#pragma pack(1) + +enum bfi_port_h2i { + BFI_PORT_H2I_ENABLE_REQ = (1), + BFI_PORT_H2I_DISABLE_REQ = (2), + BFI_PORT_H2I_GET_STATS_REQ = (3), + BFI_PORT_H2I_CLEAR_STATS_REQ = (4), +}; + +enum bfi_port_i2h { + BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3), + BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), +}; + +/** + * Generic REQ type + */ +struct bfi_port_generic_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u32 msgtag; /* msgtag for reply */ + u32 rsvd; +}; + +/** + * Generic RSP type + */ +struct bfi_port_generic_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* port enable status */ + u8 rsvd[3]; + u32 msgtag; /* msgtag for reply */ +}; + +/** + * BFI_PORT_H2I_GET_STATS_REQ + */ +struct bfi_port_get_stats_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + union bfi_addr_u dma_addr; +}; + +union bfi_port_h2i_msg_u { + struct bfi_mhdr_s mh; + struct bfi_port_generic_req_s enable_req; + struct bfi_port_generic_req_s disable_req; + struct bfi_port_get_stats_req_s getstats_req; + struct bfi_port_generic_req_s clearstats_req; +}; + +union bfi_port_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_port_generic_rsp_s enable_rsp; + struct bfi_port_generic_rsp_s disable_rsp; + struct bfi_port_generic_rsp_s getstats_rsp; + struct bfi_port_generic_rsp_s clearstats_rsp; +}; + +#pragma pack() + +#endif /* __BFI_H__ */ diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h new file mode 100644 index 000000000000..6f03ed382c69 --- /dev/null +++ b/drivers/scsi/bfa/bfi_cbreg.h @@ -0,0 +1,304 @@ + +/* + * bfi_cbreg.h crossbow host block register definitions + * + * !!! Do not edit. Auto generated. !!! 
+ */ + +#ifndef __BFI_CBREG_H__ +#define __BFI_CBREG_H__ + + +#define HOSTFN0_INT_STATUS 0x00014000 +#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000 +#define __HOSTFN0_INT_STATUS_LVL_SH 20 +#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH) +#define __HOSTFN0_INT_STATUS_P 0x000fffff +#define HOSTFN0_INT_MSK 0x00014004 +#define HOST_PAGE_NUM_FN0 0x00014008 +#define __HOST_PAGE_NUM_FN 0x000001ff +#define HOSTFN1_INT_STATUS 0x00014100 +#define __HOSTFN1_INT_STAT_LVL_MK 0x00f00000 +#define __HOSTFN1_INT_STAT_LVL_SH 20 +#define __HOSTFN1_INT_STAT_LVL(_v) ((_v) << __HOSTFN1_INT_STAT_LVL_SH) +#define __HOSTFN1_INT_STAT_P 0x000fffff +#define HOSTFN1_INT_MSK 0x00014104 +#define HOST_PAGE_NUM_FN1 0x00014108 +#define APP_PLL_400_CTL_REG 0x00014204 +#define __P_400_PLL_LOCK 0x80000000 +#define __APP_PLL_400_SRAM_USE_100MHZ 0x00100000 +#define __APP_PLL_400_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_400_RESET_TIMER_SH 17 +#define __APP_PLL_400_RESET_TIMER(_v) ((_v) << __APP_PLL_400_RESET_TIMER_SH) +#define __APP_PLL_400_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_400_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_400_CNTLMT0_1_SH 14 +#define __APP_PLL_400_CNTLMT0_1(_v) ((_v) << __APP_PLL_400_CNTLMT0_1_SH) +#define __APP_PLL_400_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_400_JITLMT0_1_SH 12 +#define __APP_PLL_400_JITLMT0_1(_v) ((_v) << __APP_PLL_400_JITLMT0_1_SH) +#define __APP_PLL_400_HREF 0x00000800 +#define __APP_PLL_400_HDIV 0x00000400 +#define __APP_PLL_400_P0_1_MK 0x00000300 +#define __APP_PLL_400_P0_1_SH 8 +#define __APP_PLL_400_P0_1(_v) ((_v) << __APP_PLL_400_P0_1_SH) +#define __APP_PLL_400_Z0_2_MK 0x000000e0 +#define __APP_PLL_400_Z0_2_SH 5 +#define __APP_PLL_400_Z0_2(_v) ((_v) << __APP_PLL_400_Z0_2_SH) +#define __APP_PLL_400_RSEL200500 0x00000010 +#define __APP_PLL_400_ENARST 0x00000008 +#define __APP_PLL_400_BYPASS 0x00000004 +#define __APP_PLL_400_LRESETN 0x00000002 +#define __APP_PLL_400_ENABLE 0x00000001 +#define APP_PLL_212_CTL_REG 0x00014208 +#define __P_212_PLL_LOCK 0x80000000 +#define __APP_PLL_212_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_212_RESET_TIMER_SH 17 +#define __APP_PLL_212_RESET_TIMER(_v) ((_v) << __APP_PLL_212_RESET_TIMER_SH) +#define __APP_PLL_212_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_212_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_212_CNTLMT0_1_SH 14 +#define __APP_PLL_212_CNTLMT0_1(_v) ((_v) << __APP_PLL_212_CNTLMT0_1_SH) +#define __APP_PLL_212_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_212_JITLMT0_1_SH 12 +#define __APP_PLL_212_JITLMT0_1(_v) ((_v) << __APP_PLL_212_JITLMT0_1_SH) +#define __APP_PLL_212_HREF 0x00000800 +#define __APP_PLL_212_HDIV 0x00000400 +#define __APP_PLL_212_P0_1_MK 0x00000300 +#define __APP_PLL_212_P0_1_SH 8 +#define __APP_PLL_212_P0_1(_v) ((_v) << __APP_PLL_212_P0_1_SH) +#define __APP_PLL_212_Z0_2_MK 0x000000e0 +#define __APP_PLL_212_Z0_2_SH 5 +#define __APP_PLL_212_Z0_2(_v) ((_v) << __APP_PLL_212_Z0_2_SH) +#define __APP_PLL_212_RSEL200500 0x00000010 +#define __APP_PLL_212_ENARST 0x00000008 +#define __APP_PLL_212_BYPASS 0x00000004 +#define __APP_PLL_212_LRESETN 0x00000002 +#define __APP_PLL_212_ENABLE 0x00000001 +#define HOST_SEM0_REG 0x00014230 +#define __HOST_SEMAPHORE 0x00000001 +#define HOST_SEM1_REG 0x00014234 +#define HOST_SEM2_REG 0x00014238 +#define HOST_SEM3_REG 0x0001423c +#define HOST_SEM0_INFO_REG 0x00014240 +#define HOST_SEM1_INFO_REG 0x00014244 +#define HOST_SEM2_INFO_REG 0x00014248 +#define HOST_SEM3_INFO_REG 0x0001424c +#define HOSTFN0_LPU0_CMD_STAT 0x00019000 +#define __HOSTFN0_LPU0_MBOX_INFO_MK 
0xfffffffe +#define __HOSTFN0_LPU0_MBOX_INFO_SH 1 +#define __HOSTFN0_LPU0_MBOX_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH) +#define __HOSTFN0_LPU0_MBOX_CMD_STATUS 0x00000001 +#define LPU0_HOSTFN0_CMD_STAT 0x00019008 +#define __LPU0_HOSTFN0_MBOX_INFO_MK 0xfffffffe +#define __LPU0_HOSTFN0_MBOX_INFO_SH 1 +#define __LPU0_HOSTFN0_MBOX_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH) +#define __LPU0_HOSTFN0_MBOX_CMD_STATUS 0x00000001 +#define HOSTFN1_LPU1_CMD_STAT 0x00019014 +#define __HOSTFN1_LPU1_MBOX_INFO_MK 0xfffffffe +#define __HOSTFN1_LPU1_MBOX_INFO_SH 1 +#define __HOSTFN1_LPU1_MBOX_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH) +#define __HOSTFN1_LPU1_MBOX_CMD_STATUS 0x00000001 +#define LPU1_HOSTFN1_CMD_STAT 0x0001901c +#define __LPU1_HOSTFN1_MBOX_INFO_MK 0xfffffffe +#define __LPU1_HOSTFN1_MBOX_INFO_SH 1 +#define __LPU1_HOSTFN1_MBOX_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH) +#define __LPU1_HOSTFN1_MBOX_CMD_STATUS 0x00000001 +#define CPE_Q0_DEPTH 0x00010014 +#define CPE_Q0_PI 0x0001001c +#define CPE_Q0_CI 0x00010020 +#define CPE_Q1_DEPTH 0x00010034 +#define CPE_Q1_PI 0x0001003c +#define CPE_Q1_CI 0x00010040 +#define CPE_Q2_DEPTH 0x00010054 +#define CPE_Q2_PI 0x0001005c +#define CPE_Q2_CI 0x00010060 +#define CPE_Q3_DEPTH 0x00010074 +#define CPE_Q3_PI 0x0001007c +#define CPE_Q3_CI 0x00010080 +#define CPE_Q4_DEPTH 0x00010094 +#define CPE_Q4_PI 0x0001009c +#define CPE_Q4_CI 0x000100a0 +#define CPE_Q5_DEPTH 0x000100b4 +#define CPE_Q5_PI 0x000100bc +#define CPE_Q5_CI 0x000100c0 +#define CPE_Q6_DEPTH 0x000100d4 +#define CPE_Q6_PI 0x000100dc +#define CPE_Q6_CI 0x000100e0 +#define CPE_Q7_DEPTH 0x000100f4 +#define CPE_Q7_PI 0x000100fc +#define CPE_Q7_CI 0x00010100 +#define RME_Q0_DEPTH 0x00011014 +#define RME_Q0_PI 0x0001101c +#define RME_Q0_CI 0x00011020 +#define RME_Q1_DEPTH 0x00011034 +#define RME_Q1_PI 0x0001103c +#define RME_Q1_CI 0x00011040 +#define RME_Q2_DEPTH 0x00011054 +#define RME_Q2_PI 0x0001105c +#define RME_Q2_CI 0x00011060 +#define RME_Q3_DEPTH 0x00011074 +#define RME_Q3_PI 0x0001107c +#define RME_Q3_CI 0x00011080 +#define RME_Q4_DEPTH 0x00011094 +#define RME_Q4_PI 0x0001109c +#define RME_Q4_CI 0x000110a0 +#define RME_Q5_DEPTH 0x000110b4 +#define RME_Q5_PI 0x000110bc +#define RME_Q5_CI 0x000110c0 +#define RME_Q6_DEPTH 0x000110d4 +#define RME_Q6_PI 0x000110dc +#define RME_Q6_CI 0x000110e0 +#define RME_Q7_DEPTH 0x000110f4 +#define RME_Q7_PI 0x000110fc +#define RME_Q7_CI 0x00011100 +#define PSS_CTL_REG 0x00018800 +#define __PSS_I2C_CLK_DIV_MK 0x00030000 +#define __PSS_I2C_CLK_DIV_SH 16 +#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) +#define __PSS_LMEM_INIT_DONE 0x00001000 +#define __PSS_LMEM_RESET 0x00000200 +#define __PSS_LMEM_INIT_EN 0x00000100 +#define __PSS_LPU1_RESET 0x00000002 +#define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 +#define __PSS_LMEM1_CORR_ERR 0x00000800 +#define __PSS_LMEM0_CORR_ERR 0x00000400 +#define __PSS_LMEM1_UNCORR_ERR 0x00000200 +#define __PSS_LMEM0_UNCORR_ERR 0x00000100 +#define __PSS_BAL_PERR 0x00000080 +#define __PSS_DIP_IF_ERR 0x00000040 +#define __PSS_IOH_IF_ERR 0x00000020 +#define __PSS_TDS_IF_ERR 0x00000010 +#define __PSS_RDS_IF_ERR 0x00000008 +#define __PSS_SGM_IF_ERR 0x00000004 +#define __PSS_LPU1_RAM_ERR 0x00000002 +#define __PSS_LPU0_RAM_ERR 0x00000001 +#define ERR_SET_REG 0x00018818 +#define __PSS_ERR_STATUS_SET 0x00000fff + + +/* + * These definitions are either in error/missing in spec. Its auto-generated + * from hard coded values in regparse.pl. 
+ */ +#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c +#define __EMPHPOST_AT_4G_SH_FIX 0x00000002 +#define __EMPHPRE_AT_4G_FIX 0x00000003 +#define __SFP_TXRATE_EN_FIX 0x00000100 +#define __SFP_RXRATE_EN_FIX 0x00000080 + + +/* + * These register definitions are auto-generated from hard coded values + * in regparse.pl. + */ +#define HOSTFN0_LPU_MBOX0_0 0x00019200 +#define HOSTFN1_LPU_MBOX0_8 0x00019260 +#define LPU_HOSTFN0_MBOX0_0 0x00019280 +#define LPU_HOSTFN1_MBOX0_8 0x000192e0 + + +/* + * These register mapping definitions are auto-generated from mapping tables + * in regparse.pl. + */ +#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG +#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG +#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG +#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG +#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG + +#define CPE_Q_DEPTH(__n) \ + (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH)) +#define CPE_Q_PI(__n) \ + (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI)) +#define CPE_Q_CI(__n) \ + (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI)) +#define RME_Q_DEPTH(__n) \ + (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH)) +#define RME_Q_PI(__n) \ + (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI)) +#define RME_Q_CI(__n) \ + (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI)) + +#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define CPE_Q_MASK(__q) ((__q) & 0x3) +#define RME_Q_MASK(__q) ((__q) & 0x3) + + +/* + * PCI MSI-X vector defines + */ +enum { + BFA_MSIX_CPE_Q0 = 0, + BFA_MSIX_CPE_Q1 = 1, + BFA_MSIX_CPE_Q2 = 2, + BFA_MSIX_CPE_Q3 = 3, + BFA_MSIX_CPE_Q4 = 4, + BFA_MSIX_CPE_Q5 = 5, + BFA_MSIX_CPE_Q6 = 6, + BFA_MSIX_CPE_Q7 = 7, + BFA_MSIX_RME_Q0 = 8, + BFA_MSIX_RME_Q1 = 9, + BFA_MSIX_RME_Q2 = 10, + BFA_MSIX_RME_Q3 = 11, + BFA_MSIX_RME_Q4 = 12, + BFA_MSIX_RME_Q5 = 13, + BFA_MSIX_RME_Q6 = 14, + BFA_MSIX_RME_Q7 = 15, + BFA_MSIX_ERR_EMC = 16, + BFA_MSIX_ERR_LPU0 = 17, + BFA_MSIX_ERR_LPU1 = 18, + BFA_MSIX_ERR_PSS = 19, + BFA_MSIX_MBOX_LPU0 = 20, + BFA_MSIX_MBOX_LPU1 = 21, + BFA_MSIX_CB_MAX = 22, +}; + +/* + * And corresponding host interrupt status bit field defines + */ +#define __HFN_INT_CPE_Q0 0x00000001U +#define __HFN_INT_CPE_Q1 0x00000002U +#define __HFN_INT_CPE_Q2 0x00000004U +#define __HFN_INT_CPE_Q3 0x00000008U +#define __HFN_INT_CPE_Q4 0x00000010U +#define __HFN_INT_CPE_Q5 0x00000020U +#define __HFN_INT_CPE_Q6 0x00000040U +#define __HFN_INT_CPE_Q7 0x00000080U +#define __HFN_INT_RME_Q0 0x00000100U +#define __HFN_INT_RME_Q1 0x00000200U +#define __HFN_INT_RME_Q2 0x00000400U +#define __HFN_INT_RME_Q3 0x00000800U +#define __HFN_INT_RME_Q4 0x00001000U +#define __HFN_INT_RME_Q5 0x00002000U +#define __HFN_INT_RME_Q6 0x00004000U +#define __HFN_INT_RME_Q7 0x00008000U +#define __HFN_INT_ERR_EMC 0x00010000U +#define __HFN_INT_ERR_LPU0 0x00020000U +#define __HFN_INT_ERR_LPU1 0x00040000U +#define __HFN_INT_ERR_PSS 0x00080000U +#define __HFN_INT_MBOX_LPU0 0x00100000U +#define __HFN_INT_MBOX_LPU1 0x00200000U +#define __HFN_INT_MBOX1_LPU0 0x00400000U +#define __HFN_INT_MBOX1_LPU1 0x00800000U +#define __HFN_INT_CPE_MASK 0x000000ffU +#define __HFN_INT_RME_MASK 0x0000ff00U + + +/* + * crossbow memory map. 
+ */ +#define PSS_SMEM_PAGE_START 0x8000 +#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +/* + * End of crossbow memory map + */ + + +#endif /* __BFI_CBREG_H__ */ + diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h new file mode 100644 index 000000000000..62b86a4b0e4b --- /dev/null +++ b/drivers/scsi/bfa/bfi_ctreg.h @@ -0,0 +1,627 @@ + +/* + * bfi_ctreg.h catapult host block register definitions + * + * !!! Do not edit. Auto generated. !!! + */ + +#ifndef __BFI_CTREG_H__ +#define __BFI_CTREG_H__ + + +#define HOSTFN0_LPU_MBOX0_0 0x00019200 +#define HOSTFN1_LPU_MBOX0_8 0x00019260 +#define LPU_HOSTFN0_MBOX0_0 0x00019280 +#define LPU_HOSTFN1_MBOX0_8 0x000192e0 +#define HOSTFN2_LPU_MBOX0_0 0x00019400 +#define HOSTFN3_LPU_MBOX0_8 0x00019460 +#define LPU_HOSTFN2_MBOX0_0 0x00019480 +#define LPU_HOSTFN3_MBOX0_8 0x000194e0 +#define HOSTFN0_INT_STATUS 0x00014000 +#define __HOSTFN0_HALT_OCCURRED 0x01000000 +#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000 +#define __HOSTFN0_INT_STATUS_LVL_SH 20 +#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH) +#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000 +#define __HOSTFN0_INT_STATUS_P_SH 16 +#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH) +#define __HOSTFN0_INT_STATUS_F 0x0000ffff +#define HOSTFN0_INT_MSK 0x00014004 +#define HOST_PAGE_NUM_FN0 0x00014008 +#define __HOST_PAGE_NUM_FN 0x000001ff +#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c +#define __MSIX_ERR_INDEX_FN 0x000001ff +#define HOSTFN1_INT_STATUS 0x00014100 +#define __HOSTFN1_HALT_OCCURRED 0x01000000 +#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000 +#define __HOSTFN1_INT_STATUS_LVL_SH 20 +#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH) +#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000 +#define __HOSTFN1_INT_STATUS_P_SH 16 +#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH) +#define __HOSTFN1_INT_STATUS_F 0x0000ffff +#define HOSTFN1_INT_MSK 0x00014104 +#define HOST_PAGE_NUM_FN1 0x00014108 +#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c +#define APP_PLL_425_CTL_REG 0x00014204 +#define __P_425_PLL_LOCK 0x80000000 +#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000 +#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_425_RESET_TIMER_SH 17 +#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH) +#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_425_CNTLMT0_1_SH 14 +#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH) +#define __APP_PLL_425_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_425_JITLMT0_1_SH 12 +#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH) +#define __APP_PLL_425_HREF 0x00000800 +#define __APP_PLL_425_HDIV 0x00000400 +#define __APP_PLL_425_P0_1_MK 0x00000300 +#define __APP_PLL_425_P0_1_SH 8 +#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH) +#define __APP_PLL_425_Z0_2_MK 0x000000e0 +#define __APP_PLL_425_Z0_2_SH 5 +#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH) +#define __APP_PLL_425_RSEL200500 0x00000010 +#define __APP_PLL_425_ENARST 0x00000008 +#define __APP_PLL_425_BYPASS 0x00000004 +#define __APP_PLL_425_LRESETN 0x00000002 +#define __APP_PLL_425_ENABLE 0x00000001 +#define APP_PLL_312_CTL_REG 0x00014208 +#define __P_312_PLL_LOCK 0x80000000 +#define __ENABLE_MAC_AHB_1 0x00800000 +#define __ENABLE_MAC_AHB_0 0x00400000 +#define __ENABLE_MAC_1 0x00200000 +#define 
__ENABLE_MAC_0 0x00100000 +#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_312_RESET_TIMER_SH 17 +#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH) +#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_312_CNTLMT0_1_SH 14 +#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH) +#define __APP_PLL_312_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_312_JITLMT0_1_SH 12 +#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH) +#define __APP_PLL_312_HREF 0x00000800 +#define __APP_PLL_312_HDIV 0x00000400 +#define __APP_PLL_312_P0_1_MK 0x00000300 +#define __APP_PLL_312_P0_1_SH 8 +#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH) +#define __APP_PLL_312_Z0_2_MK 0x000000e0 +#define __APP_PLL_312_Z0_2_SH 5 +#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH) +#define __APP_PLL_312_RSEL200500 0x00000010 +#define __APP_PLL_312_ENARST 0x00000008 +#define __APP_PLL_312_BYPASS 0x00000004 +#define __APP_PLL_312_LRESETN 0x00000002 +#define __APP_PLL_312_ENABLE 0x00000001 +#define MBIST_CTL_REG 0x00014220 +#define __EDRAM_BISTR_START 0x00000004 +#define __MBIST_RESET 0x00000002 +#define __MBIST_START 0x00000001 +#define MBIST_STAT_REG 0x00014224 +#define __EDRAM_BISTR_STATUS 0x00000008 +#define __EDRAM_BISTR_DONE 0x00000004 +#define __MEM_BIT_STATUS 0x00000002 +#define __MBIST_DONE 0x00000001 +#define HOST_SEM0_REG 0x00014230 +#define __HOST_SEMAPHORE 0x00000001 +#define HOST_SEM1_REG 0x00014234 +#define HOST_SEM2_REG 0x00014238 +#define HOST_SEM3_REG 0x0001423c +#define HOST_SEM0_INFO_REG 0x00014240 +#define HOST_SEM1_INFO_REG 0x00014244 +#define HOST_SEM2_INFO_REG 0x00014248 +#define HOST_SEM3_INFO_REG 0x0001424c +#define ETH_MAC_SER_REG 0x00014288 +#define __APP_EMS_CKBUFAMPIN 0x00000020 +#define __APP_EMS_REFCLKSEL 0x00000010 +#define __APP_EMS_CMLCKSEL 0x00000008 +#define __APP_EMS_REFCKBUFEN2 0x00000004 +#define __APP_EMS_REFCKBUFEN1 0x00000002 +#define __APP_EMS_CHANNEL_SEL 0x00000001 +#define HOSTFN2_INT_STATUS 0x00014300 +#define __HOSTFN2_HALT_OCCURRED 0x01000000 +#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000 +#define __HOSTFN2_INT_STATUS_LVL_SH 20 +#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH) +#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000 +#define __HOSTFN2_INT_STATUS_P_SH 16 +#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH) +#define __HOSTFN2_INT_STATUS_F 0x0000ffff +#define HOSTFN2_INT_MSK 0x00014304 +#define HOST_PAGE_NUM_FN2 0x00014308 +#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c +#define HOSTFN3_INT_STATUS 0x00014400 +#define __HALT_OCCURRED 0x01000000 +#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000 +#define __HOSTFN3_INT_STATUS_LVL_SH 20 +#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH) +#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000 +#define __HOSTFN3_INT_STATUS_P_SH 16 +#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH) +#define __HOSTFN3_INT_STATUS_F 0x0000ffff +#define HOSTFN3_INT_MSK 0x00014404 +#define HOST_PAGE_NUM_FN3 0x00014408 +#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c +#define FNC_ID_REG 0x00014600 +#define __FUNCTION_NUMBER 0x00000007 +#define FNC_PERS_REG 0x00014604 +#define __F3_FUNCTION_ACTIVE 0x80000000 +#define __F3_FUNCTION_MODE 0x40000000 +#define __F3_PORT_MAP_MK 0x30000000 +#define __F3_PORT_MAP_SH 28 +#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) +#define __F3_VM_MODE 0x08000000 +#define __F3_INTX_STATUS_MK 
0x07000000 +#define __F3_INTX_STATUS_SH 24 +#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH) +#define __F2_FUNCTION_ACTIVE 0x00800000 +#define __F2_FUNCTION_MODE 0x00400000 +#define __F2_PORT_MAP_MK 0x00300000 +#define __F2_PORT_MAP_SH 20 +#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) +#define __F2_VM_MODE 0x00080000 +#define __F2_INTX_STATUS_MK 0x00070000 +#define __F2_INTX_STATUS_SH 16 +#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) +#define __F1_FUNCTION_ACTIVE 0x00008000 +#define __F1_FUNCTION_MODE 0x00004000 +#define __F1_PORT_MAP_MK 0x00003000 +#define __F1_PORT_MAP_SH 12 +#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) +#define __F1_VM_MODE 0x00000800 +#define __F1_INTX_STATUS_MK 0x00000700 +#define __F1_INTX_STATUS_SH 8 +#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) +#define __F0_FUNCTION_ACTIVE 0x00000080 +#define __F0_FUNCTION_MODE 0x00000040 +#define __F0_PORT_MAP_MK 0x00000030 +#define __F0_PORT_MAP_SH 4 +#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) +#define __F0_VM_MODE 0x00000008 +#define __F0_INTX_STATUS 0x00000007 +enum { + __F0_INTX_STATUS_MSIX = 0x0, + __F0_INTX_STATUS_INTA = 0x1, + __F0_INTX_STATUS_INTB = 0x2, + __F0_INTX_STATUS_INTC = 0x3, + __F0_INTX_STATUS_INTD = 0x4, +}; +#define OP_MODE 0x0001460c +#define __APP_ETH_CLK_LOWSPEED 0x00000004 +#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 +#define __GLOBAL_FCOE_MODE 0x00000001 +#define HOST_SEM4_REG 0x00014610 +#define HOST_SEM5_REG 0x00014614 +#define HOST_SEM6_REG 0x00014618 +#define HOST_SEM7_REG 0x0001461c +#define HOST_SEM4_INFO_REG 0x00014620 +#define HOST_SEM5_INFO_REG 0x00014624 +#define HOST_SEM6_INFO_REG 0x00014628 +#define HOST_SEM7_INFO_REG 0x0001462c +#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000 +#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1 +#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH) +#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004 +#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1 +#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH) +#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001 +#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008 +#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe +#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1 +#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH) +#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001 +#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c +#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe +#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1 +#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH) +#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010 +#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1 +#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH) +#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014 +#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1 +#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH) +#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001 +#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018 +#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe +#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1 +#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH) +#define 
__LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001 +#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c +#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe +#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1 +#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH) +#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150 +#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1 +#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH) +#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154 +#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1 +#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH) +#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001 +#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158 +#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe +#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1 +#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH) +#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001 +#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c +#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe +#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1 +#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH) +#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160 +#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1 +#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH) +#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001 +#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164 +#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe +#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1 +#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH) +#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001 +#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168 +#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe +#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1 +#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH) +#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001 +#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c +#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe +#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1 +#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH) +#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001 +#define FW_INIT_HALT_P0 0x000191ac +#define __FW_INIT_HALT_P 0x00000001 +#define FW_INIT_HALT_P1 0x000191bc +#define CPE_PI_PTR_Q0 0x00038000 +#define __CPE_PI_UNUSED_MK 0xffff0000 +#define __CPE_PI_UNUSED_SH 16 +#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH) +#define __CPE_PI_PTR 0x0000ffff +#define CPE_PI_PTR_Q1 0x00038040 +#define CPE_CI_PTR_Q0 0x00038004 +#define __CPE_CI_UNUSED_MK 0xffff0000 +#define __CPE_CI_UNUSED_SH 16 +#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH) +#define __CPE_CI_PTR 0x0000ffff +#define CPE_CI_PTR_Q1 0x00038044 +#define CPE_DEPTH_Q0 0x00038008 +#define __CPE_DEPTH_UNUSED_MK 0xf8000000 +#define __CPE_DEPTH_UNUSED_SH 27 +#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH) +#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000 +#define __CPE_MSIX_VEC_INDEX_SH 16 +#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH) +#define __CPE_DEPTH 0x0000ffff +#define CPE_DEPTH_Q1 0x00038048 +#define CPE_QCTRL_Q0 0x0003800c +#define __CPE_CTRL_UNUSED30_MK 0xfc000000 +#define __CPE_CTRL_UNUSED30_SH 26 +#define __CPE_CTRL_UNUSED30(_v) ((_v) 
<< __CPE_CTRL_UNUSED30_SH) +#define __CPE_FUNC_INT_CTRL_MK 0x03000000 +#define __CPE_FUNC_INT_CTRL_SH 24 +#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH) +enum { + __CPE_FUNC_INT_CTRL_DISABLE = 0x0, + __CPE_FUNC_INT_CTRL_F2NF = 0x1, + __CPE_FUNC_INT_CTRL_3QUART = 0x2, + __CPE_FUNC_INT_CTRL_HALF = 0x3, +}; +#define __CPE_CTRL_UNUSED20_MK 0x00f00000 +#define __CPE_CTRL_UNUSED20_SH 20 +#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH) +#define __CPE_SCI_TH_MK 0x000f0000 +#define __CPE_SCI_TH_SH 16 +#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH) +#define __CPE_CTRL_UNUSED10_MK 0x0000c000 +#define __CPE_CTRL_UNUSED10_SH 14 +#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH) +#define __CPE_ACK_PENDING 0x00002000 +#define __CPE_CTRL_UNUSED40_MK 0x00001c00 +#define __CPE_CTRL_UNUSED40_SH 10 +#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH) +#define __CPE_PCIEID_MK 0x00000300 +#define __CPE_PCIEID_SH 8 +#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH) +#define __CPE_CTRL_UNUSED00_MK 0x000000fe +#define __CPE_CTRL_UNUSED00_SH 1 +#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH) +#define __CPE_ESIZE 0x00000001 +#define CPE_QCTRL_Q1 0x0003804c +#define __CPE_CTRL_UNUSED31_MK 0xfc000000 +#define __CPE_CTRL_UNUSED31_SH 26 +#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH) +#define __CPE_CTRL_UNUSED21_MK 0x00f00000 +#define __CPE_CTRL_UNUSED21_SH 20 +#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH) +#define __CPE_CTRL_UNUSED11_MK 0x0000c000 +#define __CPE_CTRL_UNUSED11_SH 14 +#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH) +#define __CPE_CTRL_UNUSED41_MK 0x00001c00 +#define __CPE_CTRL_UNUSED41_SH 10 +#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH) +#define __CPE_CTRL_UNUSED01_MK 0x000000fe +#define __CPE_CTRL_UNUSED01_SH 1 +#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH) +#define RME_PI_PTR_Q0 0x00038020 +#define __LATENCY_TIME_STAMP_MK 0xffff0000 +#define __LATENCY_TIME_STAMP_SH 16 +#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH) +#define __RME_PI_PTR 0x0000ffff +#define RME_PI_PTR_Q1 0x00038060 +#define RME_CI_PTR_Q0 0x00038024 +#define __DELAY_TIME_STAMP_MK 0xffff0000 +#define __DELAY_TIME_STAMP_SH 16 +#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH) +#define __RME_CI_PTR 0x0000ffff +#define RME_CI_PTR_Q1 0x00038064 +#define RME_DEPTH_Q0 0x00038028 +#define __RME_DEPTH_UNUSED_MK 0xf8000000 +#define __RME_DEPTH_UNUSED_SH 27 +#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH) +#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000 +#define __RME_MSIX_VEC_INDEX_SH 16 +#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH) +#define __RME_DEPTH 0x0000ffff +#define RME_DEPTH_Q1 0x00038068 +#define RME_QCTRL_Q0 0x0003802c +#define __RME_INT_LATENCY_TIMER_MK 0xff000000 +#define __RME_INT_LATENCY_TIMER_SH 24 +#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH) +#define __RME_INT_DELAY_TIMER_MK 0x00ff0000 +#define __RME_INT_DELAY_TIMER_SH 16 +#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH) +#define __RME_INT_DELAY_DISABLE 0x00008000 +#define __RME_DLY_DELAY_DISABLE 0x00004000 +#define __RME_ACK_PENDING 0x00002000 +#define __RME_FULL_INTERRUPT_DISABLE 0x00001000 +#define __RME_CTRL_UNUSED10_MK 0x00000c00 +#define __RME_CTRL_UNUSED10_SH 10 +#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH) +#define __RME_PCIEID_MK 0x00000300 +#define __RME_PCIEID_SH 8 
+#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH) +#define __RME_CTRL_UNUSED00_MK 0x000000fe +#define __RME_CTRL_UNUSED00_SH 1 +#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH) +#define __RME_ESIZE 0x00000001 +#define RME_QCTRL_Q1 0x0003806c +#define __RME_CTRL_UNUSED11_MK 0x00000c00 +#define __RME_CTRL_UNUSED11_SH 10 +#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH) +#define __RME_CTRL_UNUSED01_MK 0x000000fe +#define __RME_CTRL_UNUSED01_SH 1 +#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH) +#define PSS_CTL_REG 0x00018800 +#define __PSS_I2C_CLK_DIV_MK 0x007f0000 +#define __PSS_I2C_CLK_DIV_SH 16 +#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) +#define __PSS_LMEM_INIT_DONE 0x00001000 +#define __PSS_LMEM_RESET 0x00000200 +#define __PSS_LMEM_INIT_EN 0x00000100 +#define __PSS_LPU1_RESET 0x00000002 +#define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 +#define __PSS_LPU1_TCM_READ_ERR 0x00200000 +#define __PSS_LPU0_TCM_READ_ERR 0x00100000 +#define __PSS_LMEM5_CORR_ERR 0x00080000 +#define __PSS_LMEM4_CORR_ERR 0x00040000 +#define __PSS_LMEM3_CORR_ERR 0x00020000 +#define __PSS_LMEM2_CORR_ERR 0x00010000 +#define __PSS_LMEM1_CORR_ERR 0x00008000 +#define __PSS_LMEM0_CORR_ERR 0x00004000 +#define __PSS_LMEM5_UNCORR_ERR 0x00002000 +#define __PSS_LMEM4_UNCORR_ERR 0x00001000 +#define __PSS_LMEM3_UNCORR_ERR 0x00000800 +#define __PSS_LMEM2_UNCORR_ERR 0x00000400 +#define __PSS_LMEM1_UNCORR_ERR 0x00000200 +#define __PSS_LMEM0_UNCORR_ERR 0x00000100 +#define __PSS_BAL_PERR 0x00000080 +#define __PSS_DIP_IF_ERR 0x00000040 +#define __PSS_IOH_IF_ERR 0x00000020 +#define __PSS_TDS_IF_ERR 0x00000010 +#define __PSS_RDS_IF_ERR 0x00000008 +#define __PSS_SGM_IF_ERR 0x00000004 +#define __PSS_LPU1_RAM_ERR 0x00000002 +#define __PSS_LPU0_RAM_ERR 0x00000001 +#define ERR_SET_REG 0x00018818 +#define __PSS_ERR_STATUS_SET 0x003fffff +#define PMM_1T_RESET_REG_P0 0x0002381c +#define __PMM_1T_RESET_P 0x00000001 +#define PMM_1T_RESET_REG_P1 0x00023c1c +#define HQM_QSET0_RXQ_DRBL_P0 0x00038000 +#define __RXQ0_ADD_VECTORS_P 0x80000000 +#define __RXQ0_STOP_P 0x40000000 +#define __RXQ0_PRD_PTR_P 0x0000ffff +#define HQM_QSET1_RXQ_DRBL_P0 0x00038080 +#define __RXQ1_ADD_VECTORS_P 0x80000000 +#define __RXQ1_STOP_P 0x40000000 +#define __RXQ1_PRD_PTR_P 0x0000ffff +#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000 +#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080 +#define HQM_QSET0_TXQ_DRBL_P0 0x00038020 +#define __TXQ0_ADD_VECTORS_P 0x80000000 +#define __TXQ0_STOP_P 0x40000000 +#define __TXQ0_PRD_PTR_P 0x0000ffff +#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0 +#define __TXQ1_ADD_VECTORS_P 0x80000000 +#define __TXQ1_STOP_P 0x40000000 +#define __TXQ1_PRD_PTR_P 0x0000ffff +#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020 +#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0 +#define HQM_QSET0_IB_DRBL_1_P0 0x00038040 +#define __IB1_0_ACK_P 0x80000000 +#define __IB1_0_DISABLE_P 0x40000000 +#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000 +#define __IB1_0_COALESCING_CFG_P_SH 16 +#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH) +#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff +#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0 +#define __IB1_1_ACK_P 0x80000000 +#define __IB1_1_DISABLE_P 0x40000000 +#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000 +#define __IB1_1_COALESCING_CFG_P_SH 16 +#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH) +#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff +#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040 +#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0 
+#define HQM_QSET0_IB_DRBL_2_P0 0x00038060 +#define __IB2_0_ACK_P 0x80000000 +#define __IB2_0_DISABLE_P 0x40000000 +#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000 +#define __IB2_0_COALESCING_CFG_P_SH 16 +#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH) +#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff +#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0 +#define __IB2_1_ACK_P 0x80000000 +#define __IB2_1_DISABLE_P 0x40000000 +#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000 +#define __IB2_1_COALESCING_CFG_P_SH 16 +#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH) +#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff +#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060 +#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0 + + +/* + * These definitions are either in error/missing in spec. Its auto-generated + * from hard coded values in regparse.pl. + */ +#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c +#define __EMPHPOST_AT_4G_SH_FIX 0x00000002 +#define __EMPHPRE_AT_4G_FIX 0x00000003 +#define __SFP_TXRATE_EN_FIX 0x00000100 +#define __SFP_RXRATE_EN_FIX 0x00000080 + + +/* + * These register definitions are auto-generated from hard coded values + * in regparse.pl. + */ + + +/* + * These register mapping definitions are auto-generated from mapping tables + * in regparse.pl. + */ +#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG +#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG +#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG +#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG +#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG + +#define CPE_DEPTH_Q(__n) \ + (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) +#define CPE_QCTRL_Q(__n) \ + (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0)) +#define CPE_PI_PTR_Q(__n) \ + (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0)) +#define CPE_CI_PTR_Q(__n) \ + (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0)) +#define RME_DEPTH_Q(__n) \ + (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0)) +#define RME_QCTRL_Q(__n) \ + (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0)) +#define RME_PI_PTR_Q(__n) \ + (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) +#define RME_CI_PTR_Q(__n) \ + (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) +#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ + * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) +#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ + * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) +#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ + * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) +#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ + * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) +#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ + * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) +#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ + * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) +#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ + * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) +#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ + * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) + +#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define CPE_Q_MASK(__q) ((__q) & 0x3) +#define RME_Q_MASK(__q) ((__q) & 0x3) + + +/* + * PCI MSI-X vector defines + */ +enum { + BFA_MSIX_CPE_Q0 = 0, + BFA_MSIX_CPE_Q1 = 1, + BFA_MSIX_CPE_Q2 = 2, + BFA_MSIX_CPE_Q3 = 3, + 
BFA_MSIX_RME_Q0 = 4, + BFA_MSIX_RME_Q1 = 5, + BFA_MSIX_RME_Q2 = 6, + BFA_MSIX_RME_Q3 = 7, + BFA_MSIX_LPU_ERR = 8, + BFA_MSIX_CT_MAX = 9, +}; + +/* + * And corresponding host interrupt status bit field defines + */ +#define __HFN_INT_CPE_Q0 0x00000001U +#define __HFN_INT_CPE_Q1 0x00000002U +#define __HFN_INT_CPE_Q2 0x00000004U +#define __HFN_INT_CPE_Q3 0x00000008U +#define __HFN_INT_CPE_Q4 0x00000010U +#define __HFN_INT_CPE_Q5 0x00000020U +#define __HFN_INT_CPE_Q6 0x00000040U +#define __HFN_INT_CPE_Q7 0x00000080U +#define __HFN_INT_RME_Q0 0x00000100U +#define __HFN_INT_RME_Q1 0x00000200U +#define __HFN_INT_RME_Q2 0x00000400U +#define __HFN_INT_RME_Q3 0x00000800U +#define __HFN_INT_RME_Q4 0x00001000U +#define __HFN_INT_RME_Q5 0x00002000U +#define __HFN_INT_RME_Q6 0x00004000U +#define __HFN_INT_RME_Q7 0x00008000U +#define __HFN_INT_ERR_EMC 0x00010000U +#define __HFN_INT_ERR_LPU0 0x00020000U +#define __HFN_INT_ERR_LPU1 0x00040000U +#define __HFN_INT_ERR_PSS 0x00080000U +#define __HFN_INT_MBOX_LPU0 0x00100000U +#define __HFN_INT_MBOX_LPU1 0x00200000U +#define __HFN_INT_MBOX1_LPU0 0x00400000U +#define __HFN_INT_MBOX1_LPU1 0x00800000U +#define __HFN_INT_LL_HALT 0x01000000U +#define __HFN_INT_CPE_MASK 0x000000ffU +#define __HFN_INT_RME_MASK 0x0000ff00U + + +/* + * catapult memory map. + */ +#define LL_PGN_HQM0 0x0096 +#define LL_PGN_HQM1 0x0097 +#define PSS_SMEM_PAGE_START 0x8000 +#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +/* + * End of catapult memory map + */ + + +#endif /* __BFI_CTREG_H__ */ diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h new file mode 100644 index 000000000000..69ac85f9e938 --- /dev/null +++ b/drivers/scsi/bfa/bfi_ms.h @@ -0,0 +1,765 @@ +/* + * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __BFI_MS_H__ +#define __BFI_MS_H__ + +#include "bfi.h" +#include "bfa_fc.h" +#include "bfa_defs_svc.h" + +#pragma pack(1) + +enum bfi_iocfc_h2i_msgs { + BFI_IOCFC_H2I_CFG_REQ = 1, + BFI_IOCFC_H2I_SET_INTR_REQ = 2, + BFI_IOCFC_H2I_UPDATEQ_REQ = 3, +}; + +enum bfi_iocfc_i2h_msgs { + BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), + BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), +}; + +struct bfi_iocfc_cfg_s { + u8 num_cqs; /* Number of CQs to be used */ + u8 sense_buf_len; /* SCSI sense length */ + u16 rsvd_1; + u32 endian_sig; /* endian signature of host */ + + /** + * Request and response circular queue base addresses, size and + * shadow index pointers. 
+ */ + union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS]; + union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS]; + u16 req_cq_elems[BFI_IOC_MAX_CQS]; + union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS]; + union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS]; + u16 rsp_cq_elems[BFI_IOC_MAX_CQS]; + + union bfi_addr_u stats_addr; /* DMA-able address for stats */ + union bfi_addr_u cfgrsp_addr; /* config response dma address */ + union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */ + struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ +}; + +/** + * Boot target wwn information for this port. This contains either the stored + * or discovered boot target port wwns for the port. + */ +struct bfi_iocfc_bootwwns { + wwn_t wwn[BFA_BOOT_BOOTLUN_MAX]; + u8 nwwns; + u8 rsvd[7]; +}; + +struct bfi_iocfc_cfgrsp_s { + struct bfa_iocfc_fwcfg_s fwcfg; + struct bfa_iocfc_intr_attr_s intr_attr; + struct bfi_iocfc_bootwwns bootwwns; + struct bfi_pbc_s pbc_cfg; +}; + +/** + * BFI_IOCFC_H2I_CFG_REQ message + */ +struct bfi_iocfc_cfg_req_s { + struct bfi_mhdr_s mh; + union bfi_addr_u ioc_cfg_dma_addr; +}; + + +/** + * BFI_IOCFC_I2H_CFG_REPLY message + */ +struct bfi_iocfc_cfg_reply_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u8 cfg_success; /* cfg reply status */ + u8 lpu_bm; /* LPUs assigned for this IOC */ + u8 rsvd[2]; +}; + + +/** + * BFI_IOCFC_H2I_SET_INTR_REQ message + */ +struct bfi_iocfc_set_intr_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 coalesce; /* enable intr coalescing */ + u8 rsvd[3]; + u16 delay; /* delay timer 0..1125us */ + u16 latency; /* latency timer 0..225us */ +}; + + +/** + * BFI_IOCFC_H2I_UPDATEQ_REQ message + */ +struct bfi_iocfc_updateq_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u32 reqq_ba; /* reqq base addr */ + u32 rspq_ba; /* rspq base addr */ + u32 reqq_sci; /* reqq shadow ci */ + u32 rspq_spi; /* rspq shadow pi */ +}; + + +/** + * BFI_IOCFC_I2H_UPDATEQ_RSP message + */ +struct bfi_iocfc_updateq_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* updateq status */ + u8 rsvd[3]; +}; + + +/** + * H2I Messages + */ +union bfi_iocfc_h2i_msg_u { + struct bfi_mhdr_s mh; + struct bfi_iocfc_cfg_req_s cfg_req; + struct bfi_iocfc_updateq_req_s updateq_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + + +/** + * I2H Messages + */ +union bfi_iocfc_i2h_msg_u { + struct bfi_mhdr_s mh; + struct bfi_iocfc_cfg_reply_s cfg_reply; + struct bfi_iocfc_updateq_rsp_s updateq_rsp; + u32 mboxmsg[BFI_IOC_MSGSZ]; +}; + + +enum bfi_fcport_h2i { + BFI_FCPORT_H2I_ENABLE_REQ = (1), + BFI_FCPORT_H2I_DISABLE_REQ = (2), + BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3), + BFI_FCPORT_H2I_STATS_GET_REQ = (4), + BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5), +}; + + +enum bfi_fcport_i2h { + BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3), + BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4), + BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5), + BFI_FCPORT_I2H_EVENT = BFA_I2HM(6), + BFI_FCPORT_I2H_TRUNK_SCN = BFA_I2HM(7), + BFI_FCPORT_I2H_ENABLE_AEN = BFA_I2HM(8), + BFI_FCPORT_I2H_DISABLE_AEN = BFA_I2HM(9), +}; + + +/** + * Generic REQ type + */ +struct bfi_fcport_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u32 msgtag; /* msgtag for reply */ +}; + +/** + * Generic RSP type + */ +struct bfi_fcport_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* port enable status */ + u8 rsvd[3]; + u32 msgtag; /* msgtag for reply */ +}; + +/** + * BFI_FCPORT_H2I_ENABLE_REQ + */ +struct 
bfi_fcport_enable_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u32 rsvd1; + wwn_t nwwn; /* node wwn of physical port */ + wwn_t pwwn; /* port wwn of physical port */ + struct bfa_port_cfg_s port_cfg; /* port configuration */ + union bfi_addr_u stats_dma_addr; /* DMA address for stats */ + u32 msgtag; /* msgtag for reply */ + u32 rsvd2; +}; + +/** + * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ + */ +struct bfi_fcport_set_svc_params_req_s { + struct bfi_mhdr_s mh; /* msg header */ + u16 tx_bbcredit; /* Tx credits */ + u16 rsvd; +}; + +/** + * BFI_FCPORT_I2H_EVENT + */ +struct bfi_fcport_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + struct bfa_port_link_s link_state; +}; + +/** + * BFI_FCPORT_I2H_TRUNK_SCN + */ +struct bfi_fcport_trunk_link_s { + wwn_t trunk_wwn; + u8 fctl; /* bfa_trunk_link_fctl_t */ + u8 state; /* bfa_trunk_link_state_t */ + u8 speed; /* bfa_port_speed_t */ + u8 rsvd; + u32 deskew; +}; + +#define BFI_FCPORT_MAX_LINKS 2 +struct bfi_fcport_trunk_scn_s { + struct bfi_mhdr_s mh; + u8 trunk_state; /* bfa_trunk_state_t */ + u8 trunk_speed; /* bfa_port_speed_t */ + u8 rsvd_a[2]; + struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS]; +}; + +/** + * fcport H2I message + */ +union bfi_fcport_h2i_msg_u { + struct bfi_mhdr_s *mhdr; + struct bfi_fcport_enable_req_s *penable; + struct bfi_fcport_req_s *pdisable; + struct bfi_fcport_set_svc_params_req_s *psetsvcparams; + struct bfi_fcport_req_s *pstatsget; + struct bfi_fcport_req_s *pstatsclear; +}; + +/** + * fcport I2H message + */ +union bfi_fcport_i2h_msg_u { + struct bfi_msg_s *msg; + struct bfi_fcport_rsp_s *penable_rsp; + struct bfi_fcport_rsp_s *pdisable_rsp; + struct bfi_fcport_rsp_s *psetsvcparams_rsp; + struct bfi_fcport_rsp_s *pstatsget_rsp; + struct bfi_fcport_rsp_s *pstatsclear_rsp; + struct bfi_fcport_event_s *event; + struct bfi_fcport_trunk_scn_s *trunk_scn; +}; + +enum bfi_fcxp_h2i { + BFI_FCXP_H2I_SEND_REQ = 1, +}; + +enum bfi_fcxp_i2h { + BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1), +}; + +#define BFA_FCXP_MAX_SGES 2 + +/** + * FCXP send request structure + */ +struct bfi_fcxp_send_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 fcxp_tag; /* driver request tag */ + u16 max_frmsz; /* max send frame size */ + u16 vf_id; /* vsan tag if applicable */ + u16 rport_fw_hndl; /* FW Handle for the remote port */ + u8 class; /* FC class used for req/rsp */ + u8 rsp_timeout; /* timeout in secs, 0-no response */ + u8 cts; /* continue sequence */ + u8 lp_tag; /* lport tag */ + struct fchs_s fchs; /* request FC header structure */ + u32 req_len; /* request payload length */ + u32 rsp_maxlen; /* max response length expected */ + struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ + struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ +}; + +/** + * FCXP send response structure + */ +struct bfi_fcxp_send_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 fcxp_tag; /* send request tag */ + u8 req_status; /* request status */ + u8 rsvd; + u32 rsp_len; /* actual response length */ + u32 residue_len; /* residual response length */ + struct fchs_s fchs; /* response FC header structure */ +}; + +enum bfi_uf_h2i { + BFI_UF_H2I_BUF_POST = 1, +}; + +enum bfi_uf_i2h { + BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1), +}; + +#define BFA_UF_MAX_SGES 2 + +struct bfi_uf_buf_post_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 buf_tag; /* buffer tag */ + u16 buf_len; /* total buffer length */ + struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ +}; + +struct bfi_uf_frm_rcvd_s { + struct 
bfi_mhdr_s mh; /* Common msg header */ + u16 buf_tag; /* buffer tag */ + u16 rsvd; + u16 frm_len; /* received frame length */ + u16 xfr_len; /* tranferred length */ +}; + +enum bfi_lps_h2i_msgs { + BFI_LPS_H2I_LOGIN_REQ = 1, + BFI_LPS_H2I_LOGOUT_REQ = 2, +}; + +enum bfi_lps_i2h_msgs { + BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), + BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), + BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3), +}; + +struct bfi_lps_login_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 lp_tag; + u8 alpa; + u16 pdu_size; + wwn_t pwwn; + wwn_t nwwn; + u8 fdisc; + u8 auth_en; + u8 rsvd[2]; +}; + +struct bfi_lps_login_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 lp_tag; + u8 status; + u8 lsrjt_rsn; + u8 lsrjt_expl; + wwn_t port_name; + wwn_t node_name; + u16 bb_credit; + u8 f_port; + u8 npiv_en; + u32 lp_pid:24; + u32 auth_req:8; + mac_t lp_mac; + mac_t fcf_mac; + u8 ext_status; + u8 brcd_switch; /* attached peer is brcd switch */ +}; + +struct bfi_lps_logout_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 lp_tag; + u8 rsvd[3]; + wwn_t port_name; +}; + +struct bfi_lps_logout_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 lp_tag; + u8 status; + u8 rsvd[2]; +}; + +struct bfi_lps_cvl_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 lp_tag; + u8 rsvd[3]; +}; + +union bfi_lps_h2i_msg_u { + struct bfi_mhdr_s *msg; + struct bfi_lps_login_req_s *login_req; + struct bfi_lps_logout_req_s *logout_req; +}; + +union bfi_lps_i2h_msg_u { + struct bfi_msg_s *msg; + struct bfi_lps_login_rsp_s *login_rsp; + struct bfi_lps_logout_rsp_s *logout_rsp; + struct bfi_lps_cvl_event_s *cvl_event; +}; + +enum bfi_rport_h2i_msgs { + BFI_RPORT_H2I_CREATE_REQ = 1, + BFI_RPORT_H2I_DELETE_REQ = 2, + BFI_RPORT_H2I_SET_SPEED_REQ = 3, +}; + +enum bfi_rport_i2h_msgs { + BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), + BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), + BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), +}; + +struct bfi_rport_create_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* host rport handle */ + u16 max_frmsz; /* max rcv pdu size */ + u32 pid:24, /* remote port ID */ + lp_tag:8; /* local port tag */ + u32 local_pid:24, /* local port ID */ + cisc:8; + u8 fc_class; /* supported FC classes */ + u8 vf_en; /* virtual fabric enable */ + u16 vf_id; /* virtual fabric ID */ +}; + +struct bfi_rport_create_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 status; /* rport creation status */ + u8 rsvd[3]; + u16 bfa_handle; /* host rport handle */ + u16 fw_handle; /* firmware rport handle */ + struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */ +}; + +struct bfa_rport_speed_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* firmware rport handle */ + u8 speed; /* rport's speed via RPSC */ + u8 rsvd; +}; + +struct bfi_rport_delete_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* firmware rport handle */ + u16 rsvd; +}; + +struct bfi_rport_delete_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* host rport handle */ + u8 status; /* rport deletion status */ + u8 rsvd; +}; + +struct bfi_rport_qos_scn_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* host rport handle */ + u16 rsvd; + struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */ + struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ +}; + +union bfi_rport_h2i_msg_u { + struct bfi_msg_s *msg; + struct bfi_rport_create_req_s *create_req; + struct 
bfi_rport_delete_req_s *delete_req; + struct bfi_rport_speed_req_s *speed_req; +}; + +union bfi_rport_i2h_msg_u { + struct bfi_msg_s *msg; + struct bfi_rport_create_rsp_s *create_rsp; + struct bfi_rport_delete_rsp_s *delete_rsp; + struct bfi_rport_qos_scn_s *qos_scn_evt; +}; + +/* + * Initiator mode I-T nexus interface defines. + */ + +enum bfi_itnim_h2i { + BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */ + BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */ +}; + +enum bfi_itnim_i2h { + BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1), + BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2), + BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3), +}; + +struct bfi_itnim_create_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* f/w handle for itnim */ + u8 class; /* FC class for IO */ + u8 seq_rec; /* sequence recovery support */ + u8 msg_no; /* seq id of the msg */ +}; + +struct bfi_itnim_create_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* bfa handle for itnim */ + u8 status; /* fcp request status */ + u8 seq_id; /* seq id of the msg */ +}; + +struct bfi_itnim_delete_req_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 fw_handle; /* f/w itnim handle */ + u8 seq_id; /* seq id of the msg */ + u8 rsvd; +}; + +struct bfi_itnim_delete_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* bfa handle for itnim */ + u8 status; /* fcp request status */ + u8 seq_id; /* seq id of the msg */ +}; + +struct bfi_itnim_sler_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 bfa_handle; /* bfa handle for itnim */ + u16 rsvd; +}; + +union bfi_itnim_h2i_msg_u { + struct bfi_itnim_create_req_s *create_req; + struct bfi_itnim_delete_req_s *delete_req; + struct bfi_msg_s *msg; +}; + +union bfi_itnim_i2h_msg_u { + struct bfi_itnim_create_rsp_s *create_rsp; + struct bfi_itnim_delete_rsp_s *delete_rsp; + struct bfi_itnim_sler_event_s *sler_event; + struct bfi_msg_s *msg; +}; + +/* + * Initiator mode IO interface defines. + */ + +enum bfi_ioim_h2i { + BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */ + BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */ +}; + +enum bfi_ioim_i2h { + BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */ + BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */ +}; + +/** + * IO command DIF info + */ +struct bfi_ioim_dif_s { + u32 dif_info[4]; +}; + +/** + * FCP IO messages overview + * + * @note + * - Max CDB length supported is 64 bytes. + * - SCSI Linked commands and SCSI bi-directional Commands not + * supported. + * + */ +struct bfi_ioim_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 io_tag; /* I/O tag */ + u16 rport_hdl; /* itnim/rport firmware handle */ + struct fcp_cmnd_s cmnd; /* IO request info */ + + /** + * SG elements array within the IO request must be double word + * aligned. This aligment is required to optimize SGM setup for the IO. + */ + struct bfi_sge_s sges[BFI_SGE_INLINE_MAX]; + u8 io_timeout; + u8 dif_en; + u8 rsvd_a[2]; + struct bfi_ioim_dif_s dif; +}; + +/** + * This table shows various IO status codes from firmware and their + * meaning. Host driver can use these status codes to further process + * IO completions. + * + * BFI_IOIM_STS_OK : IO completed with error free SCSI & + * transport status. + * io-tag can be reused. + * + * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error. + * - io-tag can be reused. + * + * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to + * host request. + * - io-tag cannot be reused yet. 
+ * + * BFI_IOIM_STS_ABORTED : IO was aborted successfully + * internally by f/w. + * - io-tag cannot be reused yet. + * + * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening + * in the firmware and + * - io-tag cannot be reused yet. + * + * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO + * with sequence level error + * logic and hence host needs to retry + * this IO with a different IO tag + * - io-tag cannot be used yet. + * + * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host + * is required because 2 consecutive ABTS + * timedout and host needs logout and + * re-login with the target + * - io-tag cannot be used yet. + * + * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good, + * but the data tranferred is less than + * the fcp data length in the command. + * ex. SCSI INQUIRY where transferred + * data length and residue count in FCP + * response accounts for total fcp-dl + * - io-tag can be reused. + * + * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good, + * but the data transerred is more than + * fcp data length in the command. ex. + * TAPE IOs where blocks can of unequal + * lengths. + * - io-tag can be reused. + * + * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag + * during abort process + * - io-tag can be reused. + * + * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error. + * ex target sent more data than + * requested, or there was data frame + * loss and other reasons + * - io-tag cannot be used yet. + * + * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF + * CRC err or Ref Tag err or App tag err. + * - io-tag can be reused. + * + * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task + * Management command from the host + * - io-tag can be reused. + * + * BFI_IOIM_STS_UTAG : Firmware does not know about this + * io_tag. + * - io-tag can be reused. + */ +enum bfi_ioim_status { + BFI_IOIM_STS_OK = 0, + BFI_IOIM_STS_HOST_ABORTED = 1, + BFI_IOIM_STS_ABORTED = 2, + BFI_IOIM_STS_TIMEDOUT = 3, + BFI_IOIM_STS_RES_FREE = 4, + BFI_IOIM_STS_SQER_NEEDED = 5, + BFI_IOIM_STS_PROTO_ERR = 6, + BFI_IOIM_STS_UTAG = 7, + BFI_IOIM_STS_PATHTOV = 8, +}; + +#define BFI_IOIM_SNSLEN (256) +/** + * I/O response message + */ +struct bfi_ioim_rsp_s { + struct bfi_mhdr_s mh; /* common msg header */ + u16 io_tag; /* completed IO tag */ + u16 bfa_rport_hndl; /* releated rport handle */ + u8 io_status; /* IO completion status */ + u8 reuse_io_tag; /* IO tag can be reused */ + u16 abort_tag; /* host abort request tag */ + u8 scsi_status; /* scsi status from target */ + u8 sns_len; /* scsi sense length */ + u8 resid_flags; /* IO residue flags */ + u8 rsvd_a; + u32 residue; /* IO residual length in bytes */ + u32 rsvd_b[3]; +}; + +struct bfi_ioim_abort_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 io_tag; /* I/O tag */ + u16 abort_tag; /* unique request tag */ +}; + +/* + * Initiator mode task management command interface defines. 
+ */ + +enum bfi_tskim_h2i { + BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */ + BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */ +}; + +enum bfi_tskim_i2h { + BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1), +}; + +struct bfi_tskim_req_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 tsk_tag; /* task management tag */ + u16 itn_fhdl; /* itn firmware handle */ + lun_t lun; /* LU number */ + u8 tm_flags; /* see enum fcp_tm_cmnd */ + u8 t_secs; /* Timeout value in seconds */ + u8 rsvd[2]; +}; + +struct bfi_tskim_abortreq_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 tsk_tag; /* task management tag */ + u16 rsvd; +}; + +enum bfi_tskim_status { + /* + * Following are FCP-4 spec defined status codes, + * **DO NOT CHANGE THEM ** + */ + BFI_TSKIM_STS_OK = 0, + BFI_TSKIM_STS_NOT_SUPP = 4, + BFI_TSKIM_STS_FAILED = 5, + + /** + * Defined by BFA + */ + BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ + BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ +}; + +struct bfi_tskim_rsp_s { + struct bfi_mhdr_s mh; /* Common msg header */ + u16 tsk_tag; /* task mgmt cmnd tag */ + u8 tsk_status; /* @ref bfi_tskim_status */ + u8 rsvd; +}; + +#pragma pack() + +#endif /* __BFI_MS_H__ */ diff --git a/drivers/scsi/bfa/fab.c b/drivers/scsi/bfa/fab.c deleted file mode 100644 index 7e3a4d5d7bb4..000000000000 --- a/drivers/scsi/bfa/fab.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "lport_priv.h" - -/** - * fab.c port fab implementation. - */ - -/** - * bfa_fcs_port_fab_public port fab public functions - */ - -/** - * Called by port to initialize fabric services of the base port. - */ -void -bfa_fcs_port_fab_init(struct bfa_fcs_port_s *port) -{ - bfa_fcs_port_ns_init(port); - bfa_fcs_port_scn_init(port); - bfa_fcs_port_ms_init(port); -} - -/** - * Called by port to notify transition to online state. - */ -void -bfa_fcs_port_fab_online(struct bfa_fcs_port_s *port) -{ - bfa_fcs_port_ns_online(port); - bfa_fcs_port_scn_online(port); -} - -/** - * Called by port to notify transition to offline state. - */ -void -bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *port) -{ - bfa_fcs_port_ns_offline(port); - bfa_fcs_port_scn_offline(port); - bfa_fcs_port_ms_offline(port); -} diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c deleted file mode 100644 index ddd4ba9317e6..000000000000 --- a/drivers/scsi/bfa/fabric.c +++ /dev/null @@ -1,1323 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fabric.c Fabric module implementation. - */ - -#include "fcs_fabric.h" -#include "fcs_lport.h" -#include "fcs_vport.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "fcs_auth.h" -#include "fcs.h" -#include "fcbuild.h" -#include -#include -#include - -BFA_TRC_FILE(FCS, FABRIC); - -#define BFA_FCS_FABRIC_RETRY_DELAY (2000) /* Milliseconds */ -#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ - -#define bfa_fcs_fabric_set_opertype(__fabric) do { \ - if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ - == BFA_PPORT_TOPOLOGY_P2P) \ - (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ - else \ - (__fabric)->oper_type = BFA_PPORT_TYPE_NLPORT; \ -} while (0) - -/* - * forward declarations - */ -static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric); -static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric); -static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric); -static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric); -static void bfa_fcs_fabric_delay(void *cbarg); -static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); -static void bfa_fcs_fabric_delete_comp(void *cbarg); -static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, - struct fchs_s *fchs, u16 len); -static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, - struct fchs_s *fchs, u16 len); -static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric); -static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rspfchs); -/** - * fcs_fabric_sm fabric state machine functions - */ - -/** - * Fabric state machine events - */ -enum bfa_fcs_fabric_event { - BFA_FCS_FABRIC_SM_CREATE = 1, /* fabric create from driver */ - BFA_FCS_FABRIC_SM_DELETE = 2, /* fabric delete from driver */ - BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */ - BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */ - BFA_FCS_FABRIC_SM_CONT_OP = 5, /* continue op from flogi/auth */ - BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* continue op from flogi/auth */ - BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* no fabric from flogi/auth - */ - BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* perform EVFP from - *flogi/auth */ - BFA_FCS_FABRIC_SM_ISOLATE = 9, /* isolate from EVFP processing */ - BFA_FCS_FABRIC_SM_NO_TAGGING = 10,/* no VFT tagging from EVFP */ - BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */ - BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* authentication failed */ - BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* authentication successful - */ - BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */ - BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */ - BFA_FCS_FABRIC_SM_START = 16, /* fabric delete from driver */ -}; - -static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void 
bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event); -/** - * Beginning state before fabric creation. - */ -static void -bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_CREATE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); - bfa_fcs_fabric_init(fabric); - bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); - break; - - case BFA_FCS_FABRIC_SM_LINK_UP: - case BFA_FCS_FABRIC_SM_LINK_DOWN: - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Beginning state before fabric creation. - */ -static void -bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_START: - if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); - bfa_fcs_fabric_login(fabric); - } else - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - break; - - case BFA_FCS_FABRIC_SM_LINK_UP: - case BFA_FCS_FABRIC_SM_LINK_DOWN: - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); - bfa_fcs_modexit_comp(fabric->fcs); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Link is down, awaiting LINK UP event from port. This is also the - * first state at fabric creation. - */ -static void -bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_LINK_UP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); - bfa_fcs_fabric_login(fabric); - break; - - case BFA_FCS_FABRIC_SM_RETRY_OP: - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_fcs_fabric_delete(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * FLOGI is in progress, awaiting FLOGI reply. 
- */ -static void -bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_CONT_OP: - - bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); - fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; - - if (fabric->auth_reqd && fabric->is_auth) { - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth); - bfa_trc(fabric->fcs, event); - } else { - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); - bfa_fcs_fabric_notify_online(fabric); - } - break; - - case BFA_FCS_FABRIC_SM_RETRY_OP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry); - bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer, - bfa_fcs_fabric_delay, fabric, - BFA_FCS_FABRIC_RETRY_DELAY); - break; - - case BFA_FCS_FABRIC_SM_LOOPBACK: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); - bfa_lps_discard(fabric->lps); - bfa_fcs_fabric_set_opertype(fabric); - break; - - case BFA_FCS_FABRIC_SM_NO_FABRIC: - fabric->fab_type = BFA_FCS_FABRIC_N2N; - bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); - bfa_fcs_fabric_notify_online(fabric); - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); - break; - - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_lps_discard(fabric->lps); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_lps_discard(fabric->lps); - bfa_fcs_fabric_delete(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - - -static void -bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_DELAYED: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi); - bfa_fcs_fabric_login(fabric); - break; - - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_timer_stop(&fabric->delay_timer); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_timer_stop(&fabric->delay_timer); - bfa_fcs_fabric_delete(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Authentication is in progress, awaiting authentication results. 
- */ -static void -bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_AUTH_FAILED: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); - bfa_lps_discard(fabric->lps); - break; - - case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online); - bfa_fcs_fabric_notify_online(fabric); - break; - - case BFA_FCS_FABRIC_SM_PERF_EVFP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp); - break; - - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_lps_discard(fabric->lps); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_fcs_fabric_delete(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Authentication failed - */ -static void -bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_fcs_fabric_notify_offline(fabric); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_fcs_fabric_delete(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Port is in loopback mode. - */ -static void -bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_fcs_fabric_notify_offline(fabric); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_fcs_fabric_delete(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * There is no attached fabric - private loop or NPort-to-NPort topology. - */ -static void -bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_lps_discard(fabric->lps); - bfa_fcs_fabric_notify_offline(fabric); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_fcs_fabric_delete(fabric); - break; - - case BFA_FCS_FABRIC_SM_NO_FABRIC: - bfa_trc(fabric->fcs, fabric->bb_credit); - bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Fabric is online - normal operating state. 
- */ -static void -bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); - bfa_lps_discard(fabric->lps); - bfa_fcs_fabric_notify_offline(fabric); - break; - - case BFA_FCS_FABRIC_SM_DELETE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); - bfa_fcs_fabric_delete(fabric); - break; - - case BFA_FCS_FABRIC_SM_AUTH_FAILED: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); - bfa_lps_discard(fabric->lps); - break; - - case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * Exchanging virtual fabric parameters. - */ -static void -bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_CONT_OP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done); - break; - - case BFA_FCS_FABRIC_SM_ISOLATE: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - -/** - * EVFP exchange complete and VFT tagging is enabled. - */ -static void -bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); -} - -/** - * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). - */ -static void -bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - bfa_log(fabric->fcs->logm, BFA_LOG_FCS_FABRIC_ISOLATED, - fabric->bport.port_cfg.pwwn, fabric->fcs->port_vfid, - fabric->event_arg.swp_vfid); -} - -/** - * Fabric is being deleted, awaiting vport delete completions. - */ -static void -bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, - enum bfa_fcs_fabric_event event) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, event); - - switch (event) { - case BFA_FCS_FABRIC_SM_DELCOMP: - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); - bfa_fcs_modexit_comp(fabric->fcs); - break; - - case BFA_FCS_FABRIC_SM_LINK_UP: - break; - - case BFA_FCS_FABRIC_SM_LINK_DOWN: - bfa_fcs_fabric_notify_offline(fabric); - break; - - default: - bfa_sm_fault(fabric->fcs, event); - } -} - - - -/** - * fcs_fabric_private fabric private functions - */ - -static void -bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg; - - port_cfg->roles = BFA_PORT_ROLE_FCP_IM; - port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc); - port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); -} - -/** - * Port Symbolic Name Creation for base port. 
- */ -void -bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg; - char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; - struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; - - bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); - - /* - * Model name/number - */ - strncpy((char *)&port_cfg->sym_name, model, - BFA_FCS_PORT_SYMBNAME_MODEL_SZ); - strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); - - /* - * Driver Version - */ - strncat((char *)&port_cfg->sym_name, (char *)driver_info->version, - BFA_FCS_PORT_SYMBNAME_VERSION_SZ); - strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); - - /* - * Host machine name - */ - strncat((char *)&port_cfg->sym_name, - (char *)driver_info->host_machine_name, - BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ); - strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); - - /* - * Host OS Info : - * If OS Patch Info is not there, do not truncate any bytes from the - * OS name string and instead copy the entire OS info string (64 bytes). - */ - if (driver_info->host_os_patch[0] == '\0') { - strncat((char *)&port_cfg->sym_name, - (char *)driver_info->host_os_name, BFA_FCS_OS_STR_LEN); - strncat((char *)&port_cfg->sym_name, - BFA_FCS_PORT_SYMBNAME_SEPARATOR, - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); - } else { - strncat((char *)&port_cfg->sym_name, - (char *)driver_info->host_os_name, - BFA_FCS_PORT_SYMBNAME_OSINFO_SZ); - strncat((char *)&port_cfg->sym_name, - BFA_FCS_PORT_SYMBNAME_SEPARATOR, - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); - - /* - * Append host OS Patch Info - */ - strncat((char *)&port_cfg->sym_name, - (char *)driver_info->host_os_patch, - BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ); - } - - /* - * null terminate - */ - port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; -} - -/** - * bfa lps login completion callback - */ -void -bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) -{ - struct bfa_fcs_fabric_s *fabric = uarg; - - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_trc(fabric->fcs, status); - - switch (status) { - case BFA_STATUS_OK: - fabric->stats.flogi_accepts++; - break; - - case BFA_STATUS_INVALID_MAC: - /* - * Only for CNA - */ - fabric->stats.flogi_acc_err++; - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); - - return; - - case BFA_STATUS_EPROTOCOL: - switch (bfa_lps_get_extstatus(fabric->lps)) { - case BFA_EPROTO_BAD_ACCEPT: - fabric->stats.flogi_acc_err++; - break; - - case BFA_EPROTO_UNKNOWN_RSP: - fabric->stats.flogi_unknown_rsp++; - break; - - default: - break; - } - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); - - return; - - case BFA_STATUS_FABRIC_RJT: - fabric->stats.flogi_rejects++; - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); - return; - - default: - fabric->stats.flogi_rsp_err++; - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); - return; - } - - fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps); - bfa_trc(fabric->fcs, fabric->bb_credit); - - if (!bfa_lps_is_brcd_fabric(fabric->lps)) - fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps); - - /* - * Check port type. It should be 1 = F-port. 
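bfa_fcs_fabric_psymb_init() above concatenates the adapter model, driver version, host name and OS info into a fixed-size symbolic-name buffer and forces a terminating NUL at the end. Note that strncat()'s size argument bounds how many bytes are appended from the source, not the space left in the destination, which is why the explicit termination matters. The sketch below shows the same idea with snprintf(); the buffer size, field values and separator are made up for the example.

    /* Illustration only: bounded "model | version | host | os" symbolic name. */
    #include <stdio.h>

    #define SYMNAME_MAX 256     /* invented size for the example */

    static void
    build_psymb(char *sym, size_t len, const char *model, const char *version,
                const char *host, const char *os)
    {
        /* snprintf writes at most len bytes and NUL-terminates (len > 0) */
        snprintf(sym, len, "%s | %s | %s | %s", model, version, host, os);
    }

    int
    main(void)
    {
        char sym[SYMNAME_MAX];

        build_psymb(sym, sizeof(sym), "Brocade-425", "2.1.2.1",
                    "host01", "Linux 2.6.35");
        printf("%s\n", sym);
        return 0;
    }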
- */ - if (bfa_lps_is_fport(fabric->lps)) { - fabric->bport.pid = bfa_lps_get_pid(fabric->lps); - fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps); - fabric->is_auth = bfa_lps_is_authreq(fabric->lps); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); - } else { - /* - * Nport-2-Nport direct attached - */ - fabric->bport.port_topo.pn2n.rem_port_wwn = - bfa_lps_get_peer_pwwn(fabric->lps); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); - } - - bfa_trc(fabric->fcs, fabric->bport.pid); - bfa_trc(fabric->fcs, fabric->is_npiv); - bfa_trc(fabric->fcs, fabric->is_auth); -} - -/** - * Allocate and send FLOGI. - */ -static void -bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_s *bfa = fabric->fcs->bfa; - struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg; - u8 alpa = 0; - - if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP) - alpa = bfa_fcport_get_myalpa(bfa); - - bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), - pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); - - fabric->stats.flogi_sent++; -} - -static void -bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_fcs_vport_s *vport; - struct list_head *qe, *qen; - - bfa_trc(fabric->fcs, fabric->fabric_name); - - bfa_fcs_fabric_set_opertype(fabric); - fabric->stats.fabric_onlines++; - - /** - * notify online event to base and then virtual ports - */ - bfa_fcs_port_online(&fabric->bport); - - list_for_each_safe(qe, qen, &fabric->vport_q) { - vport = (struct bfa_fcs_vport_s *)qe; - bfa_fcs_vport_online(vport); - } -} - -static void -bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_fcs_vport_s *vport; - struct list_head *qe, *qen; - - bfa_trc(fabric->fcs, fabric->fabric_name); - fabric->stats.fabric_offlines++; - - /** - * notify offline event first to vports and then base port. - */ - list_for_each_safe(qe, qen, &fabric->vport_q) { - vport = (struct bfa_fcs_vport_s *)qe; - bfa_fcs_vport_offline(vport); - } - - bfa_fcs_port_offline(&fabric->bport); - - fabric->fabric_name = 0; - fabric->fabric_ip_addr[0] = 0; -} - -static void -bfa_fcs_fabric_delay(void *cbarg) -{ - struct bfa_fcs_fabric_s *fabric = cbarg; - - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); -} - -/** - * Delete all vports and wait for vport delete completions. - */ -static void -bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_fcs_vport_s *vport; - struct list_head *qe, *qen; - - list_for_each_safe(qe, qen, &fabric->vport_q) { - vport = (struct bfa_fcs_vport_s *)qe; - bfa_fcs_vport_fcs_delete(vport); - } - - bfa_fcs_port_delete(&fabric->bport); - bfa_wc_wait(&fabric->wc); -} - -static void -bfa_fcs_fabric_delete_comp(void *cbarg) -{ - struct bfa_fcs_fabric_s *fabric = cbarg; - - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); -} - - - -/** - * fcs_fabric_public fabric public functions - */ - -/** - * Attach time initialization - */ -void -bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) -{ - struct bfa_fcs_fabric_s *fabric; - - fabric = &fcs->fabric; - bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); - - /** - * Initialize base fabric. - */ - fabric->fcs = fcs; - INIT_LIST_HEAD(&fabric->vport_q); - INIT_LIST_HEAD(&fabric->vf_q); - fabric->lps = bfa_lps_alloc(fcs->bfa); - bfa_assert(fabric->lps); - - /** - * Initialize fabric delete completion handler. Fabric deletion is complete - * when the last vport delete is complete. 
- */ - bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric); - bfa_wc_up(&fabric->wc); /* For the base port */ - - bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); - bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); -} - -void -bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) -{ - bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); - bfa_trc(fcs, 0); -} - -/** - * Module cleanup - */ -void -bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) -{ - struct bfa_fcs_fabric_s *fabric; - - bfa_trc(fcs, 0); - - /** - * Cleanup base fabric. - */ - fabric = &fcs->fabric; - bfa_lps_delete(fabric->lps); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); -} - -/** - * Fabric module start -- kick starts FCS actions - */ -void -bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) -{ - struct bfa_fcs_fabric_s *fabric; - - bfa_trc(fcs, 0); - fabric = &fcs->fabric; - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); -} - -/** - * Suspend fabric activity as part of driver suspend. - */ -void -bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs) -{ -} - -bfa_boolean_t -bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) -{ - return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); -} - -bfa_boolean_t -bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric) -{ - return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed); -} - -enum bfa_pport_type -bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric) -{ - return fabric->oper_type; -} - -/** - * Link up notification from BFA physical port module. - */ -void -bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); -} - -/** - * Link down notification from BFA physical port module. - */ -void -bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) -{ - bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); -} - -/** - * A child vport is being created in the fabric. - * - * Call from vport module at vport creation. A list of base port and vports - * belonging to a fabric is maintained to propagate link events. - * - * param[in] fabric - Fabric instance. This can be a base fabric or vf. - * param[in] vport - Vport being created. - * - * @return None (always succeeds) - */ -void -bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, - struct bfa_fcs_vport_s *vport) -{ - /** - * - add vport to fabric's vport_q - */ - bfa_trc(fabric->fcs, fabric->vf_id); - - list_add_tail(&vport->qe, &fabric->vport_q); - fabric->num_vports++; - bfa_wc_up(&fabric->wc); -} - -/** - * A child vport is being deleted from fabric. - * - * Vport is being deleted. - */ -void -bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, - struct bfa_fcs_vport_s *vport) -{ - list_del(&vport->qe); - fabric->num_vports--; - bfa_wc_down(&fabric->wc); -} - -/** - * Base port is deleted. - */ -void -bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric) -{ - bfa_wc_down(&fabric->wc); -} - -/** - * Check if fabric is online. - * - * param[in] fabric - Fabric instance. This can be a base fabric or vf. 
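The vport bookkeeping above relies on a waiting counter: bfa_wc_init() registers a completion callback, bfa_wc_up() is called for the base port and for every vport, bfa_wc_down() is called as each delete completes, and the callback runs once the count reaches zero. A generic sketch of such a counter follows; the names and the exact init/wait semantics are illustrative, not the driver's bfa_wc implementation.

    /* Illustration only: a counter that calls back when it drops to zero. */
    #include <stdio.h>

    struct wc {
        int     count;
        void    (*done)(void *arg);
        void    *arg;
    };

    static void
    wc_init(struct wc *wc, void (*done)(void *arg), void *arg)
    {
        wc->count = 1;          /* caller's own reference, dropped in wc_wait() */
        wc->done = done;
        wc->arg = arg;
    }

    static void
    wc_up(struct wc *wc)
    {
        wc->count++;
    }

    static void
    wc_down(struct wc *wc)
    {
        if (--wc->count == 0)
            wc->done(wc->arg);
    }

    static void
    wc_wait(struct wc *wc)
    {
        wc_down(wc);            /* drop the caller's reference */
    }

    static void
    all_deleted(void *arg)
    {
        printf("fabric %s: all ports deleted\n", (const char *)arg);
    }

    int
    main(void)
    {
        struct wc wc;

        wc_init(&wc, all_deleted, "fab0");
        wc_up(&wc);             /* base port */
        wc_up(&wc);             /* one vport */

        wc_down(&wc);           /* vport delete completed */
        wc_down(&wc);           /* base port delete completed */
        wc_wait(&wc);           /* callback fires here */
        return 0;
    }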
- * - * @return TRUE/FALSE - */ -int -bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) -{ - return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); -} - - -bfa_status_t -bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs, - struct bfa_port_cfg_s *port_cfg, - struct bfad_vf_s *vf_drv) -{ - bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit); - return BFA_STATUS_OK; -} - -/** - * Lookup for a vport withing a fabric given its pwwn - */ -struct bfa_fcs_vport_s * -bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) -{ - struct bfa_fcs_vport_s *vport; - struct list_head *qe; - - list_for_each(qe, &fabric->vport_q) { - vport = (struct bfa_fcs_vport_s *)qe; - if (bfa_fcs_port_get_pwwn(&vport->lport) == pwwn) - return vport; - } - - return NULL; -} - -/** - * In a given fabric, return the number of lports. - * - * param[in] fabric - Fabric instance. This can be a base fabric or vf. - * -* @return : 1 or more. - */ -u16 -bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric) -{ - return fabric->num_vports; -} - -/* - * Get OUI of the attached switch. - * - * Note : Use of this function should be avoided as much as possible. - * This function should be used only if there is any requirement - * to check for FOS version below 6.3. - * To check if the attached fabric is a brocade fabric, use - * bfa_lps_is_brcd_fabric() which works for FOS versions 6.3 - * or above only. - */ - -u16 -bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) -{ - wwn_t fab_nwwn; - u8 *tmp; - u16 oui; - - fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps); - - tmp = (uint8_t *)&fab_nwwn; - oui = (tmp[3] << 8) | tmp[4]; - - return oui; -} - -/** - * Unsolicited frame receive handling. - */ -void -bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, - u16 len) -{ - u32 pid = fchs->d_id; - struct bfa_fcs_vport_s *vport; - struct list_head *qe; - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd; - - bfa_trc(fabric->fcs, len); - bfa_trc(fabric->fcs, pid); - - /** - * Look for our own FLOGI frames being looped back. This means an - * external loopback cable is in place. Our own FLOGI frames are - * sometimes looped back when switch port gets temporarily bypassed. - */ - if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) - && (els_cmd->els_code == FC_ELS_FLOGI) - && (flogi->port_name == bfa_fcs_port_get_pwwn(&fabric->bport))) { - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); - return; - } - - /** - * FLOGI/EVFP exchanges should be consumed by base fabric. - */ - if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { - bfa_trc(fabric->fcs, pid); - bfa_fcs_fabric_process_uf(fabric, fchs, len); - return; - } - - if (fabric->bport.pid == pid) { - /** - * All authentication frames should be routed to auth - */ - bfa_trc(fabric->fcs, els_cmd->els_code); - if (els_cmd->els_code == FC_ELS_AUTH) { - bfa_trc(fabric->fcs, els_cmd->els_code); - fabric->auth.response = (u8 *) els_cmd; - return; - } - - bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs)); - bfa_fcs_port_uf_recv(&fabric->bport, fchs, len); - return; - } - - /** - * look for a matching local port ID - */ - list_for_each(qe, &fabric->vport_q) { - vport = (struct bfa_fcs_vport_s *)qe; - if (vport->lport.pid == pid) { - bfa_fcs_port_uf_recv(&vport->lport, fchs, len); - return; - } - } - bfa_trc(fabric->fcs, els_cmd->els_code); - bfa_fcs_port_uf_recv(&fabric->bport, fchs, len); -} - -/** - * Unsolicited frames to be processed by fabric. 
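bfa_fcs_fabric_uf_recv() above demultiplexes an unsolicited frame by destination ID: looped-back FLOGI detection first, fabric-controller traffic to the base fabric, then the base port, then a scan of the vport list, with unmatched frames falling back to the base port. The sketch below illustrates only the lookup-by-PID step; the fixed-size table and the port IDs are invented stand-ins for the driver's linked list.

    /* Illustration only: route a frame by its 24-bit destination port ID. */
    #include <stdint.h>
    #include <stdio.h>

    struct lport {
        uint32_t    pid;        /* 24-bit FC port ID */
        const char  *name;
    };

    static struct lport base = { 0x010200, "base" };
    static struct lport vports[] = {
        { 0x010201, "vport1" },
        { 0x010202, "vport2" },
    };

    static struct lport *
    lookup_by_pid(uint32_t d_id)
    {
        size_t i;

        if (base.pid == d_id)
            return &base;

        for (i = 0; i < sizeof(vports) / sizeof(vports[0]); i++)
            if (vports[i].pid == d_id)
                return &vports[i];

        return &base;           /* unmatched frames fall back to the base port */
    }

    int
    main(void)
    {
        printf("0x010202 -> %s\n", lookup_by_pid(0x010202)->name);
        printf("0x0affee -> %s\n", lookup_by_pid(0x0affee)->name);
        return 0;
    }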
- */ -static void -bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, - u16 len) -{ - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - - bfa_trc(fabric->fcs, els_cmd->els_code); - - switch (els_cmd->els_code) { - case FC_ELS_FLOGI: - bfa_fcs_fabric_process_flogi(fabric, fchs, len); - break; - - default: - /* - * need to generate a LS_RJT - */ - break; - } -} - -/** - * Process incoming FLOGI - */ -static void -bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, - struct fchs_s *fchs, u16 len) -{ - struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1); - struct bfa_fcs_port_s *bport = &fabric->bport; - - bfa_trc(fabric->fcs, fchs->s_id); - - fabric->stats.flogi_rcvd++; - /* - * Check port type. It should be 0 = n-port. - */ - if (flogi->csp.port_type) { - /* - * @todo: may need to send a LS_RJT - */ - bfa_trc(fabric->fcs, flogi->port_name); - fabric->stats.flogi_rejected++; - return; - } - - fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred); - bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; - bport->port_topo.pn2n.reply_oxid = fchs->ox_id; - - /* - * Send a Flogi Acc - */ - bfa_fcs_fabric_send_flogi_acc(fabric); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); -} - -static void -bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg; - struct bfa_fcs_port_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n; - struct bfa_s *bfa = fabric->fcs->bfa; - struct bfa_fcxp_s *fcxp; - u16 reqlen; - struct fchs_s fchs; - - fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); - /** - * Do not expect this failure -- expect remote node to retry - */ - if (!fcxp) - return; - - reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_os_hton3b(FC_FABRIC_PORT), - n2n_port->reply_oxid, pcfg->pwwn, - pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), - bfa_fcport_get_rx_bbcredit(bfa)); - - bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), - BFA_FALSE, FC_CLASS_3, reqlen, &fchs, - bfa_fcs_fabric_flogiacc_comp, fabric, - FC_MAX_PDUSZ, 0); /* Timeout 0 indicates no - * response expected - */ -} - -/** - * Flogi Acc completion callback. 
- */ -static void -bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t status, u32 rsp_len, - u32 resid_len, struct fchs_s *rspfchs) -{ - struct bfa_fcs_fabric_s *fabric = cbarg; - - bfa_trc(fabric->fcs, status); -} - -/* - * - * @param[in] fabric - fabric - * @param[in] result - 1 - * - * @return - none - */ -void -bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric, enum auth_status status) -{ - bfa_trc(fabric->fcs, status); - - if (status == FC_AUTH_STATE_SUCCESS) - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_SUCCESS); - else - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_AUTH_FAILED); -} - -/** - * Send AEN notification - */ -static void -bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port, - enum bfa_port_aen_event event) -{ - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = port->fcs->logm; - wwn_t pwwn = bfa_fcs_port_get_pwwn(port); - wwn_t fwwn = bfa_fcs_port_get_fabric_name(port); - char pwwn_ptr[BFA_STRING_32]; - char fwwn_ptr[BFA_STRING_32]; - - wwn2str(pwwn_ptr, pwwn); - wwn2str(fwwn_ptr, fwwn); - - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), - pwwn_ptr, fwwn_ptr); - - aen_data.port.pwwn = pwwn; - aen_data.port.fwwn = fwwn; -} - -/* - * - * @param[in] fabric - fabric - * @param[in] wwn_t - new fabric name - * - * @return - none - */ -void -bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, - wwn_t fabric_name) -{ - bfa_trc(fabric->fcs, fabric_name); - - if (fabric->fabric_name == 0) { - /* - * With BRCD switches, we don't get Fabric Name in FLOGI. - * Don't generate a fabric name change event in this case. - */ - fabric->fabric_name = fabric_name; - } else { - fabric->fabric_name = fabric_name; - /* - * Generate a Event - */ - bfa_fcs_fabric_aen_post(&fabric->bport, - BFA_PORT_AEN_FABRIC_NAME_CHANGE); - } - -} - -/** - * - * @param[in] fabric - fabric - * @param[in] node_symname - - * Caller allocated buffer to receive the symbolic name - * - * @return - none - */ -void -bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname) -{ - bfa_os_memcpy(node_symname, - fcs->fabric.bport.port_cfg.sym_name.symname, - BFA_SYMNAME_MAXLEN); -} - -/** - * Not used by FCS. - */ -void -bfa_cb_lps_flogo_comp(void *bfad, void *uarg) -{ -} - - diff --git a/drivers/scsi/bfa/fcbuild.c b/drivers/scsi/bfa/fcbuild.c deleted file mode 100644 index fee5456451cb..000000000000 --- a/drivers/scsi/bfa/fcbuild.c +++ /dev/null @@ -1,1449 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -/* - * fcbuild.c - FC link service frame building and parsing routines - */ - -#include -#include "fcbuild.h" - -/* - * static build functions - */ -static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id); -static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id); -static struct fchs_s fc_els_req_tmpl; -static struct fchs_s fc_els_rsp_tmpl; -static struct fchs_s fc_bls_req_tmpl; -static struct fchs_s fc_bls_rsp_tmpl; -static struct fc_ba_acc_s ba_acc_tmpl; -static struct fc_logi_s plogi_tmpl; -static struct fc_prli_s prli_tmpl; -static struct fc_rrq_s rrq_tmpl; -static struct fchs_s fcp_fchs_tmpl; - -void -fcbuild_init(void) -{ - /* - * fc_els_req_tmpl - */ - fc_els_req_tmpl.routing = FC_RTG_EXT_LINK; - fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; - fc_els_req_tmpl.type = FC_TYPE_ELS; - fc_els_req_tmpl.f_ctl = - bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | - FCTL_SI_XFER); - fc_els_req_tmpl.rx_id = FC_RXID_ANY; - - /* - * fc_els_rsp_tmpl - */ - fc_els_rsp_tmpl.routing = FC_RTG_EXT_LINK; - fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; - fc_els_rsp_tmpl.type = FC_TYPE_ELS; - fc_els_rsp_tmpl.f_ctl = - bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | - FCTL_END_SEQ | FCTL_SI_XFER); - fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; - - /* - * fc_bls_req_tmpl - */ - fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; - fc_bls_req_tmpl.type = FC_TYPE_BLS; - fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); - fc_bls_req_tmpl.rx_id = FC_RXID_ANY; - - /* - * fc_bls_rsp_tmpl - */ - fc_bls_rsp_tmpl.routing = FC_RTG_BASIC_LINK; - fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; - fc_bls_rsp_tmpl.type = FC_TYPE_BLS; - fc_bls_rsp_tmpl.f_ctl = - bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | - FCTL_END_SEQ | FCTL_SI_XFER); - fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; - - /* - * ba_acc_tmpl - */ - ba_acc_tmpl.seq_id_valid = 0; - ba_acc_tmpl.low_seq_cnt = 0; - ba_acc_tmpl.high_seq_cnt = 0xFFFF; - - /* - * plogi_tmpl - */ - plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; - plogi_tmpl.csp.verlo = FC_PH_VER_4_3; - plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004); - plogi_tmpl.csp.ciro = 0x1; - plogi_tmpl.csp.cisc = 0x0; - plogi_tmpl.csp.altbbcred = 0x0; - plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF); - plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002); - plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000); - - plogi_tmpl.class3.class_valid = 1; - plogi_tmpl.class3.sequential = 1; - plogi_tmpl.class3.conseq = 0xFF; - plogi_tmpl.class3.ospx = 1; - - /* - * prli_tmpl - */ - prli_tmpl.command = FC_ELS_PRLI; - prli_tmpl.pglen = 0x10; - prli_tmpl.pagebytes = bfa_os_htons(0x0014); - prli_tmpl.parampage.type = FC_TYPE_FCP; - prli_tmpl.parampage.imagepair = 1; - prli_tmpl.parampage.servparams.rxrdisab = 1; - - /* - * rrq_tmpl - */ - rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ; - - /* - * fcp_fchs_tmpl - */ - fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA; - fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; - fcp_fchs_tmpl.type = FC_TYPE_FCP; - fcp_fchs_tmpl.f_ctl = - bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); - fcp_fchs_tmpl.seq_id = 1; - fcp_fchs_tmpl.rx_id = FC_RXID_ANY; -} - -static void -fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u32 ox_id) -{ - bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); - - fchs->routing = FC_RTG_FC4_DEV_DATA; - fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; - fchs->type = FC_TYPE_SERVICES; - fchs->f_ctl = - bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | - FCTL_SI_XFER); - fchs->rx_id = FC_RXID_ANY; - 
fchs->d_id = (d_id); - fchs->s_id = (s_id); - fchs->ox_id = bfa_os_htons(ox_id); - - /** - * @todo no need to set ox_id for request - * no need to set rx_id for response - */ -} - -void -fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id) -{ - bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); - fchs->d_id = (d_id); - fchs->s_id = (s_id); - fchs->ox_id = bfa_os_htons(ox_id); -} - -static void -fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id) -{ - bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); - fchs->d_id = d_id; - fchs->s_id = s_id; - fchs->ox_id = ox_id; -} - -enum fc_parse_status -fc_els_rsp_parse(struct fchs_s *fchs, int len) -{ - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd; - - len = len; - - switch (els_cmd->els_code) { - case FC_ELS_LS_RJT: - if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) - return FC_PARSE_BUSY; - else - return FC_PARSE_FAILURE; - - case FC_ELS_ACC: - return FC_PARSE_OK; - } - return FC_PARSE_OK; -} - -static void -fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id) -{ - bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); - fchs->d_id = d_id; - fchs->s_id = s_id; - fchs->ox_id = ox_id; -} - -static u16 -fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size, u8 els_code) -{ - struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); - - bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); - - plogi->els_cmd.els_code = els_code; - if (els_code == FC_ELS_PLOGI) - fc_els_req_build(fchs, d_id, s_id, ox_id); - else - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size); - - bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); - bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); - - return sizeof(struct fc_logi_s); -} - -u16 -fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size, u8 set_npiv, u8 set_auth, - u16 local_bb_credits) -{ - u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); - u32 *vvl_info; - - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); - - flogi->els_cmd.els_code = FC_ELS_FLOGI; - fc_els_req_build(fchs, d_id, s_id, ox_id); - - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); - flogi->port_name = port_name; - flogi->node_name = node_name; - - /* - * Set the NPIV Capability Bit ( word 1, bit 31) of Common - * Service Parameters. - */ - flogi->csp.ciro = set_npiv; - - /* set AUTH capability */ - flogi->csp.security = set_auth; - - flogi->csp.bbcred = bfa_os_htons(local_bb_credits); - - /* Set brcd token in VVL */ - vvl_info = (u32 *)&flogi->vvl[0]; - - /* set the flag to indicate the presence of VVL */ - flogi->csp.npiv_supp = 1; /* @todo. 
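fcbuild_init() and the *_build() helpers above work from pre-initialized header templates: a build is a memcpy() of the template followed by patching the destination ID, source ID and exchange ID, with multi-byte wire fields stored big-endian. The trimmed-down sketch below shows that pattern; the struct layout and field values are simplified stand-ins, not the real FC frame header.

    /* Illustration only: copy a prefilled template, then patch the addresses. */
    #include <arpa/inet.h>      /* htons */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct hdr {
        uint8_t     type;
        uint32_t    d_id;       /* kept 32-bit here; real FC IDs are 24-bit */
        uint32_t    s_id;
        uint16_t    ox_id;      /* big-endian on the wire */
    };

    static const struct hdr els_req_tmpl = {
        .type  = 0x01,          /* "ELS request" marker for this sketch */
        .ox_id = 0xffff,
    };

    static void
    els_req_build(struct hdr *h, uint32_t d_id, uint32_t s_id, uint16_t ox_id)
    {
        memcpy(h, &els_req_tmpl, sizeof(*h));   /* start from the template */
        h->d_id  = d_id;
        h->s_id  = s_id;
        h->ox_id = htons(ox_id);                /* byte-swap wire fields */
    }

    int
    main(void)
    {
        struct hdr h;

        els_req_build(&h, 0xfffffe, 0x010200, 0x1234);
        printf("type=0x%02x ox_id(stored)=0x%04x\n", h.type, h.ox_id);
        return 0;
    }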
field name is not correct */ - vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); - - return sizeof(struct fc_logi_s); -} - -u16 -fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size, u16 local_bb_credits) -{ - u32 d_id = 0; - - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - flogi->els_cmd.els_code = FC_ELS_ACC; - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); - flogi->port_name = port_name; - flogi->node_name = node_name; - - flogi->csp.bbcred = bfa_os_htons(local_bb_credits); - - return sizeof(struct fc_logi_s); -} - -u16 -fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size) -{ - u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); - - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); - - flogi->els_cmd.els_code = FC_ELS_FDISC; - fc_els_req_build(fchs, d_id, s_id, ox_id); - - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); - flogi->port_name = port_name; - flogi->node_name = node_name; - - return sizeof(struct fc_logi_s); -} - -u16 -fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size) -{ - return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, - node_name, pdu_size, FC_ELS_PLOGI); -} - -u16 -fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size) -{ - return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name, - node_name, pdu_size, FC_ELS_ACC); -} - -enum fc_parse_status -fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) -{ - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - struct fc_logi_s *plogi; - struct fc_ls_rjt_s *ls_rjt; - - switch (els_cmd->els_code) { - case FC_ELS_LS_RJT: - ls_rjt = (struct fc_ls_rjt_s *) (fchs + 1); - if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY) - return FC_PARSE_BUSY; - else - return FC_PARSE_FAILURE; - case FC_ELS_ACC: - plogi = (struct fc_logi_s *) (fchs + 1); - if (len < sizeof(struct fc_logi_s)) - return FC_PARSE_FAILURE; - - if (!wwn_is_equal(plogi->port_name, port_name)) - return FC_PARSE_FAILURE; - - if (!plogi->class3.class_valid) - return FC_PARSE_FAILURE; - - if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; - default: - return FC_PARSE_FAILURE; - } -} - -enum fc_parse_status -fc_plogi_parse(struct fchs_s *fchs) -{ - struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1); - - if (plogi->class3.class_valid != 1) - return FC_PARSE_FAILURE; - - if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) - || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) - || (plogi->class3.rxsz == 0)) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -u16 -fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, - u16 ox_id) -{ - struct fc_prli_s *prli = (struct fc_prli_s *) (pld); - - fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); - - prli->command = FC_ELS_PRLI; - prli->parampage.servparams.initiator = 1; - prli->parampage.servparams.retry = 1; - prli->parampage.servparams.rec_support = 1; - prli->parampage.servparams.task_retry_id = 0; - prli->parampage.servparams.confirm = 1; - - return sizeof(struct fc_prli_s); -} - -u16 -fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 
d_id, u32 s_id, - u16 ox_id, enum bfa_port_role role) -{ - struct fc_prli_s *prli = (struct fc_prli_s *) (pld); - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); - - prli->command = FC_ELS_ACC; - - if ((role & BFA_PORT_ROLE_FCP_TM) == BFA_PORT_ROLE_FCP_TM) - prli->parampage.servparams.target = 1; - else - prli->parampage.servparams.initiator = 1; - - prli->parampage.rspcode = FC_PRLI_ACC_XQTD; - - return sizeof(struct fc_prli_s); -} - -enum fc_parse_status -fc_prli_rsp_parse(struct fc_prli_s *prli, int len) -{ - if (len < sizeof(struct fc_prli_s)) - return FC_PARSE_FAILURE; - - if (prli->command != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - if ((prli->parampage.rspcode != FC_PRLI_ACC_XQTD) - && (prli->parampage.rspcode != FC_PRLI_ACC_PREDEF_IMG)) - return FC_PARSE_FAILURE; - - if (prli->parampage.servparams.target != 1) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -enum fc_parse_status -fc_prli_parse(struct fc_prli_s *prli) -{ - if (prli->parampage.type != FC_TYPE_FCP) - return FC_PARSE_FAILURE; - - if (!prli->parampage.imagepair) - return FC_PARSE_FAILURE; - - if (!prli->parampage.servparams.initiator) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -u16 -fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, - u32 s_id, u16 ox_id, wwn_t port_name) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - memset(logo, '\0', sizeof(struct fc_logo_s)); - logo->els_cmd.els_code = FC_ELS_LOGO; - logo->nport_id = (s_id); - logo->orig_port_name = port_name; - - return sizeof(struct fc_logo_s); -} - -static u16 -fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name, u8 els_code) -{ - memset(adisc, '\0', sizeof(struct fc_adisc_s)); - - adisc->els_cmd.els_code = els_code; - - if (els_code == FC_ELS_ADISC) - fc_els_req_build(fchs, d_id, s_id, ox_id); - else - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - adisc->orig_HA = 0; - adisc->orig_port_name = port_name; - adisc->orig_node_name = node_name; - adisc->nport_id = (s_id); - - return sizeof(struct fc_adisc_s); -} - -u16 -fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name) -{ - return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, - node_name, FC_ELS_ADISC); -} - -u16 -fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name) -{ - return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, - node_name, FC_ELS_ACC); -} - -enum fc_parse_status -fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name, - wwn_t node_name) -{ - - if (len < sizeof(struct fc_adisc_s)) - return FC_PARSE_FAILURE; - - if (adisc->els_cmd.els_code != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - if (!wwn_is_equal(adisc->orig_port_name, port_name)) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -enum fc_parse_status -fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, - wwn_t node_name, wwn_t port_name) -{ - struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld; - - if (adisc->els_cmd.els_code != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - if ((adisc->nport_id == (host_dap)) - && wwn_is_equal(adisc->orig_port_name, port_name) - && wwn_is_equal(adisc->orig_node_name, node_name)) - return FC_PARSE_OK; - - return FC_PARSE_FAILURE; -} - -enum fc_parse_status -fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t 
port_name) -{ - struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); - - if (pdisc->class3.class_valid != 1) - return FC_PARSE_FAILURE; - - if ((bfa_os_ntohs(pdisc->class3.rxsz) < - (FC_MIN_PDUSZ - sizeof(struct fchs_s))) - || (pdisc->class3.rxsz == 0)) - return FC_PARSE_FAILURE; - - if (!wwn_is_equal(pdisc->port_name, port_name)) - return FC_PARSE_FAILURE; - - if (!wwn_is_equal(pdisc->node_name, node_name)) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -u16 -fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) -{ - bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); - fchs->cat_info = FC_CAT_ABTS; - fchs->d_id = (d_id); - fchs->s_id = (s_id); - fchs->ox_id = bfa_os_htons(ox_id); - - return sizeof(struct fchs_s); -} - -enum fc_parse_status -fc_abts_rsp_parse(struct fchs_s *fchs, int len) -{ - if ((fchs->cat_info == FC_CAT_BA_ACC) - || (fchs->cat_info == FC_CAT_BA_RJT)) - return FC_PARSE_OK; - - return FC_PARSE_FAILURE; -} - -u16 -fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, - u32 s_id, u16 ox_id, u16 rrq_oxid) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - /* - * build rrq payload - */ - bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); - rrq->s_id = (s_id); - rrq->ox_id = bfa_os_htons(rrq_oxid); - rrq->rx_id = FC_RXID_ANY; - - return sizeof(struct fc_rrq_s); -} - -u16 -fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, - u16 ox_id) -{ - struct fc_els_cmd_s *acc = pld; - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - memset(acc, 0, sizeof(struct fc_els_cmd_s)); - acc->els_code = FC_ELS_ACC; - - return sizeof(struct fc_els_cmd_s); -} - -u16 -fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, - u32 s_id, u16 ox_id, u8 reason_code, - u8 reason_code_expl) -{ - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); - - ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; - ls_rjt->reason_code = reason_code; - ls_rjt->reason_code_expl = reason_code_expl; - ls_rjt->vendor_unique = 0x00; - - return sizeof(struct fc_ls_rjt_s); -} - -u16 -fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, - u32 s_id, u16 ox_id, u16 rx_id) -{ - fc_bls_rsp_build(fchs, d_id, s_id, ox_id); - - bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); - - fchs->rx_id = rx_id; - - ba_acc->ox_id = fchs->ox_id; - ba_acc->rx_id = fchs->rx_id; - - return sizeof(struct fc_ba_acc_s); -} - -u16 -fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, - u32 d_id, u32 s_id, u16 ox_id) -{ - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); - els_cmd->els_code = FC_ELS_ACC; - - return sizeof(struct fc_els_cmd_s); -} - -int -fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) -{ - int num_pages = 0; - struct fc_prlo_s *prlo; - struct fc_tprlo_s *tprlo; - - if (els_code == FC_ELS_PRLO) { - prlo = (struct fc_prlo_s *) (fc_frame + 1); - num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16; - } else { - tprlo = (struct fc_tprlo_s *) (fc_frame + 1); - num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; - } - return num_pages; -} - -u16 -fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, - u32 d_id, u32 s_id, u16 ox_id, - int num_pages) -{ - int page; - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - memset(tprlo_acc, 0, (num_pages * 16) + 4); - tprlo_acc->command = FC_ELS_ACC; - - tprlo_acc->page_len = 0x10; - tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); - - 
for (page = 0; page < num_pages; page++) { - tprlo_acc->tprlo_acc_params[page].opa_valid = 0; - tprlo_acc->tprlo_acc_params[page].rpa_valid = 0; - tprlo_acc->tprlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; - tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; - tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; - } - return bfa_os_ntohs(tprlo_acc->payload_len); -} - -u16 -fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, - u32 d_id, u32 s_id, u16 ox_id, - int num_pages) -{ - int page; - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - memset(prlo_acc, 0, (num_pages * 16) + 4); - prlo_acc->command = FC_ELS_ACC; - prlo_acc->page_len = 0x10; - prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); - - for (page = 0; page < num_pages; page++) { - prlo_acc->prlo_acc_params[page].opa_valid = 0; - prlo_acc->prlo_acc_params[page].rpa_valid = 0; - prlo_acc->prlo_acc_params[page].fc4type_csp = FC_TYPE_FCP; - prlo_acc->prlo_acc_params[page].orig_process_assc = 0; - prlo_acc->prlo_acc_params[page].resp_process_assc = 0; - } - - return bfa_os_ntohs(prlo_acc->payload_len); -} - -u16 -fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, - u32 s_id, u16 ox_id, u32 data_format) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); - - rnid->els_cmd.els_code = FC_ELS_RNID; - rnid->node_id_data_format = data_format; - - return sizeof(struct fc_rnid_cmd_s); -} - -u16 -fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, - u32 d_id, u32 s_id, u16 ox_id, - u32 data_format, - struct fc_rnid_common_id_data_s *common_id_data, - struct fc_rnid_general_topology_data_s *gen_topo_data) -{ - memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - rnid_acc->els_cmd.els_code = FC_ELS_ACC; - rnid_acc->node_id_data_format = data_format; - rnid_acc->common_id_data_length = - sizeof(struct fc_rnid_common_id_data_s); - rnid_acc->common_id_data = *common_id_data; - - if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { - rnid_acc->specific_id_data_length = - sizeof(struct fc_rnid_general_topology_data_s); - bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); - return sizeof(struct fc_rnid_acc_s); - } else { - return sizeof(struct fc_rnid_acc_s) - - sizeof(struct fc_rnid_general_topology_data_s); - } - -} - -u16 -fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, - u32 s_id, u16 ox_id) -{ - fc_els_req_build(fchs, d_id, s_id, ox_id); - - memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); - - rpsc->els_cmd.els_code = FC_ELS_RPSC; - return sizeof(struct fc_rpsc_cmd_s); -} - -u16 -fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, - u32 d_id, u32 s_id, u32 *pid_list, - u16 npids) -{ - u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id)); - int i = 0; - - fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); - - memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); - - rpsc2->els_cmd.els_code = FC_ELS_RPSC; - rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN); - rpsc2->num_pids = bfa_os_htons(npids); - for (i = 0; i < npids; i++) - rpsc2->pid_list[i].pid = pid_list[i]; - - return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * - (sizeof(u32))); -} - -u16 -fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, - u32 d_id, u32 s_id, u16 ox_id, - struct fc_rpsc_speed_info_s *oper_speed) -{ - memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); - - fc_els_rsp_build(fchs, d_id, s_id, ox_id); - - rpsc_acc->command = FC_ELS_ACC; 
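The PRLO/TPRLO builders and parsers in this area all use the same length arithmetic: a 4-byte command word followed by 16-byte parameter pages, so payload_len = num_pages * 16 + 4 and, inversely, num_pages = (payload_len - 4) / 16. A small worked example of that arithmetic:

    /* Worked example of the PRLO/TPRLO payload length arithmetic. */
    #include <stdio.h>

    static unsigned int
    payload_len(unsigned int num_pages)
    {
        return num_pages * 16 + 4;      /* 4-byte header + 16 bytes per page */
    }

    static unsigned int
    num_pages(unsigned int payload_len)
    {
        return (payload_len - 4) / 16;
    }

    int
    main(void)
    {
        unsigned int len = payload_len(3);      /* 3 pages -> 52 bytes */

        printf("3 pages -> %u bytes -> %u pages back\n", len, num_pages(len));
        return 0;
    }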
- rpsc_acc->num_entries = bfa_os_htons(1); - - rpsc_acc->speed_info[0].port_speed_cap = - bfa_os_htons(oper_speed->port_speed_cap); - - rpsc_acc->speed_info[0].port_op_speed = - bfa_os_htons(oper_speed->port_op_speed); - - return sizeof(struct fc_rpsc_acc_s); - -} - -/* - * TBD - - * . get rid of unnecessary memsets - */ - -u16 -fc_logo_rsp_parse(struct fchs_s *fchs, int len) -{ - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - - len = len; - if (els_cmd->els_code != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -u16 -fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size) -{ - struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); - - bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); - - pdisc->els_cmd.els_code = FC_ELS_PDISC; - fc_els_req_build(fchs, d_id, s_id, ox_id); - - pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size); - pdisc->port_name = port_name; - pdisc->node_name = node_name; - - return sizeof(struct fc_logi_s); -} - -u16 -fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) -{ - struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); - - if (len < sizeof(struct fc_logi_s)) - return FC_PARSE_LEN_INVAL; - - if (pdisc->els_cmd.els_code != FC_ELS_ACC) - return FC_PARSE_ACC_INVAL; - - if (!wwn_is_equal(pdisc->port_name, port_name)) - return FC_PARSE_PWWN_NOT_EQUAL; - - if (!pdisc->class3.class_valid) - return FC_PARSE_NWWN_NOT_EQUAL; - - if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) - return FC_PARSE_RXSZ_INVAL; - - return FC_PARSE_OK; -} - -u16 -fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, - int num_pages) -{ - struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1); - int page; - - fc_els_req_build(fchs, d_id, s_id, ox_id); - memset(prlo, 0, (num_pages * 16) + 4); - prlo->command = FC_ELS_PRLO; - prlo->page_len = 0x10; - prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); - - for (page = 0; page < num_pages; page++) { - prlo->prlo_params[page].type = FC_TYPE_FCP; - prlo->prlo_params[page].opa_valid = 0; - prlo->prlo_params[page].rpa_valid = 0; - prlo->prlo_params[page].orig_process_assc = 0; - prlo->prlo_params[page].resp_process_assc = 0; - } - - return bfa_os_ntohs(prlo->payload_len); -} - -u16 -fc_prlo_rsp_parse(struct fchs_s *fchs, int len) -{ - struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1); - int num_pages = 0; - int page = 0; - - len = len; - - if (prlo->command != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; - - for (page = 0; page < num_pages; page++) { - if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP) - return FC_PARSE_FAILURE; - - if (prlo->prlo_acc_params[page].opa_valid != 0) - return FC_PARSE_FAILURE; - - if (prlo->prlo_acc_params[page].rpa_valid != 0) - return FC_PARSE_FAILURE; - - if (prlo->prlo_acc_params[page].orig_process_assc != 0) - return FC_PARSE_FAILURE; - - if (prlo->prlo_acc_params[page].resp_process_assc != 0) - return FC_PARSE_FAILURE; - } - return FC_PARSE_OK; - -} - -u16 -fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, int num_pages, - enum fc_tprlo_type tprlo_type, u32 tpr_id) -{ - struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1); - int page; - - fc_els_req_build(fchs, d_id, s_id, ox_id); - memset(tprlo, 0, (num_pages * 16) + 4); - tprlo->command = FC_ELS_TPRLO; - tprlo->page_len = 0x10; - tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); - - for (page 
= 0; page < num_pages; page++) { - tprlo->tprlo_params[page].type = FC_TYPE_FCP; - tprlo->tprlo_params[page].opa_valid = 0; - tprlo->tprlo_params[page].rpa_valid = 0; - tprlo->tprlo_params[page].orig_process_assc = 0; - tprlo->tprlo_params[page].resp_process_assc = 0; - if (tprlo_type == FC_GLOBAL_LOGO) { - tprlo->tprlo_params[page].global_process_logout = 1; - } else if (tprlo_type == FC_TPR_LOGO) { - tprlo->tprlo_params[page].tpo_nport_valid = 1; - tprlo->tprlo_params[page].tpo_nport_id = (tpr_id); - } - } - - return bfa_os_ntohs(tprlo->payload_len); -} - -u16 -fc_tprlo_rsp_parse(struct fchs_s *fchs, int len) -{ - struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1); - int num_pages = 0; - int page = 0; - - len = len; - - if (tprlo->command != FC_ELS_ACC) - return FC_PARSE_ACC_INVAL; - - num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; - - for (page = 0; page < num_pages; page++) { - if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) - return FC_PARSE_NOT_FCP; - if (tprlo->tprlo_acc_params[page].opa_valid != 0) - return FC_PARSE_OPAFLAG_INVAL; - if (tprlo->tprlo_acc_params[page].rpa_valid != 0) - return FC_PARSE_RPAFLAG_INVAL; - if (tprlo->tprlo_acc_params[page].orig_process_assc != 0) - return FC_PARSE_OPA_INVAL; - if (tprlo->tprlo_acc_params[page].resp_process_assc != 0) - return FC_PARSE_RPA_INVAL; - } - return FC_PARSE_OK; -} - -enum fc_parse_status -fc_rrq_rsp_parse(struct fchs_s *fchs, int len) -{ - struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - - len = len; - if (els_cmd->els_code != FC_ELS_ACC) - return FC_PARSE_FAILURE; - - return FC_PARSE_OK; -} - -u16 -fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, u32 reason_code, - u32 reason_expl) -{ - struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); - - fc_bls_rsp_build(fchs, d_id, s_id, ox_id); - - fchs->cat_info = FC_CAT_BA_RJT; - ba_rjt->reason_code = reason_code; - ba_rjt->reason_expl = reason_expl; - return sizeof(struct fc_ba_rjt_s); -} - -static void -fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) -{ - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); - cthdr->rev_id = CT_GS3_REVISION; - cthdr->gs_type = CT_GSTYPE_DIRSERVICE; - cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); -} - -static void -fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) -{ - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); - cthdr->rev_id = CT_GS3_REVISION; - cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; - cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); -} - -static void -fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, - u8 sub_type) -{ - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); - cthdr->rev_id = CT_GS3_REVISION; - cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; - cthdr->gs_sub_type = sub_type; - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); -} - -u16 -fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - wwn_t port_name) -{ - - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_gidpn_req_s *gidpn = - (struct fcgs_gidpn_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); - - bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); - gidpn->port_name = port_name; - return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_gpnid_build(struct fchs_s *fchs, void *pyld, 
u32 s_id, u16 ox_id, - u32 port_id) -{ - - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); - - bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); - gpnid->dap = port_id; - return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); -} - -u16 -fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - u32 port_id) -{ - - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); - - bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); - gnnid->dap = port_id; - return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); -} - -u16 -fc_ct_rsp_parse(struct ct_hdr_s *cthdr) -{ - if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { - if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) - return FC_PARSE_BUSY; - else - return FC_PARSE_FAILURE; - } - - return FC_PARSE_OK; -} - -u16 -fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg, - u32 s_id, u16 ox_id) -{ - u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); - - fc_els_req_build(fchs, d_id, s_id, ox_id); - - bfa_os_memset(scr, 0, sizeof(struct fc_scr_s)); - scr->command = FC_ELS_SCR; - scr->reg_func = FC_SCR_REG_FUNC_FULL; - if (set_br_reg) - scr->vu_reg_func = FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE; - - return sizeof(struct fc_scr_s); -} - -u16 -fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id, - u16 ox_id) -{ - u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); - u16 payldlen; - - fc_els_req_build(fchs, d_id, s_id, ox_id); - rscn->command = FC_ELS_RSCN; - rscn->pagelen = sizeof(rscn->event[0]); - - payldlen = sizeof(u32) + rscn->pagelen; - rscn->payldlen = bfa_os_htons(payldlen); - - rscn->event[0].format = FC_RSCN_FORMAT_PORTID; - rscn->event[0].portid = s_id; - - return sizeof(struct fc_rscn_pl_s); -} - -u16 -fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - enum bfa_port_role roles) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rftid_req_s *rftid = - (struct fcgs_rftid_req_s *) (cthdr + 1); - u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER); - u8 index; - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); - - bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); - - rftid->dap = s_id; - - /* By default, FCP FC4 Type is registered */ - index = FC_TYPE_FCP >> 5; - type_value = 1 << (FC_TYPE_FCP % 32); - rftid->fc4_type[index] = bfa_os_htonl(type_value); - - if (roles & BFA_PORT_ROLE_FCP_IPFC) { - index = FC_TYPE_IP >> 5; - type_value = 1 << (FC_TYPE_IP % 32); - rftid->fc4_type[index] |= bfa_os_htonl(type_value); - } - - return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 ox_id, u8 *fc4_bitmap, - u32 bitmap_size) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rftid_req_s *rftid = - (struct fcgs_rftid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); - - bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); - - rftid->dap = s_id; - bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, - 
(bitmap_size < 32 ? bitmap_size : 32)); - - return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - u8 fc4_type, u8 fc4_ftrs) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rffid_req_s *rffid = - (struct fcgs_rffid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); - - bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); - - rffid->dap = s_id; - rffid->fc4ftr_bits = fc4_ftrs; - rffid->fc4_type = fc4_type; - - return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, - u8 *name) -{ - - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rspnid_req_s *rspnid = - (struct fcgs_rspnid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); - fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); - - bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); - - rspnid->dap = s_id; - rspnid->spn_len = (u8) strlen((char *)name); - strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len); - - return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u8 fc4_type) -{ - - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_gidft_req_s *gidft = - (struct fcgs_gidft_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - - fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); - - bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); - gidft->fc4_type = fc4_type; - gidft->domain_id = 0; - gidft->area_id = 0; - - return sizeof(struct fcgs_gidft_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - wwn_t port_name) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rpnid_req_s *rpnid = - (struct fcgs_rpnid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); - - bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); - rpnid->port_id = port_id; - rpnid->port_name = port_name; - - return sizeof(struct fcgs_rpnid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - wwn_t node_name) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rnnid_req_s *rnnid = - (struct fcgs_rnnid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); - - bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); - rnnid->port_id = port_id; - rnnid->node_name = node_name; - - return sizeof(struct fcgs_rnnid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - u32 cos) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rcsid_req_s *rcsid = - (struct fcgs_rcsid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); - - bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); - rcsid->port_id = port_id; - rcsid->cos = cos; - - return sizeof(struct fcgs_rcsid_req_s) + 
sizeof(struct ct_hdr_s); -} - -u16 -fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, - u8 port_type) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_rptid_req_s *rptid = - (struct fcgs_rptid_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); - - bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); - rptid->port_id = port_id; - rptid->port_type = port_type; - - return sizeof(struct fcgs_rptid_req_s) + sizeof(struct ct_hdr_s); -} - -u16 -fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - struct fcgs_ganxt_req_s *ganxt = - (struct fcgs_ganxt_req_s *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); - - bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); - ganxt->port_id = port_id; - - return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); -} - -/* - * Builds fc hdr and ct hdr for FDMI requests. - */ -u16 -fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 cmd_code) -{ - - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); - - return sizeof(struct ct_hdr_s); -} - -/* - * Given a FC4 Type, this function returns a fc4 type bitmask - */ -void -fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) -{ - u8 index; - u32 *ptr = (u32 *) bit_mask; - u32 type_value; - - /* - * @todo : Check for bitmask size - */ - - index = fc4_type >> 5; - type_value = 1 << (fc4_type % 32); - ptr[index] = bfa_os_htonl(type_value); - -} - -/* - * GMAL Request - */ -u16 -fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, - CT_GSSUBTYPE_CFGSERVER); - - bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); - gmal->wwn = wwn; - - return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); -} - -/* - * GFN (Get Fabric Name) Request - */ -u16 -fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) -{ - struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; - fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); - u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); - - fc_gs_fchdr_build(fchs, d_id, s_id, 0); - fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, - CT_GSSUBTYPE_CFGSERVER); - - bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); - gfn->wwn = wwn; - - return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); -} diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h deleted file mode 100644 index 981d98d542b9..000000000000 --- a/drivers/scsi/bfa/fcbuild.h +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -/* - * fcbuild.h - FC link service frame building and parsing routines - */ - -#ifndef __FCBUILD_H__ -#define __FCBUILD_H__ - -#include -#include -#include -#include -#include -#include - -/* - * Utility Macros/functions - */ - -#define fcif_sof_set(_ifhdr, _sof) ((_ifhdr)->sof = FC_ ## _sof) -#define fcif_eof_set(_ifhdr, _eof) ((_ifhdr)->eof = FC_ ## _eof) - -#define wwn_is_equal(_wwn1, _wwn2) \ - (memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0) - -#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) - -/* - * Given the fc response length, this routine will return - * the length of the actual payload bytes following the CT header. - * - * Assumes the input response length does not include the crc, eof, etc. - */ -static inline u32 -fc_get_ctresp_pyld_len(u32 resp_len) -{ - return resp_len - sizeof(struct ct_hdr_s); -} - -/* - * Convert bfa speed to rpsc speed value. - */ -static inline enum bfa_pport_speed -fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed) -{ - switch (speed) { - - case RPSC_OP_SPEED_1G: - return BFA_PPORT_SPEED_1GBPS; - - case RPSC_OP_SPEED_2G: - return BFA_PPORT_SPEED_2GBPS; - - case RPSC_OP_SPEED_4G: - return BFA_PPORT_SPEED_4GBPS; - - case RPSC_OP_SPEED_8G: - return BFA_PPORT_SPEED_8GBPS; - - case RPSC_OP_SPEED_10G: - return BFA_PPORT_SPEED_10GBPS; - - default: - return BFA_PPORT_SPEED_UNKNOWN; - } -} - -/* - * Convert RPSC speed to bfa speed value. 
- */ -static inline enum fc_rpsc_op_speed_s -fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed) -{ - switch (op_speed) { - - case BFA_PPORT_SPEED_1GBPS: - return RPSC_OP_SPEED_1G; - - case BFA_PPORT_SPEED_2GBPS: - return RPSC_OP_SPEED_2G; - - case BFA_PPORT_SPEED_4GBPS: - return RPSC_OP_SPEED_4G; - - case BFA_PPORT_SPEED_8GBPS: - return RPSC_OP_SPEED_8G; - - case BFA_PPORT_SPEED_10GBPS: - return RPSC_OP_SPEED_10G; - - default: - return RPSC_OP_SPEED_NOT_EST; - } -} -enum fc_parse_status { - FC_PARSE_OK = 0, - FC_PARSE_FAILURE = 1, - FC_PARSE_BUSY = 2, - FC_PARSE_LEN_INVAL, - FC_PARSE_ACC_INVAL, - FC_PARSE_PWWN_NOT_EQUAL, - FC_PARSE_NWWN_NOT_EQUAL, - FC_PARSE_RXSZ_INVAL, - FC_PARSE_NOT_FCP, - FC_PARSE_OPAFLAG_INVAL, - FC_PARSE_RPAFLAG_INVAL, - FC_PARSE_OPA_INVAL, - FC_PARSE_RPA_INVAL, - -}; - -struct fc_templates_s { - struct fchs_s fc_els_req; - struct fchs_s fc_bls_req; - struct fc_logi_s plogi; - struct fc_rrq_s rrq; -}; - -void fcbuild_init(void); - -u16 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name, u16 pdu_size, u8 set_npiv, - u8 set_auth, u16 local_bb_credits); -u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name, u16 pdu_size); -u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name, u16 pdu_size, - u16 local_bb_credits); -u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, - u32 s_id, u16 ox_id, wwn_t port_name, - wwn_t node_name, u16 pdu_size); -enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs); -u16 fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id, - u16 ox_id); -enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len); -u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id, - u32 s_id, u16 ox_id, u16 rrq_oxid); -enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len); -u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, - u16 ox_id, u8 *name); -u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id, - u16 ox_id, enum bfa_port_role role); -u16 fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 ox_id, u8 *fc4_bitmap, - u32 bitmap_size); -u16 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 ox_id, u8 fc4_type, u8 fc4_ftrs); -u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 ox_id, wwn_t port_name); -u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id, - u16 ox_id, u32 port_id); -u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, - u8 set_br_reg, u32 s_id, u16 ox_id); -u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, - u32 s_id, u16 ox_id, - wwn_t port_name, wwn_t node_name, u16 pdu_size); - -u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, - u32 d_id, u32 s_id, u16 ox_id, - wwn_t port_name, wwn_t node_name); -enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, - u32 host_dap, - wwn_t node_name, wwn_t port_name); -enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, - wwn_t port_name, wwn_t node_name); -u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, - u32 d_id, u32 s_id, u16 ox_id, - wwn_t port_name, wwn_t node_name); -u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, - u32 d_id, u32 s_id, u16 ox_id, - u8 reason_code, u8 reason_code_expl); -u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, - 
u32 d_id, u32 s_id, u16 ox_id); -u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, - u32 s_id, u16 ox_id); -enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len); - -u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, - u32 s_id, u16 ox_id, - enum bfa_port_role role); -u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, - u32 d_id, u32 s_id, u16 ox_id, - u32 data_format); -u16 fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, - u32 d_id, u32 s_id, u16 ox_id, - u32 data_format, - struct fc_rnid_common_id_data_s *common_id_data, - struct fc_rnid_general_topology_data_s * - gen_topo_data); -u16 fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c, - u32 d_id, u32 s_id, - u32 *pid_list, u16 npids); -u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, - u32 d_id, u32 s_id, u16 ox_id); -u16 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, - u32 d_id, u32 s_id, u16 ox_id, - struct fc_rpsc_speed_info_s *oper_speed); -u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, - u8 fc4_type); -u16 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, wwn_t port_name); -u16 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, wwn_t node_name); -u16 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, u32 cos); -u16 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id, u8 port_type); -u16 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u32 port_id); -u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, - u32 d_id, u32 s_id, u16 ox_id, - wwn_t port_name); -u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, - u32 s_id, u16 ox_id); -u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 cmd_code); -u16 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, - wwn_t wwn); -u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, - wwn_t wwn); -void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); -void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id); -enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len); -enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len, - wwn_t port_name); -enum fc_parse_status fc_prli_parse(struct fc_prli_s *prli); -enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, - wwn_t port_name); -u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, - u32 d_id, u32 s_id, u16 ox_id, - u16 rx_id); -int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); -u16 fc_tprlo_acc_build(struct fchs_s *fchs, - struct fc_tprlo_acc_s *tprlo_acc, - u32 d_id, u32 s_id, u16 ox_id, - int num_pages); -u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, - u32 d_id, u32 s_id, u16 ox_id, - int num_pages); -u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len); -u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, wwn_t port_name, wwn_t node_name, - u16 pdu_size); -u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name); -u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, int num_pages); -u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len); -u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, - u16 ox_id, int num_pages, - enum fc_tprlo_type tprlo_type, u32 tpr_id); -u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len); -u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 
d_id, u32 s_id, - u16 ox_id, u32 reason_code, - u32 reason_expl); -u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, - u16 ox_id, u32 port_id); -u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr); -u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, - u32 s_id, u16 ox_id); -#endif diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c deleted file mode 100644 index 6b8976ad22fa..000000000000 --- a/drivers/scsi/bfa/fcpim.c +++ /dev/null @@ -1,824 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcpim.c - FCP initiator mode i-t nexus state machine - */ - -#include -#include -#include "fcs_fcpim.h" -#include "fcs_rport.h" -#include "fcs_lport.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "fcs.h" -#include -#include -#include - -BFA_TRC_FILE(FCS, FCPIM); - -/* - * forward declarations - */ -static void bfa_fcs_itnim_timeout(void *arg); -static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); -static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_itnim_prli_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, - enum bfa_itnim_aen_event event); - -/** - * fcs_itnim_sm FCS itnim state machine events - */ - -enum bfa_fcs_itnim_event { - BFA_FCS_ITNIM_SM_ONLINE = 1, /* rport online event */ - BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */ - BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */ - BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */ - BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */ - BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */ - BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */ - BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */ - BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */ - BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */ - BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */ -}; - -static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); -static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event); - -static struct bfa_sm_table_s itnim_sm_table[] = { 
- {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE}, - {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND}, - {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT}, - {BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY}, - {BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE}, - {BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE}, - {BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE}, - {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, -}; - -/** - * fcs_itnim_sm FCS itnim state machine - */ - -static void -bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_ONLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); - itnim->prli_retries = 0; - bfa_fcs_itnim_send_prli(itnim, NULL); - break; - - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_INITIATOR: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } - -} - -static void -bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_FRMSENT: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli); - break; - - case BFA_FCS_ITNIM_SM_INITIATOR: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); - bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); - break; - - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - -static void -bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_RSP_OK: - if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) { - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); - } else { - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); - bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); - } - break; - - case BFA_FCS_ITNIM_SM_RSP_ERROR: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry); - bfa_timer_start(itnim->fcs->bfa, &itnim->timer, - bfa_fcs_itnim_timeout, itnim, - BFA_FCS_RETRY_TIMEOUT); - break; - - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcxp_discard(itnim->fcxp); - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_INITIATOR: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); - bfa_fcxp_discard(itnim->fcxp); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcxp_discard(itnim->fcxp); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - -static void -bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case 
BFA_FCS_ITNIM_SM_TIMEOUT: - if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) { - itnim->prli_retries++; - bfa_trc(itnim->fcs, itnim->prli_retries); - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); - bfa_fcs_itnim_send_prli(itnim, NULL); - } else { - /* invoke target offline */ - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_rport_logo_imp(itnim->rport); - } - break; - - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_timer_stop(&itnim->timer); - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_INITIATOR: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); - bfa_timer_stop(&itnim->timer); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_timer_stop(&itnim->timer); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - -static void -bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_HCB_ONLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); - bfa_fcb_itnim_online(itnim->itnim_drv); - bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); - break; - - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_itnim_offline(itnim->bfa_itnim); - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - -static void -bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); - bfa_fcb_itnim_offline(itnim->itnim_drv); - bfa_itnim_offline(itnim->bfa_itnim); - if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE) - bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); - else - bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - -static void -bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_HCB_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - -/* - * This state is set when a discovered rport is also in intiator mode. - * This ITN is marked as no_op and is not active and will not be truned into - * online state. 
- */ -static void -bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, - enum bfa_fcs_itnim_event event) -{ - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_trc(itnim->fcs, event); - - switch (event) { - case BFA_FCS_ITNIM_SM_OFFLINE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_rport_itnim_ack(itnim->rport); - break; - - case BFA_FCS_ITNIM_SM_RSP_ERROR: - case BFA_FCS_ITNIM_SM_ONLINE: - case BFA_FCS_ITNIM_SM_INITIATOR: - break; - - case BFA_FCS_ITNIM_SM_DELETE: - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - bfa_fcs_itnim_free(itnim); - break; - - default: - bfa_sm_fault(itnim->fcs, event); - } -} - - - -/** - * itnim_private FCS ITNIM private interfaces - */ - -static void -bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, - enum bfa_itnim_aen_event event) -{ - struct bfa_fcs_rport_s *rport = itnim->rport; - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = rport->fcs->logm; - wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port); - wwn_t rpwwn = rport->pwwn; - char lpwwn_ptr[BFA_STRING_32]; - char rpwwn_ptr[BFA_STRING_32]; - - /* - * Don't post events for well known addresses - */ - if (BFA_FCS_PID_IS_WKA(rport->pid)) - return; - - wwn2str(lpwwn_ptr, lpwwn); - wwn2str(rpwwn_ptr, rpwwn); - - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event), - rpwwn_ptr, lpwwn_ptr); - - aen_data.itnim.vf_id = rport->port->fabric->vf_id; - aen_data.itnim.ppwwn = - bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(itnim->fcs)); - aen_data.itnim.lpwwn = lpwwn; - aen_data.itnim.rpwwn = rpwwn; -} - -static void -bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_itnim_s *itnim = itnim_cbarg; - struct bfa_fcs_rport_s *rport = itnim->rport; - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int len; - - bfa_trc(itnim->fcs, itnim->rport->pwwn); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - itnim->stats.fcxp_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, - bfa_fcs_itnim_send_prli, itnim); - return; - } - itnim->fcxp = fcxp; - - len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid, - bfa_fcs_port_get_fcid(port), 0); - - bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, - BFA_FALSE, FC_CLASS_3, len, &fchs, - bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, - FC_ELS_TOV); - - itnim->stats.prli_sent++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); -} - -static void -bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg; - struct fc_els_cmd_s *els_cmd; - struct fc_prli_s *prli_resp; - struct fc_ls_rjt_s *ls_rjt; - struct fc_prli_params_s *sparams; - - bfa_trc(itnim->fcs, req_status); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - itnim->stats.prli_rsp_err++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); - return; - } - - els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); - - if (els_cmd->els_code == FC_ELS_ACC) { - prli_resp = (struct fc_prli_s *) els_cmd; - - if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) { - bfa_trc(itnim->fcs, rsp_len); - /* - * Check if this r-port is also in Initiator mode. - * If so, we need to set this ITN as a no-op. 
- */ - if (prli_resp->parampage.servparams.initiator) { - bfa_trc(itnim->fcs, prli_resp->parampage.type); - itnim->rport->scsi_function = - BFA_RPORT_INITIATOR; - itnim->stats.prli_rsp_acc++; - bfa_sm_send_event(itnim, - BFA_FCS_ITNIM_SM_RSP_OK); - return; - } - - itnim->stats.prli_rsp_parse_err++; - return; - } - itnim->rport->scsi_function = BFA_RPORT_TARGET; - - sparams = &prli_resp->parampage.servparams; - itnim->seq_rec = sparams->retry; - itnim->rec_support = sparams->rec_support; - itnim->task_retry_id = sparams->task_retry_id; - itnim->conf_comp = sparams->confirm; - - itnim->stats.prli_rsp_acc++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); - } else { - ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); - - bfa_trc(itnim->fcs, ls_rjt->reason_code); - bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); - - itnim->stats.prli_rsp_rjt++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); - } -} - -static void -bfa_fcs_itnim_timeout(void *arg) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg; - - itnim->stats.timeout++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); -} - -static void -bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) -{ - bfa_itnim_delete(itnim->bfa_itnim); - bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv); -} - - - -/** - * itnim_public FCS ITNIM public interfaces - */ - -/** - * Called by rport when a new rport is created. - * - * @param[in] rport - remote port. - */ -struct bfa_fcs_itnim_s * -bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_port_s *port = rport->port; - struct bfa_fcs_itnim_s *itnim; - struct bfad_itnim_s *itnim_drv; - struct bfa_itnim_s *bfa_itnim; - - /* - * call bfad to allocate the itnim - */ - bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); - if (itnim == NULL) { - bfa_trc(port->fcs, rport->pwwn); - return NULL; - } - - /* - * Initialize itnim - */ - itnim->rport = rport; - itnim->fcs = rport->fcs; - itnim->itnim_drv = itnim_drv; - - /* - * call BFA to create the itnim - */ - bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim); - - if (bfa_itnim == NULL) { - bfa_trc(port->fcs, rport->pwwn); - bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv); - bfa_assert(0); - return NULL; - } - - itnim->bfa_itnim = bfa_itnim; - itnim->seq_rec = BFA_FALSE; - itnim->rec_support = BFA_FALSE; - itnim->conf_comp = BFA_FALSE; - itnim->task_retry_id = BFA_FALSE; - - /* - * Set State machine - */ - bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); - - return itnim; -} - -/** - * Called by rport to delete the instance of FCPIM. - * - * @param[in] rport - remote port. - */ -void -bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) -{ - bfa_trc(itnim->fcs, itnim->rport->pid); - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); -} - -/** - * Notification from rport that PLOGI is complete to initiate FC-4 session. - */ -void -bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim) -{ - itnim->stats.onlines++; - - if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) { - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE); - } else { - /* - * For well known addresses, we set the itnim to initiator - * state - */ - itnim->stats.initiator++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); - } -} - -/** - * Called by rport to handle a remote device offline. 
- */ -void -bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) -{ - itnim->stats.offlines++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); -} - -/** - * Called by rport when remote port is known to be an initiator from - * PRLI received. - */ -void -bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) -{ - bfa_trc(itnim->fcs, itnim->rport->pid); - itnim->stats.initiator++; - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); -} - -/** - * Called by rport to check if the itnim is online. - */ -bfa_status_t -bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) -{ - bfa_trc(itnim->fcs, itnim->rport->pid); - switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { - case BFA_ITNIM_ONLINE: - case BFA_ITNIM_INITIATIOR: - return BFA_STATUS_OK; - - default: - return BFA_STATUS_NO_FCPIM_NEXUS; - - } -} - -/** - * BFA completion callback for bfa_itnim_online(). - */ -void -bfa_cb_itnim_online(void *cbarg) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg; - - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); -} - -/** - * BFA completion callback for bfa_itnim_offline(). - */ -void -bfa_cb_itnim_offline(void *cb_arg) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; - - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); -} - -/** - * Mark the beginning of PATH TOV handling. IO completion callbacks - * are still pending. - */ -void -bfa_cb_itnim_tov_begin(void *cb_arg) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; - - bfa_trc(itnim->fcs, itnim->rport->pwwn); -} - -/** - * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. - */ -void -bfa_cb_itnim_tov(void *cb_arg) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; - - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_fcb_itnim_tov(itnim->itnim_drv); -} - -/** - * BFA notification to FCS/driver for second level error recovery. - * - * Atleast one I/O request has timedout and target is unresponsive to - * repeated abort requests. Second level error recovery should be initiated - * by starting implicit logout and recovery procedures. 
- */ -void -bfa_cb_itnim_sler(void *cb_arg) -{ - struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; - - itnim->stats.sler++; - bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_fcs_rport_logo_imp(itnim->rport); -} - -struct bfa_fcs_itnim_s * -bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn) -{ - struct bfa_fcs_rport_s *rport; - rport = bfa_fcs_rport_lookup(port, rpwwn); - - if (!rport) - return NULL; - - bfa_assert(rport->itnim != NULL); - return rport->itnim; -} - -bfa_status_t -bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, - struct bfa_itnim_attr_s *attr) -{ - struct bfa_fcs_itnim_s *itnim = NULL; - - itnim = bfa_fcs_itnim_lookup(port, rpwwn); - - if (itnim == NULL) - return BFA_STATUS_NO_FCPIM_NEXUS; - - attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); - attr->retry = itnim->seq_rec; - attr->rec_support = itnim->rec_support; - attr->conf_comp = itnim->conf_comp; - attr->task_retry_id = itnim->task_retry_id; - bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s)); - - return BFA_STATUS_OK; -} - -bfa_status_t -bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, - struct bfa_itnim_stats_s *stats) -{ - struct bfa_fcs_itnim_s *itnim = NULL; - - bfa_assert(port != NULL); - - itnim = bfa_fcs_itnim_lookup(port, rpwwn); - - if (itnim == NULL) - return BFA_STATUS_NO_FCPIM_NEXUS; - - bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); - - return BFA_STATUS_OK; -} - -bfa_status_t -bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn) -{ - struct bfa_fcs_itnim_s *itnim = NULL; - - bfa_assert(port != NULL); - - itnim = bfa_fcs_itnim_lookup(port, rpwwn); - - if (itnim == NULL) - return BFA_STATUS_NO_FCPIM_NEXUS; - - bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); - return BFA_STATUS_OK; -} - -void -bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, - u16 len) -{ - struct fc_els_cmd_s *els_cmd; - - bfa_trc(itnim->fcs, fchs->type); - - if (fchs->type != FC_TYPE_ELS) - return; - - els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - - bfa_trc(itnim->fcs, els_cmd->els_code); - - switch (els_cmd->els_code) { - case FC_ELS_PRLO: - bfa_fcs_rport_prlo(itnim->rport, fchs->ox_id); - break; - - default: - bfa_assert(0); - } -} - -void -bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim) -{ -} - -void -bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim) -{ -} diff --git a/drivers/scsi/bfa/fcptm.c b/drivers/scsi/bfa/fcptm.c deleted file mode 100644 index 8c8b08c72e7a..000000000000 --- a/drivers/scsi/bfa/fcptm.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * This file contains dummy FCPTM routines to aid in Initiator Mode only - * compilation of OS driver. 
- * - */ - -#include "bfa_os_inc.h" -#include "fcs_rport.h" -#include "fcs_fcptm.h" -#include "fcs/bfa_fcs_rport.h" - -struct bfa_fcs_tin_s * -bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport) -{ - return NULL; -} - -void -bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin) -{ -} - -void -bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin) -{ -} - -void -bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin) -{ -} - -void -bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len) -{ -} - -void -bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, u16 len) -{ -} - -void -bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin) -{ -} - -void -bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin) -{ -} diff --git a/drivers/scsi/bfa/fcs.h b/drivers/scsi/bfa/fcs.h deleted file mode 100644 index 8d08230e6295..000000000000 --- a/drivers/scsi/bfa/fcs.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs.h FCS module functions - */ - - -#ifndef __FCS_H__ -#define __FCS_H__ - -#define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) - -void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs); - -#endif /* __FCS_H__ */ diff --git a/drivers/scsi/bfa/fcs_auth.h b/drivers/scsi/bfa/fcs_auth.h deleted file mode 100644 index 65d155fea3d7..000000000000 --- a/drivers/scsi/bfa/fcs_auth.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_uf.h FCS unsolicited frame receive - */ - - -#ifndef __FCS_AUTH_H__ -#define __FCS_AUTH_H__ - -#include -#include -#include - -/* - * fcs friend functions: only between fcs modules - */ -void bfa_fcs_auth_uf_recv(struct bfa_fcs_fabric_s *fabric, int len); -void bfa_fcs_auth_start(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_auth_stop(struct bfa_fcs_fabric_s *fabric); - -#endif /* __FCS_UF_H__ */ diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h deleted file mode 100644 index 432ab8ab8c3c..000000000000 --- a/drivers/scsi/bfa/fcs_fabric.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_lport.h FCS logical port interfaces - */ - -#ifndef __FCS_FABRIC_H__ -#define __FCS_FABRIC_H__ - -#include -#include -#include - -#define BFA_FCS_BRCD_SWITCH_OUI 0x051e - -/* -* fcs friend functions: only between fcs modules - */ -void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); -void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); -void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs); -void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, - struct bfa_fcs_vport_s *vport); -void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, - struct bfa_fcs_vport_s *vport); -int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric); -struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup( - struct bfa_fcs_fabric_s *fabric, wwn_t pwwn); -void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs); -void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, - struct fchs_s *fchs, u16 len); -u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); -bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric); -bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric); -enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric); - -bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, - struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg, - struct bfad_vf_s *vf_drv); -void bfa_fcs_auth_finished(struct bfa_fcs_fabric_s *fabric, - enum auth_status status); - -void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, - wwn_t fabric_name); -u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); -void bfa_fcs_get_sym_name(const struct bfa_fcs_s *fcs, char *node_symname); - -#endif /* __FCS_FABRIC_H__ */ diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h deleted file mode 100644 index 11e6e7bce9f6..000000000000 --- a/drivers/scsi/bfa/fcs_fcpim.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __FCS_FCPIM_H__ -#define __FCS_FCPIM_H__ - -#include -#include -#include - -/* - * Following routines are from FCPIM and will be called by rport. 
- */ -struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport); -void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim); -void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim); -void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim); -bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim); - -void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim); -void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim); -void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim); - -void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, - u16 len); -#endif /* __FCS_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/fcs_fcptm.h b/drivers/scsi/bfa/fcs_fcptm.h deleted file mode 100644 index ffff0829fd31..000000000000 --- a/drivers/scsi/bfa/fcs_fcptm.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __FCS_FCPTM_H__ -#define __FCS_FCPTM_H__ - -#include -#include -#include - -/* - * Following routines are from FCPTM and will be called by rport. - */ -struct bfa_fcs_tin_s *bfa_fcs_tin_create(struct bfa_fcs_rport_s *rport); -void bfa_fcs_tin_rport_offline(struct bfa_fcs_tin_s *tin); -void bfa_fcs_tin_rport_online(struct bfa_fcs_tin_s *tin); -void bfa_fcs_tin_delete(struct bfa_fcs_tin_s *tin); -void bfa_fcs_tin_rx_prli(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, - u16 len); -void bfa_fcs_tin_pause(struct bfa_fcs_tin_s *tin); -void bfa_fcs_tin_resume(struct bfa_fcs_tin_s *tin); - -/* - * Modudle init/cleanup routines. - */ -void bfa_fcs_fcptm_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_fcptm_modexit(struct bfa_fcs_s *fcs); -void bfa_fcs_fcptm_uf_recv(struct bfa_fcs_tin_s *tin, struct fchs_s *fchs, - u16 len); - -#endif /* __FCS_FCPTM_H__ */ diff --git a/drivers/scsi/bfa/fcs_fcxp.h b/drivers/scsi/bfa/fcs_fcxp.h deleted file mode 100644 index 8277fe9c2b70..000000000000 --- a/drivers/scsi/bfa/fcs_fcxp.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/** - * fcs_fcxp.h FCXP helper macros for FCS - */ - - -#ifndef __FCS_FCXP_H__ -#define __FCS_FCXP_H__ - -#define bfa_fcs_fcxp_alloc(__fcs) \ - bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL) - -#endif /* __FCS_FCXP_H__ */ diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h deleted file mode 100644 index a6508c8ab184..000000000000 --- a/drivers/scsi/bfa/fcs_lport.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_lport.h FCS logical port interfaces - */ - -#ifndef __FCS_LPORT_H__ -#define __FCS_LPORT_H__ - -#define __VPORT_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * PID used in P2P/N2N ( In Big Endian) - */ -#define N2N_LOCAL_PID 0x010000 -#define N2N_REMOTE_PID 0x020000 - -/* - * Misc Timeouts - */ -/* - * To be used when spawning a timer before retrying a failed command. Milli - * Secs. - */ -#define BFA_FCS_RETRY_TIMEOUT 2000 - -/* - * Check for Port/Vport Mode/Role - */ -#define BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \ - (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IM) - -#define BFA_FCS_VPORT_IS_TARGET_MODE(port) \ - (port->port_cfg.roles & BFA_PORT_ROLE_FCP_TM) - -#define BFA_FCS_VPORT_IS_IPFC_MODE(port) \ - (port->port_cfg.roles & BFA_PORT_ROLE_FCP_IPFC) - -/* - * Is this a Well Known Address - */ -#define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0) - -/* - * Pointer to elements within Port - */ -#define BFA_FCS_GET_HAL_FROM_PORT(port) (port->fcs->bfa) -#define BFA_FCS_GET_NS_FROM_PORT(port) (&port->port_topo.pfab.ns) -#define BFA_FCS_GET_SCN_FROM_PORT(port) (&port->port_topo.pfab.scn) -#define BFA_FCS_GET_MS_FROM_PORT(port) (&port->port_topo.pfab.ms) -#define BFA_FCS_GET_FDMI_FROM_PORT(port) (&port->port_topo.pfab.ms.fdmi) - -/* - * handler for unsolicied frames - */ -void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, - u16 len); - -/* - * Following routines will be called by Fabric to indicate port - * online/offline to vport. 
- */ -void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, - uint16_t vf_id, struct bfa_fcs_vport_s *vport); -void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, - struct bfa_port_cfg_s *port_cfg); -void bfa_fcs_port_online(struct bfa_fcs_port_s *port); -void bfa_fcs_port_offline(struct bfa_fcs_port_s *port); -void bfa_fcs_port_delete(struct bfa_fcs_port_s *port); -bfa_boolean_t bfa_fcs_port_is_online(struct bfa_fcs_port_s *port); - -/* - * Lookup rport based on PID - */ -struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pid( - struct bfa_fcs_port_s *port, u32 pid); - -/* - * Lookup rport based on PWWN - */ -struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_pwwn( - struct bfa_fcs_port_s *port, wwn_t pwwn); -struct bfa_fcs_rport_s *bfa_fcs_port_get_rport_by_nwwn( - struct bfa_fcs_port_s *port, wwn_t nwwn); -void bfa_fcs_port_add_rport(struct bfa_fcs_port_s *port, - struct bfa_fcs_rport_s *rport); -void bfa_fcs_port_del_rport(struct bfa_fcs_port_s *port, - struct bfa_fcs_rport_s *rport); - -void bfa_fcs_port_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_port_modexit(struct bfa_fcs_s *fcs); -void bfa_fcs_port_lip(struct bfa_fcs_port_s *port); - -#endif /* __FCS_LPORT_H__ */ diff --git a/drivers/scsi/bfa/fcs_ms.h b/drivers/scsi/bfa/fcs_ms.h deleted file mode 100644 index b6a8c12876f4..000000000000 --- a/drivers/scsi/bfa/fcs_ms.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_ms.h FCS ms interfaces - */ -#ifndef __FCS_MS_H__ -#define __FCS_MS_H__ - -/* MS FCS routines */ -void bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port); -void bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port); -void bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port); -void bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port); - -/* FDMI FCS routines */ -void bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms); -void bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms); -void bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms); - -#endif diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h deleted file mode 100644 index 408c06a7d164..000000000000 --- a/drivers/scsi/bfa/fcs_port.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/** - * fcs_pport.h FCS physical port interfaces - */ - - -#ifndef __FCS_PPORT_H__ -#define __FCS_PPORT_H__ - -/* - * fcs friend functions: only between fcs modules - */ -void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs); - -#endif /* __FCS_PPORT_H__ */ diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h deleted file mode 100644 index e634fb7a69b8..000000000000 --- a/drivers/scsi/bfa/fcs_rport.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_rport.h FCS rport interfaces and defines - */ - -#ifndef __FCS_RPORT_H__ -#define __FCS_RPORT_H__ - -#include - -#define BFA_FCS_RPORT_MAX_RETRIES (5) - -void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, - u16 len); -void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); - -struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_port_s *port, - u32 pid); -void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi_rsp); -void bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, - struct fchs_s *rx_fchs, - struct fc_logi_s *plogi); -void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, - struct fc_logi_s *plogi); -void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id); -void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport); -int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport); -struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, - wwn_t wwn); - - -/* Rport Features */ -void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport); - -#endif /* __FCS_RPORT_H__ */ diff --git a/drivers/scsi/bfa/fcs_trcmod.h b/drivers/scsi/bfa/fcs_trcmod.h deleted file mode 100644 index 41b5ae8d7644..000000000000 --- a/drivers/scsi/bfa/fcs_trcmod.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_trcmod.h BFA FCS trace modules - */ - -#ifndef __FCS_TRCMOD_H__ -#define __FCS_TRCMOD_H__ - -#include - -/* - * !!! Only append to the enums defined here to avoid any versioning - * !!! needed between trace utility and driver version - */ -enum { - BFA_TRC_FCS_FABRIC = 1, - BFA_TRC_FCS_VFAPI = 2, - BFA_TRC_FCS_PORT = 3, - BFA_TRC_FCS_VPORT = 4, - BFA_TRC_FCS_VP_API = 5, - BFA_TRC_FCS_VPS = 6, - BFA_TRC_FCS_RPORT = 7, - BFA_TRC_FCS_FCPIM = 8, - BFA_TRC_FCS_FCPTM = 9, - BFA_TRC_FCS_NS = 10, - BFA_TRC_FCS_SCN = 11, - BFA_TRC_FCS_LOOP = 12, - BFA_TRC_FCS_UF = 13, - BFA_TRC_FCS_PPORT = 14, - BFA_TRC_FCS_FCPIP = 15, - BFA_TRC_FCS_PORT_API = 16, - BFA_TRC_FCS_RPORT_API = 17, - BFA_TRC_FCS_AUTH = 18, - BFA_TRC_FCS_N2N = 19, - BFA_TRC_FCS_MS = 20, - BFA_TRC_FCS_FDMI = 21, - BFA_TRC_FCS_RPORT_FTRS = 22, -}; - -#endif /* __FCS_TRCMOD_H__ */ diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h deleted file mode 100644 index f591072214fe..000000000000 --- a/drivers/scsi/bfa/fcs_uf.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * fcs_uf.h FCS unsolicited frame receive - */ - - -#ifndef __FCS_UF_H__ -#define __FCS_UF_H__ - -/* - * fcs friend functions: only between fcs modules - */ -void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); - -#endif /* __FCS_UF_H__ */ diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h deleted file mode 100644 index bb647a4a5dde..000000000000 --- a/drivers/scsi/bfa/fcs_vport.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __FCS_VPORT_H__ -#define __FCS_VPORT_H__ - -#include -#include -#include - -void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); -void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); -void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); -void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); -void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport); - -#endif /* __FCS_VPORT_H__ */ - diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c deleted file mode 100644 index 2b50eabf4b1e..000000000000 --- a/drivers/scsi/bfa/fdmi.c +++ /dev/null @@ -1,1230 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * port_api.c BFA FCS port - */ - - -#include -#include -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "lport_priv.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include - -BFA_TRC_FILE(FCS, FDMI); - -#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2 - -/* - * forward declarations - */ -static void bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_fdmi_rhba_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_fdmi_rprt_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_fdmi_rpa_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_fdmi_timeout(void *arg); -static u16 bfa_fcs_port_fdmi_build_rhba_pyld( - struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); -static u16 bfa_fcs_port_fdmi_build_rprt_pyld( - struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); -static u16 bfa_fcs_port_fdmi_build_rpa_pyld( - struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); -static u16 bfa_fcs_port_fdmi_build_portattr_block( - struct bfa_fcs_port_fdmi_s *fdmi, u8 *pyld); -static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, - struct bfa_fcs_fdmi_hba_attr_s *hba_attr); -static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, - struct bfa_fcs_fdmi_port_attr_s *port_attr); -/** - * fcs_fdmi_sm FCS FDMI state machine - */ - -/** - * FDMI State Machine events - */ -enum port_fdmi_event { - FDMISM_EVENT_PORT_ONLINE = 1, - FDMISM_EVENT_PORT_OFFLINE = 2, - FDMISM_EVENT_RSP_OK = 4, - FDMISM_EVENT_RSP_ERROR = 5, - FDMISM_EVENT_TIMEOUT = 6, - FDMISM_EVENT_RHBA_SENT = 7, - FDMISM_EVENT_RPRT_SENT = 8, - FDMISM_EVENT_RPA_SENT = 9, -}; - -static void bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void 
bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); -static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event); - -/** - * Start in offline state - awaiting MS to send start. - */ -static void -bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - fdmi->retry_cnt = 0; - - switch (event) { - case FDMISM_EVENT_PORT_ONLINE: - if (port->vport) { - /* - * For Vports, register a new port. - */ - bfa_sm_set_state(fdmi, - bfa_fcs_port_fdmi_sm_sending_rprt); - bfa_fcs_port_fdmi_send_rprt(fdmi, NULL); - } else { - /* - * For a base port, we should first register the HBA - * atribute. The HBA attribute also contains the base - * port registration. 
- */ - bfa_sm_set_state(fdmi, - bfa_fcs_port_fdmi_sm_sending_rhba); - bfa_fcs_port_fdmi_send_rhba(fdmi, NULL); - } - break; - - case FDMISM_EVENT_PORT_OFFLINE: - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_RHBA_SENT: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), - &fdmi->fcxp_wqe); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_RSP_ERROR: - /* - * if max retries have not been reached, start timer for a - * delayed retry - */ - if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba_retry); - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), - &fdmi->timer, bfa_fcs_port_fdmi_timeout, - fdmi, BFA_FCS_RETRY_TIMEOUT); - } else { - /* - * set state to offline - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - } - break; - - case FDMISM_EVENT_RSP_OK: - /* - * Initiate Register Port Attributes - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa); - fdmi->retry_cnt = 0; - bfa_fcs_port_fdmi_send_rpa(fdmi, NULL); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_fcxp_discard(fdmi->fcxp); - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. 
Re-send - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rhba); - bfa_fcs_port_fdmi_send_rhba(fdmi, NULL); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - bfa_timer_stop(&fdmi->timer); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -/* -* RPRT : Register Port - */ -static void -bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_RPRT_SENT: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), - &fdmi->fcxp_wqe); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_RSP_ERROR: - /* - * if max retries have not been reached, start timer for a - * delayed retry - */ - if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt_retry); - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), - &fdmi->timer, bfa_fcs_port_fdmi_timeout, - fdmi, BFA_FCS_RETRY_TIMEOUT); - - } else { - /* - * set state to offline - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - fdmi->retry_cnt = 0; - } - break; - - case FDMISM_EVENT_RSP_OK: - fdmi->retry_cnt = 0; - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_fcxp_discard(fdmi->fcxp); - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. 
Re-send - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rprt); - bfa_fcs_port_fdmi_send_rprt(fdmi, NULL); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - bfa_timer_stop(&fdmi->timer); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -/* - * Register Port Attributes - */ -static void -bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_RPA_SENT: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port), - &fdmi->fcxp_wqe); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_RSP_ERROR: - /* - * if max retries have not been reached, start timer for a - * delayed retry - */ - if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rpa_retry); - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), - &fdmi->timer, bfa_fcs_port_fdmi_timeout, - fdmi, BFA_FCS_RETRY_TIMEOUT); - } else { - /* - * set state to offline - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - fdmi->retry_cnt = 0; - } - break; - - case FDMISM_EVENT_RSP_OK: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online); - fdmi->retry_cnt = 0; - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_fcxp_discard(fdmi->fcxp); - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. Re-send - */ - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa); - bfa_fcs_port_fdmi_send_rpa(fdmi, NULL); - break; - - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - bfa_timer_stop(&fdmi->timer); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -static void -bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - switch (event) { - case FDMISM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - break; - - default: - bfa_sm_fault(port->fcs, event); - } -} - -/** - * FDMI is disabled state. - */ -static void -bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi, - enum port_fdmi_event event) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - bfa_trc(port->fcs, event); - - /* No op State. It can only be enabled at Driver Init. */ -} - -/** -* RHBA : Register HBA Attributes. 
- */ -static void -bfa_fcs_port_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg; - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct fchs_s fchs; - int len, attr_len; - struct bfa_fcxp_s *fcxp; - u8 *pyld; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, - bfa_fcs_port_fdmi_send_rhba, fdmi); - return; - } - fdmi->fcxp = fcxp; - - pyld = bfa_fcxp_get_reqbuf(fcxp); - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); - - len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port), - FDMI_RHBA); - - attr_len = bfa_fcs_port_fdmi_build_rhba_pyld(fdmi, - (u8 *) ((struct ct_hdr_s *) pyld + 1)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, (len + attr_len), &fchs, - bfa_fcs_port_fdmi_rhba_response, (void *)fdmi, - FC_MAX_PDUSZ, FC_FCCT_TOV); - - bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT); -} - -static u16 -bfa_fcs_port_fdmi_build_rhba_pyld(struct bfa_fcs_port_fdmi_s *fdmi, - u8 *pyld) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct bfa_fcs_fdmi_hba_attr_s hba_attr; /* @todo */ - struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr; /* @todo */ - struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld; - struct fdmi_attr_s *attr; - u8 *curr_ptr; - u16 len, count; - - /* - * get hba attributes - */ - bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); - - rhba->hba_id = bfa_fcs_port_get_pwwn(port); - rhba->port_list.num_ports = bfa_os_htonl(1); - rhba->port_list.port_entry = bfa_fcs_port_get_pwwn(port); - - len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); - - count = 0; - len += sizeof(rhba->hba_attr_blk.attr_count); - - /* - * fill out the invididual entries of the HBA attrib Block - */ - curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr; - - /* - * Node Name - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME); - attr->len = sizeof(wwn_t); - memcpy(attr->value, &bfa_fcs_port_get_nwwn(port), attr->len); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Manufacturer - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER); - attr->len = (u16) strlen(fcs_hba_attr->manufacturer); - memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Serial Number - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM); - attr->len = (u16) strlen(fcs_hba_attr->serial_num); - memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Model - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL); - attr->len = (u16) strlen(fcs_hba_attr->model); - 
memcpy(attr->value, fcs_hba_attr->model, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Model Desc - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC); - attr->len = (u16) strlen(fcs_hba_attr->model_desc); - memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * H/W Version - */ - if (fcs_hba_attr->hw_version[0] != '\0') { - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION); - attr->len = (u16) strlen(fcs_hba_attr->hw_version); - memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - } - - /* - * Driver Version - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION); - attr->len = (u16) strlen(fcs_hba_attr->driver_version); - memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len;; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Option Rom Version - */ - if (fcs_hba_attr->option_rom_ver[0] != '\0') { - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION); - attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); - memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - } - - /* - * f/w Version = driver version - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION); - attr->len = (u16) strlen(fcs_hba_attr->driver_version); - memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * OS Name - */ - if (fcs_hba_attr->os_name[0] != '\0') { - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME); - attr->len = (u16) strlen(fcs_hba_attr->os_name); - memcpy(attr->value, fcs_hba_attr->os_name, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += 
attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - } - - /* - * MAX_CT_PAYLOAD - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT); - attr->len = sizeof(fcs_hba_attr->max_ct_pyld); - memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); - len += attr->len; - count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Update size of payload - */ - len += ((sizeof(attr->type) + sizeof(attr->len)) * count); - - rhba->hba_attr_blk.attr_count = bfa_os_htonl(count); - return len; -} - -static void -bfa_fcs_port_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg; - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct ct_hdr_s *cthdr = NULL; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); - return; - } - - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); -} - -/** -* RPRT : Register Port - */ -static void -bfa_fcs_port_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg; - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct fchs_s fchs; - u16 len, attr_len; - struct bfa_fcxp_s *fcxp; - u8 *pyld; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, - bfa_fcs_port_fdmi_send_rprt, fdmi); - return; - } - fdmi->fcxp = fcxp; - - pyld = bfa_fcxp_get_reqbuf(fcxp); - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); - - len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port), - FDMI_RPRT); - - attr_len = bfa_fcs_port_fdmi_build_rprt_pyld(fdmi, - (u8 *) ((struct ct_hdr_s *) pyld + 1)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len + attr_len, &fchs, - bfa_fcs_port_fdmi_rprt_response, (void *)fdmi, - FC_MAX_PDUSZ, FC_FCCT_TOV); - - bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); -} - -/** - * This routine builds Port Attribute Block that used in RPA, RPRT commands. 
- */ -static u16 -bfa_fcs_port_fdmi_build_portattr_block(struct bfa_fcs_port_fdmi_s *fdmi, - u8 *pyld) -{ - struct bfa_fcs_fdmi_port_attr_s fcs_port_attr; - struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld; - struct fdmi_attr_s *attr; - u8 *curr_ptr; - u16 len; - u8 count = 0; - - /* - * get port attributes - */ - bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); - - len = sizeof(port_attrib->attr_count); - - /* - * fill out the invididual entries - */ - curr_ptr = (u8 *) &port_attrib->port_attr; - - /* - * FC4 Types - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES); - attr->len = sizeof(fcs_port_attr.supp_fc4_types); - memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * Supported Speed - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED); - attr->len = sizeof(fcs_port_attr.supp_speed); - memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * current Port Speed - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED); - attr->len = sizeof(fcs_port_attr.curr_speed); - memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * max frame size - */ - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE); - attr->len = sizeof(fcs_port_attr.max_frm_size); - memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - /* - * OS Device Name - */ - if (fcs_port_attr.os_device_name[0] != '\0') { - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME); - attr->len = (u16) strlen(fcs_port_attr.os_device_name); - memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - } - /* - * Host Name - */ - if (fcs_port_attr.host_name[0] != '\0') { - attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME); - attr->len = (u16) strlen(fcs_port_attr.host_name); - memcpy(attr->value, fcs_port_attr.host_name, attr->len); - /* variable fields need to be 4 byte aligned */ - attr->len = fc_roundup(attr->len, sizeof(u32)); - curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; - len += attr->len; - ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + - sizeof(attr->len)); - - } - - /* - * Update size of payload - */ - port_attrib->attr_count = bfa_os_htonl(count); - len += ((sizeof(attr->type) + sizeof(attr->len)) * count); - return len; -} - -static u16 
-bfa_fcs_port_fdmi_build_rprt_pyld(struct bfa_fcs_port_fdmi_s *fdmi, - u8 *pyld) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld; - u16 len; - - rprt->hba_id = bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs)); - rprt->port_name = bfa_fcs_port_get_pwwn(port); - - len = bfa_fcs_port_fdmi_build_portattr_block(fdmi, - (u8 *) &rprt->port_attr_blk); - - len += sizeof(rprt->hba_id) + sizeof(rprt->port_name); - - return len; -} - -static void -bfa_fcs_port_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg; - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct ct_hdr_s *cthdr = NULL; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); - return; - } - - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); -} - -/** -* RPA : Register Port Attributes. - */ -static void -bfa_fcs_port_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_fdmi_s *fdmi = fdmi_cbarg; - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct fchs_s fchs; - u16 len, attr_len; - struct bfa_fcxp_s *fcxp; - u8 *pyld; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, - bfa_fcs_port_fdmi_send_rpa, fdmi); - return; - } - fdmi->fcxp = fcxp; - - pyld = bfa_fcxp_get_reqbuf(fcxp); - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); - - len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_port_get_fcid(port), - FDMI_RPA); - - attr_len = bfa_fcs_port_fdmi_build_rpa_pyld(fdmi, - (u8 *) ((struct ct_hdr_s *) pyld + 1)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len + attr_len, &fchs, - bfa_fcs_port_fdmi_rpa_response, (void *)fdmi, - FC_MAX_PDUSZ, FC_FCCT_TOV); - - bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT); -} - -static u16 -bfa_fcs_port_fdmi_build_rpa_pyld(struct bfa_fcs_port_fdmi_s *fdmi, - u8 *pyld) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld; - u16 len; - - rpa->port_name = bfa_fcs_port_get_pwwn(port); - - len = bfa_fcs_port_fdmi_build_portattr_block(fdmi, - (u8 *) &rpa->port_attr_blk); - - len += sizeof(rpa->port_name); - - return len; -} - -static void -bfa_fcs_port_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg; - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct ct_hdr_s *cthdr = NULL; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); - return; - } - - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); -} - -static void -bfa_fcs_port_fdmi_timeout(void *arg) -{ - struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)arg; - - bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); -} - -static void -bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, - struct bfa_fcs_fdmi_hba_attr_s *hba_attr) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; - - bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); - - bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, - hba_attr->manufacturer); - bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc, - hba_attr->serial_num); - bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model); - bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc); - bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version); - bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc, - hba_attr->option_rom_ver); - bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version); - - strncpy(hba_attr->driver_version, (char *)driver_info->version, - sizeof(hba_attr->driver_version)); - - strncpy(hba_attr->os_name, driver_info->host_os_name, - sizeof(hba_attr->os_name)); - - /* - * If there is a patch level, append it to the os name along with a - * separator - */ - if (driver_info->host_os_patch[0] != '\0') { - strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, - sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); - strncat(hba_attr->os_name, driver_info->host_os_patch, - 
sizeof(driver_info->host_os_patch)); - } - - hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ); - -} - -static void -bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, - struct bfa_fcs_fdmi_port_attr_s *port_attr) -{ - struct bfa_fcs_port_s *port = fdmi->ms->port; - struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; - struct bfa_pport_attr_s pport_attr; - - bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); - - /* - * get pport attributes from hal - */ - bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); - - /* - * get FC4 type Bitmask - */ - fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types); - - /* - * Supported Speeds - */ - port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS); - - /* - * Current Speed - */ - port_attr->curr_speed = bfa_os_htonl(pport_attr.speed); - - /* - * Max PDU Size. - */ - port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ); - - /* - * OS device Name - */ - strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name, - sizeof(port_attr->os_device_name)); - - /* - * Host name - */ - strncpy(port_attr->host_name, (char *)driver_info->host_machine_name, - sizeof(port_attr->host_name)); - -} - - -void -bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms) -{ - struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi; - - fdmi->ms = ms; - if (ms->port->fcs->fdmi_enabled) - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); - else - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled); -} - -void -bfa_fcs_port_fdmi_offline(struct bfa_fcs_port_ms_s *ms) -{ - struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi; - - fdmi->ms = ms; - bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE); -} - -void -bfa_fcs_port_fdmi_online(struct bfa_fcs_port_ms_s *ms) -{ - struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi; - - fdmi->ms = ms; - bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE); -} diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h deleted file mode 100644 index 6abbab005db6..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#ifndef __BFA_AEN_H__ -#define __BFA_AEN_H__ - -#include "defs/bfa_defs_aen.h" -#include "defs/bfa_defs_status.h" -#include "cs/bfa_debug.h" - -#define BFA_AEN_MAX_ENTRY 512 - -extern int bfa_aen_max_cfg_entry; -struct bfa_aen_s { - void *bfad; - int max_entry; - int write_index; - int read_index; - int bfad_num; - int seq_num; - void (*aen_cb_notify)(void *bfad); - void (*gettimeofday)(struct bfa_timeval_s *tv); - struct bfa_trc_mod_s *trcmod; - int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */ - struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ -}; - - -/** - * Public APIs - */ -static inline void -bfa_aen_set_max_cfg_entry(int max_entry) -{ - bfa_aen_max_cfg_entry = max_entry; -} - -static inline int -bfa_aen_get_max_cfg_entry(void) -{ - return bfa_aen_max_cfg_entry; -} - -static inline int -bfa_aen_get_meminfo(void) -{ - return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry(); -} - -static inline int -bfa_aen_get_wi(struct bfa_aen_s *aen) -{ - return aen->write_index; -} - -static inline int -bfa_aen_get_ri(struct bfa_aen_s *aen) -{ - return aen->read_index; -} - -static inline int -bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id) -{ - bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu)); - return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id]) - % aen->max_entry; -} - -int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, - void *bfad, int bfad_num, void (*aen_cb_notify)(void *), - void (*gettimeofday)(struct bfa_timeval_s *)); - -void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, - int aen_type, union bfa_aen_data_u *aen_data); - -bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen, - struct bfa_aen_entry_s *aen_entry, - int entry_req, enum bfa_aen_app app_id, int *entry_ret); - -int bfa_aen_get_inst(struct bfa_aen_s *aen); - -#endif /* __BFA_AEN_H__ */ diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h b/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h deleted file mode 100644 index 260d3ea1cab3..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_adapter.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* messages define for BFA_AEN_CAT_ADAPTER Module */ -#ifndef __bfa_aen_adapter_h__ -#define __bfa_aen_adapter_h__ - -#include -#include - -#define BFA_AEN_ADAPTER_ADD \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_ADD) -#define BFA_AEN_ADAPTER_REMOVE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ADAPTER, BFA_ADAPTER_AEN_REMOVE) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h b/drivers/scsi/bfa/include/aen/bfa_aen_audit.h deleted file mode 100644 index 12cd7aab5d53..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_audit.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 
- * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* messages define for BFA_AEN_CAT_AUDIT Module */ -#ifndef __bfa_aen_audit_h__ -#define __bfa_aen_audit_h__ - -#include -#include - -#define BFA_AEN_AUDIT_AUTH_ENABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_ENABLE) -#define BFA_AEN_AUDIT_AUTH_DISABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_AUDIT, BFA_AUDIT_AEN_AUTH_DISABLE) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h b/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h deleted file mode 100644 index 507d0b58d149..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_ethport.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* messages define for BFA_AEN_CAT_ETHPORT Module */ -#ifndef __bfa_aen_ethport_h__ -#define __bfa_aen_ethport_h__ - -#include -#include - -#define BFA_AEN_ETHPORT_LINKUP \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKUP) -#define BFA_AEN_ETHPORT_LINKDOWN \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_LINKDOWN) -#define BFA_AEN_ETHPORT_ENABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_ENABLE) -#define BFA_AEN_ETHPORT_DISABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ETHPORT, BFA_ETHPORT_AEN_DISABLE) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h b/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h deleted file mode 100644 index 4daf96faa266..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_ioc.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/* messages define for BFA_AEN_CAT_IOC Module */ -#ifndef __bfa_aen_ioc_h__ -#define __bfa_aen_ioc_h__ - -#include -#include - -#define BFA_AEN_IOC_HBGOOD \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBGOOD) -#define BFA_AEN_IOC_HBFAIL \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_HBFAIL) -#define BFA_AEN_IOC_ENABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_ENABLE) -#define BFA_AEN_IOC_DISABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_DISABLE) -#define BFA_AEN_IOC_FWMISMATCH \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWMISMATCH) -#define BFA_AEN_IOC_FWCFG_ERROR \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_FWCFG_ERROR) -#define BFA_AEN_IOC_INVALID_VENDOR \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_VENDOR) -#define BFA_AEN_IOC_INVALID_NWWN \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_NWWN) -#define BFA_AEN_IOC_INVALID_PWWN \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, BFA_IOC_AEN_INVALID_PWWN) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h b/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h deleted file mode 100644 index a7d8ddcfef99..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_itnim.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* messages define for BFA_AEN_CAT_ITNIM Module */ -#ifndef __bfa_aen_itnim_h__ -#define __bfa_aen_itnim_h__ - -#include -#include - -#define BFA_AEN_ITNIM_ONLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_ONLINE) -#define BFA_AEN_ITNIM_OFFLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_OFFLINE) -#define BFA_AEN_ITNIM_DISCONNECT \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, BFA_ITNIM_AEN_DISCONNECT) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h b/drivers/scsi/bfa/include/aen/bfa_aen_lport.h deleted file mode 100644 index 5a8ebb65193f..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_lport.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/* messages define for BFA_AEN_CAT_LPORT Module */ -#ifndef __bfa_aen_lport_h__ -#define __bfa_aen_lport_h__ - -#include -#include - -#define BFA_AEN_LPORT_NEW \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW) -#define BFA_AEN_LPORT_DELETE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE) -#define BFA_AEN_LPORT_ONLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_ONLINE) -#define BFA_AEN_LPORT_OFFLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_OFFLINE) -#define BFA_AEN_LPORT_DISCONNECT \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DISCONNECT) -#define BFA_AEN_LPORT_NEW_PROP \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_PROP) -#define BFA_AEN_LPORT_DELETE_PROP \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_PROP) -#define BFA_AEN_LPORT_NEW_STANDARD \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NEW_STANDARD) -#define BFA_AEN_LPORT_DELETE_STANDARD \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_DELETE_STANDARD) -#define BFA_AEN_LPORT_NPIV_DUP_WWN \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_DUP_WWN) -#define BFA_AEN_LPORT_NPIV_FABRIC_MAX \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_FABRIC_MAX) -#define BFA_AEN_LPORT_NPIV_UNKNOWN \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, BFA_LPORT_AEN_NPIV_UNKNOWN) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_port.h b/drivers/scsi/bfa/include/aen/bfa_aen_port.h deleted file mode 100644 index 9add905a622d..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_port.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/* messages define for BFA_AEN_CAT_PORT Module */ -#ifndef __bfa_aen_port_h__ -#define __bfa_aen_port_h__ - -#include -#include - -#define BFA_AEN_PORT_ONLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ONLINE) -#define BFA_AEN_PORT_OFFLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_OFFLINE) -#define BFA_AEN_PORT_RLIR \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_RLIR) -#define BFA_AEN_PORT_SFP_INSERT \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_INSERT) -#define BFA_AEN_PORT_SFP_REMOVE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_REMOVE) -#define BFA_AEN_PORT_SFP_POM \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_POM) -#define BFA_AEN_PORT_ENABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_ENABLE) -#define BFA_AEN_PORT_DISABLE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISABLE) -#define BFA_AEN_PORT_AUTH_ON \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_ON) -#define BFA_AEN_PORT_AUTH_OFF \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_AUTH_OFF) -#define BFA_AEN_PORT_DISCONNECT \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_DISCONNECT) -#define BFA_AEN_PORT_QOS_NEG \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_QOS_NEG) -#define BFA_AEN_PORT_FABRIC_NAME_CHANGE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_FABRIC_NAME_CHANGE) -#define BFA_AEN_PORT_SFP_ACCESS_ERROR \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_ACCESS_ERROR) -#define BFA_AEN_PORT_SFP_UNSUPPORT \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, BFA_PORT_AEN_SFP_UNSUPPORT) - -#endif - diff --git a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h b/drivers/scsi/bfa/include/aen/bfa_aen_rport.h deleted file mode 100644 index 7e4be1fd5e15..000000000000 --- a/drivers/scsi/bfa/include/aen/bfa_aen_rport.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* messages define for BFA_AEN_CAT_RPORT Module */ -#ifndef __bfa_aen_rport_h__ -#define __bfa_aen_rport_h__ - -#include -#include - -#define BFA_AEN_RPORT_ONLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_ONLINE) -#define BFA_AEN_RPORT_OFFLINE \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_OFFLINE) -#define BFA_AEN_RPORT_DISCONNECT \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_DISCONNECT) -#define BFA_AEN_RPORT_QOS_PRIO \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_PRIO) -#define BFA_AEN_RPORT_QOS_FLOWID \ - BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, BFA_RPORT_AEN_QOS_FLOWID) - -#endif - diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h deleted file mode 100644 index d52b32f5695c..000000000000 --- a/drivers/scsi/bfa/include/bfa.h +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_H__ -#define __BFA_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct bfa_s; -#include - -struct bfa_pcidev_s; - -/** - * PCI devices supported by the current BFA - */ -struct bfa_pciid_s { - u16 device_id; - u16 vendor_id; -}; - -extern char bfa_version[]; - -/** - * BFA Power Mgmt Commands - */ -enum bfa_pm_cmd { - BFA_PM_CTL_D0 = 0, - BFA_PM_CTL_D1 = 1, - BFA_PM_CTL_D2 = 2, - BFA_PM_CTL_D3 = 3, -}; - -/** - * BFA memory resources - */ -enum bfa_mem_type { - BFA_MEM_TYPE_KVA = 1, /*! Kernel Virtual Memory *(non-dma-able) */ - BFA_MEM_TYPE_DMA = 2, /*! DMA-able memory */ - BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA, -}; - -struct bfa_mem_elem_s { - enum bfa_mem_type mem_type; /* see enum bfa_mem_type */ - u32 mem_len; /* Total Length in Bytes */ - u8 *kva; /* kernel virtual address */ - u64 dma; /* dma address if DMA memory */ - u8 *kva_curp; /* kva allocation cursor */ - u64 dma_curp; /* dma allocation cursor */ -}; - -struct bfa_meminfo_s { - struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX]; -}; -#define bfa_meminfo_kva(_m) \ - ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp) -#define bfa_meminfo_dma_virt(_m) \ - ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp) -#define bfa_meminfo_dma_phys(_m) \ - ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) - -/** - * Generic Scatter Gather Element used by driver - */ -struct bfa_sge_s { - u32 sg_len; - void *sg_addr; -}; - -#define bfa_sge_to_be(__sge) do { \ - ((u32 *)(__sge))[0] = bfa_os_htonl(((u32 *)(__sge))[0]); \ - ((u32 *)(__sge))[1] = bfa_os_htonl(((u32 *)(__sge))[1]); \ - ((u32 *)(__sge))[2] = bfa_os_htonl(((u32 *)(__sge))[2]); \ -} while (0) - - -/* - * bfa stats interfaces - */ -#define bfa_stats(_mod, _stats) ((_mod)->stats._stats++) - -#define bfa_ioc_get_stats(__bfa, __ioc_stats) \ - bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) -#define bfa_ioc_clear_stats(__bfa) \ - bfa_ioc_clr_stats(&(__bfa)->ioc) -#define bfa_get_nports(__bfa) \ - bfa_ioc_get_nports(&(__bfa)->ioc) -#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \ - bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer) -#define bfa_get_adapter_model(__bfa, __model) \ - bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model) -#define bfa_get_adapter_serial_num(__bfa, __serial_num) \ - bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num) -#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \ - bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver) -#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \ - bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver) -#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \ - bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev) -#define bfa_get_ioc_state(__bfa) \ - bfa_ioc_get_state(&(__bfa)->ioc) -#define bfa_get_type(__bfa) \ - bfa_ioc_get_type(&(__bfa)->ioc) -#define bfa_get_mac(__bfa) \ - bfa_ioc_get_mac(&(__bfa)->ioc) -#define bfa_get_mfg_mac(__bfa) \ - bfa_ioc_get_mfg_mac(&(__bfa)->ioc) -#define bfa_get_fw_clock_res(__bfa) \ - ((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res) - -/* - * bfa API 
functions - */ -void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids); -void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg); -void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg); -void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo); -void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, - struct bfa_pcidev_s *pcidev); -void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod); -void bfa_init_log(struct bfa_s *bfa, struct bfa_log_mod_s *logmod); -void bfa_init_aen(struct bfa_s *bfa, struct bfa_aen_s *aen); -void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog); -void bfa_detach(struct bfa_s *bfa); -void bfa_init(struct bfa_s *bfa); -void bfa_start(struct bfa_s *bfa); -void bfa_stop(struct bfa_s *bfa); -void bfa_attach_fcs(struct bfa_s *bfa); -void bfa_cb_init(void *bfad, bfa_status_t status); -void bfa_cb_stop(void *bfad, bfa_status_t status); -void bfa_cb_updateq(void *bfad, bfa_status_t status); - -bfa_boolean_t bfa_intx(struct bfa_s *bfa); -void bfa_isr_enable(struct bfa_s *bfa); -void bfa_isr_disable(struct bfa_s *bfa); -void bfa_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, - u32 *num_vecs, u32 *max_vec_bit); -#define bfa_msix(__bfa, __vec) ((__bfa)->msix.handler[__vec](__bfa, __vec)) - -void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q); -void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q); -void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q); - -typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status); -void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr); -bfa_status_t bfa_iocfc_get_stats(struct bfa_s *bfa, - struct bfa_iocfc_stats_s *stats, - bfa_cb_ioc_t cbfn, void *cbarg); -bfa_status_t bfa_iocfc_clear_stats(struct bfa_s *bfa, - bfa_cb_ioc_t cbfn, void *cbarg); -void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr); - -void bfa_adapter_get_attr(struct bfa_s *bfa, - struct bfa_adapter_attr_s *ad_attr); -u64 bfa_adapter_get_id(struct bfa_s *bfa); - -bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, - struct bfa_iocfc_intr_attr_s *attr); - -void bfa_iocfc_enable(struct bfa_s *bfa); -void bfa_iocfc_disable(struct bfa_s *bfa); -void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); -void bfa_chip_reset(struct bfa_s *bfa); -void bfa_cb_ioc_disable(void *bfad); -void bfa_timer_tick(struct bfa_s *bfa); -#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ - bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) - -/* - * BFA debug API functions - */ -bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); -bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); -void bfa_debug_fwsave_clear(struct bfa_s *bfa); - -#include "bfa_priv.h" - -#endif /* __BFA_H__ */ diff --git a/drivers/scsi/bfa/include/bfa_fcpim.h b/drivers/scsi/bfa/include/bfa_fcpim.h deleted file mode 100644 index 4bc9453081df..000000000000 --- a/drivers/scsi/bfa/include/bfa_fcpim.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FCPIM_H__ -#define __BFA_FCPIM_H__ - -#include -#include -#include -#include - -/* - * forward declarations - */ -struct bfa_itnim_s; -struct bfa_ioim_s; -struct bfa_tskim_s; -struct bfad_ioim_s; -struct bfad_tskim_s; - -/* - * bfa fcpim module API functions - */ -void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); -u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); -void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth); -u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); -bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa, - struct bfa_fcpim_stats_s *modstats); -bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa); -void bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state); -void bfa_fcpim_update_ioredirect(struct bfa_s *bfa); -void bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect); - -#define bfa_fcpim_ioredirect_enabled(__bfa) \ - (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect) - -#define bfa_fcpim_get_next_reqq(__bfa, __qid) \ -{ \ - struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \ - __fcpim->reqq++; \ - __fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \ - *(__qid) = __fcpim->reqq; \ -} - -#define bfa_iocfc_map_msg_to_qid(__msg, __qid) \ - *(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1)); - - -/* - * bfa itnim API functions - */ -struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa, - struct bfa_rport_s *rport, void *itnim); -void bfa_itnim_delete(struct bfa_itnim_s *itnim); -void bfa_itnim_online(struct bfa_itnim_s *itnim, - bfa_boolean_t seq_rec); -void bfa_itnim_offline(struct bfa_itnim_s *itnim); -void bfa_itnim_get_stats(struct bfa_itnim_s *itnim, - struct bfa_itnim_hal_stats_s *stats); -void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); - -#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) - -/** - * BFA completion callback for bfa_itnim_online(). - * - * @param[in] itnim FCS or driver itnim instance - * - * return None - */ -void bfa_cb_itnim_online(void *itnim); - -/** - * BFA completion callback for bfa_itnim_offline(). - * - * @param[in] itnim FCS or driver itnim instance - * - * return None - */ -void bfa_cb_itnim_offline(void *itnim); -void bfa_cb_itnim_tov_begin(void *itnim); -void bfa_cb_itnim_tov(void *itnim); - -/** - * BFA notification to FCS/driver for second level error recovery. - * - * Atleast one I/O request has timedout and target is unresponsive to - * repeated abort requests. Second level error recovery should be initiated - * by starting implicit logout and recovery procedures. 
- * - * @param[in] itnim FCS or driver itnim instance - * - * return None - */ -void bfa_cb_itnim_sler(void *itnim); - -/* - * bfa ioim API functions - */ -struct bfa_ioim_s *bfa_ioim_alloc(struct bfa_s *bfa, - struct bfad_ioim_s *dio, - struct bfa_itnim_s *itnim, - u16 nsgles); - -void bfa_ioim_free(struct bfa_ioim_s *ioim); -void bfa_ioim_start(struct bfa_ioim_s *ioim); -void bfa_ioim_abort(struct bfa_ioim_s *ioim); -void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, - bfa_boolean_t iotov); - - -/** - * I/O completion notification. - * - * @param[in] dio driver IO structure - * @param[in] io_status IO completion status - * @param[in] scsi_status SCSI status returned by target - * @param[in] sns_len SCSI sense length, 0 if none - * @param[in] sns_info SCSI sense data, if any - * @param[in] residue Residual length - * - * @return None - */ -void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, - enum bfi_ioim_status io_status, - u8 scsi_status, int sns_len, - u8 *sns_info, s32 residue); - -/** - * I/O good completion notification. - * - * @param[in] dio driver IO structure - * - * @return None - */ -void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); - -/** - * I/O abort completion notification - * - * @param[in] dio driver IO that was aborted - * - * @return None - */ -void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio); -void bfa_cb_ioim_resfree(void *hcb_bfad); - -void bfa_cb_ioim_resfree(void *hcb_bfad); - -/* - * bfa tskim API functions - */ -struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa, - struct bfad_tskim_s *dtsk); -void bfa_tskim_free(struct bfa_tskim_s *tskim); -void bfa_tskim_start(struct bfa_tskim_s *tskim, - struct bfa_itnim_s *itnim, lun_t lun, - enum fcp_tm_cmnd tm, u8 t_secs); -void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, - enum bfi_tskim_status tsk_status); - -#endif /* __BFA_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/include/bfa_fcptm.h b/drivers/scsi/bfa/include/bfa_fcptm.h deleted file mode 100644 index 5f5ffe0bb1bb..000000000000 --- a/drivers/scsi/bfa/include/bfa_fcptm.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_FCPTM_H__ -#define __BFA_FCPTM_H__ - -#include -#include -#include - -/* - * forward declarations - */ -struct bfa_tin_s; -struct bfa_iotm_s; -struct bfa_tsktm_s; - -/* - * bfa fcptm module API functions - */ -void bfa_fcptm_path_tov_set(struct bfa_s *bfa, u16 path_tov); -u16 bfa_fcptm_path_tov_get(struct bfa_s *bfa); -void bfa_fcptm_qdepth_set(struct bfa_s *bfa, u16 q_depth); -u16 bfa_fcptm_qdepth_get(struct bfa_s *bfa); - -/* - * bfa tin API functions - */ -void bfa_tin_get_stats(struct bfa_tin_s *tin, struct bfa_tin_stats_s *stats); -void bfa_tin_clear_stats(struct bfa_tin_s *tin); - -#endif /* __BFA_FCPTM_H__ */ - diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h deleted file mode 100644 index 7840943d73b0..000000000000 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_SVC_H__ -#define __BFA_SVC_H__ - -/* - * forward declarations - */ -struct bfa_fcxp_s; - -#include -#include -#include -#include -#include -#include -#include - -/** - * BFA rport information. - */ -struct bfa_rport_info_s { - u16 max_frmsz; /* max rcv pdu size */ - u32 pid:24, /* remote port ID */ - lp_tag:8; /* tag */ - u32 local_pid:24, /* local port ID */ - cisc:8; /* CIRO supported */ - u8 fc_class; /* supported FC classes. enum fc_cos */ - u8 vf_en; /* virtual fabric enable */ - u16 vf_id; /* virtual fabric ID */ - enum bfa_pport_speed speed; /* Rport's current speed */ -}; - -/** - * BFA rport data structure - */ -struct bfa_rport_s { - struct list_head qe; /* queue element */ - bfa_sm_t sm; /* state machine */ - struct bfa_s *bfa; /* backpointer to BFA */ - void *rport_drv; /* fcs/driver rport object */ - u16 fw_handle; /* firmware rport handle */ - u16 rport_tag; /* BFA rport tag */ - struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ - struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ - struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ - struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ - struct bfa_rport_qos_attr_s qos_attr; - union a { - bfa_status_t status; /* f/w status */ - void *fw_msg; /* QoS scn event */ - } event_arg; -}; -#define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class) - -/** - * Send completion callback. 
- */ -typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp, - void *cbarg, enum bfa_status req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs); - -/** - * BFA fcxp allocation (asynchronous) - */ -typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp); - -struct bfa_fcxp_wqe_s { - struct list_head qe; - bfa_fcxp_alloc_cbfn_t alloc_cbfn; - void *alloc_cbarg; -}; - -typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid); -typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid); - -#define BFA_UF_BUFSZ (2 * 1024 + 256) - -/** - * @todo private - */ -struct bfa_uf_buf_s { - u8 d[BFA_UF_BUFSZ]; -}; - - -struct bfa_uf_s { - struct list_head qe; /* queue element */ - struct bfa_s *bfa; /* bfa instance */ - u16 uf_tag; /* identifying tag fw msgs */ - u16 vf_id; - u16 src_rport_handle; - u16 rsvd; - u8 *data_ptr; - u16 data_len; /* actual receive length */ - u16 pb_len; /* posted buffer length */ - void *buf_kva; /* buffer virtual address */ - u64 buf_pa; /* buffer physical address */ - struct bfa_cb_qe_s hcb_qe; /* comp: BFA comp qelem */ - struct bfa_sge_s sges[BFI_SGE_INLINE_MAX]; -}; - -typedef void (*bfa_cb_pport_t) (void *cbarg, enum bfa_status status); - -/** - * bfa lport login/logout service interface - */ -struct bfa_lps_s { - struct list_head qe; /* queue element */ - struct bfa_s *bfa; /* parent bfa instance */ - bfa_sm_t sm; /* finite state machine */ - u8 lp_tag; /* lport tag */ - u8 reqq; /* lport request queue */ - u8 alpa; /* ALPA for loop topologies */ - u32 lp_pid; /* lport port ID */ - bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */ - bfa_boolean_t auth_en; /* enable authentication */ - bfa_boolean_t auth_req; /* authentication required */ - bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ - bfa_boolean_t fport; /* attached peer is F_PORT */ - bfa_boolean_t brcd_switch;/* attached peer is brcd switch */ - bfa_status_t status; /* login status */ - u16 pdusz; /* max receive PDU size */ - u16 pr_bbcred; /* BB_CREDIT from peer */ - u8 lsrjt_rsn; /* LSRJT reason */ - u8 lsrjt_expl; /* LSRJT explanation */ - wwn_t pwwn; /* port wwn of lport */ - wwn_t nwwn; /* node wwn of lport */ - wwn_t pr_pwwn; /* port wwn of lport peer */ - wwn_t pr_nwwn; /* node wwn of lport peer */ - mac_t lp_mac; /* fpma/spma MAC for lport */ - mac_t fcf_mac; /* FCF MAC of lport */ - struct bfa_reqq_wait_s wqe; /* request wait queue element */ - void *uarg; /* user callback arg */ - struct bfa_cb_qe_s hcb_qe; /* comp: callback qelem */ - struct bfi_lps_login_rsp_s *loginrsp; - bfa_eproto_status_t ext_status; -}; - -#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) - -/* - * bfa pport API functions - */ -bfa_status_t bfa_fcport_enable(struct bfa_s *bfa); -bfa_status_t bfa_fcport_disable(struct bfa_s *bfa); -bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa, - enum bfa_pport_speed speed); -enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa); -bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa, - enum bfa_pport_topology topo); -enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa); -bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); -bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); -u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); -bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); -bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); -u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); -u32 bfa_fcport_mypid(struct bfa_s *bfa); -u8 
bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); -bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap); -bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa); -bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap); -void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); -wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); -void bfa_fcport_event_register(struct bfa_s *bfa, - void (*event_cbfn) (void *cbarg, - bfa_pport_event_t event), void *event_cbarg); -bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); -void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); -void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); -bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, - enum bfa_pport_speed speed); -enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); - -void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); -void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status); -void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, - bfa_boolean_t link_e2e_beacon); -void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event); -void bfa_fcport_qos_get_attr(struct bfa_s *bfa, - struct bfa_qos_attr_s *qos_attr); -void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, - struct bfa_qos_vc_attr_s *qos_vc_attr); -bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa, - union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg); -bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, - void *cbarg); -bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, - union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg); -bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, - void *cbarg); - -bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); -bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); -bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, - union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg); -bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, - void *cbarg); -bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); - -/* - * bfa rport API functions - */ -struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv); -void bfa_rport_delete(struct bfa_rport_s *rport); -void bfa_rport_online(struct bfa_rport_s *rport, - struct bfa_rport_info_s *rport_info); -void bfa_rport_offline(struct bfa_rport_s *rport); -void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed); -void bfa_rport_get_stats(struct bfa_rport_s *rport, - struct bfa_rport_hal_stats_s *stats); -void bfa_rport_clear_stats(struct bfa_rport_s *rport); -void bfa_cb_rport_online(void *rport); -void bfa_cb_rport_offline(void *rport); -void bfa_cb_rport_qos_scn_flowid(void *rport, - struct bfa_rport_qos_attr_s old_qos_attr, - struct bfa_rport_qos_attr_s new_qos_attr); -void bfa_cb_rport_qos_scn_prio(void *rport, - struct bfa_rport_qos_attr_s old_qos_attr, - struct bfa_rport_qos_attr_s new_qos_attr); -void bfa_rport_get_qos_attr(struct bfa_rport_s *rport, - struct bfa_rport_qos_attr_s *qos_attr); - -/* - * bfa fcxp API functions - */ -struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa, - int nreq_sgles, int nrsp_sgles, - bfa_fcxp_get_sgaddr_t get_req_sga, - bfa_fcxp_get_sglen_t get_req_sglen, - bfa_fcxp_get_sgaddr_t get_rsp_sga, - bfa_fcxp_get_sglen_t get_rsp_sglen); -void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct 
bfa_fcxp_wqe_s *wqe, - bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *cbarg); -void bfa_fcxp_walloc_cancel(struct bfa_s *bfa, - struct bfa_fcxp_wqe_s *wqe); -void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp); - -void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp); -void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp); - -void bfa_fcxp_free(struct bfa_fcxp_s *fcxp); - -void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, - struct bfa_rport_s *rport, u16 vf_id, u8 lp_tag, - bfa_boolean_t cts, enum fc_cos cos, - u32 reqlen, struct fchs_s *fchs, - bfa_cb_fcxp_send_t cbfn, - void *cbarg, - u32 rsp_maxlen, u8 rsp_timeout); -bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp); -u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp); -u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa); - -static inline void * -bfa_uf_get_frmbuf(struct bfa_uf_s *uf) -{ - return uf->data_ptr; -} - -static inline u16 -bfa_uf_get_frmlen(struct bfa_uf_s *uf) -{ - return uf->data_len; -} - -/** - * Callback prototype for unsolicited frame receive handler. - * - * @param[in] cbarg callback arg for receive handler - * @param[in] uf unsolicited frame descriptor - * - * @return None - */ -typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf); - -/* - * bfa uf API functions - */ -void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, - void *cbarg); -void bfa_uf_free(struct bfa_uf_s *uf); - -/** - * bfa lport service api - */ - -u32 bfa_lps_get_max_vport(struct bfa_s *bfa); -struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); -void bfa_lps_delete(struct bfa_lps_s *lps); -void bfa_lps_discard(struct bfa_lps_s *lps); -void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, - wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en); -void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, - wwn_t nwwn); -void bfa_lps_flogo(struct bfa_lps_s *lps); -void bfa_lps_fdisclogo(struct bfa_lps_s *lps); -u8 bfa_lps_get_tag(struct bfa_lps_s *lps); -bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps); -bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps); -bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps); -bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps); -bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps); -u32 bfa_lps_get_pid(struct bfa_lps_s *lps); -u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); -u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps); -wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps); -wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); -u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); -u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); -mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps); -void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); -void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); -void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); -void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); -void bfa_cb_lps_cvl_event(void *bfad, void *uarg); - -#endif /* __BFA_SVC_H__ */ - diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h deleted file mode 100644 index f71087448222..000000000000 --- a/drivers/scsi/bfa/include/bfa_timer.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_TIMER_H__ -#define __BFA_TIMER_H__ - -#include -#include - -struct bfa_s; - -typedef void (*bfa_timer_cbfn_t)(void *); - -/** - * BFA timer data structure - */ -struct bfa_timer_s { - struct list_head qe; - bfa_timer_cbfn_t timercb; - void *arg; - int timeout; /**< in millisecs. */ -}; - -/** - * Timer module structure - */ -struct bfa_timer_mod_s { - struct list_head timer_q; -}; - -#define BFA_TIMER_FREQ 200 /**< specified in millisecs */ - -void bfa_timer_beat(struct bfa_timer_mod_s *mod); -void bfa_timer_init(struct bfa_timer_mod_s *mod); -void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, - bfa_timer_cbfn_t timercb, void *arg, - unsigned int timeout); -void bfa_timer_stop(struct bfa_timer_s *timer); - -#endif /* __BFA_TIMER_H__ */ diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h deleted file mode 100644 index a550e80cabd2..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi.h +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFI_H__ -#define __BFI_H__ - -#include -#include - -#pragma pack(1) - -/** - * Msg header common to all msgs - */ -struct bfi_mhdr_s { - u8 msg_class; /* @ref bfi_mclass_t */ - u8 msg_id; /* msg opcode with in the class */ - union { - struct { - u8 rsvd; - u8 lpu_id; /* msg destination */ - } h2i; - u16 i2htok; /* token in msgs to host */ - } mtag; -}; - -#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \ - (_mh).msg_class = (_mc); \ - (_mh).msg_id = (_op); \ - (_mh).mtag.h2i.lpu_id = (_lpuid); \ -} while (0) - -#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ - (_mh).msg_class = (_mc); \ - (_mh).msg_id = (_op); \ - (_mh).mtag.i2htok = (_i2htok); \ -} while (0) - -/* - * Message opcodes: 0-127 to firmware, 128-255 to host - */ -#define BFI_I2H_OPCODE_BASE 128 -#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) - -/** - **************************************************************************** - * - * Scatter Gather Element and Page definition - * - **************************************************************************** - */ - -#define BFI_SGE_INLINE 1 -#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) - -/** - * SG Flags - */ -enum { - BFI_SGE_DATA = 0, /* data address, not last */ - BFI_SGE_DATA_CPL = 1, /* data addr, last in current page */ - BFI_SGE_DATA_LAST = 3, /* data address, last */ - BFI_SGE_LINK = 2, /* link address */ - BFI_SGE_PGDLEN = 2, /* cumulative data length for page */ -}; - -/** - * DMA addresses - */ -union bfi_addr_u { - struct { - u32 addr_lo; - u32 addr_hi; - } a32; -}; - -/** - * Scatter Gather Element - */ -struct bfi_sge_s { -#ifdef __BIGENDIAN - u32 flags:2, - rsvd:2, - sg_len:28; -#else - u32 sg_len:28, - rsvd:2, - flags:2; -#endif - union bfi_addr_u sga; -}; - -/** - * Scatter Gather Page - */ -#define BFI_SGPG_DATA_SGES 7 -#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1) -#define BFI_SGPG_RSVD_WD_LEN 8 -struct bfi_sgpg_s { - struct bfi_sge_s sges[BFI_SGPG_SGES_MAX]; - u32 rsvd[BFI_SGPG_RSVD_WD_LEN]; -}; - -/* - * Large Message structure - 128 Bytes size Msgs - */ -#define BFI_LMSG_SZ 128 -#define BFI_LMSG_PL_WSZ \ - ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr_s)) / 4) - -struct bfi_msg_s { - struct bfi_mhdr_s mhdr; - u32 pl[BFI_LMSG_PL_WSZ]; -}; - -/** - * Mailbox message structure - */ -#define BFI_MBMSG_SZ 7 -struct bfi_mbmsg_s { - struct bfi_mhdr_s mh; - u32 pl[BFI_MBMSG_SZ]; -}; - -/** - * Message Classes - */ -enum bfi_mclass { - BFI_MC_IOC = 1, /* IO Controller (IOC) */ - BFI_MC_DIAG = 2, /* Diagnostic Msgs */ - BFI_MC_FLASH = 3, /* Flash message class */ - BFI_MC_CEE = 4, /* CEE */ - BFI_MC_FCPORT = 5, /* FC port */ - BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ - BFI_MC_LL = 7, /* Link Layer */ - BFI_MC_UF = 8, /* Unsolicited frame receive */ - BFI_MC_FCXP = 9, /* FC Transport */ - BFI_MC_LPS = 10, /* lport fc login services */ - BFI_MC_RPORT = 11, /* Remote port */ - BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */ - BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */ - BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */ - BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */ - BFI_MC_IOIM = 16, /* IO (Initiator mode) */ - BFI_MC_IOIM_IOCOM = 17, /* good IO completion */ - BFI_MC_TSKIM = 18, /* Initiator Task management */ - BFI_MC_SBOOT = 19, /* SAN boot services */ - BFI_MC_IPFC = 20, /* IP over FC Msgs */ - BFI_MC_PORT = 21, /* Physical port */ - BFI_MC_MAX = 32 -}; - -#define BFI_IOC_MAX_CQS 4 -#define BFI_IOC_MAX_CQS_ASIC 8 -#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ - -#pragma pack() - -#endif /* __BFI_H__ */ - diff 
--git a/drivers/scsi/bfa/include/bfi/bfi_boot.h b/drivers/scsi/bfa/include/bfi/bfi_boot.h deleted file mode 100644 index 5955afe7d108..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_boot.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -/* - * bfi_boot.h - */ - -#ifndef __BFI_BOOT_H__ -#define __BFI_BOOT_H__ - -#define BFI_BOOT_TYPE_OFF 8 -#define BFI_BOOT_PARAM_OFF 12 - -#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */ -#define BFI_BOOT_TYPE_FLASH 1 -#define BFI_BOOT_TYPE_MEMTEST 2 - -#define BFI_BOOT_MEMTEST_RES_ADDR 0x900 -#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 - -#endif diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h deleted file mode 100644 index a51ee61ddb19..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* - * bfi_cbreg.h crossbow host block register definitions - * - * !!! Do not edit. Auto generated. !!! 
- */ - -#ifndef __BFI_CBREG_H__ -#define __BFI_CBREG_H__ - - -#define HOSTFN0_INT_STATUS 0x00014000 -#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN0_INT_STATUS_LVL_SH 20 -#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH) -#define __HOSTFN0_INT_STATUS_P 0x000fffff -#define HOSTFN0_INT_MSK 0x00014004 -#define HOST_PAGE_NUM_FN0 0x00014008 -#define __HOST_PAGE_NUM_FN 0x000001ff -#define HOSTFN1_INT_STATUS 0x00014100 -#define __HOSTFN1_INT_STAT_LVL_MK 0x00f00000 -#define __HOSTFN1_INT_STAT_LVL_SH 20 -#define __HOSTFN1_INT_STAT_LVL(_v) ((_v) << __HOSTFN1_INT_STAT_LVL_SH) -#define __HOSTFN1_INT_STAT_P 0x000fffff -#define HOSTFN1_INT_MSK 0x00014104 -#define HOST_PAGE_NUM_FN1 0x00014108 -#define APP_PLL_400_CTL_REG 0x00014204 -#define __P_400_PLL_LOCK 0x80000000 -#define __APP_PLL_400_SRAM_USE_100MHZ 0x00100000 -#define __APP_PLL_400_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_400_RESET_TIMER_SH 17 -#define __APP_PLL_400_RESET_TIMER(_v) ((_v) << __APP_PLL_400_RESET_TIMER_SH) -#define __APP_PLL_400_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_400_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_400_CNTLMT0_1_SH 14 -#define __APP_PLL_400_CNTLMT0_1(_v) ((_v) << __APP_PLL_400_CNTLMT0_1_SH) -#define __APP_PLL_400_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_400_JITLMT0_1_SH 12 -#define __APP_PLL_400_JITLMT0_1(_v) ((_v) << __APP_PLL_400_JITLMT0_1_SH) -#define __APP_PLL_400_HREF 0x00000800 -#define __APP_PLL_400_HDIV 0x00000400 -#define __APP_PLL_400_P0_1_MK 0x00000300 -#define __APP_PLL_400_P0_1_SH 8 -#define __APP_PLL_400_P0_1(_v) ((_v) << __APP_PLL_400_P0_1_SH) -#define __APP_PLL_400_Z0_2_MK 0x000000e0 -#define __APP_PLL_400_Z0_2_SH 5 -#define __APP_PLL_400_Z0_2(_v) ((_v) << __APP_PLL_400_Z0_2_SH) -#define __APP_PLL_400_RSEL200500 0x00000010 -#define __APP_PLL_400_ENARST 0x00000008 -#define __APP_PLL_400_BYPASS 0x00000004 -#define __APP_PLL_400_LRESETN 0x00000002 -#define __APP_PLL_400_ENABLE 0x00000001 -#define APP_PLL_212_CTL_REG 0x00014208 -#define __P_212_PLL_LOCK 0x80000000 -#define __APP_PLL_212_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_212_RESET_TIMER_SH 17 -#define __APP_PLL_212_RESET_TIMER(_v) ((_v) << __APP_PLL_212_RESET_TIMER_SH) -#define __APP_PLL_212_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_212_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_212_CNTLMT0_1_SH 14 -#define __APP_PLL_212_CNTLMT0_1(_v) ((_v) << __APP_PLL_212_CNTLMT0_1_SH) -#define __APP_PLL_212_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_212_JITLMT0_1_SH 12 -#define __APP_PLL_212_JITLMT0_1(_v) ((_v) << __APP_PLL_212_JITLMT0_1_SH) -#define __APP_PLL_212_HREF 0x00000800 -#define __APP_PLL_212_HDIV 0x00000400 -#define __APP_PLL_212_P0_1_MK 0x00000300 -#define __APP_PLL_212_P0_1_SH 8 -#define __APP_PLL_212_P0_1(_v) ((_v) << __APP_PLL_212_P0_1_SH) -#define __APP_PLL_212_Z0_2_MK 0x000000e0 -#define __APP_PLL_212_Z0_2_SH 5 -#define __APP_PLL_212_Z0_2(_v) ((_v) << __APP_PLL_212_Z0_2_SH) -#define __APP_PLL_212_RSEL200500 0x00000010 -#define __APP_PLL_212_ENARST 0x00000008 -#define __APP_PLL_212_BYPASS 0x00000004 -#define __APP_PLL_212_LRESETN 0x00000002 -#define __APP_PLL_212_ENABLE 0x00000001 -#define HOST_SEM0_REG 0x00014230 -#define __HOST_SEMAPHORE 0x00000001 -#define HOST_SEM1_REG 0x00014234 -#define HOST_SEM2_REG 0x00014238 -#define HOST_SEM3_REG 0x0001423c -#define HOST_SEM0_INFO_REG 0x00014240 -#define HOST_SEM1_INFO_REG 0x00014244 -#define HOST_SEM2_INFO_REG 0x00014248 -#define HOST_SEM3_INFO_REG 0x0001424c -#define HOSTFN0_LPU0_CMD_STAT 0x00019000 -#define __HOSTFN0_LPU0_MBOX_INFO_MK 
0xfffffffe -#define __HOSTFN0_LPU0_MBOX_INFO_SH 1 -#define __HOSTFN0_LPU0_MBOX_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH) -#define __HOSTFN0_LPU0_MBOX_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN0_CMD_STAT 0x00019008 -#define __LPU0_HOSTFN0_MBOX_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN0_MBOX_INFO_SH 1 -#define __LPU0_HOSTFN0_MBOX_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH) -#define __LPU0_HOSTFN0_MBOX_CMD_STATUS 0x00000001 -#define HOSTFN1_LPU1_CMD_STAT 0x00019014 -#define __HOSTFN1_LPU1_MBOX_INFO_MK 0xfffffffe -#define __HOSTFN1_LPU1_MBOX_INFO_SH 1 -#define __HOSTFN1_LPU1_MBOX_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH) -#define __HOSTFN1_LPU1_MBOX_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN1_CMD_STAT 0x0001901c -#define __LPU1_HOSTFN1_MBOX_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN1_MBOX_INFO_SH 1 -#define __LPU1_HOSTFN1_MBOX_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH) -#define __LPU1_HOSTFN1_MBOX_CMD_STATUS 0x00000001 -#define CPE_Q0_DEPTH 0x00010014 -#define CPE_Q0_PI 0x0001001c -#define CPE_Q0_CI 0x00010020 -#define CPE_Q1_DEPTH 0x00010034 -#define CPE_Q1_PI 0x0001003c -#define CPE_Q1_CI 0x00010040 -#define CPE_Q2_DEPTH 0x00010054 -#define CPE_Q2_PI 0x0001005c -#define CPE_Q2_CI 0x00010060 -#define CPE_Q3_DEPTH 0x00010074 -#define CPE_Q3_PI 0x0001007c -#define CPE_Q3_CI 0x00010080 -#define CPE_Q4_DEPTH 0x00010094 -#define CPE_Q4_PI 0x0001009c -#define CPE_Q4_CI 0x000100a0 -#define CPE_Q5_DEPTH 0x000100b4 -#define CPE_Q5_PI 0x000100bc -#define CPE_Q5_CI 0x000100c0 -#define CPE_Q6_DEPTH 0x000100d4 -#define CPE_Q6_PI 0x000100dc -#define CPE_Q6_CI 0x000100e0 -#define CPE_Q7_DEPTH 0x000100f4 -#define CPE_Q7_PI 0x000100fc -#define CPE_Q7_CI 0x00010100 -#define RME_Q0_DEPTH 0x00011014 -#define RME_Q0_PI 0x0001101c -#define RME_Q0_CI 0x00011020 -#define RME_Q1_DEPTH 0x00011034 -#define RME_Q1_PI 0x0001103c -#define RME_Q1_CI 0x00011040 -#define RME_Q2_DEPTH 0x00011054 -#define RME_Q2_PI 0x0001105c -#define RME_Q2_CI 0x00011060 -#define RME_Q3_DEPTH 0x00011074 -#define RME_Q3_PI 0x0001107c -#define RME_Q3_CI 0x00011080 -#define RME_Q4_DEPTH 0x00011094 -#define RME_Q4_PI 0x0001109c -#define RME_Q4_CI 0x000110a0 -#define RME_Q5_DEPTH 0x000110b4 -#define RME_Q5_PI 0x000110bc -#define RME_Q5_CI 0x000110c0 -#define RME_Q6_DEPTH 0x000110d4 -#define RME_Q6_PI 0x000110dc -#define RME_Q6_CI 0x000110e0 -#define RME_Q7_DEPTH 0x000110f4 -#define RME_Q7_PI 0x000110fc -#define RME_Q7_CI 0x00011100 -#define PSS_CTL_REG 0x00018800 -#define __PSS_I2C_CLK_DIV_MK 0x00030000 -#define __PSS_I2C_CLK_DIV_SH 16 -#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) -#define __PSS_LMEM_INIT_DONE 0x00001000 -#define __PSS_LMEM_RESET 0x00000200 -#define __PSS_LMEM_INIT_EN 0x00000100 -#define __PSS_LPU1_RESET 0x00000002 -#define __PSS_LPU0_RESET 0x00000001 -#define PSS_ERR_STATUS_REG 0x00018810 -#define __PSS_LMEM1_CORR_ERR 0x00000800 -#define __PSS_LMEM0_CORR_ERR 0x00000400 -#define __PSS_LMEM1_UNCORR_ERR 0x00000200 -#define __PSS_LMEM0_UNCORR_ERR 0x00000100 -#define __PSS_BAL_PERR 0x00000080 -#define __PSS_DIP_IF_ERR 0x00000040 -#define __PSS_IOH_IF_ERR 0x00000020 -#define __PSS_TDS_IF_ERR 0x00000010 -#define __PSS_RDS_IF_ERR 0x00000008 -#define __PSS_SGM_IF_ERR 0x00000004 -#define __PSS_LPU1_RAM_ERR 0x00000002 -#define __PSS_LPU0_RAM_ERR 0x00000001 -#define ERR_SET_REG 0x00018818 -#define __PSS_ERR_STATUS_SET 0x00000fff - -/* - * These definitions are either in error/missing in spec. Its auto-generated - * from hard coded values in regparse.pl. 
- */ -#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c -#define __EMPHPOST_AT_4G_SH_FIX 0x00000002 -#define __EMPHPRE_AT_4G_FIX 0x00000003 -#define __SFP_TXRATE_EN_FIX 0x00000100 -#define __SFP_RXRATE_EN_FIX 0x00000080 - - -/* - * These register definitions are auto-generated from hard coded values - * in regparse.pl. - */ -#define HOSTFN0_LPU_MBOX0_0 0x00019200 -#define HOSTFN1_LPU_MBOX0_8 0x00019260 -#define LPU_HOSTFN0_MBOX0_0 0x00019280 -#define LPU_HOSTFN1_MBOX0_8 0x000192e0 - - -/* - * These register mapping definitions are auto-generated from mapping tables - * in regparse.pl. - */ -#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG -#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG -#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG -#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG -#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG - -#define CPE_Q_DEPTH(__n) \ - (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH)) -#define CPE_Q_PI(__n) \ - (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI)) -#define CPE_Q_CI(__n) \ - (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI)) -#define RME_Q_DEPTH(__n) \ - (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH)) -#define RME_Q_PI(__n) \ - (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI)) -#define RME_Q_CI(__n) \ - (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI)) - -#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define CPE_Q_MASK(__q) ((__q) & 0x3) -#define RME_Q_MASK(__q) ((__q) & 0x3) - - -/* - * PCI MSI-X vector defines - */ -enum { - BFA_MSIX_CPE_Q0 = 0, - BFA_MSIX_CPE_Q1 = 1, - BFA_MSIX_CPE_Q2 = 2, - BFA_MSIX_CPE_Q3 = 3, - BFA_MSIX_CPE_Q4 = 4, - BFA_MSIX_CPE_Q5 = 5, - BFA_MSIX_CPE_Q6 = 6, - BFA_MSIX_CPE_Q7 = 7, - BFA_MSIX_RME_Q0 = 8, - BFA_MSIX_RME_Q1 = 9, - BFA_MSIX_RME_Q2 = 10, - BFA_MSIX_RME_Q3 = 11, - BFA_MSIX_RME_Q4 = 12, - BFA_MSIX_RME_Q5 = 13, - BFA_MSIX_RME_Q6 = 14, - BFA_MSIX_RME_Q7 = 15, - BFA_MSIX_ERR_EMC = 16, - BFA_MSIX_ERR_LPU0 = 17, - BFA_MSIX_ERR_LPU1 = 18, - BFA_MSIX_ERR_PSS = 19, - BFA_MSIX_MBOX_LPU0 = 20, - BFA_MSIX_MBOX_LPU1 = 21, - BFA_MSIX_CB_MAX = 22, -}; - -/* - * And corresponding host interrupt status bit field defines - */ -#define __HFN_INT_CPE_Q0 0x00000001U -#define __HFN_INT_CPE_Q1 0x00000002U -#define __HFN_INT_CPE_Q2 0x00000004U -#define __HFN_INT_CPE_Q3 0x00000008U -#define __HFN_INT_CPE_Q4 0x00000010U -#define __HFN_INT_CPE_Q5 0x00000020U -#define __HFN_INT_CPE_Q6 0x00000040U -#define __HFN_INT_CPE_Q7 0x00000080U -#define __HFN_INT_RME_Q0 0x00000100U -#define __HFN_INT_RME_Q1 0x00000200U -#define __HFN_INT_RME_Q2 0x00000400U -#define __HFN_INT_RME_Q3 0x00000800U -#define __HFN_INT_RME_Q4 0x00001000U -#define __HFN_INT_RME_Q5 0x00002000U -#define __HFN_INT_RME_Q6 0x00004000U -#define __HFN_INT_RME_Q7 0x00008000U -#define __HFN_INT_ERR_EMC 0x00010000U -#define __HFN_INT_ERR_LPU0 0x00020000U -#define __HFN_INT_ERR_LPU1 0x00040000U -#define __HFN_INT_ERR_PSS 0x00080000U -#define __HFN_INT_MBOX_LPU0 0x00100000U -#define __HFN_INT_MBOX_LPU1 0x00200000U -#define __HFN_INT_MBOX1_LPU0 0x00400000U -#define __HFN_INT_MBOX1_LPU1 0x00800000U -#define __HFN_INT_CPE_MASK 0x000000ffU -#define __HFN_INT_RME_MASK 0x0000ff00U - - -/* - * crossbow memory map. 
- */ -#define PSS_SMEM_PAGE_START 0x8000 -#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) -#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) - -/* - * End of crossbow memory map - */ - - -#endif /* __BFI_CBREG_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_cee.h b/drivers/scsi/bfa/include/bfi/bfi_cee.h deleted file mode 100644 index 0970596583ea..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_cee.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -/** - * Copyright (c) 2006-2009 Brocade Communications Systems, Inc. - * All rights reserved. - * - * bfi_dcbx.h BFI Interface (Mailbox commands and related structures) - * between host driver and DCBX/LLDP firmware module. - * -**/ - -#ifndef __BFI_CEE_H__ -#define __BFI_CEE_H__ - -#include - -#pragma pack(1) - - -enum bfi_cee_h2i_msgs_e { - BFI_CEE_H2I_GET_CFG_REQ = 1, - BFI_CEE_H2I_RESET_STATS = 2, - BFI_CEE_H2I_GET_STATS_REQ = 3, -}; - - -enum bfi_cee_i2h_msgs_e { - BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1), - BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2), - BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3), -}; - - -/* Data structures */ - -/* - * BFI_CEE_H2I_RESET_STATS - */ -struct bfi_lldp_reset_stats_s { - struct bfi_mhdr_s mh; -}; - -/* - * BFI_CEE_H2I_RESET_STATS - */ -struct bfi_cee_reset_stats_s { - struct bfi_mhdr_s mh; -}; - -/* - * BFI_CEE_H2I_GET_CFG_REQ - */ -struct bfi_cee_get_req_s { - struct bfi_mhdr_s mh; - union bfi_addr_u dma_addr; -}; - - -/* - * BFI_CEE_I2H_GET_CFG_RSP - */ -struct bfi_cee_get_rsp_s { - struct bfi_mhdr_s mh; - u8 cmd_status; - u8 rsvd[3]; -}; - -/* - * BFI_CEE_H2I_GET_STATS_REQ - */ -struct bfi_cee_stats_req_s { - struct bfi_mhdr_s mh; - union bfi_addr_u dma_addr; -}; - - -/* - * BFI_CEE_I2H_GET_STATS_RSP - */ -struct bfi_cee_stats_rsp_s { - struct bfi_mhdr_s mh; - u8 cmd_status; - u8 rsvd[3]; -}; - - - -union bfi_cee_h2i_msg_u { - struct bfi_mhdr_s mh; - struct bfi_cee_get_req_s get_req; - struct bfi_cee_stats_req_s stats_req; -}; - - -union bfi_cee_i2h_msg_u { - struct bfi_mhdr_s mh; - struct bfi_cee_get_rsp_s get_rsp; - struct bfi_cee_stats_rsp_s stats_rsp; -}; - -#pragma pack() - - -#endif /* __BFI_CEE_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h deleted file mode 100644 index c0ef5a93b797..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h +++ /dev/null @@ -1,640 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* - * bfi_ctreg.h catapult host block register definitions - * - * !!! Do not edit. Auto generated. !!! - */ - -#ifndef __BFI_CTREG_H__ -#define __BFI_CTREG_H__ - - -#define HOSTFN0_LPU_MBOX0_0 0x00019200 -#define HOSTFN1_LPU_MBOX0_8 0x00019260 -#define LPU_HOSTFN0_MBOX0_0 0x00019280 -#define LPU_HOSTFN1_MBOX0_8 0x000192e0 -#define HOSTFN2_LPU_MBOX0_0 0x00019400 -#define HOSTFN3_LPU_MBOX0_8 0x00019460 -#define LPU_HOSTFN2_MBOX0_0 0x00019480 -#define LPU_HOSTFN3_MBOX0_8 0x000194e0 -#define HOSTFN0_INT_STATUS 0x00014000 -#define __HOSTFN0_HALT_OCCURRED 0x01000000 -#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN0_INT_STATUS_LVL_SH 20 -#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH) -#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN0_INT_STATUS_P_SH 16 -#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH) -#define __HOSTFN0_INT_STATUS_F 0x0000ffff -#define HOSTFN0_INT_MSK 0x00014004 -#define HOST_PAGE_NUM_FN0 0x00014008 -#define __HOST_PAGE_NUM_FN 0x000001ff -#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c -#define __MSIX_ERR_INDEX_FN 0x000001ff -#define HOSTFN1_INT_STATUS 0x00014100 -#define __HOSTFN1_HALT_OCCURRED 0x01000000 -#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN1_INT_STATUS_LVL_SH 20 -#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH) -#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN1_INT_STATUS_P_SH 16 -#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH) -#define __HOSTFN1_INT_STATUS_F 0x0000ffff -#define HOSTFN1_INT_MSK 0x00014104 -#define HOST_PAGE_NUM_FN1 0x00014108 -#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c -#define APP_PLL_425_CTL_REG 0x00014204 -#define __P_425_PLL_LOCK 0x80000000 -#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000 -#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_425_RESET_TIMER_SH 17 -#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH) -#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_425_CNTLMT0_1_SH 14 -#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH) -#define __APP_PLL_425_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_425_JITLMT0_1_SH 12 -#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH) -#define __APP_PLL_425_HREF 0x00000800 -#define __APP_PLL_425_HDIV 0x00000400 -#define __APP_PLL_425_P0_1_MK 0x00000300 -#define __APP_PLL_425_P0_1_SH 8 -#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH) -#define __APP_PLL_425_Z0_2_MK 0x000000e0 -#define __APP_PLL_425_Z0_2_SH 5 -#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH) -#define __APP_PLL_425_RSEL200500 0x00000010 -#define __APP_PLL_425_ENARST 0x00000008 -#define __APP_PLL_425_BYPASS 0x00000004 -#define __APP_PLL_425_LRESETN 0x00000002 -#define __APP_PLL_425_ENABLE 0x00000001 -#define APP_PLL_312_CTL_REG 0x00014208 -#define __P_312_PLL_LOCK 0x80000000 -#define __ENABLE_MAC_AHB_1 0x00800000 -#define __ENABLE_MAC_AHB_0 0x00400000 -#define 
__ENABLE_MAC_1 0x00200000 -#define __ENABLE_MAC_0 0x00100000 -#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000 -#define __APP_PLL_312_RESET_TIMER_SH 17 -#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH) -#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000 -#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000 -#define __APP_PLL_312_CNTLMT0_1_SH 14 -#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH) -#define __APP_PLL_312_JITLMT0_1_MK 0x00003000 -#define __APP_PLL_312_JITLMT0_1_SH 12 -#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH) -#define __APP_PLL_312_HREF 0x00000800 -#define __APP_PLL_312_HDIV 0x00000400 -#define __APP_PLL_312_P0_1_MK 0x00000300 -#define __APP_PLL_312_P0_1_SH 8 -#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH) -#define __APP_PLL_312_Z0_2_MK 0x000000e0 -#define __APP_PLL_312_Z0_2_SH 5 -#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH) -#define __APP_PLL_312_RSEL200500 0x00000010 -#define __APP_PLL_312_ENARST 0x00000008 -#define __APP_PLL_312_BYPASS 0x00000004 -#define __APP_PLL_312_LRESETN 0x00000002 -#define __APP_PLL_312_ENABLE 0x00000001 -#define MBIST_CTL_REG 0x00014220 -#define __EDRAM_BISTR_START 0x00000004 -#define __MBIST_RESET 0x00000002 -#define __MBIST_START 0x00000001 -#define MBIST_STAT_REG 0x00014224 -#define __EDRAM_BISTR_STATUS 0x00000008 -#define __EDRAM_BISTR_DONE 0x00000004 -#define __MEM_BIT_STATUS 0x00000002 -#define __MBIST_DONE 0x00000001 -#define HOST_SEM0_REG 0x00014230 -#define __HOST_SEMAPHORE 0x00000001 -#define HOST_SEM1_REG 0x00014234 -#define HOST_SEM2_REG 0x00014238 -#define HOST_SEM3_REG 0x0001423c -#define HOST_SEM0_INFO_REG 0x00014240 -#define HOST_SEM1_INFO_REG 0x00014244 -#define HOST_SEM2_INFO_REG 0x00014248 -#define HOST_SEM3_INFO_REG 0x0001424c -#define ETH_MAC_SER_REG 0x00014288 -#define __APP_EMS_CKBUFAMPIN 0x00000020 -#define __APP_EMS_REFCLKSEL 0x00000010 -#define __APP_EMS_CMLCKSEL 0x00000008 -#define __APP_EMS_REFCKBUFEN2 0x00000004 -#define __APP_EMS_REFCKBUFEN1 0x00000002 -#define __APP_EMS_CHANNEL_SEL 0x00000001 -#define HOSTFN2_INT_STATUS 0x00014300 -#define __HOSTFN2_HALT_OCCURRED 0x01000000 -#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN2_INT_STATUS_LVL_SH 20 -#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH) -#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN2_INT_STATUS_P_SH 16 -#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH) -#define __HOSTFN2_INT_STATUS_F 0x0000ffff -#define HOSTFN2_INT_MSK 0x00014304 -#define HOST_PAGE_NUM_FN2 0x00014308 -#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c -#define HOSTFN3_INT_STATUS 0x00014400 -#define __HALT_OCCURRED 0x01000000 -#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000 -#define __HOSTFN3_INT_STATUS_LVL_SH 20 -#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH) -#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000 -#define __HOSTFN3_INT_STATUS_P_SH 16 -#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH) -#define __HOSTFN3_INT_STATUS_F 0x0000ffff -#define HOSTFN3_INT_MSK 0x00014404 -#define HOST_PAGE_NUM_FN3 0x00014408 -#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c -#define FNC_ID_REG 0x00014600 -#define __FUNCTION_NUMBER 0x00000007 -#define FNC_PERS_REG 0x00014604 -#define __F3_FUNCTION_ACTIVE 0x80000000 -#define __F3_FUNCTION_MODE 0x40000000 -#define __F3_PORT_MAP_MK 0x30000000 -#define __F3_PORT_MAP_SH 28 -#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) -#define __F3_VM_MODE 
0x08000000 -#define __F3_INTX_STATUS_MK 0x07000000 -#define __F3_INTX_STATUS_SH 24 -#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH) -#define __F2_FUNCTION_ACTIVE 0x00800000 -#define __F2_FUNCTION_MODE 0x00400000 -#define __F2_PORT_MAP_MK 0x00300000 -#define __F2_PORT_MAP_SH 20 -#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) -#define __F2_VM_MODE 0x00080000 -#define __F2_INTX_STATUS_MK 0x00070000 -#define __F2_INTX_STATUS_SH 16 -#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) -#define __F1_FUNCTION_ACTIVE 0x00008000 -#define __F1_FUNCTION_MODE 0x00004000 -#define __F1_PORT_MAP_MK 0x00003000 -#define __F1_PORT_MAP_SH 12 -#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) -#define __F1_VM_MODE 0x00000800 -#define __F1_INTX_STATUS_MK 0x00000700 -#define __F1_INTX_STATUS_SH 8 -#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) -#define __F0_FUNCTION_ACTIVE 0x00000080 -#define __F0_FUNCTION_MODE 0x00000040 -#define __F0_PORT_MAP_MK 0x00000030 -#define __F0_PORT_MAP_SH 4 -#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) -#define __F0_VM_MODE 0x00000008 -#define __F0_INTX_STATUS 0x00000007 -enum { - __F0_INTX_STATUS_MSIX = 0x0, - __F0_INTX_STATUS_INTA = 0x1, - __F0_INTX_STATUS_INTB = 0x2, - __F0_INTX_STATUS_INTC = 0x3, - __F0_INTX_STATUS_INTD = 0x4, -}; -#define OP_MODE 0x0001460c -#define __APP_ETH_CLK_LOWSPEED 0x00000004 -#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 -#define __GLOBAL_FCOE_MODE 0x00000001 -#define HOST_SEM4_REG 0x00014610 -#define HOST_SEM5_REG 0x00014614 -#define HOST_SEM6_REG 0x00014618 -#define HOST_SEM7_REG 0x0001461c -#define HOST_SEM4_INFO_REG 0x00014620 -#define HOST_SEM5_INFO_REG 0x00014624 -#define HOST_SEM6_INFO_REG 0x00014628 -#define HOST_SEM7_INFO_REG 0x0001462c -#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000 -#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH) -#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004 -#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH) -#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008 -#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH) -#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c -#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH) -#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010 -#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH) -#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014 -#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH) -#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018 -#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << 
__LPU0_HOSTFN1_MBOX0_INFO_SH) -#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c -#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH) -#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150 -#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH) -#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154 -#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH) -#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158 -#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH) -#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c -#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH) -#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160 -#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1 -#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH) -#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001 -#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164 -#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe -#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1 -#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH) -#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001 -#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168 -#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe -#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1 -#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH) -#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001 -#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c -#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe -#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1 -#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH) -#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001 -#define FW_INIT_HALT_P0 0x000191ac -#define __FW_INIT_HALT_P 0x00000001 -#define FW_INIT_HALT_P1 0x000191bc -#define CPE_PI_PTR_Q0 0x00038000 -#define __CPE_PI_UNUSED_MK 0xffff0000 -#define __CPE_PI_UNUSED_SH 16 -#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH) -#define __CPE_PI_PTR 0x0000ffff -#define CPE_PI_PTR_Q1 0x00038040 -#define CPE_CI_PTR_Q0 0x00038004 -#define __CPE_CI_UNUSED_MK 0xffff0000 -#define __CPE_CI_UNUSED_SH 16 -#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH) -#define __CPE_CI_PTR 0x0000ffff -#define CPE_CI_PTR_Q1 0x00038044 -#define CPE_DEPTH_Q0 0x00038008 -#define __CPE_DEPTH_UNUSED_MK 0xf8000000 -#define __CPE_DEPTH_UNUSED_SH 27 -#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH) -#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000 -#define __CPE_MSIX_VEC_INDEX_SH 16 -#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH) -#define __CPE_DEPTH 0x0000ffff -#define CPE_DEPTH_Q1 0x00038048 -#define CPE_QCTRL_Q0 0x0003800c -#define __CPE_CTRL_UNUSED30_MK 0xfc000000 -#define __CPE_CTRL_UNUSED30_SH 26 
-#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH) -#define __CPE_FUNC_INT_CTRL_MK 0x03000000 -#define __CPE_FUNC_INT_CTRL_SH 24 -#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH) -enum { - __CPE_FUNC_INT_CTRL_DISABLE = 0x0, - __CPE_FUNC_INT_CTRL_F2NF = 0x1, - __CPE_FUNC_INT_CTRL_3QUART = 0x2, - __CPE_FUNC_INT_CTRL_HALF = 0x3, -}; -#define __CPE_CTRL_UNUSED20_MK 0x00f00000 -#define __CPE_CTRL_UNUSED20_SH 20 -#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH) -#define __CPE_SCI_TH_MK 0x000f0000 -#define __CPE_SCI_TH_SH 16 -#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH) -#define __CPE_CTRL_UNUSED10_MK 0x0000c000 -#define __CPE_CTRL_UNUSED10_SH 14 -#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH) -#define __CPE_ACK_PENDING 0x00002000 -#define __CPE_CTRL_UNUSED40_MK 0x00001c00 -#define __CPE_CTRL_UNUSED40_SH 10 -#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH) -#define __CPE_PCIEID_MK 0x00000300 -#define __CPE_PCIEID_SH 8 -#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH) -#define __CPE_CTRL_UNUSED00_MK 0x000000fe -#define __CPE_CTRL_UNUSED00_SH 1 -#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH) -#define __CPE_ESIZE 0x00000001 -#define CPE_QCTRL_Q1 0x0003804c -#define __CPE_CTRL_UNUSED31_MK 0xfc000000 -#define __CPE_CTRL_UNUSED31_SH 26 -#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH) -#define __CPE_CTRL_UNUSED21_MK 0x00f00000 -#define __CPE_CTRL_UNUSED21_SH 20 -#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH) -#define __CPE_CTRL_UNUSED11_MK 0x0000c000 -#define __CPE_CTRL_UNUSED11_SH 14 -#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH) -#define __CPE_CTRL_UNUSED41_MK 0x00001c00 -#define __CPE_CTRL_UNUSED41_SH 10 -#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH) -#define __CPE_CTRL_UNUSED01_MK 0x000000fe -#define __CPE_CTRL_UNUSED01_SH 1 -#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH) -#define RME_PI_PTR_Q0 0x00038020 -#define __LATENCY_TIME_STAMP_MK 0xffff0000 -#define __LATENCY_TIME_STAMP_SH 16 -#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH) -#define __RME_PI_PTR 0x0000ffff -#define RME_PI_PTR_Q1 0x00038060 -#define RME_CI_PTR_Q0 0x00038024 -#define __DELAY_TIME_STAMP_MK 0xffff0000 -#define __DELAY_TIME_STAMP_SH 16 -#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH) -#define __RME_CI_PTR 0x0000ffff -#define RME_CI_PTR_Q1 0x00038064 -#define RME_DEPTH_Q0 0x00038028 -#define __RME_DEPTH_UNUSED_MK 0xf8000000 -#define __RME_DEPTH_UNUSED_SH 27 -#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH) -#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000 -#define __RME_MSIX_VEC_INDEX_SH 16 -#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH) -#define __RME_DEPTH 0x0000ffff -#define RME_DEPTH_Q1 0x00038068 -#define RME_QCTRL_Q0 0x0003802c -#define __RME_INT_LATENCY_TIMER_MK 0xff000000 -#define __RME_INT_LATENCY_TIMER_SH 24 -#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH) -#define __RME_INT_DELAY_TIMER_MK 0x00ff0000 -#define __RME_INT_DELAY_TIMER_SH 16 -#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH) -#define __RME_INT_DELAY_DISABLE 0x00008000 -#define __RME_DLY_DELAY_DISABLE 0x00004000 -#define __RME_ACK_PENDING 0x00002000 -#define __RME_FULL_INTERRUPT_DISABLE 0x00001000 -#define __RME_CTRL_UNUSED10_MK 0x00000c00 -#define __RME_CTRL_UNUSED10_SH 10 -#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH) -#define 
__RME_PCIEID_MK 0x00000300 -#define __RME_PCIEID_SH 8 -#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH) -#define __RME_CTRL_UNUSED00_MK 0x000000fe -#define __RME_CTRL_UNUSED00_SH 1 -#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH) -#define __RME_ESIZE 0x00000001 -#define RME_QCTRL_Q1 0x0003806c -#define __RME_CTRL_UNUSED11_MK 0x00000c00 -#define __RME_CTRL_UNUSED11_SH 10 -#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH) -#define __RME_CTRL_UNUSED01_MK 0x000000fe -#define __RME_CTRL_UNUSED01_SH 1 -#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH) -#define PSS_CTL_REG 0x00018800 -#define __PSS_I2C_CLK_DIV_MK 0x007f0000 -#define __PSS_I2C_CLK_DIV_SH 16 -#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) -#define __PSS_LMEM_INIT_DONE 0x00001000 -#define __PSS_LMEM_RESET 0x00000200 -#define __PSS_LMEM_INIT_EN 0x00000100 -#define __PSS_LPU1_RESET 0x00000002 -#define __PSS_LPU0_RESET 0x00000001 -#define PSS_ERR_STATUS_REG 0x00018810 -#define __PSS_LPU1_TCM_READ_ERR 0x00200000 -#define __PSS_LPU0_TCM_READ_ERR 0x00100000 -#define __PSS_LMEM5_CORR_ERR 0x00080000 -#define __PSS_LMEM4_CORR_ERR 0x00040000 -#define __PSS_LMEM3_CORR_ERR 0x00020000 -#define __PSS_LMEM2_CORR_ERR 0x00010000 -#define __PSS_LMEM1_CORR_ERR 0x00008000 -#define __PSS_LMEM0_CORR_ERR 0x00004000 -#define __PSS_LMEM5_UNCORR_ERR 0x00002000 -#define __PSS_LMEM4_UNCORR_ERR 0x00001000 -#define __PSS_LMEM3_UNCORR_ERR 0x00000800 -#define __PSS_LMEM2_UNCORR_ERR 0x00000400 -#define __PSS_LMEM1_UNCORR_ERR 0x00000200 -#define __PSS_LMEM0_UNCORR_ERR 0x00000100 -#define __PSS_BAL_PERR 0x00000080 -#define __PSS_DIP_IF_ERR 0x00000040 -#define __PSS_IOH_IF_ERR 0x00000020 -#define __PSS_TDS_IF_ERR 0x00000010 -#define __PSS_RDS_IF_ERR 0x00000008 -#define __PSS_SGM_IF_ERR 0x00000004 -#define __PSS_LPU1_RAM_ERR 0x00000002 -#define __PSS_LPU0_RAM_ERR 0x00000001 -#define ERR_SET_REG 0x00018818 -#define __PSS_ERR_STATUS_SET 0x003fffff -#define PMM_1T_RESET_REG_P0 0x0002381c -#define __PMM_1T_RESET_P 0x00000001 -#define PMM_1T_RESET_REG_P1 0x00023c1c -#define HQM_QSET0_RXQ_DRBL_P0 0x00038000 -#define __RXQ0_ADD_VECTORS_P 0x80000000 -#define __RXQ0_STOP_P 0x40000000 -#define __RXQ0_PRD_PTR_P 0x0000ffff -#define HQM_QSET1_RXQ_DRBL_P0 0x00038080 -#define __RXQ1_ADD_VECTORS_P 0x80000000 -#define __RXQ1_STOP_P 0x40000000 -#define __RXQ1_PRD_PTR_P 0x0000ffff -#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000 -#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080 -#define HQM_QSET0_TXQ_DRBL_P0 0x00038020 -#define __TXQ0_ADD_VECTORS_P 0x80000000 -#define __TXQ0_STOP_P 0x40000000 -#define __TXQ0_PRD_PTR_P 0x0000ffff -#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0 -#define __TXQ1_ADD_VECTORS_P 0x80000000 -#define __TXQ1_STOP_P 0x40000000 -#define __TXQ1_PRD_PTR_P 0x0000ffff -#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020 -#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0 -#define HQM_QSET0_IB_DRBL_1_P0 0x00038040 -#define __IB1_0_ACK_P 0x80000000 -#define __IB1_0_DISABLE_P 0x40000000 -#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0 -#define __IB1_1_ACK_P 0x80000000 -#define __IB1_1_DISABLE_P 0x40000000 -#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040 -#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0 -#define HQM_QSET0_IB_DRBL_2_P0 0x00038060 -#define __IB2_0_ACK_P 0x80000000 -#define __IB2_0_DISABLE_P 0x40000000 -#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0 -#define __IB2_1_ACK_P 0x80000000 -#define __IB2_1_DISABLE_P 
0x40000000 -#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff -#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060 -#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0 - - -/* - * These definitions are either in error/missing in spec. Its auto-generated - * from hard coded values in regparse.pl. - */ -#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c -#define __EMPHPOST_AT_4G_SH_FIX 0x00000002 -#define __EMPHPRE_AT_4G_FIX 0x00000003 -#define __SFP_TXRATE_EN_FIX 0x00000100 -#define __SFP_RXRATE_EN_FIX 0x00000080 - - -/* - * These register definitions are auto-generated from hard coded values - * in regparse.pl. - */ - - -/* - * These register mapping definitions are auto-generated from mapping tables - * in regparse.pl. - */ -#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG -#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG -#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG -#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG -#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG - -#define CPE_DEPTH_Q(__n) \ - (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) -#define CPE_QCTRL_Q(__n) \ - (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0)) -#define CPE_PI_PTR_Q(__n) \ - (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0)) -#define CPE_CI_PTR_Q(__n) \ - (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0)) -#define RME_DEPTH_Q(__n) \ - (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0)) -#define RME_QCTRL_Q(__n) \ - (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0)) -#define RME_PI_PTR_Q(__n) \ - (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) -#define RME_CI_PTR_Q(__n) \ - (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) -#define HQM_QSET_RXQ_DRBL_P0(__n) \ - (HQM_QSET0_RXQ_DRBL_P0 + (__n) * (HQM_QSET1_RXQ_DRBL_P0 - \ - HQM_QSET0_RXQ_DRBL_P0)) -#define HQM_QSET_TXQ_DRBL_P0(__n) \ - (HQM_QSET0_TXQ_DRBL_P0 + (__n) * (HQM_QSET1_TXQ_DRBL_P0 - \ - HQM_QSET0_TXQ_DRBL_P0)) -#define HQM_QSET_IB_DRBL_1_P0(__n) \ - (HQM_QSET0_IB_DRBL_1_P0 + (__n) * (HQM_QSET1_IB_DRBL_1_P0 - \ - HQM_QSET0_IB_DRBL_1_P0)) -#define HQM_QSET_IB_DRBL_2_P0(__n) \ - (HQM_QSET0_IB_DRBL_2_P0 + (__n) * (HQM_QSET1_IB_DRBL_2_P0 - \ - HQM_QSET0_IB_DRBL_2_P0)) -#define HQM_QSET_RXQ_DRBL_P1(__n) \ - (HQM_QSET0_RXQ_DRBL_P1 + (__n) * (HQM_QSET1_RXQ_DRBL_P1 - \ - HQM_QSET0_RXQ_DRBL_P1)) -#define HQM_QSET_TXQ_DRBL_P1(__n) \ - (HQM_QSET0_TXQ_DRBL_P1 + (__n) * (HQM_QSET1_TXQ_DRBL_P1 - \ - HQM_QSET0_TXQ_DRBL_P1)) -#define HQM_QSET_IB_DRBL_1_P1(__n) \ - (HQM_QSET0_IB_DRBL_1_P1 + (__n) * (HQM_QSET1_IB_DRBL_1_P1 - \ - HQM_QSET0_IB_DRBL_1_P1)) -#define HQM_QSET_IB_DRBL_2_P1(__n) \ - (HQM_QSET0_IB_DRBL_2_P1 + (__n) * (HQM_QSET1_IB_DRBL_2_P1 - \ - HQM_QSET0_IB_DRBL_2_P1)) - -#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) -#define CPE_Q_MASK(__q) ((__q) & 0x3) -#define RME_Q_MASK(__q) ((__q) & 0x3) - - -/* - * PCI MSI-X vector defines - */ -enum { - BFA_MSIX_CPE_Q0 = 0, - BFA_MSIX_CPE_Q1 = 1, - BFA_MSIX_CPE_Q2 = 2, - BFA_MSIX_CPE_Q3 = 3, - BFA_MSIX_RME_Q0 = 4, - BFA_MSIX_RME_Q1 = 5, - BFA_MSIX_RME_Q2 = 6, - BFA_MSIX_RME_Q3 = 7, - BFA_MSIX_LPU_ERR = 8, - BFA_MSIX_CT_MAX = 9, -}; - -/* - * And corresponding host interrupt status bit field defines - */ -#define __HFN_INT_CPE_Q0 0x00000001U -#define __HFN_INT_CPE_Q1 0x00000002U -#define __HFN_INT_CPE_Q2 0x00000004U -#define __HFN_INT_CPE_Q3 0x00000008U -#define __HFN_INT_CPE_Q4 0x00000010U -#define __HFN_INT_CPE_Q5 0x00000020U -#define __HFN_INT_CPE_Q6 0x00000040U -#define __HFN_INT_CPE_Q7 0x00000080U -#define __HFN_INT_RME_Q0 0x00000100U -#define 
__HFN_INT_RME_Q1 0x00000200U -#define __HFN_INT_RME_Q2 0x00000400U -#define __HFN_INT_RME_Q3 0x00000800U -#define __HFN_INT_RME_Q4 0x00001000U -#define __HFN_INT_RME_Q5 0x00002000U -#define __HFN_INT_RME_Q6 0x00004000U -#define __HFN_INT_RME_Q7 0x00008000U -#define __HFN_INT_ERR_EMC 0x00010000U -#define __HFN_INT_ERR_LPU0 0x00020000U -#define __HFN_INT_ERR_LPU1 0x00040000U -#define __HFN_INT_ERR_PSS 0x00080000U -#define __HFN_INT_MBOX_LPU0 0x00100000U -#define __HFN_INT_MBOX_LPU1 0x00200000U -#define __HFN_INT_MBOX1_LPU0 0x00400000U -#define __HFN_INT_MBOX1_LPU1 0x00800000U -#define __HFN_INT_LL_HALT 0x01000000U -#define __HFN_INT_CPE_MASK 0x000000ffU -#define __HFN_INT_RME_MASK 0x0000ff00U - - -/* - * catapult memory map. - */ -#define LL_PGN_HQM0 0x0096 -#define LL_PGN_HQM1 0x0097 -#define PSS_SMEM_PAGE_START 0x8000 -#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) -#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) - -/* - * End of catapult memory map - */ - - -#endif /* __BFI_CTREG_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_fabric.h b/drivers/scsi/bfa/include/bfi/bfi_fabric.h deleted file mode 100644 index c0669ed41078..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_fabric.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
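
The per-queue register macros and shared-memory paging macros removed from bfi_ctreg.h above are plain address arithmetic: queue N's register is queue 0's address plus N times the Q0-to-Q1 stride, and an SMEM address is split into a 32 KB page number and page offset. The stand-alone sketch below copies those constants from the deleted header and uses a made-up address purely for illustration; it is not part of the patch itself.

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the deleted bfi_ctreg.h */
#define CPE_DEPTH_Q0              0x00038008
#define CPE_DEPTH_Q1              0x00038048
#define PSS_SMEM_PAGE_START       0x8000
#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
#define PSS_SMEM_PGOFF(_ma)       ((_ma) & 0x7fff)

/* Queue __n's depth register: Q0 address plus __n times the Q0->Q1 stride */
#define CPE_DEPTH_Q(__n) \
        (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))

int main(void)
{
        uint32_t ma = 0x0001c123;       /* made-up firmware SMEM address */

        printf("CPE_DEPTH_Q(3)   = 0x%08x\n", CPE_DEPTH_Q(3));
        printf("SMEM page number = 0x%x\n",
               PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, ma));
        printf("SMEM page offset = 0x%x\n", PSS_SMEM_PGOFF(ma));
        return 0;
}
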
- */
-
-#ifndef __BFI_FABRIC_H__
-#define __BFI_FABRIC_H__
-
-#include
-
-#pragma pack(1)
-
-enum bfi_fabric_h2i_msgs {
- BFI_FABRIC_H2I_CREATE_REQ = 1,
- BFI_FABRIC_H2I_DELETE_REQ = 2,
- BFI_FABRIC_H2I_SETAUTH = 3,
-};
-
-enum bfi_fabric_i2h_msgs {
- BFI_FABRIC_I2H_CREATE_RSP = BFA_I2HM(1),
- BFI_FABRIC_I2H_DELETE_RSP = BFA_I2HM(2),
- BFI_FABRIC_I2H_SETAUTH_RSP = BFA_I2HM(3),
- BFI_FABRIC_I2H_ONLINE = BFA_I2HM(4),
- BFI_FABRIC_I2H_OFFLINE = BFA_I2HM(5),
-};
-
-struct bfi_fabric_create_req_s {
- bfi_mhdr_t mh; /* common msg header */
- u8 vf_en; /* virtual fabric enable */
- u8 rsvd;
- u16 vf_id; /* virtual fabric ID */
- wwn_t pwwn; /* port name */
- wwn_t nwwn; /* node name */
-};
-
-struct bfi_fabric_create_rsp_s {
- bfi_mhdr_t mh; /* common msg header */
- u16 bfa_handle; /* host fabric handle */
- u8 status; /* fabric create status */
- u8 rsvd;
-};
-
-struct bfi_fabric_delete_req_s {
- bfi_mhdr_t mh; /* common msg header */
- u16 fw_handle; /* firmware fabric handle */
- u16 rsvd;
-};
-
-struct bfi_fabric_delete_rsp_s {
- bfi_mhdr_t mh; /* common msg header */
- u16 bfa_handle; /* host fabric handle */
- u8 status; /* fabric deletion status */
- u8 rsvd;
-};
-
-#define BFI_FABRIC_AUTHSECRET_LEN 64
-struct bfi_fabric_setauth_req_s {
- bfi_mhdr_t mh; /* common msg header */
- u16 fw_handle; /* f/w handle of fabric */
- u8 algorithm;
- u8 group;
- u8 secret[BFI_FABRIC_AUTHSECRET_LEN];
-};
-
-union bfi_fabric_h2i_msg_u {
- bfi_msg_t *msg;
- struct bfi_fabric_create_req_s *create_req;
- struct bfi_fabric_delete_req_s *delete_req;
-};
-
-union bfi_fabric_i2h_msg_u {
- bfi_msg_t *msg;
- struct bfi_fabric_create_rsp_s *create_rsp;
- struct bfi_fabric_delete_rsp_s *delete_rsp;
-};
-
-#pragma pack()
-
-#endif /* __BFI_FABRIC_H__ */
-
diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h b/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
deleted file mode 100644
index 52c059fb4c3a..000000000000
--- a/drivers/scsi/bfa/include/bfi/bfi_fcpim.h
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFI_FCPIM_H__
-#define __BFI_FCPIM_H__
-
-#include "bfi.h"
-#include
-
-#pragma pack(1)
-
-/*
- * Initiator mode I-T nexus interface defines.
- */ - -enum bfi_itnim_h2i { - BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */ - BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */ -}; - -enum bfi_itnim_i2h { - BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1), - BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2), - BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3), -}; - -struct bfi_itnim_create_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 fw_handle; /* f/w handle for itnim */ - u8 class; /* FC class for IO */ - u8 seq_rec; /* sequence recovery support */ - u8 msg_no; /* seq id of the msg */ -}; - -struct bfi_itnim_create_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 bfa_handle; /* bfa handle for itnim */ - u8 status; /* fcp request status */ - u8 seq_id; /* seq id of the msg */ -}; - -struct bfi_itnim_delete_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 fw_handle; /* f/w itnim handle */ - u8 seq_id; /* seq id of the msg */ - u8 rsvd; -}; - -struct bfi_itnim_delete_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 bfa_handle; /* bfa handle for itnim */ - u8 status; /* fcp request status */ - u8 seq_id; /* seq id of the msg */ -}; - -struct bfi_itnim_sler_event_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 bfa_handle; /* bfa handle for itnim */ - u16 rsvd; -}; - -union bfi_itnim_h2i_msg_u { - struct bfi_itnim_create_req_s *create_req; - struct bfi_itnim_delete_req_s *delete_req; - struct bfi_msg_s *msg; -}; - -union bfi_itnim_i2h_msg_u { - struct bfi_itnim_create_rsp_s *create_rsp; - struct bfi_itnim_delete_rsp_s *delete_rsp; - struct bfi_itnim_sler_event_s *sler_event; - struct bfi_msg_s *msg; -}; - -/* - * Initiator mode IO interface defines. - */ - -enum bfi_ioim_h2i { - BFI_IOIM_H2I_IOABORT_REQ = 1, /* IO abort request */ - BFI_IOIM_H2I_IOCLEANUP_REQ = 2, /* IO cleanup request */ -}; - -enum bfi_ioim_i2h { - BFI_IOIM_I2H_IO_RSP = BFA_I2HM(1), /* non-fp IO response */ - BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),/* ABORT rsp */ -}; - -/** - * IO command DIF info - */ -struct bfi_ioim_dif_s { - u32 dif_info[4]; -}; - -/** - * FCP IO messages overview - * - * @note - * - Max CDB length supported is 64 bytes. - * - SCSI Linked commands and SCSI bi-directional Commands not - * supported. - * - */ -struct bfi_ioim_req_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 io_tag; /* I/O tag */ - u16 rport_hdl; /* itnim/rport firmware handle */ - struct fcp_cmnd_s cmnd; /* IO request info */ - - /** - * SG elements array within the IO request must be double word - * aligned. This aligment is required to optimize SGM setup for the IO. - */ - struct bfi_sge_s sges[BFI_SGE_INLINE_MAX]; - u8 io_timeout; - u8 dif_en; - u8 rsvd_a[2]; - struct bfi_ioim_dif_s dif; -}; - -/** - * This table shows various IO status codes from firmware and their - * meaning. Host driver can use these status codes to further process - * IO completions. - * - * BFI_IOIM_STS_OK : IO completed with error free SCSI & - * transport status. - * - io-tag can be reused. - * - * BFA_IOIM_STS_SCSI_ERR : IO completed with scsi error. - * - io-tag can be reused. - * - * BFI_IOIM_STS_HOST_ABORTED : IO was aborted successfully due to - * host request. - * - io-tag cannot be reused yet. - * - * BFI_IOIM_STS_ABORTED : IO was aborted successfully - * internally by f/w. - * - io-tag cannot be reused yet. - * - * BFI_IOIM_STS_TIMEDOUT : IO timedout and ABTS/RRQ is happening - * in the firmware and - * - io-tag cannot be reused yet. 
- * - * BFI_IOIM_STS_SQER_NEEDED : Firmware could not recover the IO - * with sequence level error - * logic and hence host needs to retry - * this IO with a different IO tag - * - io-tag cannot be used yet. - * - * BFI_IOIM_STS_NEXUS_ABORT : Second Level Error Recovery from host - * is required because 2 consecutive ABTS - * timedout and host needs logout and - * re-login with the target - * - io-tag cannot be used yet. - * - * BFI_IOIM_STS_UNDERRUN : IO completed with SCSI status good, - * but the data tranferred is less than - * the fcp data length in the command. - * ex. SCSI INQUIRY where transferred - * data length and residue count in FCP - * response accounts for total fcp-dl - * - io-tag can be reused. - * - * BFI_IOIM_STS_OVERRUN : IO completed with SCSI status good, - * but the data transerred is more than - * fcp data length in the command. ex. - * TAPE IOs where blocks can of unequal - * lengths. - * - io-tag can be reused. - * - * BFI_IOIM_STS_RES_FREE : Firmware has completed using io-tag - * during abort process - * - io-tag can be reused. - * - * BFI_IOIM_STS_PROTO_ERR : Firmware detected a protocol error. - * ex target sent more data than - * requested, or there was data frame - * loss and other reasons - * - io-tag cannot be used yet. - * - * BFI_IOIM_STS_DIF_ERR : Firwmare detected DIF error. ex: DIF - * CRC err or Ref Tag err or App tag err. - * - io-tag can be reused. - * - * BFA_IOIM_STS_TSK_MGT_ABORT : IO was aborted because of Task - * Management command from the host - * - io-tag can be reused. - * - * BFI_IOIM_STS_UTAG : Firmware does not know about this - * io_tag. - * - io-tag can be reused. - */ -enum bfi_ioim_status { - BFI_IOIM_STS_OK = 0, - BFI_IOIM_STS_HOST_ABORTED = 1, - BFI_IOIM_STS_ABORTED = 2, - BFI_IOIM_STS_TIMEDOUT = 3, - BFI_IOIM_STS_RES_FREE = 4, - BFI_IOIM_STS_SQER_NEEDED = 5, - BFI_IOIM_STS_PROTO_ERR = 6, - BFI_IOIM_STS_UTAG = 7, - BFI_IOIM_STS_PATHTOV = 8, -}; - -#define BFI_IOIM_SNSLEN (256) -/** - * I/O response message - */ -struct bfi_ioim_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 io_tag; /* completed IO tag */ - u16 bfa_rport_hndl; /* releated rport handle */ - u8 io_status; /* IO completion status */ - u8 reuse_io_tag; /* IO tag can be reused */ - u16 abort_tag; /* host abort request tag */ - u8 scsi_status; /* scsi status from target */ - u8 sns_len; /* scsi sense length */ - u8 resid_flags; /* IO residue flags */ - u8 rsvd_a; - u32 residue; /* IO residual length in bytes */ - u32 rsvd_b[3]; -}; - -struct bfi_ioim_abort_req_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 io_tag; /* I/O tag */ - u16 abort_tag; /* unique request tag */ -}; - -/* - * Initiator mode task management command interface defines. 
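
The status table above documents, for each firmware completion code, whether the host may immediately reuse the io-tag. In the interface itself this is carried by the reuse_io_tag field of bfi_ioim_rsp_s, so the helper below is only an illustrative restatement of the documented semantics for the codes present in enum bfi_ioim_status; the function name is made up, and treating BFI_IOIM_STS_PATHTOV (which the table does not mention) as not reusable is an assumption.

#include <stdbool.h>

/* Completion codes copied from the deleted enum bfi_ioim_status */
enum bfi_ioim_status {
        BFI_IOIM_STS_OK           = 0,
        BFI_IOIM_STS_HOST_ABORTED = 1,
        BFI_IOIM_STS_ABORTED      = 2,
        BFI_IOIM_STS_TIMEDOUT     = 3,
        BFI_IOIM_STS_RES_FREE     = 4,
        BFI_IOIM_STS_SQER_NEEDED  = 5,
        BFI_IOIM_STS_PROTO_ERR    = 6,
        BFI_IOIM_STS_UTAG         = 7,
        BFI_IOIM_STS_PATHTOV      = 8,
};

/* Illustrative restatement of the table: true means the io-tag may be reused
 * right away, false means the abort/recovery path still owns it. */
bool ioim_tag_reusable(enum bfi_ioim_status sts)
{
        switch (sts) {
        case BFI_IOIM_STS_OK:
        case BFI_IOIM_STS_RES_FREE:
        case BFI_IOIM_STS_UTAG:
                return true;
        default:        /* aborted, timed out, SQER needed, protocol error, ... */
                return false;
        }
}
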
- */ - -enum bfi_tskim_h2i { - BFI_TSKIM_H2I_TM_REQ = 1, /* task-mgmt command */ - BFI_TSKIM_H2I_ABORT_REQ = 2, /* task-mgmt command */ -}; - -enum bfi_tskim_i2h { - BFI_TSKIM_I2H_TM_RSP = BFA_I2HM(1), -}; - -struct bfi_tskim_req_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 tsk_tag; /* task management tag */ - u16 itn_fhdl; /* itn firmware handle */ - lun_t lun; /* LU number */ - u8 tm_flags; /* see fcp_tm_cmnd_t */ - u8 t_secs; /* Timeout value in seconds */ - u8 rsvd[2]; -}; - -struct bfi_tskim_abortreq_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 tsk_tag; /* task management tag */ - u16 rsvd; -}; - -enum bfi_tskim_status { - /* - * Following are FCP-4 spec defined status codes, - * **DO NOT CHANGE THEM ** - */ - BFI_TSKIM_STS_OK = 0, - BFI_TSKIM_STS_NOT_SUPP = 4, - BFI_TSKIM_STS_FAILED = 5, - - /** - * Defined by BFA - */ - BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ - BFI_TSKIM_STS_ABORTED = 11, /* Aborted on host request */ -}; - -struct bfi_tskim_rsp_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 tsk_tag; /* task mgmt cmnd tag */ - u8 tsk_status; /* @ref bfi_tskim_status */ - u8 rsvd; -}; - -#pragma pack() - -#endif /* __BFI_FCPIM_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h b/drivers/scsi/bfa/include/bfi/bfi_fcxp.h deleted file mode 100644 index e0e995a32828..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_fcxp.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFI_FCXP_H__ -#define __BFI_FCXP_H__ - -#include "bfi.h" - -#pragma pack(1) - -enum bfi_fcxp_h2i { - BFI_FCXP_H2I_SEND_REQ = 1, -}; - -enum bfi_fcxp_i2h { - BFI_FCXP_I2H_SEND_RSP = BFA_I2HM(1), -}; - -#define BFA_FCXP_MAX_SGES 2 - -/** - * FCXP send request structure - */ -struct bfi_fcxp_send_req_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 fcxp_tag; /* driver request tag */ - u16 max_frmsz; /* max send frame size */ - u16 vf_id; /* vsan tag if applicable */ - u16 rport_fw_hndl; /* FW Handle for the remote port */ - u8 class; /* FC class used for req/rsp */ - u8 rsp_timeout; /* timeout in secs, 0-no response */ - u8 cts; /* continue sequence */ - u8 lp_tag; /* lport tag */ - struct fchs_s fchs; /* request FC header structure */ - u32 req_len; /* request payload length */ - u32 rsp_maxlen; /* max response length expected */ - struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ - struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ -}; - -/** - * FCXP send response structure - */ -struct bfi_fcxp_send_rsp_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 fcxp_tag; /* send request tag */ - u8 req_status; /* request status */ - u8 rsvd; - u32 rsp_len; /* actual response length */ - u32 residue_len; /* residual response length */ - struct fchs_s fchs; /* response FC header structure */ -}; - -#pragma pack() - -#endif /* __BFI_FCXP_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h deleted file mode 100644 index 450ded6e9bc2..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFI_IOC_H__ -#define __BFI_IOC_H__ - -#include "bfi.h" -#include - -#pragma pack(1) - -enum bfi_ioc_h2i_msgs { - BFI_IOC_H2I_ENABLE_REQ = 1, - BFI_IOC_H2I_DISABLE_REQ = 2, - BFI_IOC_H2I_GETATTR_REQ = 3, - BFI_IOC_H2I_DBG_SYNC = 4, - BFI_IOC_H2I_DBG_DUMP = 5, -}; - -enum bfi_ioc_i2h_msgs { - BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), - BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), - BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), - BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4), - BFI_IOC_I2H_HBEAT = BFA_I2HM(5), -}; - -/** - * BFI_IOC_H2I_GETATTR_REQ message - */ -struct bfi_ioc_getattr_req_s { - struct bfi_mhdr_s mh; - union bfi_addr_u attr_addr; -}; - -struct bfi_ioc_attr_s { - wwn_t mfg_pwwn; /* Mfg port wwn */ - wwn_t mfg_nwwn; /* Mfg node wwn */ - mac_t mfg_mac; /* Mfg mac */ - u16 rsvd_a; - wwn_t pwwn; - wwn_t nwwn; - mac_t mac; /* PBC or Mfg mac */ - u16 rsvd_b; - char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; - u8 pcie_gen; - u8 pcie_lanes_orig; - u8 pcie_lanes; - u8 rx_bbcredit; /* receive buffer credits */ - u32 adapter_prop; /* adapter properties */ - u16 maxfrsize; /* max receive frame size */ - char asic_rev; - u8 rsvd_c; - char fw_version[BFA_VERSION_LEN]; - char optrom_version[BFA_VERSION_LEN]; - struct bfa_mfg_vpd_s vpd; - u32 card_type; /* card type */ -}; - -/** - * BFI_IOC_I2H_GETATTR_REPLY message - */ -struct bfi_ioc_getattr_reply_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u8 status; /* cfg reply status */ - u8 rsvd[3]; -}; - -/** - * Firmware memory page offsets - */ -#define BFI_IOC_SMEM_PG0_CB (0x40) -#define BFI_IOC_SMEM_PG0_CT (0x180) - -/** - * Firmware trace offset - */ -#define BFI_IOC_TRC_OFF (0x4b00) -#define BFI_IOC_TRC_ENTS 256 - -#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) -#define BFI_IOC_MD5SUM_SZ 4 -struct bfi_ioc_image_hdr_s { - u32 signature; /* constant signature */ - u32 rsvd_a; - u32 exec; /* exec vector */ - u32 param; /* parameters */ - u32 rsvd_b[4]; - u32 md5sum[BFI_IOC_MD5SUM_SZ]; -}; - -/** - * BFI_IOC_I2H_READY_EVENT message - */ -struct bfi_ioc_rdy_event_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 init_status; /* init event status */ - u8 rsvd[3]; -}; - -struct bfi_ioc_hbeat_s { - struct bfi_mhdr_s mh; /* common msg header */ - u32 hb_count; /* current heart beat count */ -}; - -/** - * IOC hardware/firmware state - */ -enum bfi_ioc_state { - BFI_IOC_UNINIT = 0, /* not initialized */ - BFI_IOC_INITING = 1, /* h/w is being initialized */ - BFI_IOC_HWINIT = 2, /* h/w is initialized */ - BFI_IOC_CFG = 3, /* IOC configuration in progress */ - BFI_IOC_OP = 4, /* IOC is operational */ - BFI_IOC_DISABLING = 5, /* IOC is being disabled */ - BFI_IOC_DISABLED = 6, /* IOC is disabled */ - BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ - BFI_IOC_FAIL = 8, /* IOC heart-beat failure */ - BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ -}; - -#define BFI_IOC_ENDIAN_SIG 0x12345678 - -enum { - BFI_ADAPTER_TYPE_FC = 0x01, /* FC adapters */ - BFI_ADAPTER_TYPE_MK = 0x0f0000, /* adapter type mask */ - BFI_ADAPTER_TYPE_SH = 16, /* adapter type shift */ - BFI_ADAPTER_NPORTS_MK = 0xff00, /* number of ports mask */ - BFI_ADAPTER_NPORTS_SH = 8, /* number of ports shift */ - BFI_ADAPTER_SPEED_MK = 0xff, /* adapter speed mask */ - BFI_ADAPTER_SPEED_SH = 0, /* adapter speed shift */ - BFI_ADAPTER_PROTO = 0x100000, /* prototype adapaters */ - BFI_ADAPTER_TTV = 0x200000, /* TTV debug capable */ - BFI_ADAPTER_UNSUPP = 0x400000, /* unknown adapter type */ -}; - -#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ - (((__adap_prop) & 
BFI_ADAPTER_ ## __prop ## _MK) >> \ - BFI_ADAPTER_ ## __prop ## _SH) -#define BFI_ADAPTER_SETP(__prop, __val) \ - ((__val) << BFI_ADAPTER_ ## __prop ## _SH) -#define BFI_ADAPTER_IS_PROTO(__adap_type) \ - ((__adap_type) & BFI_ADAPTER_PROTO) -#define BFI_ADAPTER_IS_TTV(__adap_type) \ - ((__adap_type) & BFI_ADAPTER_TTV) -#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \ - ((__adap_type) & BFI_ADAPTER_UNSUPP) -#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \ - ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ - BFI_ADAPTER_UNSUPP)) - -/** - * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages - */ -struct bfi_ioc_ctrl_req_s { - struct bfi_mhdr_s mh; - u8 ioc_class; - u8 rsvd[3]; -}; - -/** - * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages - */ -struct bfi_ioc_ctrl_reply_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u8 status; /* enable/disable status */ - u8 rsvd[3]; -}; - -#define BFI_IOC_MSGSZ 8 -/** - * H2I Messages - */ -union bfi_ioc_h2i_msg_u { - struct bfi_mhdr_s mh; - struct bfi_ioc_ctrl_req_s enable_req; - struct bfi_ioc_ctrl_req_s disable_req; - struct bfi_ioc_getattr_req_s getattr_req; - u32 mboxmsg[BFI_IOC_MSGSZ]; -}; - -/** - * I2H Messages - */ -union bfi_ioc_i2h_msg_u { - struct bfi_mhdr_s mh; - struct bfi_ioc_rdy_event_s rdy_event; - u32 mboxmsg[BFI_IOC_MSGSZ]; -}; - -#pragma pack() - -#endif /* __BFI_IOC_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h b/drivers/scsi/bfa/include/bfi/bfi_iocfc.h deleted file mode 100644 index ccdfcc5d7e0b..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_iocfc.h +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFI_IOCFC_H__ -#define __BFI_IOCFC_H__ - -#include "bfi.h" -#include -#include -#include -#include - -#pragma pack(1) - -enum bfi_iocfc_h2i_msgs { - BFI_IOCFC_H2I_CFG_REQ = 1, - BFI_IOCFC_H2I_GET_STATS_REQ = 2, - BFI_IOCFC_H2I_CLEAR_STATS_REQ = 3, - BFI_IOCFC_H2I_SET_INTR_REQ = 4, - BFI_IOCFC_H2I_UPDATEQ_REQ = 5, -}; - -enum bfi_iocfc_i2h_msgs { - BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), - BFI_IOCFC_I2H_GET_STATS_RSP = BFA_I2HM(2), - BFI_IOCFC_I2H_CLEAR_STATS_RSP = BFA_I2HM(3), - BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(5), -}; - -struct bfi_iocfc_cfg_s { - u8 num_cqs; /* Number of CQs to be used */ - u8 sense_buf_len; /* SCSI sense length */ - u8 trunk_enabled; /* port trunking enabled */ - u8 trunk_ports; /* trunk ports bit map */ - u32 endian_sig; /* endian signature of host */ - - /** - * Request and response circular queue base addresses, size and - * shadow index pointers. 
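
The BFI_ADAPTER_GETP/SETP macros above are ordinary mask-and-shift accessors over the packed adapter_prop word. A minimal stand-alone sketch, with the masks copied from the deleted bfi_ioc.h and a made-up property value, shows how the port-count and speed fields are packed and read back; it is not part of the patch itself.

#include <stdio.h>
#include <stdint.h>

/* Field masks and shifts copied from the deleted bfi_ioc.h */
#define BFI_ADAPTER_NPORTS_MK   0xff00  /* number of ports mask */
#define BFI_ADAPTER_NPORTS_SH   8       /* number of ports shift */
#define BFI_ADAPTER_SPEED_MK    0xff    /* adapter speed mask */
#define BFI_ADAPTER_SPEED_SH    0       /* adapter speed shift */

#define BFI_ADAPTER_GETP(__prop, __adap_prop)                   \
        (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >>     \
         BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_SETP(__prop, __val)                         \
        ((__val) << BFI_ADAPTER_ ## __prop ## _SH)

int main(void)
{
        /* Made-up adapter_prop: two ports, speed code 8 */
        uint32_t adapter_prop = BFI_ADAPTER_SETP(NPORTS, 2) |
                                BFI_ADAPTER_SETP(SPEED, 8);

        printf("nports = %u\n", (unsigned)BFI_ADAPTER_GETP(NPORTS, adapter_prop));
        printf("speed  = %u\n", (unsigned)BFI_ADAPTER_GETP(SPEED, adapter_prop));
        return 0;
}
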
- */ - union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS]; - union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS]; - u16 req_cq_elems[BFI_IOC_MAX_CQS]; - union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS]; - union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS]; - u16 rsp_cq_elems[BFI_IOC_MAX_CQS]; - - union bfi_addr_u stats_addr; /* DMA-able address for stats */ - union bfi_addr_u cfgrsp_addr; /* config response dma address */ - union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */ - struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ -}; - -/** - * Boot target wwn information for this port. This contains either the stored - * or discovered boot target port wwns for the port. - */ -struct bfi_iocfc_bootwwns { - wwn_t wwn[BFA_BOOT_BOOTLUN_MAX]; - u8 nwwns; - u8 rsvd[7]; -}; - -struct bfi_iocfc_cfgrsp_s { - struct bfa_iocfc_fwcfg_s fwcfg; - struct bfa_iocfc_intr_attr_s intr_attr; - struct bfi_iocfc_bootwwns bootwwns; - struct bfi_pbc_s pbc_cfg; -}; - -/** - * BFI_IOCFC_H2I_CFG_REQ message - */ -struct bfi_iocfc_cfg_req_s { - struct bfi_mhdr_s mh; - union bfi_addr_u ioc_cfg_dma_addr; -}; - -/** - * BFI_IOCFC_I2H_CFG_REPLY message - */ -struct bfi_iocfc_cfg_reply_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u8 cfg_success; /* cfg reply status */ - u8 lpu_bm; /* LPUs assigned for this IOC */ - u8 rsvd[2]; -}; - -/** - * BFI_IOCFC_H2I_GET_STATS_REQ & BFI_IOCFC_H2I_CLEAR_STATS_REQ messages - */ -struct bfi_iocfc_stats_req_s { - struct bfi_mhdr_s mh; /* msg header */ - u32 msgtag; /* msgtag for reply */ -}; - -/** - * BFI_IOCFC_I2H_GET_STATS_RSP & BFI_IOCFC_I2H_CLEAR_STATS_RSP messages - */ -struct bfi_iocfc_stats_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 status; /* reply status */ - u8 rsvd[3]; - u32 msgtag; /* msgtag for reply */ -}; - -/** - * BFI_IOCFC_H2I_SET_INTR_REQ message - */ -struct bfi_iocfc_set_intr_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 coalesce; /* enable intr coalescing*/ - u8 rsvd[3]; - u16 delay; /* delay timer 0..1125us */ - u16 latency; /* latency timer 0..225us */ -}; - -/** - * BFI_IOCFC_H2I_UPDATEQ_REQ message - */ -struct bfi_iocfc_updateq_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u32 reqq_ba; /* reqq base addr */ - u32 rspq_ba; /* rspq base addr */ - u32 reqq_sci; /* reqq shadow ci */ - u32 rspq_spi; /* rspq shadow pi */ -}; - -/** - * BFI_IOCFC_I2H_UPDATEQ_RSP message - */ -struct bfi_iocfc_updateq_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 status; /* updateq status */ - u8 rsvd[3]; -}; - -/** - * H2I Messages - */ -union bfi_iocfc_h2i_msg_u { - struct bfi_mhdr_s mh; - struct bfi_iocfc_cfg_req_s cfg_req; - struct bfi_iocfc_stats_req_s stats_get; - struct bfi_iocfc_stats_req_s stats_clr; - struct bfi_iocfc_updateq_req_s updateq_req; - u32 mboxmsg[BFI_IOC_MSGSZ]; -}; - -/** - * I2H Messages - */ -union bfi_iocfc_i2h_msg_u { - struct bfi_mhdr_s mh; - struct bfi_iocfc_cfg_reply_s cfg_reply; - struct bfi_iocfc_stats_rsp_s stats_get_rsp; - struct bfi_iocfc_stats_rsp_s stats_clr_rsp; - struct bfi_iocfc_updateq_rsp_s updateq_rsp; - u32 mboxmsg[BFI_IOC_MSGSZ]; -}; - -#pragma pack() - -#endif /* __BFI_IOCFC_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_lport.h b/drivers/scsi/bfa/include/bfi/bfi_lport.h deleted file mode 100644 index 29010614bac9..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_lport.h +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 
- * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFI_LPORT_H__ -#define __BFI_LPORT_H__ - -#include - -#pragma pack(1) - -enum bfi_lport_h2i_msgs { - BFI_LPORT_H2I_CREATE_REQ = 1, - BFI_LPORT_H2I_DELETE_REQ = 2, -}; - -enum bfi_lport_i2h_msgs { - BFI_LPORT_I2H_CREATE_RSP = BFA_I2HM(1), - BFI_LPORT_I2H_DELETE_RSP = BFA_I2HM(2), - BFI_LPORT_I2H_ONLINE = BFA_I2HM(3), - BFI_LPORT_I2H_OFFLINE = BFA_I2HM(4), -}; - -#define BFI_LPORT_MAX_SYNNAME 64 - -enum bfi_lport_role_e { - BFI_LPORT_ROLE_FCPIM = 1, - BFI_LPORT_ROLE_FCPTM = 2, - BFI_LPORT_ROLE_IPFC = 4, -}; - -struct bfi_lport_create_req_s { - bfi_mhdr_t mh; /* common msg header */ - u16 fabric_fwhdl; /* parent fabric instance */ - u8 roles; /* lport FC-4 roles */ - u8 rsvd; - wwn_t pwwn; /* port name */ - wwn_t nwwn; /* node name */ - u8 symname[BFI_LPORT_MAX_SYNNAME]; -}; - -struct bfi_lport_create_rsp_s { - bfi_mhdr_t mh; /* common msg header */ - u8 status; /* lport creation status */ - u8 rsvd[3]; -}; - -struct bfi_lport_delete_req_s { - bfi_mhdr_t mh; /* common msg header */ - u16 fw_handle; /* firmware lport handle */ - u16 rsvd; -}; - -struct bfi_lport_delete_rsp_s { - bfi_mhdr_t mh; /* common msg header */ - u16 bfa_handle; /* host lport handle */ - u8 status; /* lport deletion status */ - u8 rsvd; -}; - -union bfi_lport_h2i_msg_u { - bfi_msg_t *msg; - struct bfi_lport_create_req_s *create_req; - struct bfi_lport_delete_req_s *delete_req; -}; - -union bfi_lport_i2h_msg_u { - bfi_msg_t *msg; - struct bfi_lport_create_rsp_s *create_rsp; - struct bfi_lport_delete_rsp_s *delete_rsp; -}; - -#pragma pack() - -#endif /* __BFI_LPORT_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h deleted file mode 100644 index 7ed31bbb8696..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_lps.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFI_LPS_H__ -#define __BFI_LPS_H__ - -#include - -#pragma pack(1) - -enum bfi_lps_h2i_msgs { - BFI_LPS_H2I_LOGIN_REQ = 1, - BFI_LPS_H2I_LOGOUT_REQ = 2, -}; - -enum bfi_lps_i2h_msgs { - BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), - BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), - BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3), -}; - -struct bfi_lps_login_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; - u8 alpa; - u16 pdu_size; - wwn_t pwwn; - wwn_t nwwn; - u8 fdisc; - u8 auth_en; - u8 rsvd[2]; -}; - -struct bfi_lps_login_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; - u8 status; - u8 lsrjt_rsn; - u8 lsrjt_expl; - wwn_t port_name; - wwn_t node_name; - u16 bb_credit; - u8 f_port; - u8 npiv_en; - u32 lp_pid:24; - u32 auth_req:8; - mac_t lp_mac; - mac_t fcf_mac; - u8 ext_status; - u8 brcd_switch;/* attached peer is brcd switch */ -}; - -struct bfi_lps_logout_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; - u8 rsvd[3]; - wwn_t port_name; -}; - -struct bfi_lps_logout_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; - u8 status; - u8 rsvd[2]; -}; - -struct bfi_lps_cvl_event_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 lp_tag; - u8 rsvd[3]; -}; - -union bfi_lps_h2i_msg_u { - struct bfi_mhdr_s *msg; - struct bfi_lps_login_req_s *login_req; - struct bfi_lps_logout_req_s *logout_req; -}; - -union bfi_lps_i2h_msg_u { - struct bfi_msg_s *msg; - struct bfi_lps_login_rsp_s *login_rsp; - struct bfi_lps_logout_rsp_s *logout_rsp; - struct bfi_lps_cvl_event_s *cvl_event; -}; - -#pragma pack() - -#endif /* __BFI_LPS_H__ */ - - diff --git a/drivers/scsi/bfa/include/bfi/bfi_pbc.h b/drivers/scsi/bfa/include/bfi/bfi_pbc.h deleted file mode 100644 index 88a4154c30c0..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_pbc.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFI_PBC_H__ -#define __BFI_PBC_H__ - -#pragma pack(1) - -#define BFI_PBC_MAX_BLUNS 8 -#define BFI_PBC_MAX_VPORTS 16 - -#define BFI_PBC_PORT_DISABLED 2 -/** - * PBC boot lun configuration - */ -struct bfi_pbc_blun_s { - wwn_t tgt_pwwn; - lun_t tgt_lun; -}; - -/** - * PBC virtual port configuration - */ -struct bfi_pbc_vport_s { - wwn_t vp_pwwn; - wwn_t vp_nwwn; -}; - -/** - * BFI pre-boot configuration information - */ -struct bfi_pbc_s { - u8 port_enabled; - u8 boot_enabled; - u8 nbluns; - u8 nvports; - u8 port_speed; - u8 rsvd_a; - u16 hss; - wwn_t pbc_pwwn; - wwn_t pbc_nwwn; - struct bfi_pbc_blun_s blun[BFI_PBC_MAX_BLUNS]; - struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS]; -}; - -#pragma pack() - -#endif /* __BFI_PBC_H__ */ diff --git a/drivers/scsi/bfa/include/bfi/bfi_port.h b/drivers/scsi/bfa/include/bfi/bfi_port.h deleted file mode 100644 index 3ec3bea110ba..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_port.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 
- * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFI_PORT_H__ -#define __BFI_PORT_H__ - -#include -#include - -#pragma pack(1) - -enum bfi_port_h2i { - BFI_PORT_H2I_ENABLE_REQ = (1), - BFI_PORT_H2I_DISABLE_REQ = (2), - BFI_PORT_H2I_GET_STATS_REQ = (3), - BFI_PORT_H2I_CLEAR_STATS_REQ = (4), -}; - -enum bfi_port_i2h { - BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1), - BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2), - BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3), - BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), -}; - -/** - * Generic REQ type - */ -struct bfi_port_generic_req_s { - struct bfi_mhdr_s mh; /* msg header */ - u32 msgtag; /* msgtag for reply */ - u32 rsvd; -}; - -/** - * Generic RSP type - */ -struct bfi_port_generic_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 status; /* port enable status */ - u8 rsvd[3]; - u32 msgtag; /* msgtag for reply */ -}; - -/** - * @todo - * BFI_PORT_H2I_ENABLE_REQ - */ - -/** - * @todo - * BFI_PORT_I2H_ENABLE_RSP - */ - -/** - * BFI_PORT_H2I_DISABLE_REQ - */ - -/** - * BFI_PORT_I2H_DISABLE_RSP - */ - -/** - * BFI_PORT_H2I_GET_STATS_REQ - */ -struct bfi_port_get_stats_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - union bfi_addr_u dma_addr; -}; - -/** - * BFI_PORT_I2H_GET_STATS_RSP - */ - -/** - * BFI_PORT_H2I_CLEAR_STATS_REQ - */ - -/** - * BFI_PORT_I2H_CLEAR_STATS_RSP - */ - -union bfi_port_h2i_msg_u { - struct bfi_mhdr_s mh; - struct bfi_port_generic_req_s enable_req; - struct bfi_port_generic_req_s disable_req; - struct bfi_port_get_stats_req_s getstats_req; - struct bfi_port_generic_req_s clearstats_req; -}; - -union bfi_port_i2h_msg_u { - struct bfi_mhdr_s mh; - struct bfi_port_generic_rsp_s enable_rsp; - struct bfi_port_generic_rsp_s disable_rsp; - struct bfi_port_generic_rsp_s getstats_rsp; - struct bfi_port_generic_rsp_s clearstats_rsp; -}; - -#pragma pack() - -#endif /* __BFI_PORT_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h deleted file mode 100644 index 50dcf45c7470..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_pport.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#ifndef __BFI_PPORT_H__ -#define __BFI_PPORT_H__ - -#include -#include - -#pragma pack(1) - -enum bfi_fcport_h2i { - BFI_FCPORT_H2I_ENABLE_REQ = (1), - BFI_FCPORT_H2I_DISABLE_REQ = (2), - BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3), - BFI_FCPORT_H2I_STATS_GET_REQ = (4), - BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5), -}; - -enum bfi_fcport_i2h { - BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1), - BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2), - BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3), - BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4), - BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5), - BFI_FCPORT_I2H_EVENT = BFA_I2HM(6), -}; - -/** - * Generic REQ type - */ -struct bfi_fcport_req_s { - struct bfi_mhdr_s mh; /* msg header */ - u32 msgtag; /* msgtag for reply */ -}; - -/** - * Generic RSP type - */ -struct bfi_fcport_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 status; /* port enable status */ - u8 rsvd[3]; - u32 msgtag; /* msgtag for reply */ -}; - -/** - * BFI_FCPORT_H2I_ENABLE_REQ - */ -struct bfi_fcport_enable_req_s { - struct bfi_mhdr_s mh; /* msg header */ - u32 rsvd1; - wwn_t nwwn; /* node wwn of physical port */ - wwn_t pwwn; /* port wwn of physical port */ - struct bfa_pport_cfg_s port_cfg; /* port configuration */ - union bfi_addr_u stats_dma_addr; /* DMA address for stats */ - u32 msgtag; /* msgtag for reply */ - u32 rsvd2; -}; - -/** - * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ - */ -struct bfi_fcport_set_svc_params_req_s { - struct bfi_mhdr_s mh; /* msg header */ - u16 tx_bbcredit; /* Tx credits */ - u16 rsvd; -}; - -/** - * BFI_FCPORT_I2H_EVENT - */ -struct bfi_fcport_event_s { - struct bfi_mhdr_s mh; /* common msg header */ - struct bfa_pport_link_s link_state; -}; - -/** - * fcport H2I message - */ -union bfi_fcport_h2i_msg_u { - struct bfi_mhdr_s *mhdr; - struct bfi_fcport_enable_req_s *penable; - struct bfi_fcport_req_s *pdisable; - struct bfi_fcport_set_svc_params_req_s *psetsvcparams; - struct bfi_fcport_req_s *pstatsget; - struct bfi_fcport_req_s *pstatsclear; -}; - -/** - * fcport I2H message - */ -union bfi_fcport_i2h_msg_u { - struct bfi_msg_s *msg; - struct bfi_fcport_rsp_s *penable_rsp; - struct bfi_fcport_rsp_s *pdisable_rsp; - struct bfi_fcport_rsp_s *psetsvcparams_rsp; - struct bfi_fcport_rsp_s *pstatsget_rsp; - struct bfi_fcport_rsp_s *pstatsclear_rsp; - struct bfi_fcport_event_s *event; -}; - -#pragma pack() - -#endif /* __BFI_PPORT_H__ */ diff --git a/drivers/scsi/bfa/include/bfi/bfi_rport.h b/drivers/scsi/bfa/include/bfi/bfi_rport.h deleted file mode 100644 index e1cd83b56ec6..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_rport.h +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFI_RPORT_H__ -#define __BFI_RPORT_H__ - -#include - -#pragma pack(1) - -enum bfi_rport_h2i_msgs { - BFI_RPORT_H2I_CREATE_REQ = 1, - BFI_RPORT_H2I_DELETE_REQ = 2, - BFI_RPORT_H2I_SET_SPEED_REQ = 3, -}; - -enum bfi_rport_i2h_msgs { - BFI_RPORT_I2H_CREATE_RSP = BFA_I2HM(1), - BFI_RPORT_I2H_DELETE_RSP = BFA_I2HM(2), - BFI_RPORT_I2H_QOS_SCN = BFA_I2HM(3), -}; - -struct bfi_rport_create_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 bfa_handle; /* host rport handle */ - u16 max_frmsz; /* max rcv pdu size */ - u32 pid:24, /* remote port ID */ - lp_tag:8; /* local port tag */ - u32 local_pid:24, /* local port ID */ - cisc:8; - u8 fc_class; /* supported FC classes */ - u8 vf_en; /* virtual fabric enable */ - u16 vf_id; /* virtual fabric ID */ -}; - -struct bfi_rport_create_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u8 status; /* rport creation status */ - u8 rsvd[3]; - u16 bfa_handle; /* host rport handle */ - u16 fw_handle; /* firmware rport handle */ - struct bfa_rport_qos_attr_s qos_attr; /* QoS Attributes */ -}; - -struct bfa_rport_speed_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 fw_handle; /* firmware rport handle */ - u8 speed; /*! rport's speed via RPSC */ - u8 rsvd; -}; - -struct bfi_rport_delete_req_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 fw_handle; /* firmware rport handle */ - u16 rsvd; -}; - -struct bfi_rport_delete_rsp_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 bfa_handle; /* host rport handle */ - u8 status; /* rport deletion status */ - u8 rsvd; -}; - -struct bfi_rport_qos_scn_s { - struct bfi_mhdr_s mh; /* common msg header */ - u16 bfa_handle; /* host rport handle */ - u16 rsvd; - struct bfa_rport_qos_attr_s old_qos_attr; /* Old QoS Attributes */ - struct bfa_rport_qos_attr_s new_qos_attr; /* New QoS Attributes */ -}; - -union bfi_rport_h2i_msg_u { - struct bfi_msg_s *msg; - struct bfi_rport_create_req_s *create_req; - struct bfi_rport_delete_req_s *delete_req; - struct bfi_rport_speed_req_s *speed_req; -}; - -union bfi_rport_i2h_msg_u { - struct bfi_msg_s *msg; - struct bfi_rport_create_rsp_s *create_rsp; - struct bfi_rport_delete_rsp_s *delete_rsp; - struct bfi_rport_qos_scn_s *qos_scn_evt; -}; - -#pragma pack() - -#endif /* __BFI_RPORT_H__ */ - diff --git a/drivers/scsi/bfa/include/bfi/bfi_uf.h b/drivers/scsi/bfa/include/bfi/bfi_uf.h deleted file mode 100644 index f328a9e7e622..000000000000 --- a/drivers/scsi/bfa/include/bfi/bfi_uf.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFI_UF_H__ -#define __BFI_UF_H__ - -#include "bfi.h" - -#pragma pack(1) - -enum bfi_uf_h2i { - BFI_UF_H2I_BUF_POST = 1, -}; - -enum bfi_uf_i2h { - BFI_UF_I2H_FRM_RCVD = BFA_I2HM(1), -}; - -#define BFA_UF_MAX_SGES 2 - -struct bfi_uf_buf_post_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 buf_tag; /* buffer tag */ - u16 buf_len; /* total buffer length */ - struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ -}; - -struct bfi_uf_frm_rcvd_s { - struct bfi_mhdr_s mh; /* Common msg header */ - u16 buf_tag; /* buffer tag */ - u16 rsvd; - u16 frm_len; /* received frame length */ - u16 xfr_len; /* tranferred length */ -}; - -#pragma pack() - -#endif /* __BFI_UF_H__ */ diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h deleted file mode 100644 index a75a1f3be315..000000000000 --- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_cna_trcmod.h CNA trace modules - */ - -#ifndef __BFA_CNA_TRCMOD_H__ -#define __BFA_CNA_TRCMOD_H__ - -#include - -/* - * !!! Only append to the enums defined here to avoid any versioning - * !!! needed between trace utility and driver version - */ -enum { - BFA_TRC_CNA_CEE = 1, - BFA_TRC_CNA_PORT = 2, - BFA_TRC_CNA_IOC = 3, - BFA_TRC_CNA_DIAG = 4, - BFA_TRC_CNA_IOC_CB = 5, - BFA_TRC_CNA_IOC_CT = 6, -}; - -#endif /* __BFA_CNA_TRCMOD_H__ */ diff --git a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h b/drivers/scsi/bfa/include/cna/cee/bfa_cee.h deleted file mode 100644 index 77f297f68046..000000000000 --- a/drivers/scsi/bfa/include/cna/cee/bfa_cee.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_CEE_H__ -#define __BFA_CEE_H__ - -#include -#include -#include -#include - -typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status); -typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status); -typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status); -typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, bfa_status_t status); - -struct bfa_cee_cbfn_s { - bfa_cee_get_attr_cbfn_t get_attr_cbfn; - void *get_attr_cbarg; - bfa_cee_get_stats_cbfn_t get_stats_cbfn; - void *get_stats_cbarg; - bfa_cee_reset_stats_cbfn_t reset_stats_cbfn; - void *reset_stats_cbarg; -}; - -struct bfa_cee_s { - void *dev; - bfa_boolean_t get_attr_pending; - bfa_boolean_t get_stats_pending; - bfa_boolean_t reset_stats_pending; - bfa_status_t get_attr_status; - bfa_status_t get_stats_status; - bfa_status_t reset_stats_status; - struct bfa_cee_cbfn_s cbfn; - struct bfa_ioc_hbfail_notify_s hbfail; - struct bfa_trc_mod_s *trcmod; - struct bfa_log_mod_s *logmod; - struct bfa_cee_attr_s *attr; - struct bfa_cee_stats_s *stats; - struct bfa_dma_s attr_dma; - struct bfa_dma_s stats_dma; - struct bfa_ioc_s *ioc; - struct bfa_mbox_cmd_s get_cfg_mb; - struct bfa_mbox_cmd_s get_stats_mb; - struct bfa_mbox_cmd_s reset_stats_mb; -}; - -u32 bfa_cee_meminfo(void); -void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, - u64 dma_pa); -void bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev, - struct bfa_trc_mod_s *trcmod, - struct bfa_log_mod_s *logmod); -void bfa_cee_detach(struct bfa_cee_s *cee); -bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee, - struct bfa_cee_attr_s *attr, - bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); -bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee, - struct bfa_cee_stats_s *stats, - bfa_cee_get_stats_cbfn_t cbfn, void *cbarg); -bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee, - bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg); -#endif /* __BFA_CEE_H__ */ diff --git a/drivers/scsi/bfa/include/cna/port/bfa_port.h b/drivers/scsi/bfa/include/cna/port/bfa_port.h deleted file mode 100644 index d7babaf97848..000000000000 --- a/drivers/scsi/bfa/include/cna/port/bfa_port.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_PORT_H__ -#define __BFA_PORT_H__ - -#include -#include -#include -#include - -typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status); -typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status); - -struct bfa_port_s { - void *dev; - struct bfa_ioc_s *ioc; - struct bfa_trc_mod_s *trcmod; - struct bfa_log_mod_s *logmod; - u32 msgtag; - bfa_boolean_t stats_busy; - struct bfa_mbox_cmd_s stats_mb; - bfa_port_stats_cbfn_t stats_cbfn; - void *stats_cbarg; - bfa_status_t stats_status; - u32 stats_reset_time; - union bfa_pport_stats_u *stats; - struct bfa_dma_s stats_dma; - bfa_boolean_t endis_pending; - struct bfa_mbox_cmd_s endis_mb; - bfa_port_endis_cbfn_t endis_cbfn; - void *endis_cbarg; - bfa_status_t endis_status; - struct bfa_ioc_hbfail_notify_s hbfail; -}; - -void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, - void *dev, struct bfa_trc_mod_s *trcmod, - struct bfa_log_mod_s *logmod); -void bfa_port_detach(struct bfa_port_s *port); -void bfa_port_hbfail(void *arg); - -bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, - union bfa_pport_stats_u *stats, - bfa_port_stats_cbfn_t cbfn, void *cbarg); -bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port, - bfa_port_stats_cbfn_t cbfn, void *cbarg); -bfa_status_t bfa_port_enable(struct bfa_port_s *port, - bfa_port_endis_cbfn_t cbfn, void *cbarg); -bfa_status_t bfa_port_disable(struct bfa_port_s *port, - bfa_port_endis_cbfn_t cbfn, void *cbarg); -u32 bfa_port_meminfo(void); -void bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, - u64 dma_pa); - -#endif /* __BFA_PORT_H__ */ diff --git a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h b/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h deleted file mode 100644 index 1563ee512218..000000000000 --- a/drivers/scsi/bfa/include/cna/pstats/ethport_defs.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved. - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __ETHPORT_DEFS_H__ -#define __ETHPORT_DEFS_H__ - -struct bnad_drv_stats { - u64 netif_queue_stop; - u64 netif_queue_wakeup; - u64 tso4; - u64 tso6; - u64 tso_err; - u64 tcpcsum_offload; - u64 udpcsum_offload; - u64 csum_help; - u64 csum_help_err; - - u64 hw_stats_updates; - u64 netif_rx_schedule; - u64 netif_rx_complete; - u64 netif_rx_dropped; -}; -#endif diff --git a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h b/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h deleted file mode 100644 index eb7548030d0f..000000000000 --- a/drivers/scsi/bfa/include/cna/pstats/phyport_defs.h +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved. - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __PHYPORT_DEFS_H__ -#define __PHYPORT_DEFS_H__ - -#define BNA_TXF_ID_MAX 64 -#define BNA_RXF_ID_MAX 64 - -/* - * Statistics - */ - -/* - * TxF Frame Statistics - */ -struct bna_stats_txf { - u64 ucast_octets; - u64 ucast; - u64 ucast_vlan; - - u64 mcast_octets; - u64 mcast; - u64 mcast_vlan; - - u64 bcast_octets; - u64 bcast; - u64 bcast_vlan; - - u64 errors; - u64 filter_vlan; /* frames filtered due to VLAN */ - u64 filter_mac_sa; /* frames filtered due to SA check */ -}; - -/* - * RxF Frame Statistics - */ -struct bna_stats_rxf { - u64 ucast_octets; - u64 ucast; - u64 ucast_vlan; - - u64 mcast_octets; - u64 mcast; - u64 mcast_vlan; - - u64 bcast_octets; - u64 bcast; - u64 bcast_vlan; - u64 frame_drops; -}; - -/* - * FC Tx Frame Statistics - */ -struct bna_stats_fc_tx { - u64 txf_ucast_octets; - u64 txf_ucast; - u64 txf_ucast_vlan; - - u64 txf_mcast_octets; - u64 txf_mcast; - u64 txf_mcast_vlan; - - u64 txf_bcast_octets; - u64 txf_bcast; - u64 txf_bcast_vlan; - - u64 txf_parity_errors; - u64 txf_timeout; - u64 txf_fid_parity_errors; -}; - -/* - * FC Rx Frame Statistics - */ -struct bna_stats_fc_rx { - u64 rxf_ucast_octets; - u64 rxf_ucast; - u64 rxf_ucast_vlan; - - u64 rxf_mcast_octets; - u64 rxf_mcast; - u64 rxf_mcast_vlan; - - u64 rxf_bcast_octets; - u64 rxf_bcast; - u64 rxf_bcast_vlan; -}; - -/* - * RAD Frame Statistics - */ -struct cna_stats_rad { - u64 rx_frames; - u64 rx_octets; - u64 rx_vlan_frames; - - u64 rx_ucast; - u64 rx_ucast_octets; - u64 rx_ucast_vlan; - - u64 rx_mcast; - u64 rx_mcast_octets; - u64 rx_mcast_vlan; - - u64 rx_bcast; - u64 rx_bcast_octets; - u64 rx_bcast_vlan; - - u64 rx_drops; -}; - -/* - * BPC Tx Registers - */ -struct cna_stats_bpc_tx { - u64 tx_pause[8]; - u64 tx_zero_pause[8]; /* Pause cancellation */ - u64 tx_first_pause[8]; /* Pause initiation rather - *than retention */ -}; - -/* - * BPC Rx Registers - */ -struct cna_stats_bpc_rx { - u64 rx_pause[8]; - u64 rx_zero_pause[8]; /* Pause cancellation */ - u64 rx_first_pause[8]; /* Pause initiation rather - *than retention */ -}; - -/* - * MAC Rx Statistics - */ -struct cna_stats_mac_rx { - u64 frame_64; /* both rx and tx counter */ - u64 frame_65_127; /* both rx and tx counter */ - u64 frame_128_255; /* both rx and tx counter */ - u64 frame_256_511; /* both rx and tx counter */ - u64 frame_512_1023; /* both rx and tx counter */ - u64 frame_1024_1518; /* both rx and tx counter */ - u64 frame_1518_1522; /* both rx and tx counter */ - u64 rx_bytes; - u64 rx_packets; - u64 rx_fcs_error; - u64 rx_multicast; - u64 rx_broadcast; - u64 rx_control_frames; - u64 rx_pause; - u64 rx_unknown_opcode; - u64 rx_alignment_error; - u64 rx_frame_length_error; - u64 rx_code_error; - u64 rx_carrier_sense_error; - u64 rx_undersize; - u64 rx_oversize; - u64 rx_fragments; - u64 rx_jabber; - u64 rx_drop; -}; - -/* - * MAC Tx Statistics - */ -struct cna_stats_mac_tx { - u64 tx_bytes; - u64 tx_packets; - u64 tx_multicast; - u64 tx_broadcast; - u64 tx_pause; - u64 tx_deferral; - u64 tx_excessive_deferral; - u64 tx_single_collision; - u64 tx_muliple_collision; - u64 
tx_late_collision; - u64 tx_excessive_collision; - u64 tx_total_collision; - u64 tx_pause_honored; - u64 tx_drop; - u64 tx_jabber; - u64 tx_fcs_error; - u64 tx_control_frame; - u64 tx_oversize; - u64 tx_undersize; - u64 tx_fragments; -}; - -/* - * Complete statistics - */ -struct bna_stats { - struct cna_stats_mac_rx mac_rx_stats; - struct cna_stats_bpc_rx bpc_rx_stats; - struct cna_stats_rad rad_stats; - struct bna_stats_fc_rx fc_rx_stats; - struct cna_stats_mac_tx mac_tx_stats; - struct cna_stats_bpc_tx bpc_tx_stats; - struct bna_stats_fc_tx fc_tx_stats; - struct bna_stats_rxf rxf_stats[BNA_TXF_ID_MAX]; - struct bna_stats_txf txf_stats[BNA_RXF_ID_MAX]; -}; - -#endif diff --git a/drivers/scsi/bfa/include/cs/bfa_checksum.h b/drivers/scsi/bfa/include/cs/bfa_checksum.h deleted file mode 100644 index 650f8d0aaff9..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_checksum.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_checksum.h BFA checksum utilities - */ - -#ifndef __BFA_CHECKSUM_H__ -#define __BFA_CHECKSUM_H__ - -static inline u32 -bfa_checksum_u32(u32 *buf, int sz) -{ - int i, m = sz >> 2; - u32 sum = 0; - - for (i = 0; i < m; i++) - sum ^= buf[i]; - - return sum; -} - -static inline u16 -bfa_checksum_u16(u16 *buf, int sz) -{ - int i, m = sz >> 1; - u16 sum = 0; - - for (i = 0; i < m; i++) - sum ^= buf[i]; - - return sum; -} - -static inline u8 -bfa_checksum_u8(u8 *buf, int sz) -{ - int i; - u8 sum = 0; - - for (i = 0; i < sz; i++) - sum ^= buf[i]; - - return sum; -} -#endif diff --git a/drivers/scsi/bfa/include/cs/bfa_debug.h b/drivers/scsi/bfa/include/cs/bfa_debug.h deleted file mode 100644 index 75a911ea7936..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_debug.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/** - * bfa_debug.h BFA debug interfaces - */ - -#ifndef __BFA_DEBUG_H__ -#define __BFA_DEBUG_H__ - -#define bfa_assert(__cond) do { \ - if (!(__cond)) \ - bfa_panic(__LINE__, __FILE__, #__cond); \ -} while (0) - -#define bfa_sm_fault(__mod, __event) do { \ - bfa_trc(__mod, (((uint32_t)0xDEAD << 16) | __event)); \ - bfa_sm_panic((__mod)->logm, __LINE__, __FILE__, __event); \ -} while (0) - -#ifndef BFA_PERF_BUILD -#define bfa_assert_fp(__cond) bfa_assert(__cond) -#else -#define bfa_assert_fp(__cond) -#endif - -struct bfa_log_mod_s; -void bfa_panic(int line, char *file, char *panicstr); -void bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event); - -#endif /* __BFA_DEBUG_H__ */ diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h deleted file mode 100644 index bc334e0a93fa..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_log.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_log.h BFA log library data structure and function definition - */ - -#ifndef __BFA_LOG_H__ -#define __BFA_LOG_H__ - -#include -#include -#include - -/* - * BFA log module definition - * - * To create a new module id: - * Add a #define at the end of the list below. Select a value for your - * definition so that it is one (1) greater than the previous - * definition. Modify the definition of BFA_LOG_MODULE_ID_MAX to become - * your new definition. - * Should have no gaps in between the values because this is used in arrays. 
- * IMPORTANT: AEN_IDs must be at the begining, otherwise update bfa_defs_aen.h - */ - -enum bfa_log_module_id { - BFA_LOG_UNUSED_ID = 0, - - /* AEN defs begin */ - BFA_LOG_AEN_MIN = BFA_LOG_UNUSED_ID, - - BFA_LOG_AEN_ID_ADAPTER = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ADAPTER,/* 1 */ - BFA_LOG_AEN_ID_PORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_PORT, /* 2 */ - BFA_LOG_AEN_ID_LPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_LPORT, /* 3 */ - BFA_LOG_AEN_ID_RPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_RPORT, /* 4 */ - BFA_LOG_AEN_ID_ITNIM = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ITNIM, /* 5 */ - BFA_LOG_AEN_ID_TIN = BFA_LOG_AEN_MIN + BFA_AEN_CAT_TIN, /* 6 */ - BFA_LOG_AEN_ID_IPFC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IPFC, /* 7 */ - BFA_LOG_AEN_ID_AUDIT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_AUDIT, /* 8 */ - BFA_LOG_AEN_ID_IOC = BFA_LOG_AEN_MIN + BFA_AEN_CAT_IOC, /* 9 */ - BFA_LOG_AEN_ID_ETHPORT = BFA_LOG_AEN_MIN + BFA_AEN_CAT_ETHPORT,/* 10 */ - - BFA_LOG_AEN_MAX = BFA_LOG_AEN_ID_ETHPORT, - /* AEN defs end */ - - BFA_LOG_MODULE_ID_MIN = BFA_LOG_AEN_MAX, - - BFA_LOG_FW_ID = BFA_LOG_MODULE_ID_MIN + 1, - BFA_LOG_HAL_ID = BFA_LOG_MODULE_ID_MIN + 2, - BFA_LOG_FCS_ID = BFA_LOG_MODULE_ID_MIN + 3, - BFA_LOG_WDRV_ID = BFA_LOG_MODULE_ID_MIN + 4, - BFA_LOG_LINUX_ID = BFA_LOG_MODULE_ID_MIN + 5, - BFA_LOG_SOLARIS_ID = BFA_LOG_MODULE_ID_MIN + 6, - - BFA_LOG_MODULE_ID_MAX = BFA_LOG_SOLARIS_ID, - - /* Not part of any arrays */ - BFA_LOG_MODULE_ID_ALL = BFA_LOG_MODULE_ID_MAX + 1, - BFA_LOG_AEN_ALL = BFA_LOG_MODULE_ID_MAX + 2, - BFA_LOG_DRV_ALL = BFA_LOG_MODULE_ID_MAX + 3, -}; - -/* - * BFA log catalog name - */ -#define BFA_LOG_CAT_NAME "BFA" - -/* - * bfa log severity values - */ -enum bfa_log_severity { - BFA_LOG_INVALID = 0, - BFA_LOG_CRITICAL = 1, - BFA_LOG_ERROR = 2, - BFA_LOG_WARNING = 3, - BFA_LOG_INFO = 4, - BFA_LOG_NONE = 5, - BFA_LOG_LEVEL_MAX = BFA_LOG_NONE -}; - -#define BFA_LOG_MODID_OFFSET 16 - - -struct bfa_log_msgdef_s { - u32 msg_id; /* message id */ - int attributes; /* attributes */ - int severity; /* severity level */ - char *msg_value; - /* msg string */ - char *message; - /* msg format string */ - int arg_type; /* argument type */ - int arg_num; /* number of argument */ -}; - -/* - * supported argument type - */ -enum bfa_log_arg_type { - BFA_LOG_S = 0, /* string */ - BFA_LOG_D, /* decimal */ - BFA_LOG_I, /* integer */ - BFA_LOG_O, /* oct number */ - BFA_LOG_U, /* unsigned integer */ - BFA_LOG_X, /* hex number */ - BFA_LOG_F, /* floating */ - BFA_LOG_C, /* character */ - BFA_LOG_L, /* double */ - BFA_LOG_P /* pointer */ -}; - -#define BFA_LOG_ARG_TYPE 2 -#define BFA_LOG_ARG0 (0 * BFA_LOG_ARG_TYPE) -#define BFA_LOG_ARG1 (1 * BFA_LOG_ARG_TYPE) -#define BFA_LOG_ARG2 (2 * BFA_LOG_ARG_TYPE) -#define BFA_LOG_ARG3 (3 * BFA_LOG_ARG_TYPE) - -#define BFA_LOG_GET_MOD_ID(msgid) ((msgid >> BFA_LOG_MODID_OFFSET) & 0xff) -#define BFA_LOG_GET_MSG_IDX(msgid) (msgid & 0xffff) -#define BFA_LOG_GET_MSG_ID(msgdef) ((msgdef)->msg_id) -#define BFA_LOG_GET_MSG_FMT_STRING(msgdef) ((msgdef)->message) -#define BFA_LOG_GET_SEVERITY(msgdef) ((msgdef)->severity) - -/* - * Event attributes - */ -#define BFA_LOG_ATTR_NONE 0 -#define BFA_LOG_ATTR_AUDIT 1 -#define BFA_LOG_ATTR_LOG 2 -#define BFA_LOG_ATTR_FFDC 4 - -#define BFA_LOG_CREATE_ID(msw, lsw) \ - (((u32)msw << BFA_LOG_MODID_OFFSET) | lsw) - -struct bfa_log_mod_s; - -/** - * callback function - */ -typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id, - const char *format, ...); - - -struct bfa_log_mod_s { - char instance_info[BFA_STRING_32]; /* instance info */ - int log_level[BFA_LOG_MODULE_ID_MAX + 
1]; - /* log level for modules */ - bfa_log_cb_t cbfn; /* callback function */ -}; - -extern int bfa_log_init(struct bfa_log_mod_s *log_mod, - char *instance_name, bfa_log_cb_t cbfn); -extern int bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...); -extern bfa_status_t bfa_log_set_level(struct bfa_log_mod_s *log_mod, - int mod_id, enum bfa_log_severity log_level); -extern bfa_status_t bfa_log_set_level_all(struct bfa_log_mod_s *log_mod, - enum bfa_log_severity log_level); -extern bfa_status_t bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod, - enum bfa_log_severity log_level); -extern enum bfa_log_severity bfa_log_get_level(struct bfa_log_mod_s *log_mod, - int mod_id); -extern enum bfa_log_severity bfa_log_get_msg_level( - struct bfa_log_mod_s *log_mod, u32 msg_id); -/* - * array of messages generated from xml files - */ -extern struct bfa_log_msgdef_s bfa_log_msg_array[]; - -#endif diff --git a/drivers/scsi/bfa/include/cs/bfa_perf.h b/drivers/scsi/bfa/include/cs/bfa_perf.h deleted file mode 100644 index 45aa5f978ff5..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_perf.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFAD_PERF_H__ -#define __BFAD_PERF_H__ - -#ifdef BFAD_PERF_BUILD - -#undef bfa_trc -#undef bfa_trc32 -#undef bfa_assert -#undef BFA_TRC_FILE - -#define bfa_trc(_trcp, _data) -#define bfa_trc32(_trcp, _data) -#define bfa_assert(__cond) -#define BFA_TRC_FILE(__mod, __submod) - -#endif - -#endif /* __BFAD_PERF_H__ */ diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h deleted file mode 100644 index f5bef63b5877..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_plog.h +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#ifndef __BFA_PORTLOG_H__ -#define __BFA_PORTLOG_H__ - -#include "protocol/fc.h" -#include - -#define BFA_PL_NLOG_ENTS 256 -#define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS) - -#define BFA_PL_STRING_LOG_SZ 32 /* number of chars in string log */ -#define BFA_PL_INT_LOG_SZ 8 /* number of integers in the integer log */ - -enum bfa_plog_log_type { - BFA_PL_LOG_TYPE_INVALID = 0, - BFA_PL_LOG_TYPE_INT = 1, - BFA_PL_LOG_TYPE_STRING = 2, -}; - -/* - * the (fixed size) record format for each entry in the portlog - */ -struct bfa_plog_rec_s { - u32 tv; /* Filled by the portlog driver when the * - * entry is added to the circular log. */ - u8 port; /* Source port that logged this entry. CM - * entities will use 0xFF */ - u8 mid; /* Integer value to be used by all entities * - * while logging. The module id to string * - * conversion will be done by BFAL. See - * enum bfa_plog_mid */ - u8 eid; /* indicates Rx, Tx, IOCTL, etc. See - * enum bfa_plog_eid */ - u8 log_type; /* indicates string log or integer log. - * see bfa_plog_log_type_t */ - u8 log_num_ints; - /* - * interpreted only if log_type is INT_LOG. indicates number of - * integers in the int_log[] (0-PL_INT_LOG_SZ). - */ - u8 rsvd; - u16 misc; /* can be used to indicate fc frame length, - *etc.. */ - union { - char string_log[BFA_PL_STRING_LOG_SZ]; - u32 int_log[BFA_PL_INT_LOG_SZ]; - } log_entry; - -}; - -/* - * the following #defines will be used by the logging entities to indicate - * their module id. BFAL will convert the integer value to string format - * -* process to be used while changing the following #defines: - * - Always add new entries at the end - * - define corresponding string in BFAL - * - Do not remove any entry or rearrange the order. - */ -enum bfa_plog_mid { - BFA_PL_MID_INVALID = 0, - BFA_PL_MID_DEBUG = 1, - BFA_PL_MID_DRVR = 2, - BFA_PL_MID_HAL = 3, - BFA_PL_MID_HAL_FCXP = 4, - BFA_PL_MID_HAL_UF = 5, - BFA_PL_MID_FCS = 6, - BFA_PL_MID_LPS = 7, - BFA_PL_MID_MAX = 8 -}; - -#define BFA_PL_MID_STRLEN 8 -struct bfa_plog_mid_strings_s { - char m_str[BFA_PL_MID_STRLEN]; -}; - -/* - * the following #defines will be used by the logging entities to indicate - * their event type. BFAL will convert the integer value to string format - * -* process to be used while changing the following #defines: - * - Always add new entries at the end - * - define corresponding string in BFAL - * - Do not remove any entry or rearrange the order. 
- */ -enum bfa_plog_eid { - BFA_PL_EID_INVALID = 0, - BFA_PL_EID_IOC_DISABLE = 1, - BFA_PL_EID_IOC_ENABLE = 2, - BFA_PL_EID_PORT_DISABLE = 3, - BFA_PL_EID_PORT_ENABLE = 4, - BFA_PL_EID_PORT_ST_CHANGE = 5, - BFA_PL_EID_TX = 6, - BFA_PL_EID_TX_ACK1 = 7, - BFA_PL_EID_TX_RJT = 8, - BFA_PL_EID_TX_BSY = 9, - BFA_PL_EID_RX = 10, - BFA_PL_EID_RX_ACK1 = 11, - BFA_PL_EID_RX_RJT = 12, - BFA_PL_EID_RX_BSY = 13, - BFA_PL_EID_CT_IN = 14, - BFA_PL_EID_CT_OUT = 15, - BFA_PL_EID_DRIVER_START = 16, - BFA_PL_EID_RSCN = 17, - BFA_PL_EID_DEBUG = 18, - BFA_PL_EID_MISC = 19, - BFA_PL_EID_FIP_FCF_DISC = 20, - BFA_PL_EID_FIP_FCF_CVL = 21, - BFA_PL_EID_LOGIN = 22, - BFA_PL_EID_LOGO = 23, - BFA_PL_EID_MAX = 24 -}; - -#define BFA_PL_ENAME_STRLEN 8 -struct bfa_plog_eid_strings_s { - char e_str[BFA_PL_ENAME_STRLEN]; -}; - -#define BFA_PL_SIG_LEN 8 -#define BFA_PL_SIG_STR "12pl123" - -/* - * per port circular log buffer - */ -struct bfa_plog_s { - char plog_sig[BFA_PL_SIG_LEN]; /* Start signature */ - u8 plog_enabled; - u8 rsvd[7]; - u32 ticks; - u16 head; - u16 tail; - struct bfa_plog_rec_s plog_recs[BFA_PL_NLOG_ENTS]; -}; - -void bfa_plog_init(struct bfa_plog_s *plog); -void bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, u16 misc, char *log_str); -void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, u16 misc, - u32 *intarr, u32 num_ints); -void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, u16 misc, - struct fchs_s *fchdr); -void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, u16 misc, - struct fchs_s *fchdr, u32 pld_w0); -void bfa_plog_clear(struct bfa_plog_s *plog); -void bfa_plog_enable(struct bfa_plog_s *plog); -void bfa_plog_disable(struct bfa_plog_s *plog); -bfa_boolean_t bfa_plog_get_setting(struct bfa_plog_s *plog); - -#endif /* __BFA_PORTLOG_H__ */ diff --git a/drivers/scsi/bfa/include/cs/bfa_q.h b/drivers/scsi/bfa/include/cs/bfa_q.h deleted file mode 100644 index ea895facedbc..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_q.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_q.h Circular queue definitions. 
- */ - -#ifndef __BFA_Q_H__ -#define __BFA_Q_H__ - -#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) -#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) -#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev) - -/* - * bfa_q_qe_init - to initialize a queue element - */ -#define bfa_q_qe_init(_qe) { \ - bfa_q_next(_qe) = (struct list_head *) NULL; \ - bfa_q_prev(_qe) = (struct list_head *) NULL; \ -} - -/* - * bfa_q_deq - dequeue an element from head of the queue - */ -#define bfa_q_deq(_q, _qe) { \ - if (!list_empty(_q)) { \ - (*((struct list_head **) (_qe))) = bfa_q_next(_q); \ - bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ - (struct list_head *) (_q); \ - bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \ - BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \ - } else { \ - *((struct list_head **) (_qe)) = (struct list_head *) NULL; \ - } \ -} - -/* - * bfa_q_deq_tail - dequeue an element from tail of the queue - */ -#define bfa_q_deq_tail(_q, _qe) { \ - if (!list_empty(_q)) { \ - *((struct list_head **) (_qe)) = bfa_q_prev(_q); \ - bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ - (struct list_head *) (_q); \ - bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe); \ - BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \ - } else { \ - *((struct list_head **) (_qe)) = (struct list_head *) NULL; \ - } \ -} - -/* - * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not - * consistent across modules) - */ -#ifndef BFA_PERF_BUILD -#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe) -#else -#define BFA_Q_DBG_INIT(_qe) -#endif - -#define bfa_q_is_on_q(_q, _qe) \ - bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) -extern int bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe); - -#endif diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h deleted file mode 100644 index 11fba9082f05..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_sm.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfasm.h State machine defines - */ - -#ifndef __BFA_SM_H__ -#define __BFA_SM_H__ - -typedef void (*bfa_sm_t)(void *sm, int event); -/** - * oc - object class eg. bfa_ioc - * st - state, eg. reset - * otype - object type, eg. struct bfa_ioc_s - * etype - object type, eg. enum ioc_event - */ -#define bfa_sm_state_decl(oc, st, otype, etype) \ - static void oc ## _sm_ ## st(otype * fsm, etype event) - -#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) -#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) -#define bfa_sm_get_state(_sm) ((_sm)->sm) -#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) - -/** - * For converting from state machine function to state encoding. 
- */ -struct bfa_sm_table_s { - bfa_sm_t sm; /* state machine function */ - int state; /* state machine encoding */ - char *name; /* state name for display */ -}; -#define BFA_SM(_sm) ((bfa_sm_t)(_sm)) - -int bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm); - -/** - * State machine with entry actions. - */ -typedef void (*bfa_fsm_t)(void *fsm, int event); - -/** - * oc - object class eg. bfa_ioc - * st - state, eg. reset - * otype - object type, eg. struct bfa_ioc_s - * etype - object type, eg. enum ioc_event - */ -#define bfa_fsm_state_decl(oc, st, otype, etype) \ - static void oc ## _sm_ ## st(otype * fsm, etype event); \ - static void oc ## _sm_ ## st ## _entry(otype * fsm) - -#define bfa_fsm_set_state(_fsm, _state) do { \ - (_fsm)->fsm = (bfa_fsm_t)(_state); \ - _state ## _entry(_fsm); \ -} while (0) - -#define bfa_fsm_send_event(_fsm, _event) \ - ((_fsm)->fsm((_fsm), (_event))) -#define bfa_fsm_cmp_state(_fsm, _state) \ - ((_fsm)->fsm == (bfa_fsm_t)(_state)) - -#endif diff --git a/drivers/scsi/bfa/include/cs/bfa_trc.h b/drivers/scsi/bfa/include/cs/bfa_trc.h deleted file mode 100644 index 310771c888e7..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_trc.h +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_TRC_H__ -#define __BFA_TRC_H__ - -#include - -#ifndef BFA_TRC_MAX -#define BFA_TRC_MAX (4 * 1024) -#endif - -#ifndef BFA_TRC_TS -#define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) -#endif - -struct bfa_trc_s { -#ifdef __BIGENDIAN - u16 fileno; - u16 line; -#else - u16 line; - u16 fileno; -#endif - u32 timestamp; - union { - struct { - u32 rsvd; - u32 u32; - } u32; - u64 u64; - } data; -}; - - -struct bfa_trc_mod_s { - u32 head; - u32 tail; - u32 ntrc; - u32 stopped; - u32 ticks; - u32 rsvd[3]; - struct bfa_trc_s trc[BFA_TRC_MAX]; -}; - - -enum { - BFA_TRC_FW = 1, /* firmware modules */ - BFA_TRC_HAL = 2, /* BFA modules */ - BFA_TRC_FCS = 3, /* BFA FCS modules */ - BFA_TRC_LDRV = 4, /* Linux driver modules */ - BFA_TRC_SDRV = 5, /* Solaris driver modules */ - BFA_TRC_VDRV = 6, /* vmware driver modules */ - BFA_TRC_WDRV = 7, /* windows driver modules */ - BFA_TRC_AEN = 8, /* AEN module */ - BFA_TRC_BIOS = 9, /* bios driver modules */ - BFA_TRC_EFI = 10, /* EFI driver modules */ - BNA_TRC_WDRV = 11, /* BNA windows driver modules */ - BNA_TRC_VDRV = 12, /* BNA vmware driver modules */ - BNA_TRC_SDRV = 13, /* BNA Solaris driver modules */ - BNA_TRC_LDRV = 14, /* BNA Linux driver modules */ - BNA_TRC_HAL = 15, /* BNA modules */ - BFA_TRC_CNA = 16, /* Common modules */ - BNA_TRC_IMDRV = 17 /* BNA windows intermediate driver modules */ -}; -#define BFA_TRC_MOD_SH 10 -#define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) - -/** - * Define a new tracing file (module). Module should match one defined above. 
- */ -#define BFA_TRC_FILE(__mod, __submod) \ - static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \ - BFA_TRC_MOD(__mod)) - - -#define bfa_trc32(_trcp, _data) \ - __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data) - - -#ifndef BFA_BOOT_BUILD -#define bfa_trc(_trcp, _data) \ - __bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data) -#else -void bfa_boot_trc(struct bfa_trc_mod_s *trcmod, u16 fileno, - u16 line, u32 data); -#define bfa_trc(_trcp, _data) \ - bfa_boot_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data) -#endif - - -static inline void -bfa_trc_init(struct bfa_trc_mod_s *trcm) -{ - trcm->head = trcm->tail = trcm->stopped = 0; - trcm->ntrc = BFA_TRC_MAX; -} - - -static inline void -bfa_trc_stop(struct bfa_trc_mod_s *trcm) -{ - trcm->stopped = 1; -} - -#ifdef FWTRC -extern void dc_flush(void *data); -#else -#define dc_flush(data) -#endif - - -static inline void -__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) -{ - int tail = trcm->tail; - struct bfa_trc_s *trc = &trcm->trc[tail]; - - if (trcm->stopped) - return; - - trc->fileno = (u16) fileno; - trc->line = (u16) line; - trc->data.u64 = data; - trc->timestamp = BFA_TRC_TS(trcm); - dc_flush(trc); - - trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); - if (trcm->tail == trcm->head) - trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); - dc_flush(trcm); -} - - -static inline void -__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) -{ - int tail = trcm->tail; - struct bfa_trc_s *trc = &trcm->trc[tail]; - - if (trcm->stopped) - return; - - trc->fileno = (u16) fileno; - trc->line = (u16) line; - trc->data.u32.u32 = data; - trc->timestamp = BFA_TRC_TS(trcm); - dc_flush(trc); - - trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); - if (trcm->tail == trcm->head) - trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); - dc_flush(trcm); -} - -#ifndef BFA_PERF_BUILD -#define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data) -#else -#define bfa_trc_fp(_trcp, _data) -#endif - -#endif /* __BFA_TRC_H__ */ - diff --git a/drivers/scsi/bfa/include/cs/bfa_wc.h b/drivers/scsi/bfa/include/cs/bfa_wc.h deleted file mode 100644 index 0460bd4fc7c4..000000000000 --- a/drivers/scsi/bfa/include/cs/bfa_wc.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_wc.h Generic wait counter. - */ - -#ifndef __BFA_WC_H__ -#define __BFA_WC_H__ - -typedef void (*bfa_wc_resume_t) (void *cbarg); - -struct bfa_wc_s { - bfa_wc_resume_t wc_resume; - void *wc_cbarg; - int wc_count; -}; - -static inline void -bfa_wc_up(struct bfa_wc_s *wc) -{ - wc->wc_count++; -} - -static inline void -bfa_wc_down(struct bfa_wc_s *wc) -{ - wc->wc_count--; - if (wc->wc_count == 0) - wc->wc_resume(wc->wc_cbarg); -} - -/** - * Initialize a waiting counter. 
- */ -static inline void -bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) -{ - wc->wc_resume = wc_resume; - wc->wc_cbarg = wc_cbarg; - wc->wc_count = 0; - bfa_wc_up(wc); -} - -/** - * Wait for counter to reach zero - */ -static inline void -bfa_wc_wait(struct bfa_wc_s *wc) -{ - bfa_wc_down(wc); -} - -#endif diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h b/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h deleted file mode 100644 index aea0360d67d5..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_adapter.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_ADAPTER_H__ -#define __BFA_DEFS_ADAPTER_H__ - -#include -#include -#include - -/** - * BFA adapter level attributes. - */ -enum { - BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), - /* - *!< adapter serial num length - */ - BFA_ADAPTER_MODEL_NAME_LEN = 16, /* model name length */ - BFA_ADAPTER_MODEL_DESCR_LEN = 128, /* model description length */ - BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */ - BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */ - BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */ -}; - -struct bfa_adapter_attr_s { - char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; - char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; - u32 card_type; - char model[BFA_ADAPTER_MODEL_NAME_LEN]; - char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; - wwn_t pwwn; - char node_symname[FC_SYMNAME_MAX]; - char hw_ver[BFA_VERSION_LEN]; - char fw_ver[BFA_VERSION_LEN]; - char optrom_ver[BFA_VERSION_LEN]; - char os_type[BFA_ADAPTER_OS_TYPE_LEN]; - struct bfa_mfg_vpd_s vpd; - struct mac_s mac; - - u8 nports; - u8 max_speed; - u8 prototype; - char asic_rev; - - u8 pcie_gen; - u8 pcie_lanes_orig; - u8 pcie_lanes; - u8 cna_capable; - u8 is_mezz; -}; - -/** - * BFA adapter level events - * Arguments below are in BFAL context from Mgmt - * BFA_PORT_AEN_ADD: [in]: None [out]: serial_num, pwwn, nports - * BFA_PORT_AEN_REMOVE: [in]: pwwn [out]: serial_num, pwwn, nports - */ -enum bfa_adapter_aen_event { - BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */ - BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */ -}; - -struct bfa_adapter_aen_data_s { - char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; - u32 nports; /* Number of NPorts */ - wwn_t pwwn; /* WWN of one of its physical port */ -}; - -#endif /* __BFA_DEFS_ADAPTER_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h deleted file mode 100644 index 35244698fcdc..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_AEN_H__ -#define __BFA_DEFS_AEN_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define BFA_AEN_MAX_APP 5 - -enum bfa_aen_app { - bfa_aen_app_bcu = 0, /* No thread for bcu */ - bfa_aen_app_hcm = 1, - bfa_aen_app_cim = 2, - bfa_aen_app_snia = 3, - bfa_aen_app_test = 4, /* To be removed after unit test */ -}; - -enum bfa_aen_category { - BFA_AEN_CAT_ADAPTER = 1, - BFA_AEN_CAT_PORT = 2, - BFA_AEN_CAT_LPORT = 3, - BFA_AEN_CAT_RPORT = 4, - BFA_AEN_CAT_ITNIM = 5, - BFA_AEN_CAT_TIN = 6, - BFA_AEN_CAT_IPFC = 7, - BFA_AEN_CAT_AUDIT = 8, - BFA_AEN_CAT_IOC = 9, - BFA_AEN_CAT_ETHPORT = 10, - BFA_AEN_MAX_CAT = 10 -}; - -#pragma pack(1) -union bfa_aen_data_u { - struct bfa_adapter_aen_data_s adapter; - struct bfa_port_aen_data_s port; - struct bfa_lport_aen_data_s lport; - struct bfa_rport_aen_data_s rport; - struct bfa_itnim_aen_data_s itnim; - struct bfa_audit_aen_data_s audit; - struct bfa_ioc_aen_data_s ioc; - struct bfa_ethport_aen_data_s ethport; -}; - -struct bfa_aen_entry_s { - enum bfa_aen_category aen_category; - int aen_type; - union bfa_aen_data_u aen_data; - struct bfa_timeval_s aen_tv; - s32 seq_num; - s32 bfad_num; - s32 rsvd[1]; -}; - -#pragma pack() - -#define bfa_aen_event_t int - -#endif /* __BFA_DEFS_AEN_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h b/drivers/scsi/bfa/include/defs/bfa_defs_audit.h deleted file mode 100644 index 8e3a962bf20c..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_audit.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_AUDIT_H__ -#define __BFA_DEFS_AUDIT_H__ - -#include - -/** - * BFA audit events - */ -enum bfa_audit_aen_event { - BFA_AUDIT_AEN_AUTH_ENABLE = 1, - BFA_AUDIT_AEN_AUTH_DISABLE = 2, -}; - -/** - * audit event data - */ -struct bfa_audit_aen_data_s { - wwn_t pwwn; -}; - -#endif /* __BFA_DEFS_AUDIT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h deleted file mode 100644 index f56ed871bb99..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_AUTH_H__ -#define __BFA_DEFS_AUTH_H__ - -#include - -#define PUBLIC_KEY 15409 -#define PRIVATE_KEY 19009 -#define KEY_LEN 32399 -#define BFA_AUTH_SECRET_STRING_LEN 256 -#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE -#define BFA_AUTH_FAIL_TIMEOUT 0xFF - -/** - * Authentication status - */ -enum bfa_auth_status { - BFA_AUTH_STATUS_NONE = 0, /* no authentication */ - BFA_AUTH_UNINIT = 1, /* state - uninit */ - BFA_AUTH_NEG_SEND = 2, /* state - negotiate send */ - BFA_AUTH_CHAL_WAIT = 3, /* state - challenge wait */ - BFA_AUTH_NEG_RETRY = 4, /* state - negotiate retry */ - BFA_AUTH_REPLY_SEND = 5, /* state - reply send */ - BFA_AUTH_STATUS_WAIT = 6, /* state - status wait */ - BFA_AUTH_SUCCESS = 7, /* state - success */ - BFA_AUTH_FAILED = 8, /* state - failed */ - BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */ -}; - -enum bfa_auth_rej_code { - BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */ - BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */ -}; - -/** - * Authentication reject codes - */ -enum bfa_auth_rej_code_exp { - BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */ - BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */ - BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */ - BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */ - BFA_AUTH_AUTH_FAILED = 5, /* auth failed */ - BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */ - BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */ - BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */ - BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */ - BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */ -}; - -struct auth_proto_stats_s { - u32 auth_rjts; - u32 auth_negs; - u32 auth_dones; - - u32 dhchap_challenges; - u32 dhchap_replies; - u32 dhchap_successes; -}; - -/** - * Authentication related statistics - */ -struct bfa_auth_stats_s { - u32 auth_failures; /* authentication failures */ - u32 auth_successes; /* authentication successes*/ - struct auth_proto_stats_s auth_rx_stats; /* Rx protocol stats */ - struct auth_proto_stats_s auth_tx_stats; /* Tx protocol stats */ -}; - -/** - * Authentication hash function algorithms - */ -enum bfa_auth_algo { - BFA_AUTH_ALGO_MD5 = 1, /* Message-Digest algorithm 5 */ - BFA_AUTH_ALGO_SHA1 = 2, /* Secure Hash Algorithm 1 */ - BFA_AUTH_ALGO_MS = 3, /* MD5, then SHA-1 */ - BFA_AUTH_ALGO_SM = 4, /* SHA-1, then MD5 */ -}; - -/** - * DH Groups - * - * Current value could be combination of one or more of the following values - */ -enum bfa_auth_group { - BFA_AUTH_GROUP_DHNULL = 0, /* DH NULL (value == 0) */ - BFA_AUTH_GROUP_DH768 = 1, /* DH group 768 (value == 1) */ - BFA_AUTH_GROUP_DH1024 = 2, /* DH group 1024 (value == 2) */ - BFA_AUTH_GROUP_DH1280 = 4, /* DH group 1280 (value == 3) */ - BFA_AUTH_GROUP_DH1536 = 8, /* DH group 1536 (value == 4) */ - - BFA_AUTH_GROUP_ALL = 256 /* Use default DH group order - * 0, 1, 2, 3, 4 */ -}; - -/** - * Authentication secret sources - */ -enum bfa_auth_secretsource { - BFA_AUTH_SECSRC_LOCAL = 1, /* locally 
configured */ - BFA_AUTH_SECSRC_RADIUS = 2, /* use radius server */ - BFA_AUTH_SECSRC_TACACS = 3, /* TACACS server */ -}; - -/** - * Authentication attributes - */ -struct bfa_auth_attr_s { - enum bfa_auth_status status; - enum bfa_auth_algo algo; - enum bfa_auth_group dh_grp; - enum bfa_auth_rej_code rjt_code; - enum bfa_auth_rej_code_exp rjt_code_exp; - u8 secret_set; - u8 resv[3]; -}; - -#endif /* __BFA_DEFS_AUTH_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h b/drivers/scsi/bfa/include/defs/bfa_defs_boot.h deleted file mode 100644 index 0fca10b6ad10..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_boot.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_BOOT_H__ -#define __BFA_DEFS_BOOT_H__ - -#include -#include -#include - -enum { - BFA_BOOT_BOOTLUN_MAX = 4, /* maximum boot lun per IOC */ - BFA_PREBOOT_BOOTLUN_MAX = 8, /* maximum preboot lun per IOC */ - -}; - -#define BOOT_CFG_REV1 1 - -/** - * Boot options setting. Boot options setting determines from where - * to get the boot lun information - */ -enum bfa_boot_bootopt { - BFA_BOOT_AUTO_DISCOVER = 0, /* Boot from blun provided by fabric */ - BFA_BOOT_STORED_BLUN = 1, /* Boot from bluns stored in flash */ - BFA_BOOT_FIRST_LUN = 2, /* Boot from first discovered blun */ -}; - -/** - * Boot lun information. - */ -struct bfa_boot_bootlun_s { - wwn_t pwwn; /* port wwn of target */ - lun_t lun; /* 64-bit lun */ -}; - -/** - * BOOT boot configuraton - */ -struct bfa_boot_cfg_s { - u8 version; - u8 rsvd1; - u16 chksum; - - u8 enable; /* enable/disable SAN boot */ - u8 speed; /* boot speed settings */ - u8 topology; /* boot topology setting */ - u8 bootopt; /* bfa_boot_bootopt_t */ - - u32 nbluns; /* number of boot luns */ - - u32 rsvd2; - - struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX]; - struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX]; -}; - -struct bfa_boot_pbc_s { - u8 enable; /* enable/disable SAN boot */ - u8 speed; /* boot speed settings */ - u8 topology; /* boot topology setting */ - u8 rsvd1; - u32 nbluns; /* number of boot luns */ - struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX]; -}; - -#endif /* __BFA_DEFS_BOOT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h deleted file mode 100644 index 6eaf519eccdc..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * bfa_defs_cee.h Interface declarations between host based - * BFAL and DCBX/LLDP module in Firmware - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_CEE_H__ -#define __BFA_DEFS_CEE_H__ - -#include -#include -#include - -#pragma pack(1) - -#define BFA_CEE_LLDP_MAX_STRING_LEN (128) - -#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 -#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 -#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 -#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008 -#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010 -#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020 -#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040 -#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080 -#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100 -#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200 -#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400 - - -/* LLDP string type */ -struct bfa_cee_lldp_str_s { - u8 sub_type; - u8 len; - u8 rsvd[2]; - u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; -}; - - -/* LLDP parameters */ -struct bfa_cee_lldp_cfg_s { - struct bfa_cee_lldp_str_s chassis_id; - struct bfa_cee_lldp_str_s port_id; - struct bfa_cee_lldp_str_s port_desc; - struct bfa_cee_lldp_str_s sys_name; - struct bfa_cee_lldp_str_s sys_desc; - struct bfa_cee_lldp_str_s mgmt_addr; - u16 time_to_interval; - u16 enabled_system_cap; -}; - -enum bfa_cee_dcbx_version_e { - DCBX_PROTOCOL_PRECEE = 1, - DCBX_PROTOCOL_CEE = 2, -}; - -enum bfa_cee_lls_e { - CEE_LLS_DOWN_NO_TLV = 0, /* LLS is down because the TLV not sent by - * the peer */ - CEE_LLS_DOWN = 1, /* LLS is down as advertised by the peer */ - CEE_LLS_UP = 2, -}; - -/* CEE/DCBX parameters */ -struct bfa_cee_dcbx_cfg_s { - u8 pgid[8]; - u8 pg_percentage[8]; - u8 pfc_enabled; /* bitmap of priorties with PFC enabled */ - u8 fcoe_user_priority; /* bitmap of priorities used for FcoE - * traffic */ - u8 dcbx_version; /* operating version:CEE or preCEE */ - u8 lls_fcoe; /* FCoE Logical Link Status */ - u8 lls_lan; /* LAN Logical Link Status */ - u8 rsvd[3]; -}; - -/* CEE status */ -/* Making this to tri-state for the benefit of port list command */ -enum bfa_cee_status_e { - CEE_UP = 0, - CEE_PHY_UP = 1, - CEE_LOOPBACK = 2, - CEE_PHY_DOWN = 3, -}; - -/* CEE Query */ -struct bfa_cee_attr_s { - u8 cee_status; - u8 error_reason; - struct bfa_cee_lldp_cfg_s lldp_remote; - struct bfa_cee_dcbx_cfg_s dcbx_remote; - mac_t src_mac; - u8 link_speed; - u8 nw_priority; - u8 filler[2]; -}; - - - - -/* LLDP/DCBX/CEE Statistics */ - -struct bfa_cee_lldp_stats_s { - u32 frames_transmitted; - u32 frames_aged_out; - u32 frames_discarded; - u32 frames_in_error; - u32 frames_rcvd; - u32 tlvs_discarded; - u32 tlvs_unrecognized; -}; - -struct bfa_cee_dcbx_stats_s { - u32 subtlvs_unrecognized; - u32 negotiation_failed; - u32 remote_cfg_changed; - u32 tlvs_received; - u32 tlvs_invalid; - u32 seqno; - u32 ackno; - u32 recvd_seqno; - u32 recvd_ackno; -}; - -struct bfa_cee_cfg_stats_s { - u32 cee_status_down; - u32 cee_status_up; - u32 cee_hw_cfg_changed; - u32 recvd_invalid_cfg; -}; - - -struct bfa_cee_stats_s { - struct bfa_cee_lldp_stats_s lldp_stats; - struct bfa_cee_dcbx_stats_s dcbx_stats; - struct bfa_cee_cfg_stats_s cfg_stats; -}; - -#pragma pack() - - -#endif /* __BFA_DEFS_CEE_H__ */ - - diff --git 
a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h deleted file mode 100644 index 7d00d00d3969..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_DRIVER_H__ -#define __BFA_DEFS_DRIVER_H__ - -/** - * Driver statistics - */ -struct bfa_driver_stats_s { - u16 tm_io_abort; - u16 tm_io_abort_comp; - u16 tm_lun_reset; - u16 tm_lun_reset_comp; - u16 tm_target_reset; - u16 tm_bus_reset; - u16 ioc_restart; /* IOC restart count */ - u16 rsvd; - u64 control_req; - u64 input_req; - u64 output_req; - u64 input_words; - u64 output_words; -}; - - -#endif /* __BFA_DEFS_DRIVER_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h deleted file mode 100644 index b4fa0923aa89..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_DEFS_ETHPORT_H__ -#define __BFA_DEFS_ETHPORT_H__ - -#include -#include -#include -#include -#include - -struct bna_tx_info_s { - u32 miniport_state; - u32 adapter_state; - u64 tx_count; - u64 tx_wi; - u64 tx_sg; - u64 tx_tcp_chksum; - u64 tx_udp_chksum; - u64 tx_ip_chksum; - u64 tx_lsov1; - u64 tx_lsov2; - u64 tx_max_sg_len ; -}; - -struct bna_rx_queue_info_s { - u16 q_id ; - u16 buf_size ; - u16 buf_count ; - u16 rsvd ; - u64 rx_count ; - u64 rx_dropped ; - u64 rx_unsupported ; - u64 rx_internal_err ; - u64 rss_count ; - u64 vlan_count ; - u64 rx_tcp_chksum ; - u64 rx_udp_chksum ; - u64 rx_ip_chksum ; - u64 rx_hds ; -}; - -struct bna_rx_q_set_s { - u16 q_set_type; - u32 miniport_state; - u32 adapter_state; - struct bna_rx_queue_info_s rx_queue[2]; -}; - -struct bna_port_stats_s { - struct bna_tx_info_s tx_stats; - u16 qset_count ; - struct bna_rx_q_set_s rx_qset[8]; -}; - -struct bfa_ethport_stats_s { - struct bna_stats_txf txf_stats[1]; - struct bna_stats_rxf rxf_stats[1]; - struct bnad_drv_stats drv_stats; -}; - -/** - * Ethernet port events - * Arguments below are in BFAL context from Mgmt - * BFA_PORT_AEN_ETH_LINKUP: [in]: mac [out]: mac - * BFA_PORT_AEN_ETH_LINKDOWN: [in]: mac [out]: mac - * BFA_PORT_AEN_ETH_ENABLE: [in]: mac [out]: mac - * BFA_PORT_AEN_ETH_DISABLE: [in]: mac [out]: mac - * - */ -enum bfa_ethport_aen_event { - BFA_ETHPORT_AEN_LINKUP = 1, /* Base Port Ethernet link up event */ - BFA_ETHPORT_AEN_LINKDOWN = 2, /* Base Port Ethernet link down event */ - BFA_ETHPORT_AEN_ENABLE = 3, /* Base Port Ethernet link enable event */ - BFA_ETHPORT_AEN_DISABLE = 4, /* Base Port Ethernet link disable - * event */ -}; - -struct bfa_ethport_aen_data_s { - mac_t mac; /* MAC address of the physical port */ -}; - - -#endif /* __BFA_DEFS_ETHPORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h deleted file mode 100644 index c08f4f5026ac..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_fcpim.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#ifndef __BFA_DEFS_FCPIM_H__ -#define __BFA_DEFS_FCPIM_H__ - -struct bfa_fcpim_stats_s { - u32 total_ios; /* Total IO count */ - u32 qresumes; /* IO waiting for CQ space */ - u32 no_iotags; /* NO IO contexts */ - u32 io_aborts; /* IO abort requests */ - u32 no_tskims; /* NO task management contexts */ - u32 iocomp_ok; /* IO completions with OK status */ - u32 iocomp_underrun; /* IO underrun (good) */ - u32 iocomp_overrun; /* IO overrun (good) */ - u32 iocomp_aborted; /* Aborted IO requests */ - u32 iocomp_timedout; /* IO timeouts */ - u32 iocom_nexus_abort; /* IO selection timeouts */ - u32 iocom_proto_err; /* IO protocol errors */ - u32 iocom_dif_err; /* IO SBC-3 protection errors */ - u32 iocom_tm_abort; /* IO aborted by TM requests */ - u32 iocom_sqer_needed; /* IO retry for SQ error - *recovery */ - u32 iocom_res_free; /* Delayed freeing of IO resources */ - u32 iocomp_scsierr; /* IO with non-good SCSI status */ - u32 iocom_hostabrts; /* Host IO abort requests */ - u32 iocom_utags; /* IO comp with unknown tags */ - u32 io_cleanups; /* IO implicitly aborted */ - u32 io_tmaborts; /* IO aborted due to TM commands */ - u32 rsvd; -}; -#endif /*__BFA_DEFS_FCPIM_H__*/ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h deleted file mode 100644 index af86a6396439..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * bfa_defs_fcport.h - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_FCPORT_H__ -#define __BFA_DEFS_FCPORT_H__ - -#include -#include - -#pragma pack(1) - -/** - * FCoE statistics - */ -struct bfa_fcoe_stats_s { - u64 secs_reset; /* Seconds since stats reset */ - u64 cee_linkups; /* CEE link up */ - u64 cee_linkdns; /* CEE link down */ - u64 fip_linkups; /* FIP link up */ - u64 fip_linkdns; /* FIP link down */ - u64 fip_fails; /* FIP failures */ - u64 mac_invalids; /* Invalid mac assignments */ - u64 vlan_req; /* Vlan requests */ - u64 vlan_notify; /* Vlan notifications */ - u64 vlan_err; /* Vlan notification errors */ - u64 vlan_timeouts; /* Vlan request timeouts */ - u64 vlan_invalids; /* Vlan invalids */ - u64 disc_req; /* Discovery requests */ - u64 disc_rsp; /* Discovery responses */ - u64 disc_err; /* Discovery error frames */ - u64 disc_unsol; /* Discovery unsolicited */ - u64 disc_timeouts; /* Discovery timeouts */ - u64 disc_fcf_unavail; /* Discovery FCF not avail */ - u64 linksvc_unsupp; /* FIP link service req unsupp. */ - u64 linksvc_err; /* FIP link service req errors */ - u64 logo_req; /* FIP logos received */ - u64 clrvlink_req; /* Clear virtual link requests */ - u64 op_unsupp; /* FIP operation unsupp. 
*/ - u64 untagged; /* FIP untagged frames */ - u64 txf_ucast; /* Tx FCoE unicast frames */ - u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */ - u64 txf_ucast_octets; /* Tx FCoE unicast octets */ - u64 txf_mcast; /* Tx FCoE mutlicast frames */ - u64 txf_mcast_vlan; /* Tx FCoE mutlicast vlan frames */ - u64 txf_mcast_octets; /* Tx FCoE multicast octets */ - u64 txf_bcast; /* Tx FCoE broadcast frames */ - u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */ - u64 txf_bcast_octets; /* Tx FCoE broadcast octets */ - u64 txf_timeout; /* Tx timeouts */ - u64 txf_parity_errors; /* Transmit parity err */ - u64 txf_fid_parity_errors; /* Transmit FID parity err */ - u64 rxf_ucast_octets; /* Rx FCoE unicast octets */ - u64 rxf_ucast; /* Rx FCoE unicast frames */ - u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */ - u64 rxf_mcast_octets; /* Rx FCoE multicast octets */ - u64 rxf_mcast; /* Rx FCoE multicast frames */ - u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */ - u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */ - u64 rxf_bcast; /* Rx FCoE broadcast frames */ - u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */ -}; - -/** - * QoS or FCoE stats (fcport stats excluding physical FC port stats) - */ -union bfa_fcport_stats_u { - struct bfa_qos_stats_s fcqos; - struct bfa_fcoe_stats_s fcoe; -}; - -#pragma pack() - -#endif /* __BFA_DEFS_FCPORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h deleted file mode 100644 index add0a05d941d..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_IOC_H__ -#define __BFA_DEFS_IOC_H__ - -#include -#include -#include -#include -#include - -enum { - BFA_IOC_DRIVER_LEN = 16, - BFA_IOC_CHIP_REV_LEN = 8, -}; - -/** - * Driver and firmware versions. 
- */ -struct bfa_ioc_driver_attr_s { - char driver[BFA_IOC_DRIVER_LEN]; /* driver name */ - char driver_ver[BFA_VERSION_LEN]; /* driver version */ - char fw_ver[BFA_VERSION_LEN]; /* firmware version*/ - char bios_ver[BFA_VERSION_LEN]; /* bios version */ - char efi_ver[BFA_VERSION_LEN]; /* EFI version */ - char ob_ver[BFA_VERSION_LEN]; /* openboot version*/ -}; - -/** - * IOC PCI device attributes - */ -struct bfa_ioc_pci_attr_s { - u16 vendor_id; /* PCI vendor ID */ - u16 device_id; /* PCI device ID */ - u16 ssid; /* subsystem ID */ - u16 ssvid; /* subsystem vendor ID */ - u32 pcifn; /* PCI device function */ - u32 rsvd; /* padding */ - u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ -}; - -/** - * IOC states - */ -enum bfa_ioc_state { - BFA_IOC_RESET = 1, /* IOC is in reset state */ - BFA_IOC_SEMWAIT = 2, /* Waiting for IOC hardware semaphore */ - BFA_IOC_HWINIT = 3, /* IOC hardware is being initialized */ - BFA_IOC_GETATTR = 4, /* IOC is being configured */ - BFA_IOC_OPERATIONAL = 5, /* IOC is operational */ - BFA_IOC_INITFAIL = 6, /* IOC hardware failure */ - BFA_IOC_HBFAIL = 7, /* IOC heart-beat failure */ - BFA_IOC_DISABLING = 8, /* IOC is being disabled */ - BFA_IOC_DISABLED = 9, /* IOC is disabled */ - BFA_IOC_FWMISMATCH = 10, /* IOC firmware different from drivers */ -}; - -/** - * IOC firmware stats - */ -struct bfa_fw_ioc_stats_s { - u32 hb_count; - u32 cfg_reqs; - u32 enable_reqs; - u32 disable_reqs; - u32 stats_reqs; - u32 clrstats_reqs; - u32 unknown_reqs; - u32 ic_reqs; /* interrupt coalesce reqs */ -}; - -/** - * IOC driver stats - */ -struct bfa_ioc_drv_stats_s { - u32 ioc_isrs; - u32 ioc_enables; - u32 ioc_disables; - u32 ioc_hbfails; - u32 ioc_boots; - u32 stats_tmos; - u32 hb_count; - u32 disable_reqs; - u32 enable_reqs; - u32 disable_replies; - u32 enable_replies; -}; - -/** - * IOC statistics - */ -struct bfa_ioc_stats_s { - struct bfa_ioc_drv_stats_s drv_stats; /* driver IOC stats */ - struct bfa_fw_ioc_stats_s fw_stats; /* firmware IOC stats */ -}; - - -enum bfa_ioc_type_e { - BFA_IOC_TYPE_FC = 1, - BFA_IOC_TYPE_FCoE = 2, - BFA_IOC_TYPE_LL = 3, -}; - -/** - * IOC attributes returned in queries - */ -struct bfa_ioc_attr_s { - enum bfa_ioc_type_e ioc_type; - enum bfa_ioc_state state; /* IOC state */ - struct bfa_adapter_attr_s adapter_attr; /* HBA attributes */ - struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ - struct bfa_ioc_pci_attr_s pci_attr; - u8 port_id; /* port number */ - u8 rsvd[7]; /* 64bit align */ -}; - -/** - * BFA IOC level events - */ -enum bfa_ioc_aen_event { - BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */ - BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */ - BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */ - BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */ - BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */ - BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */ - BFA_IOC_AEN_INVALID_VENDOR = 7, - BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */ - BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */ - -}; - -/** - * BFA IOC level event data, now just a place holder - */ -struct bfa_ioc_aen_data_s { - wwn_t pwwn; - s16 ioc_type; - mac_t mac; -}; - -#endif /* __BFA_DEFS_IOC_H__ */ - diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h deleted file mode 100644 index 31e728a631ed..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h +++ /dev/null @@ -1,322 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 
- * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_IOCFC_H__ -#define __BFA_DEFS_IOCFC_H__ - -#include -#include -#include -#include -#include - -#define BFA_IOCFC_INTR_DELAY 1125 -#define BFA_IOCFC_INTR_LATENCY 225 -#define BFA_IOCFCOE_INTR_DELAY 25 -#define BFA_IOCFCOE_INTR_LATENCY 5 - -/** - * Interrupt coalescing configuration. - */ -struct bfa_iocfc_intr_attr_s { - bfa_boolean_t coalesce; /* enable/disable coalescing */ - u16 latency; /* latency in microseconds */ - u16 delay; /* delay in microseconds */ -}; - -/** - * IOC firmware configuration - */ -struct bfa_iocfc_fwcfg_s { - u16 num_fabrics; /* number of fabrics */ - u16 num_lports; /* number of local lports */ - u16 num_rports; /* number of remote ports */ - u16 num_ioim_reqs; /* number of IO reqs */ - u16 num_tskim_reqs; /* task management requests */ - u16 num_iotm_reqs; /* number of TM IO reqs */ - u16 num_tsktm_reqs; /* TM task management requests*/ - u16 num_fcxp_reqs; /* unassisted FC exchanges */ - u16 num_uf_bufs; /* unsolicited recv buffers */ - u8 num_cqs; - u8 fw_tick_res; /*!< FW clock resolution in ms */ - u8 rsvd[4]; - -}; - -struct bfa_iocfc_drvcfg_s { - u16 num_reqq_elems; /* number of req queue elements */ - u16 num_rspq_elems; /* number of rsp queue elements */ - u16 num_sgpgs; /* number of total SG pages */ - u16 num_sboot_tgts; /* number of SAN boot targets */ - u16 num_sboot_luns; /* number of SAN boot luns */ - u16 ioc_recover; /* IOC recovery mode */ - u16 min_cfg; /* minimum configuration */ - u16 path_tov; /* device path timeout */ - bfa_boolean_t delay_comp; /* delay completion of - failed inflight IOs */ - u32 rsvd; -}; -/** - * IOC configuration - */ -struct bfa_iocfc_cfg_s { - struct bfa_iocfc_fwcfg_s fwcfg; /* firmware side config */ - struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */ -}; - -/** - * IOC firmware IO stats - */ -struct bfa_fw_io_stats_s { - u32 host_abort; /* IO aborted by host driver*/ - u32 host_cleanup; /* IO clean up by host driver */ - - u32 fw_io_timeout; /* IOs timedout */ - u32 fw_frm_parse; /* frame parsed by f/w */ - u32 fw_frm_data; /* fcp_data frame parsed by f/w */ - u32 fw_frm_rsp; /* fcp_rsp frame parsed by f/w */ - u32 fw_frm_xfer_rdy; /* xfer_rdy frame parsed by f/w */ - u32 fw_frm_bls_acc; /* BLS ACC frame parsed by f/w */ - u32 fw_frm_tgt_abort; /* target ABTS parsed by f/w */ - u32 fw_frm_unknown; /* unknown parsed by f/w */ - u32 fw_data_dma; /* f/w DMA'ed the data frame */ - u32 fw_frm_drop; /* f/w drop the frame */ - - u32 rec_timeout; /* FW rec timed out */ - u32 error_rec; /* FW sending rec on - * an error condition*/ - u32 wait_for_si; /* FW wait for SI */ - u32 rec_rsp_inval; /* REC rsp invalid */ - u32 seqr_io_abort; /* target does not know cmd so abort */ - u32 seqr_io_retry; /* SEQR failed so retry IO */ - - u32 itn_cisc_upd_rsp; /* ITN cisc updated on fcp_rsp */ - u32 itn_cisc_upd_data; /* ITN cisc updated on fcp_data */ - u32 itn_cisc_upd_xfer_rdy; /* ITN cisc updated on xfer_rdy */ - - u32 fcp_data_lost; /* fcp data
lost */ - - u32 ro_set_in_xfer_rdy; /* Target set RO in Xfer_rdy frame */ - u32 xfer_rdy_ooo_err; /* Out of order Xfer_rdy received */ - u32 xfer_rdy_unknown_err; /* unknown error in xfer_rdy frame */ - - u32 io_abort_timeout; /* ABTS timedout */ - u32 sler_initiated; /* SLER initiated */ - - u32 unexp_fcp_rsp; /* fcp response in wrong state */ - - u32 fcp_rsp_under_run; /* fcp rsp IO underrun */ - u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */ - u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */ - u32 fcp_rsp_resid_inval; /* invalid residue */ - u32 fcp_rsp_over_run; /* fcp rsp IO overrun */ - u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */ - u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */ - u32 fcp_rsp_sense_err; /* error in sense info in fcp rsp */ - u32 fcp_conf_req; /* FCP conf requested */ - - u32 tgt_aborted_io; /* target initiated abort */ - - u32 ioh_edtov_timeout_event;/* IOH edtov timer popped */ - u32 ioh_fcp_rsp_excp_event; /* IOH FCP_RSP exception */ - u32 ioh_fcp_conf_event; /* IOH FCP_CONF */ - u32 ioh_mult_frm_rsp_event; /* IOH multi_frame FCP_RSP */ - u32 ioh_hit_class2_event; /* IOH hit class2 */ - u32 ioh_miss_other_event; /* IOH miss other */ - u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */ - u32 ioh_len_err_event; /* IOH len error - fcp_dl != - * bytes xfered */ - u32 ioh_seq_len_err_event; /* IOH seq len error */ - u32 ioh_data_oor_event; /* Data out of range */ - u32 ioh_ro_ooo_event; /* Relative offset out of range */ - u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */ - u32 ioh_unexp_frame_event; /* unexpected frame received - * count */ - u32 ioh_err_int; /* IOH error int during data-phase - * for scsi write - */ -}; - -/** - * IOC port firmware stats - */ - -struct bfa_fw_port_fpg_stats_s { - u32 intr_evt; - u32 intr; - u32 intr_excess; - u32 intr_cause0; - u32 intr_other; - u32 intr_other_ign; - u32 sig_lost; - u32 sig_regained; - u32 sync_lost; - u32 sync_to; - u32 sync_regained; - u32 div2_overflow; - u32 div2_underflow; - u32 efifo_overflow; - u32 efifo_underflow; - u32 idle_rx; - u32 lrr_rx; - u32 lr_rx; - u32 ols_rx; - u32 nos_rx; - u32 lip_rx; - u32 arbf0_rx; - u32 arb_rx; - u32 mrk_rx; - u32 const_mrk_rx; - u32 prim_unknown; -}; - - -struct bfa_fw_port_lksm_stats_s { - u32 hwsm_success; /* hwsm state machine success */ - u32 hwsm_fails; /* hwsm fails */ - u32 hwsm_wdtov; /* hwsm timed out */ - u32 swsm_success; /* swsm success */ - u32 swsm_fails; /* swsm fails */ - u32 swsm_wdtov; /* swsm timed out */ - u32 busybufs; /* link init failed due to busybuf */ - u32 buf_waits; /* bufwait state entries */ - u32 link_fails; /* link failures */ - u32 psp_errors; /* primitive sequence protocol errors */ - u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */ - u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */ - u32 lr_tx; /* No. of times LR tx started */ - u32 lrr_tx; /* No. of times LRR tx started */ - u32 ols_tx; /* No. of times OLS tx started */ - u32 nos_tx; /* No. of times NOS tx started */ - u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ - u32 hwsm_lr_rx; /* No.
of times LR rx-ed by HWSM */ -}; - - -struct bfa_fw_port_snsm_stats_s { - u32 hwsm_success; /* Successful hwsm terminations */ - u32 hwsm_fails; /* hwsm fail count */ - u32 hwsm_wdtov; /* hwsm timed out */ - u32 swsm_success; /* swsm success */ - u32 swsm_wdtov; /* swsm timed out */ - u32 error_resets; /* error resets initiated by upsm */ - u32 sync_lost; /* Sync loss count */ - u32 sig_lost; /* Signal loss count */ -}; - - -struct bfa_fw_port_physm_stats_s { - u32 module_inserts; /* Module insert count */ - u32 module_xtracts; /* Module extracts count */ - u32 module_invalids; /* Invalid module inserted count */ - u32 module_read_ign; /* Module validation status ignored */ - u32 laser_faults; /* Laser fault count */ - u32 rsvd; -}; - - -struct bfa_fw_fip_stats_s { - u32 vlan_req; /* vlan discovery requests */ - u32 vlan_notify; /* vlan notifications */ - u32 vlan_err; /* vlan response error */ - u32 vlan_timeouts; /* vlan discovery timeouts */ - u32 vlan_invalids; /* invalid vlan in discovery advert. */ - u32 disc_req; /* Discovery solicit requests */ - u32 disc_rsp; /* Discovery solicit response */ - u32 disc_err; /* Discovery advt. parse errors */ - u32 disc_unsol; /* Discovery unsolicited */ - u32 disc_timeouts; /* Discovery timeouts */ - u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */ - u32 linksvc_unsupp; /* Unsupported link service req */ - u32 linksvc_err; /* Parse error in link service req */ - u32 logo_req; /* FIP logos received */ - u32 clrvlink_req; /* Clear virtual link req */ - u32 op_unsupp; /* Unsupported FIP operation */ - u32 untagged; /* Untagged frames (ignored) */ - u32 invalid_version; /*!< Invalid FIP version */ -}; - - -struct bfa_fw_lps_stats_s { - u32 mac_invalids; /* Invalid mac assigned */ - u32 rsvd; -}; - - -struct bfa_fw_fcoe_stats_s { - u32 cee_linkups; /* CEE link up count */ - u32 cee_linkdns; /* CEE link down count */ - u32 fip_linkups; /* FIP link up count */ - u32 fip_linkdns; /* FIP link down count */ - u32 fip_fails; /* FIP fail count */ - u32 mac_invalids; /* Invalid mac assigned */ -}; - -/** - * IOC firmware FCoE port stats - */ -struct bfa_fw_fcoe_port_stats_s { - struct bfa_fw_fcoe_stats_s fcoe_stats; - struct bfa_fw_fip_stats_s fip_stats; -}; - -/** - * IOC firmware FC port stats - */ -struct bfa_fw_fc_port_stats_s { - struct bfa_fw_port_fpg_stats_s fpg_stats; - struct bfa_fw_port_physm_stats_s physm_stats; - struct bfa_fw_port_snsm_stats_s snsm_stats; - struct bfa_fw_port_lksm_stats_s lksm_stats; -}; - -/** - * IOC firmware FC or FCoE port stats - */ -union bfa_fw_port_stats_s { - struct bfa_fw_fc_port_stats_s fc_stats; - struct bfa_fw_fcoe_port_stats_s fcoe_stats; -}; - -/** - * IOC firmware stats - */ -struct bfa_fw_stats_s { - struct bfa_fw_ioc_stats_s ioc_stats; - struct bfa_fw_io_stats_s io_stats; - union bfa_fw_port_stats_s port_stats; -}; - -/** - * IOC statistics - */ -struct bfa_iocfc_stats_s { - struct bfa_fw_stats_s fw_stats; /* firmware IOC stats */ -}; - -/** - * IOC attributes returned in queries - */ -struct bfa_iocfc_attr_s { - struct bfa_iocfc_cfg_s config; /* IOCFC config */ - struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */ -}; - -#define BFA_IOCFC_PATHTOV_MAX 60 -#define BFA_IOCFC_QDEPTH_MAX 2000 - -#endif /* __BFA_DEFS_IOC_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h deleted file mode 100644 index 7cb63ea98f38..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_ipfc.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade
Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_IPFC_H__ -#define __BFA_DEFS_IPFC_H__ - -#include -#include -#include - -/** - * FCS ip remote port states - */ -enum bfa_iprp_state { - BFA_IPRP_UNINIT = 0, /* PORT is not yet initialized */ - BFA_IPRP_ONLINE = 1, /* process login is complete */ - BFA_IPRP_OFFLINE = 2, /* iprp is offline */ -}; - -/** - * FCS remote port statistics - */ -struct bfa_iprp_stats_s { - u32 offlines; - u32 onlines; - u32 rscns; - u32 plogis; - u32 logos; - u32 plogi_timeouts; - u32 plogi_rejects; -}; - -/** - * FCS iprp attribute returned in queries - */ -struct bfa_iprp_attr_s { - enum bfa_iprp_state state; -}; - -struct bfa_ipfc_stats_s { - u32 arp_sent; - u32 arp_recv; - u32 arp_reply_sent; - u32 arp_reply_recv; - u32 farp_sent; - u32 farp_recv; - u32 farp_reply_sent; - u32 farp_reply_recv; - u32 farp_reject_sent; - u32 farp_reject_recv; -}; - -struct bfa_ipfc_attr_s { - bfa_boolean_t enabled; -}; - -#endif /* __BFA_DEFS_IPFC_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h b/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h deleted file mode 100644 index d77788b3999a..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_itnim.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#ifndef __BFA_DEFS_ITNIM_H__ -#define __BFA_DEFS_ITNIM_H__ - -#include -#include - -/** - * FCS itnim states - */ -enum bfa_itnim_state { - BFA_ITNIM_OFFLINE = 0, /* offline */ - BFA_ITNIM_PRLI_SEND = 1, /* prli send */ - BFA_ITNIM_PRLI_SENT = 2, /* prli sent */ - BFA_ITNIM_PRLI_RETRY = 3, /* prli retry */ - BFA_ITNIM_HCB_ONLINE = 4, /* online callback */ - BFA_ITNIM_ONLINE = 5, /* online */ - BFA_ITNIM_HCB_OFFLINE = 6, /* offline callback */ - BFA_ITNIM_INITIATIOR = 7, /* initiator */ -}; - -struct bfa_itnim_latency_s { - u32 min; - u32 max; - u32 count; - u32 clock_res; - u32 avg; - u32 rsvd; -}; - -struct bfa_itnim_hal_stats_s { - u32 onlines; /* ITN nexus onlines (PRLI done) */ - u32 offlines; /* ITN Nexus offlines */ - u32 creates; /* ITN create requests */ - u32 deletes; /* ITN delete requests */ - u32 create_comps; /* ITN create completions */ - u32 delete_comps; /* ITN delete completions */ - u32 sler_events; /* SLER (sequence level error - * recovery) events */ - u32 ioc_disabled; /* Num IOC disables */ - u32 cleanup_comps; /* ITN cleanup completions */ - u32 tm_cmnds; /* task management(TM) cmnds sent */ - u32 tm_fw_rsps; /* TM cmds firmware responses */ - u32 tm_success; /* TM successes */ - u32 tm_failures; /* TM failures */ - u32 tm_io_comps; /* TM IO completions */ - u32 tm_qresumes; /* TM queue resumes (after waiting - * for resources) - */ - u32 tm_iocdowns; /* TM cmnds affected by IOC down */ - u32 tm_cleanups; /* TM cleanups */ - u32 tm_cleanup_comps; - /* TM cleanup completions */ - u32 ios; /* IO requests */ - u32 io_comps; /* IO completions */ - u64 input_reqs; /* INPUT requests */ - u64 output_reqs; /* OUTPUT requests */ -}; - -/** - * FCS remote port statistics - */ -struct bfa_itnim_stats_s { - u32 onlines; /* num rport online */ - u32 offlines; /* num rport offline */ - u32 prli_sent; /* num prli sent out */ - u32 fcxp_alloc_wait;/* num fcxp alloc waits */ - u32 prli_rsp_err; /* num prli rsp errors */ - u32 prli_rsp_acc; /* num prli rsp accepts */ - u32 initiator; /* rport is an initiator */ - u32 prli_rsp_parse_err; /* prli rsp parsing errors */ - u32 prli_rsp_rjt; /* num prli rsp rejects */ - u32 timeout; /* num timeouts detected */ - u32 sler; /* num sler notification from BFA */ - u32 rsvd; - struct bfa_itnim_hal_stats_s hal_stats; -}; - -/** - * FCS itnim attributes returned in queries - */ -struct bfa_itnim_attr_s { - enum bfa_itnim_state state; /* FCS itnim state */ - u8 retry; /* data retransmission support */ - u8 task_retry_id; /* task retry ident support */ - u8 rec_support; /* REC supported */ - u8 conf_comp; /* confirmed completion supp */ - struct bfa_itnim_latency_s io_latency; /* IO latency */ -}; - -/** - * BFA ITNIM events. - * Arguments below are in BFAL context from Mgmt - * BFA_ITNIM_AEN_NEW: [in]: None [out]: vf_id, lpwwn - * BFA_ITNIM_AEN_DELETE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets), - * [out]: vf_id, ppwwn, lpwwn, rpwwn - * BFA_ITNIM_AEN_ONLINE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets), - * [out]: vf_id, ppwwn, lpwwn, rpwwn - * BFA_ITNIM_AEN_OFFLINE: [in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets), - * [out]: vf_id, ppwwn, lpwwn, rpwwn - * BFA_ITNIM_AEN_DISCONNECT:[in]: vf_id, lpwwn, rpwwn (0 = all fcp4 targets), - * [out]: vf_id, ppwwn, lpwwn, rpwwn - */ -enum bfa_itnim_aen_event { - BFA_ITNIM_AEN_ONLINE = 1, /* Target online */ - BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */ - BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */ -}; - -/** - * BFA ITNIM event data structure.
- */ -struct bfa_itnim_aen_data_s { - u16 vf_id; /* vf_id of the IT nexus */ - u16 rsvd[3]; - wwn_t ppwwn; /* WWN of its physical port */ - wwn_t lpwwn; /* WWN of logical port */ - wwn_t rpwwn; /* WWN of remote(target) port */ -}; - -#endif /* __BFA_DEFS_ITNIM_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_led.h b/drivers/scsi/bfa/include/defs/bfa_defs_led.h deleted file mode 100644 index 62039273264e..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_led.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_LED_H__ -#define __BFA_DEFS_LED_H__ - -#define BFA_LED_MAX_NUM 3 - -enum bfa_led_op { - BFA_LED_OFF = 0, - BFA_LED_ON = 1, - BFA_LED_FLICK = 2, - BFA_LED_BLINK = 3, -}; - -enum bfa_led_color { - BFA_LED_GREEN = 0, - BFA_LED_AMBER = 1, -}; - -#endif /* __BFA_DEFS_LED_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h deleted file mode 100644 index 0952a139c47c..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_LPORT_H__ -#define __BFA_DEFS_LPORT_H__ - -#include -#include - -/** - * BFA AEN logical port events. - * Arguments below are in BFAL context from Mgmt - * BFA_LPORT_AEN_NEW: [in]: None [out]: vf_id, ppwwn, lpwwn, roles - * BFA_LPORT_AEN_DELETE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_ONLINE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_OFFLINE: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_NEW_PROP: [in]: None [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_DELETE_PROP: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_NEW_STANDARD: [in]: None [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_DELETE_STANDARD: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_NPIV_DUP_WWN: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_NPIV_FABRIC_MAX: [in]: lpwwn [out]: vf_id, ppwwn. lpwwn, roles - * BFA_LPORT_AEN_NPIV_UNKNOWN: [in]: lpwwn [out]: vf_id, ppwwn. 
lpwwn, roles - */ -enum bfa_lport_aen_event { - BFA_LPORT_AEN_NEW = 1, /* LPort created event */ - BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */ - BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */ - BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */ - BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */ - BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */ - BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */ - BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */ - BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */ - BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort configured with - * duplicate WWN event - */ - BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */ - BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code event */ -}; - -/** - * BFA AEN event data structure - */ -struct bfa_lport_aen_data_s { - u16 vf_id; /* vf_id of this logical port */ - s16 roles; /* Logical port mode,IM/TM/IP etc */ - u32 rsvd; - wwn_t ppwwn; /* WWN of its physical port */ - wwn_t lpwwn; /* WWN of this logical port */ -}; - -#endif /* __BFA_DEFS_LPORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h deleted file mode 100644 index d22fb7909643..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ -#ifndef __BFA_DEFS_MFG_H__ -#define __BFA_DEFS_MFG_H__ - -#include - -/** - * Manufacturing block version - */ -#define BFA_MFG_VERSION 2 - -/** - * Manufacturing block encrypted version - */ -#define BFA_MFG_ENC_VER 2 - -/** - * Manufacturing block version 1 length - */ -#define BFA_MFG_VER1_LEN 128 - -/** - * Manufacturing block header length - */ -#define BFA_MFG_HDR_LEN 4 - -/** - * Checksum size - */ -#define BFA_MFG_CHKSUM_SIZE 16 - -/** - * Manufacturing block format - */ -#define BFA_MFG_SERIALNUM_SIZE 11 -#define BFA_MFG_PARTNUM_SIZE 14 -#define BFA_MFG_SUPPLIER_ID_SIZE 10 -#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 -#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 -#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 -#define STRSZ(_n) (((_n) + 4) & ~3) - -/** - * Manufacturing card type - */ -enum { - BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */ - BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */ - BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */ - BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */ - BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */ - BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */ - BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */ - BFA_MFG_TYPE_JAYHAWK = 804, /* Jayhawk mezz card */ - BFA_MFG_TYPE_WANCHESE = 1007, /* Wanchese mezz card */ - BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ -}; - -#pragma pack(1) - -/** - * Card type to port number conversion - */ -#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10) - -/** - * Check if Mezz card - */ -#define bfa_mfg_is_mezz(type) (( \ - (type) == BFA_MFG_TYPE_JAYHAWK || \ - (type) == BFA_MFG_TYPE_WANCHESE)) - -/** - * Check if card type valid - */ -#define bfa_mfg_is_card_type_valid(type) (( \ - (type) == BFA_MFG_TYPE_FC8P2 || \ - (type) == BFA_MFG_TYPE_FC8P1 || \ - (type) == BFA_MFG_TYPE_FC4P2 || \ - (type) == BFA_MFG_TYPE_FC4P1 || \ - (type) == BFA_MFG_TYPE_CNA10P2 || \ - (type) == BFA_MFG_TYPE_CNA10P1 || \ - bfa_mfg_is_mezz(type))) - -/** - * All numerical fields are in big-endian format. - */ -struct bfa_mfg_block_s { -}; - -/** - * VPD data length - */ -#define BFA_MFG_VPD_LEN 512 - -#define BFA_MFG_VPD_PCI_HDR_OFF 137 -#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */ -#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */ - -/** - * VPD vendor tag - */ -enum { - BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */ - BFA_MFG_VPD_IBM = 1, /* vendor IBM */ - BFA_MFG_VPD_HP = 2, /* vendor HP */ - BFA_MFG_VPD_DELL = 3, /* vendor DELL */ - BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */ - BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */ - BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */ - BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ -}; - -/** - * All numerical fields are in big-endian format. - */ -struct bfa_mfg_vpd_s { - u8 version; /* vpd data version */ - u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ - u8 chksum; /* u8 checksum */ - u8 vendor; /* vendor */ - u8 len; /* vpd data length excluding header */ - u8 rsv; - u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ -}; - -#pragma pack() - -#endif /* __BFA_DEFS_MFG_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h b/drivers/scsi/bfa/include/defs/bfa_defs_pci.h deleted file mode 100644 index ea7d89bbc0bb..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pci.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_PCI_H__ -#define __BFA_DEFS_PCI_H__ - -/** - * PCI device and vendor ID information - */ -enum { - BFA_PCI_VENDOR_ID_BROCADE = 0x1657, - BFA_PCI_DEVICE_ID_FC_8G2P = 0x13, - BFA_PCI_DEVICE_ID_FC_8G1P = 0x17, - BFA_PCI_DEVICE_ID_CT = 0x14, - BFA_PCI_DEVICE_ID_CT_FC = 0x21, -}; - -#define bfa_asic_id_ct(devid) \ - ((devid) == BFA_PCI_DEVICE_ID_CT || \ - (devid) == BFA_PCI_DEVICE_ID_CT_FC) - -/** - * PCI sub-system device and vendor ID information - */ -enum { - BFA_PCI_FCOE_SSDEVICE_ID = 0x14, -}; - -/** - * Maximum number of device address ranges mapped through different BAR(s) - */ -#define BFA_PCI_ACCESS_RANGES 1 - -#endif /* __BFA_DEFS_PCI_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h b/drivers/scsi/bfa/include/defs/bfa_defs_pm.h deleted file mode 100644 index e8d6d959006e..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pm.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_PM_H__ -#define __BFA_DEFS_PM_H__ - -#include - -/** - * BFA power management device states - */ -enum bfa_pm_ds { - BFA_PM_DS_D0 = 0, /* full power mode */ - BFA_PM_DS_D1 = 1, /* power save state 1 */ - BFA_PM_DS_D2 = 2, /* power save state 2 */ - BFA_PM_DS_D3 = 3, /* power off state */ -}; - -#endif /* __BFA_DEFS_PM_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h b/drivers/scsi/bfa/include/defs/bfa_defs_pom.h deleted file mode 100644 index d9fa278472b7..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pom.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_POM_H__ -#define __BFA_DEFS_POM_H__ - -#include -#include - -/** - * POM health status levels for each attributes. 
- */ -enum bfa_pom_entry_health { - BFA_POM_HEALTH_NOINFO = 1, /* no information */ - BFA_POM_HEALTH_NORMAL = 2, /* health is normal */ - BFA_POM_HEALTH_WARNING = 3, /* warning level */ - BFA_POM_HEALTH_ALARM = 4, /* alarming level */ -}; - -/** - * Reading of temperature/voltage/current/power - */ -struct bfa_pom_entry_s { - enum bfa_pom_entry_health health; /* POM entry health */ - u32 curr_value; /* current value */ - u32 thr_warn_high; /* threshold warning high */ - u32 thr_warn_low; /* threshold warning low */ - u32 thr_alarm_low; /* threshold alarm low */ - u32 thr_alarm_high; /* threshold alarm high */ -}; - -/** - * POM attributes - */ -struct bfa_pom_attr_s { - struct bfa_pom_entry_s temperature; /* centigrade */ - struct bfa_pom_entry_s voltage; /* volts */ - struct bfa_pom_entry_s curr; /* milli amps */ - struct bfa_pom_entry_s txpower; /* micro watts */ - struct bfa_pom_entry_s rxpower; /* micro watts */ -}; - -#endif /* __BFA_DEFS_POM_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h deleted file mode 100644 index ebdf0d1731a4..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_PORT_H__ -#define __BFA_DEFS_PORT_H__ - -#include -#include -#include -#include - -#define BFA_FCS_FABRIC_IPADDR_SZ 16 - -/** - * symbolic names for base port/virtual port - */ -#define BFA_SYMNAME_MAXLEN 128 /* vmware/windows uses 128 bytes */ -struct bfa_port_symname_s { - char symname[BFA_SYMNAME_MAXLEN]; -}; - -/** -* Roles of FCS port: - * - FCP IM and FCP TM roles cannot be enabled together for an FCS port - * - Create multiple ports if both IM and TM functions required. - * - At least one role must be specified. - */ -enum bfa_port_role { - BFA_PORT_ROLE_FCP_IM = 0x01, /* FCP initiator role */ - BFA_PORT_ROLE_FCP_TM = 0x02, /* FCP target role */ - BFA_PORT_ROLE_FCP_IPFC = 0x04, /* IP over FC role */ - BFA_PORT_ROLE_FCP_MAX = BFA_PORT_ROLE_FCP_IPFC | BFA_PORT_ROLE_FCP_IM -}; - -/** - * FCS port configuration. - */ -struct bfa_port_cfg_s { - wwn_t pwwn; /* port wwn */ - wwn_t nwwn; /* node wwn */ - struct bfa_port_symname_s sym_name; /* vm port symbolic name */ - bfa_boolean_t preboot_vp; /* vport created from PBC */ - enum bfa_port_role roles; /* FCS port roles */ - u8 tag[16]; /* opaque tag from application */ -}; - -/** - * FCS port states - */ -enum bfa_port_state { - BFA_PORT_UNINIT = 0, /* PORT is not yet initialized */ - BFA_PORT_FDISC = 1, /* FDISC is in progress */ - BFA_PORT_ONLINE = 2, /* login to fabric is complete */ - BFA_PORT_OFFLINE = 3, /* No login to fabric */ -}; - -/** - * FCS port type. Required for VmWare. - */ -enum bfa_port_type { - BFA_PORT_TYPE_PHYSICAL = 0, - BFA_PORT_TYPE_VIRTUAL, -}; - -/** - * FCS port offline reason. Required for VmWare.
- */ -enum bfa_port_offline_reason { - BFA_PORT_OFFLINE_UNKNOWN = 0, - BFA_PORT_OFFLINE_LINKDOWN, - BFA_PORT_OFFLINE_FAB_UNSUPPORTED, /* NPIV not supported by the - * fabric */ - BFA_PORT_OFFLINE_FAB_NORESOURCES, - BFA_PORT_OFFLINE_FAB_LOGOUT, -}; - -/** - * FCS lport info. Required for VmWare. - */ -struct bfa_port_info_s { - u8 port_type; /* bfa_port_type_t : physical or - * virtual */ - u8 port_state; /* one of bfa_port_state values */ - u8 offline_reason; /* one of bfa_port_offline_reason_t - * values */ - wwn_t port_wwn; - wwn_t node_wwn; - - /* - * following 4 fields are valid for Physical Ports only - */ - u32 max_vports_supp; /* Max supported vports */ - u32 num_vports_inuse; /* Num of in use vports */ - u32 max_rports_supp; /* Max supported rports */ - u32 num_rports_inuse; /* Num of discovered rports */ - -}; - -/** - * FCS port statistics - */ -struct bfa_port_stats_s { - u32 ns_plogi_sent; - u32 ns_plogi_rsp_err; - u32 ns_plogi_acc_err; - u32 ns_plogi_accepts; - u32 ns_rejects; /* NS command rejects */ - u32 ns_plogi_unknown_rsp; - u32 ns_plogi_alloc_wait; - - u32 ns_retries; /* NS command retries */ - u32 ns_timeouts; /* NS command timeouts */ - - u32 ns_rspnid_sent; - u32 ns_rspnid_accepts; - u32 ns_rspnid_rsp_err; - u32 ns_rspnid_rejects; - u32 ns_rspnid_alloc_wait; - - u32 ns_rftid_sent; - u32 ns_rftid_accepts; - u32 ns_rftid_rsp_err; - u32 ns_rftid_rejects; - u32 ns_rftid_alloc_wait; - - u32 ns_rffid_sent; - u32 ns_rffid_accepts; - u32 ns_rffid_rsp_err; - u32 ns_rffid_rejects; - u32 ns_rffid_alloc_wait; - - u32 ns_gidft_sent; - u32 ns_gidft_accepts; - u32 ns_gidft_rsp_err; - u32 ns_gidft_rejects; - u32 ns_gidft_unknown_rsp; - u32 ns_gidft_alloc_wait; - - /* - * Mgmt Server stats - */ - u32 ms_retries; /* MS command retries */ - u32 ms_timeouts; /* MS command timeouts */ - u32 ms_plogi_sent; - u32 ms_plogi_rsp_err; - u32 ms_plogi_acc_err; - u32 ms_plogi_accepts; - u32 ms_rejects; /* MS command rejects */ - u32 ms_plogi_unknown_rsp; - u32 ms_plogi_alloc_wait; - - u32 num_rscn; /* Num of RSCN received */ - u32 num_portid_rscn;/* Num portid format RSCN - * received */ - - u32 uf_recvs; /* unsolicited recv frames */ - u32 uf_recv_drops; /* dropped received frames */ - - u32 rsvd; /* padding for 64 bit alignment */ -}; - -/** - * BFA port attribute returned in queries - */ -struct bfa_port_attr_s { - enum bfa_port_state state; /* port state */ - u32 pid; /* port ID */ - struct bfa_port_cfg_s port_cfg; /* port configuration */ - enum bfa_pport_type port_type; /* current topology */ - u32 loopback; /* cable is externally looped back */ - wwn_t fabric_name; /* attached switch's nwwn */ - u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached - * fabric's ip addr */ - struct mac_s fpma_mac; /* Lport's FPMA Mac address */ - u16 authfail; /* auth failed state */ -}; - -/** - * BFA physical port Level events - * Arguments below are in BFAL context from Mgmt - * BFA_PORT_AEN_ONLINE: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_OFFLINE: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_RLIR: [in]: None [out]: pwwn, rlir_data, rlir_len - * BFA_PORT_AEN_SFP_INSERT: [in]: pwwn [out]: port_id, pwwn - * BFA_PORT_AEN_SFP_REMOVE: [in]: pwwn [out]: port_id, pwwn - * BFA_PORT_AEN_SFP_POM: [in]: pwwn [out]: level, port_id, pwwn - * BFA_PORT_AEN_ENABLE: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_DISABLE: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_AUTH_ON: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_AUTH_OFF: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_DISCONNECT: [in]: pwwn [out]: pwwn - * BFA_PORT_AEN_QOS_NEG: [in]: pwwn
[out]: pwwn - * BFA_PORT_AEN_FABRIC_NAME_CHANGE: [in]: pwwn, [out]: pwwn, fwwn - * - */ -enum bfa_port_aen_event { - BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */ - BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */ - BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */ - BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */ - BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */ - BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */ - BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */ - BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */ - BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */ - BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */ - BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */ - BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */ - BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change - * event */ - BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */ - BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */ -}; - -enum bfa_port_aen_sfp_pom { - BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */ - BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */ - BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */ - BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED -}; - -struct bfa_port_aen_data_s { - wwn_t pwwn; /* WWN of the physical port */ - wwn_t fwwn; /* WWN of the fabric port */ - s32 phy_port_num; /*! For SFP related events */ - s16 ioc_type; - s16 level; /* Only transitions will - * be informed */ - struct mac_s mac; /* MAC address of the ethernet port, - * applicable to CNA port only */ - s16 rsvd; -}; - -#endif /* __BFA_DEFS_PORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h deleted file mode 100644 index 2de675839c2f..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h +++ /dev/null @@ -1,393 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_PPORT_H__ -#define __BFA_DEFS_PPORT_H__ - -#include -#include -#include -#include -#include - -/* Modify char* port_stt[] in bfal_port.c if a new state was added */ -enum bfa_pport_states { - BFA_PPORT_ST_UNINIT = 1, - BFA_PPORT_ST_ENABLING_QWAIT = 2, - BFA_PPORT_ST_ENABLING = 3, - BFA_PPORT_ST_LINKDOWN = 4, - BFA_PPORT_ST_LINKUP = 5, - BFA_PPORT_ST_DISABLING_QWAIT = 6, - BFA_PPORT_ST_DISABLING = 7, - BFA_PPORT_ST_DISABLED = 8, - BFA_PPORT_ST_STOPPED = 9, - BFA_PPORT_ST_IOCDOWN = 10, - BFA_PPORT_ST_IOCDIS = 11, - BFA_PPORT_ST_FWMISMATCH = 12, - BFA_PPORT_ST_PREBOOT_DISABLED = 13, - BFA_PPORT_ST_MAX_STATE, -}; - -/** - * Port speed settings. Each specific speed is a bit field. Use multiple - * bits to specify speeds to be selected for auto-negotiation. 
- */ -enum bfa_pport_speed { - BFA_PPORT_SPEED_UNKNOWN = 0, - BFA_PPORT_SPEED_1GBPS = 1, - BFA_PPORT_SPEED_2GBPS = 2, - BFA_PPORT_SPEED_4GBPS = 4, - BFA_PPORT_SPEED_8GBPS = 8, - BFA_PPORT_SPEED_10GBPS = 10, - BFA_PPORT_SPEED_AUTO = - (BFA_PPORT_SPEED_1GBPS | BFA_PPORT_SPEED_2GBPS | - BFA_PPORT_SPEED_4GBPS | BFA_PPORT_SPEED_8GBPS), -}; - -/** - * Port operational type (in sync with SNIA port type). - */ -enum bfa_pport_type { - BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unknown */ - BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */ - BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */ - BFA_PPORT_TYPE_NLPORT = 6, /* public loop */ - BFA_PPORT_TYPE_LPORT = 20, /* private loop */ - BFA_PPORT_TYPE_P2P = 21, /* P2P with no switched fabric */ - BFA_PPORT_TYPE_VPORT = 22, /* NPIV - virtual port */ -}; - -/** - * Port topology setting. A port's topology and fabric login status - * determine its operational type. - */ -enum bfa_pport_topology { - BFA_PPORT_TOPOLOGY_NONE = 0, /* No valid topology */ - BFA_PPORT_TOPOLOGY_P2P = 1, /* P2P only */ - BFA_PPORT_TOPOLOGY_LOOP = 2, /* LOOP topology */ - BFA_PPORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ -}; - -/** - * Physical port loopback types. - */ -enum bfa_pport_opmode { - BFA_PPORT_OPMODE_NORMAL = 0x00, /* normal non-loopback mode */ - BFA_PPORT_OPMODE_LB_INT = 0x01, /* internal loop back */ - BFA_PPORT_OPMODE_LB_SLW = 0x02, /* serial link wrapback (serdes) */ - BFA_PPORT_OPMODE_LB_EXT = 0x04, /* external loop back (serdes) */ - BFA_PPORT_OPMODE_LB_CBL = 0x08, /* cabled loop back */ - BFA_PPORT_OPMODE_LB_NLINT = 0x20, /* NL_Port internal loopback */ -}; - -#define BFA_PPORT_OPMODE_LB_HARD(_mode) \ - ((_mode == BFA_PPORT_OPMODE_LB_INT) || \ - (_mode == BFA_PPORT_OPMODE_LB_SLW) || \ - (_mode == BFA_PPORT_OPMODE_LB_EXT)) - -/** - Port State (in sync with SNIA port state). 
- */ -enum bfa_pport_snia_state { - BFA_PPORT_STATE_UNKNOWN = 1, /* port is not initialized */ - BFA_PPORT_STATE_ONLINE = 2, /* port is ONLINE */ - BFA_PPORT_STATE_DISABLED = 3, /* port is disabled by user */ - BFA_PPORT_STATE_BYPASSED = 4, /* port is bypassed (in LOOP) */ - BFA_PPORT_STATE_DIAG = 5, /* port diagnostics is active */ - BFA_PPORT_STATE_LINKDOWN = 6, /* link is down */ - BFA_PPORT_STATE_LOOPBACK = 8, /* port is looped back */ -}; - -/** - * Port link state - */ -enum bfa_pport_linkstate { - BFA_PPORT_LINKUP = 1, /* Physical port/Trunk link up */ - BFA_PPORT_LINKDOWN = 2, /* Physical port/Trunk link down */ - BFA_PPORT_TRUNK_LINKDOWN = 3, /* Trunk link down (new tmaster) */ -}; - -/** - * Port link state event - */ -#define bfa_pport_event_t enum bfa_pport_linkstate - -/** - * Port link state reason code - */ -enum bfa_pport_linkstate_rsn { - BFA_PPORT_LINKSTATE_RSN_NONE = 0, - BFA_PPORT_LINKSTATE_RSN_DISABLED = 1, - BFA_PPORT_LINKSTATE_RSN_RX_NOS = 2, - BFA_PPORT_LINKSTATE_RSN_RX_OLS = 3, - BFA_PPORT_LINKSTATE_RSN_RX_LIP = 4, - BFA_PPORT_LINKSTATE_RSN_RX_LIPF7 = 5, - BFA_PPORT_LINKSTATE_RSN_SFP_REMOVED = 6, - BFA_PPORT_LINKSTATE_RSN_PORT_FAULT = 7, - BFA_PPORT_LINKSTATE_RSN_RX_LOS = 8, - BFA_PPORT_LINKSTATE_RSN_LOCAL_FAULT = 9, - BFA_PPORT_LINKSTATE_RSN_REMOTE_FAULT = 10, - BFA_PPORT_LINKSTATE_RSN_TIMEOUT = 11, - - - - /* CEE related reason codes/errors */ - CEE_LLDP_INFO_AGED_OUT = 20, - CEE_LLDP_SHUTDOWN_TLV_RCVD = 21, - CEE_PEER_NOT_ADVERTISE_DCBX = 22, - CEE_PEER_NOT_ADVERTISE_PG = 23, - CEE_PEER_NOT_ADVERTISE_PFC = 24, - CEE_PEER_NOT_ADVERTISE_FCOE = 25, - CEE_PG_NOT_COMPATIBLE = 26, - CEE_PFC_NOT_COMPATIBLE = 27, - CEE_FCOE_NOT_COMPATIBLE = 28, - CEE_BAD_PG_RCVD = 29, - CEE_BAD_BW_RCVD = 30, - CEE_BAD_PFC_RCVD = 31, - CEE_BAD_FCOE_PRI_RCVD = 32, - CEE_FCOE_PRI_PFC_OFF = 33, - CEE_DUP_CONTROL_TLV_RCVD = 34, - CEE_DUP_FEAT_TLV_RCVD = 35, - CEE_APPLY_NEW_CFG = 36, /* reason, not an error */ - CEE_PROTOCOL_INIT = 37, /* reason, not an error */ - CEE_PHY_LINK_DOWN = 38, - CEE_LLS_FCOE_ABSENT = 39, - CEE_LLS_FCOE_DOWN = 40 -}; - -/** - * Default Target Rate Limiting Speed. - */ -#define BFA_PPORT_DEF_TRL_SPEED BFA_PPORT_SPEED_1GBPS - -/** - * Physical port configuration - */ -struct bfa_pport_cfg_s { - u8 topology; /* bfa_pport_topology */ - u8 speed; /* enum bfa_pport_speed */ - u8 trunked; /* trunked or not */ - u8 qos_enabled; /* qos enabled or not */ - u8 trunk_ports; /* bitmap of trunked ports */ - u8 cfg_hardalpa; /* is hard alpa configured */ - u16 maxfrsize; /* maximum frame size */ - u8 hardalpa; /* configured hard alpa */ - u8 rx_bbcredit; /* receive buffer credits */ - u8 tx_bbcredit; /* transmit buffer credits */ - u8 ratelimit; /* ratelimit enabled or not */ - u8 trl_def_speed; /* ratelimit default speed */ - u8 rsvd[3]; - u16 path_tov; /* device path timeout */ - u16 q_depth; /* SCSI Queue depth */ -}; - -/** - * Port attribute values. - */ -struct bfa_pport_attr_s { - /* - * Static fields - */ - wwn_t nwwn; /* node wwn */ - wwn_t pwwn; /* port wwn */ - wwn_t factorynwwn; /* factory node wwn */ - wwn_t factorypwwn; /* factory port wwn */ - enum fc_cos cos_supported; /* supported class of services */ - u32 rsvd; - struct fc_symname_s port_symname; /* port symbolic name */ - enum bfa_pport_speed speed_supported; /* supported speeds */ - bfa_boolean_t pbind_enabled; /* Will be set if Persistent binding - * enabled. 
Relevant only in Windows - */ - - /* - * Configured values - */ - struct bfa_pport_cfg_s pport_cfg; /* pport cfg */ - - /* - * Dynamic field - info from BFA - */ - enum bfa_pport_states port_state; /* current port state */ - enum bfa_pport_speed speed; /* current speed */ - enum bfa_pport_topology topology; /* current topology */ - bfa_boolean_t beacon; /* current beacon status */ - bfa_boolean_t link_e2e_beacon;/* set if link beacon on */ - bfa_boolean_t plog_enabled; /* set if portlog is enabled*/ - - /* - * Dynamic field - info from FCS - */ - u32 pid; /* port ID */ - enum bfa_pport_type port_type; /* current topology */ - u32 loopback; /* external loopback */ - u32 authfail; /* auth fail state */ - u32 rsvd2; /* padding for 64 bit */ -}; - -/** - * FC Port statistics. - */ -struct bfa_pport_fc_stats_s { - u64 secs_reset; /* Seconds since stats is reset */ - u64 tx_frames; /* Tx frames */ - u64 tx_words; /* Tx words */ - u64 tx_lip; /* Tx LIP */ - u64 tx_nos; /* Tx NOS */ - u64 tx_ols; /* Tx OLS */ - u64 tx_lr; /* Tx LR */ - u64 tx_lrr; /* Tx LRR */ - u64 rx_frames; /* Rx frames */ - u64 rx_words; /* Rx words */ - u64 lip_count; /* Rx LIP */ - u64 nos_count; /* Rx NOS */ - u64 ols_count; /* Rx OLS */ - u64 lr_count; /* Rx LR */ - u64 lrr_count; /* Rx LRR */ - u64 invalid_crcs; /* Rx CRC err frames */ - u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */ - u64 undersized_frm; /* Rx undersized frames */ - u64 oversized_frm; /* Rx oversized frames */ - u64 bad_eof_frm; /* Rx frames with bad EOF */ - u64 error_frames; /* Errored frames */ - u64 dropped_frames; /* Dropped frames */ - u64 link_failures; /* Link Failure (LF) count */ - u64 loss_of_syncs; /* Loss of sync count */ - u64 loss_of_signals;/* Loss of signal count */ - u64 primseq_errs; /* Primitive sequence protocol err. */ - u64 bad_os_count; /* Invalid ordered sets */ - u64 err_enc_out; /* Encoding err nonframe_8b10b */ - u64 err_enc; /* Encoding err frame_8b10b */ -}; - -/** - * Eth Port statistics. 
- */ -struct bfa_pport_eth_stats_s { - u64 secs_reset; /* Seconds since stats is reset */ - u64 frame_64; /* Frames 64 bytes */ - u64 frame_65_127; /* Frames 65-127 bytes */ - u64 frame_128_255; /* Frames 128-255 bytes */ - u64 frame_256_511; /* Frames 256-511 bytes */ - u64 frame_512_1023; /* Frames 512-1023 bytes */ - u64 frame_1024_1518; /* Frames 1024-1518 bytes */ - u64 frame_1519_1522; /* Frames 1519-1522 bytes */ - u64 tx_bytes; /* Tx bytes */ - u64 tx_packets; /* Tx packets */ - u64 tx_mcast_packets; /* Tx multicast packets */ - u64 tx_bcast_packets; /* Tx broadcast packets */ - u64 tx_control_frame; /* Tx control frame */ - u64 tx_drop; /* Tx drops */ - u64 tx_jabber; /* Tx jabber */ - u64 tx_fcs_error; /* Tx FCS error */ - u64 tx_fragments; /* Tx fragments */ - u64 rx_bytes; /* Rx bytes */ - u64 rx_packets; /* Rx packets */ - u64 rx_mcast_packets; /* Rx multicast packets */ - u64 rx_bcast_packets; /* Rx broadcast packets */ - u64 rx_control_frames; /* Rx control frames */ - u64 rx_unknown_opcode; /* Rx unknown opcode */ - u64 rx_drop; /* Rx drops */ - u64 rx_jabber; /* Rx jabber */ - u64 rx_fcs_error; /* Rx FCS errors */ - u64 rx_alignment_error; /* Rx alignment errors */ - u64 rx_frame_length_error; /* Rx frame len errors */ - u64 rx_code_error; /* Rx code errors */ - u64 rx_fragments; /* Rx fragments */ - u64 rx_pause; /* Rx pause */ - u64 rx_zero_pause; /* Rx zero pause */ - u64 tx_pause; /* Tx pause */ - u64 tx_zero_pause; /* Tx zero pause */ - u64 rx_fcoe_pause; /* Rx FCoE pause */ - u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */ - u64 tx_fcoe_pause; /* Tx FCoE pause */ - u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */ -}; - -/** - * Port statistics. - */ -union bfa_pport_stats_u { - struct bfa_pport_fc_stats_s fc; - struct bfa_pport_eth_stats_s eth; -}; - -/** - * Port FCP mappings. - */ -struct bfa_pport_fcpmap_s { - char osdevname[256]; - u32 bus; - u32 target; - u32 oslun; - u32 fcid; - wwn_t nwwn; - wwn_t pwwn; - u64 fcplun; - char luid[256]; -}; - -/** - * Port RNI */ -struct bfa_pport_rnid_s { - wwn_t wwn; - u32 unittype; - u32 portid; - u32 attached_nodes_num; - u16 ip_version; - u16 udp_port; - u8 ipaddr[16]; - u16 rsvd; - u16 topologydiscoveryflags; -}; - -struct bfa_fcport_fcf_s { - wwn_t name; /* FCF name */ - wwn_t fabric_name; /* Fabric Name */ - u8 fipenabled; /* FIP enabled or not */ - u8 fipfailed; /* FIP failed or not */ - u8 resv[2]; - u8 pri; /* FCF priority */ - u8 version; /* FIP version used */ - u8 available; /* Available for login */ - u8 fka_disabled; /* FKA is disabled */ - u8 maxsz_verified; /* FCoE max size verified */ - u8 fc_map[3]; /* FC map */ - u16 vlan; /* FCoE vlan tag/priority */ - u32 fka_adv_per; /* FIP ka advert. 
period */ - struct mac_s mac; /* FCF mac */ -}; - -/** - * Link state information - */ -struct bfa_pport_link_s { - u8 linkstate; /* Link state bfa_pport_linkstate */ - u8 linkstate_rsn; /* bfa_pport_linkstate_rsn_t */ - u8 topology; /* P2P/LOOP bfa_pport_topology */ - u8 speed; /* Link speed (1/2/4/8 G) */ - u32 linkstate_opt; /* Linkstate optional data (debug) */ - u8 trunked; /* Trunked or not (1 or 0) */ - u8 resvd[3]; - struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ - union { - struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ - struct bfa_fcport_fcf_s fcf; /* FCF information (for FCoE) */ - } vc_fcf; -}; - -#endif /* __BFA_DEFS_PPORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h b/drivers/scsi/bfa/include/defs/bfa_defs_qos.h deleted file mode 100644 index aadbacd1d2d7..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_qos.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_QOS_H__ -#define __BFA_DEFS_QOS_H__ - -/** - * QoS states - */ -enum bfa_qos_state { - BFA_QOS_ONLINE = 1, /* QoS is online */ - BFA_QOS_OFFLINE = 2, /* QoS is offline */ -}; - - -/** - * QoS Priority levels. - */ -enum bfa_qos_priority { - BFA_QOS_UNKNOWN = 0, - BFA_QOS_HIGH = 1, /* QoS Priority Level High */ - BFA_QOS_MED = 2, /* QoS Priority Level Medium */ - BFA_QOS_LOW = 3, /* QoS Priority Level Low */ -}; - - -/** - * QoS bandwidth allocation for each priority level - */ -enum bfa_qos_bw_alloc { - BFA_QOS_BW_HIGH = 60, /* bandwidth allocation for High */ - BFA_QOS_BW_MED = 30, /* bandwidth allocation for Medium */ - BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ -}; - -/** - * QoS attribute returned in QoS Query - */ -struct bfa_qos_attr_s { - enum bfa_qos_state state; /* QoS current state */ - u32 total_bb_cr; /* Total BB Credits */ -}; - -/** - * These fields should be displayed only from the CLI. - * There will be a separate BFAL API (get_qos_vc_attr ?) - * to retrieve this. 
- * - */ -#define BFA_QOS_MAX_VC 16 - -struct bfa_qos_vc_info_s { - u8 vc_credit; - u8 borrow_credit; - u8 priority; - u8 resvd; -}; - -struct bfa_qos_vc_attr_s { - u16 total_vc_count; /* Total VC Count */ - u16 shared_credit; - u32 elp_opmode_flags; - struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as - * total_vc_count */ -}; - -/** - * QoS statistics - */ -struct bfa_qos_stats_s { - u32 flogi_sent; /* QoS Flogi sent */ - u32 flogi_acc_recvd; /* QoS Flogi Acc received */ - u32 flogi_rjt_recvd; /* QoS Flogi rejects received */ - u32 flogi_retries; /* QoS Flogi retries */ - - u32 elp_recvd; /* QoS ELP received */ - u32 elp_accepted; /* QoS ELP Accepted */ - u32 elp_rejected; /* QoS ELP rejected */ - u32 elp_dropped; /* QoS ELP dropped */ - - u32 qos_rscn_recvd; /* QoS RSCN received */ - u32 rsvd; /* padding for 64 bit alignment */ -}; - -#endif /* __BFA_DEFS_QOS_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h b/drivers/scsi/bfa/include/defs/bfa_defs_rport.h deleted file mode 100644 index e0af59d6d2f6..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_rport.h +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_RPORT_H__ -#define __BFA_DEFS_RPORT_H__ - -#include -#include -#include -#include -#include - -/** - * FCS remote port states - */ -enum bfa_rport_state { - BFA_RPORT_UNINIT = 0, /* PORT is not yet initialized */ - BFA_RPORT_OFFLINE = 1, /* rport is offline */ - BFA_RPORT_PLOGI = 2, /* PLOGI to rport is in progress */ - BFA_RPORT_ONLINE = 3, /* login to rport is complete */ - BFA_RPORT_PLOGI_RETRY = 4, /* retrying login to rport */ - BFA_RPORT_NSQUERY = 5, /* nameserver query */ - BFA_RPORT_ADISC = 6, /* ADISC authentication */ - BFA_RPORT_LOGO = 7, /* logging out with rport */ - BFA_RPORT_LOGORCV = 8, /* handling LOGO from rport */ - BFA_RPORT_NSDISC = 9, /* re-discover rport */ -}; - -/** - * Rport Scsi Function : Initiator/Target. 
- */ -enum bfa_rport_function { - BFA_RPORT_INITIATOR = 0x01, /* SCSI Initiator */ - BFA_RPORT_TARGET = 0x02, /* SCSI Target */ -}; - -/** - * port/node symbolic names for rport - */ -#define BFA_RPORT_SYMNAME_MAXLEN 255 -struct bfa_rport_symname_s { - char symname[BFA_RPORT_SYMNAME_MAXLEN]; -}; - -struct bfa_rport_hal_stats_s { - u32 sm_un_cr; /* uninit: create events */ - u32 sm_un_unexp; /* uninit: exception events */ - u32 sm_cr_on; /* created: online events */ - u32 sm_cr_del; /* created: delete events */ - u32 sm_cr_hwf; /* created: IOC down */ - u32 sm_cr_unexp; /* created: exception events */ - u32 sm_fwc_rsp; /* fw create: f/w responses */ - u32 sm_fwc_del; /* fw create: delete events */ - u32 sm_fwc_off; /* fw create: offline events */ - u32 sm_fwc_hwf; /* fw create: IOC down */ - u32 sm_fwc_unexp; /* fw create: exception events*/ - u32 sm_on_off; /* online: offline events */ - u32 sm_on_del; /* online: delete events */ - u32 sm_on_hwf; /* online: IOC down events */ - u32 sm_on_unexp; /* online: exception events */ - u32 sm_fwd_rsp; /* fw delete: fw responses */ - u32 sm_fwd_del; /* fw delete: delete events */ - u32 sm_fwd_hwf; /* fw delete: IOC down events */ - u32 sm_fwd_unexp; /* fw delete: exception events*/ - u32 sm_off_del; /* offline: delete events */ - u32 sm_off_on; /* offline: online events */ - u32 sm_off_hwf; /* offline: IOC down events */ - u32 sm_off_unexp; /* offline: exception events */ - u32 sm_del_fwrsp; /* delete: fw responses */ - u32 sm_del_hwf; /* delete: IOC down events */ - u32 sm_del_unexp; /* delete: exception events */ - u32 sm_delp_fwrsp; /* delete pend: fw responses */ - u32 sm_delp_hwf; /* delete pend: IOC downs */ - u32 sm_delp_unexp; /* delete pend: exceptions */ - u32 sm_offp_fwrsp; /* off-pending: fw responses */ - u32 sm_offp_del; /* off-pending: deletes */ - u32 sm_offp_hwf; /* off-pending: IOC downs */ - u32 sm_offp_unexp; /* off-pending: exceptions */ - u32 sm_iocd_off; /* IOC down: offline events */ - u32 sm_iocd_del; /* IOC down: delete events */ - u32 sm_iocd_on; /* IOC down: online events */ - u32 sm_iocd_unexp; /* IOC down: exceptions */ - u32 rsvd; -}; - -/** - * FCS remote port statistics - */ -struct bfa_rport_stats_s { - u32 offlines; /* remote port offline count */ - u32 onlines; /* remote port online count */ - u32 rscns; /* RSCN affecting rport */ - u32 plogis; /* plogis sent */ - u32 plogi_accs; /* plogi accepts */ - u32 plogi_timeouts; /* plogi timeouts */ - u32 plogi_rejects; /* rcvd plogi rejects */ - u32 plogi_failed; /* local failure */ - u32 plogi_rcvd; /* plogis rcvd */ - u32 prli_rcvd; /* inbound PRLIs */ - u32 adisc_rcvd; /* ADISCs received */ - u32 adisc_rejects; /* recvd ADISC rejects */ - u32 adisc_sent; /* ADISC requests sent */ - u32 adisc_accs; /* ADISC accepted by rport */ - u32 adisc_failed; /* ADISC failed (no response) */ - u32 adisc_rejected; /* ADISC rejected by us */ - u32 logos; /* logos sent */ - u32 logo_accs; /* LOGO accepts from rport */ - u32 logo_failed; /* LOGO failures */ - u32 logo_rejected; /* LOGO rejects from rport */ - u32 logo_rcvd; /* LOGO from remote port */ - - u32 rpsc_rcvd; /* RPSC received */ - u32 rpsc_rejects; /* recvd RPSC rejects */ - u32 rpsc_sent; /* RPSC requests sent */ - u32 rpsc_accs; /* RPSC accepted by rport */ - u32 rpsc_failed; /* RPSC failed (no response) */ - u32 rpsc_rejected; /* RPSC rejected by us */ - - u32 rsvd; - struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */ -}; - -/** - * Rport's QoS attributes - */ -struct bfa_rport_qos_attr_s { - enum 
bfa_qos_priority qos_priority; /* rport's QoS priority */ - u32 qos_flow_id; /* QoS flow Id */ -}; - -/** - * FCS remote port attributes returned in queries - */ -struct bfa_rport_attr_s { - wwn_t nwwn; /* node wwn */ - wwn_t pwwn; /* port wwn */ - enum fc_cos cos_supported; /* supported class of services */ - u32 pid; /* port ID */ - u32 df_sz; /* Max payload size */ - enum bfa_rport_state state; /* Rport State machine state */ - enum fc_cos fc_cos; /* FC classes of services */ - bfa_boolean_t cisc; /* CISC capable device */ - struct bfa_rport_symname_s symname; /* Symbolic Name */ - enum bfa_rport_function scsi_function; /* Initiator/Target */ - struct bfa_rport_qos_attr_s qos_attr; /* qos attributes */ - enum bfa_pport_speed curr_speed; /* operating speed got from - * RPSC ELS. UNKNOWN, if RPSC - * is not supported */ - bfa_boolean_t trl_enforced; /* TRL enforced ? TRUE/FALSE */ - enum bfa_pport_speed assigned_speed; /* Speed assigned by the user. - * will be used if RPSC is not - * supported by the rport */ -}; - -#define bfa_rport_aen_qos_data_t struct bfa_rport_qos_attr_s - -/** - * BFA remote port events - * Arguments below are in BFAL context from Mgmt - * BFA_RPORT_AEN_ONLINE: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn - * BFA_RPORT_AEN_OFFLINE: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn - * BFA_RPORT_AEN_DISCONNECT:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn - * BFA_RPORT_AEN_QOS_PRIO: [in]: lpwwn [out]: vf_id, lpwwn, rpwwn, prio - * BFA_RPORT_AEN_QOS_FLOWID:[in]: lpwwn [out]: vf_id, lpwwn, rpwwn, flow_id - */ -enum bfa_rport_aen_event { - BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */ - BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */ - BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */ - BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */ - BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */ -}; - -struct bfa_rport_aen_data_s { - u16 vf_id; /* vf_id of this logical port */ - u16 rsvd[3]; - wwn_t ppwwn; /* WWN of its physical port */ - wwn_t lpwwn; /* WWN of this logical port */ - wwn_t rpwwn; /* WWN of this remote port */ - union { - bfa_rport_aen_qos_data_t qos; - } priv; -}; - -#endif /* __BFA_DEFS_RPORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h deleted file mode 100644 index 6eb4e62096fc..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_STATUS_H__ -#define __BFA_DEFS_STATUS_H__ - -/** - * API status return values - * - * NOTE: The error msgs are auto generated from the comments. 
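The AEN comment block above documents which fields are meaningful for each remote-port event. The following is a standalone sketch of a consumer that formats such an event, assuming wwn_t is a 64-bit world wide name and omitting the QoS payload; the struct and function names are local stand-ins and the WWN values are made up.

#include <stdio.h>
#include <stdint.h>

/* Trimmed stand-ins for the deleted bfa_defs_rport.h definitions. */
typedef uint64_t wwn_t;

enum bfa_rport_aen_event {
    BFA_RPORT_AEN_ONLINE     = 1,
    BFA_RPORT_AEN_OFFLINE    = 2,
    BFA_RPORT_AEN_DISCONNECT = 3,
    BFA_RPORT_AEN_QOS_PRIO   = 4,
    BFA_RPORT_AEN_QOS_FLOWID = 5,
};

struct rport_aen_data {
    uint16_t vf_id;
    wwn_t lpwwn;
    wwn_t rpwwn;
};

/* Hypothetical consumer: turn an rport AEN into a log line. */
static void log_rport_aen(enum bfa_rport_aen_event ev,
                          const struct rport_aen_data *d)
{
    static const char *what[] = {
        [BFA_RPORT_AEN_ONLINE]     = "online",
        [BFA_RPORT_AEN_OFFLINE]    = "offline",
        [BFA_RPORT_AEN_DISCONNECT] = "disconnect",
        [BFA_RPORT_AEN_QOS_PRIO]   = "qos priority change",
        [BFA_RPORT_AEN_QOS_FLOWID] = "qos flow id change",
    };
    printf("vf %u lport %016llx rport %016llx: %s\n",
           (unsigned)d->vf_id,
           (unsigned long long)d->lpwwn,
           (unsigned long long)d->rpwwn,
           what[ev]);
}

int main(void)
{
    struct rport_aen_data d = { .vf_id = 0,
                                .lpwwn = 0x10000000c9000001ULL,
                                .rpwwn = 0x20000000c9000002ULL };
    log_rport_aen(BFA_RPORT_AEN_ONLINE, &d);
    return 0;
}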
Only singe line - * comments are supported - */ -enum bfa_status { - BFA_STATUS_OK = 0, /* Success */ - BFA_STATUS_FAILED = 1, /* Operation failed */ - BFA_STATUS_EINVAL = 2, /* Invalid params Check input - * parameters */ - BFA_STATUS_ENOMEM = 3, /* Out of resources */ - BFA_STATUS_ENOSYS = 4, /* Function not implemented */ - BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if - * persists, contact support */ - BFA_STATUS_EPROTOCOL = 6, /* Protocol error */ - BFA_STATUS_ENOFCPORTS = 7, /* No FC ports resources */ - BFA_STATUS_NOFLASH = 8, /* Flash not present */ - BFA_STATUS_BADFLASH = 9, /* Flash is corrupted or bad */ - BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */ - BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */ - BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted - * contact support */ - BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */ - BFA_STATUS_ABORTED = 14, /* Operation aborted */ - BFA_STATUS_NODEV = 15, /* Dev is not present */ - BFA_STATUS_HDMA_FAILED = 16, /* Host dma failed contact support */ - BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */ - BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */ - BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */ - BFA_STATUS_FCPT_LS_RJT = 20, /* Got LS_RJT for FC Pass - * through Req */ - BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */ - BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported - * limit */ - BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed - * setting */ - BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */ - BFA_STATUS_CNFG_FAILED = 25, /* Setting can not be persisted */ - BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */ - BFA_STATUS_NO_ADAPTER = 27, /* No Brocade Adapter Found */ - BFA_STATUS_LINKDOWN = 28, /* Link is down - Check or replace - * SFP/cable */ - BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */ - BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */ - BFA_STATUS_NSLOGIN_FAILED = 31, /* Nameserver login failed */ - BFA_STATUS_NO_RPORTS = 32, /* No remote ports found */ - BFA_STATUS_NSQUERY_FAILED = 33, /* Nameserver query failed */ - BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */ - BFA_STATUS_RPORT_OFFLINE = 35, /* RPORT is not online */ - BFA_STATUS_TGTOPEN_FAILED = 36, /* Remote SCSI target open failed */ - BFA_STATUS_BAD_LUNS = 37, /* No valid LUNs found */ - BFA_STATUS_IO_FAILURE = 38, /* SCSI target IO failure */ - BFA_STATUS_NO_FABRIC = 39, /* No switched fabric present */ - BFA_STATUS_EBADF = 40, /* Bad file descriptor */ - BFA_STATUS_EINTR = 41, /* A signal was caught during ioctl */ - BFA_STATUS_EIO = 42, /* I/O error */ - BFA_STATUS_ENOTTY = 43, /* Inappropriate I/O control - * operation */ - BFA_STATUS_ENXIO = 44, /* No such device or address */ - BFA_STATUS_EFOPEN = 45, /* Failed to open file */ - BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */ - BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled disable port - * first */ - BFA_STATUS_BADFRMHDR = 48, /* Bad frame header */ - BFA_STATUS_BADFRMSZ = 49, /* Bad frame size check and replace - * SFP/cable */ - BFA_STATUS_MISSINGFRM = 50, /* Missing frame check and replace - * SFP/cable or for Mezz card check and - * replace pass through module */ - BFA_STATUS_LINKTIMEOUT = 51, /* Link timeout check and replace - * SFP/cable */ - BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the - * rport */ - BFA_STATUS_CHECKSUM_FAIL = 53, /* checksum failure */ - BFA_STATUS_GZME_FAILED = 54, /* Get zone 
member query failed */ - BFA_STATUS_SCSISTART_REQD = 55, /* SCSI disk require START command */ - BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists - * contact support */ - BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */ - BFA_STATUS_MISMATCH = 58, /* Version mismatch */ - BFA_STATUS_IOC_ENABLED = 59, /* IOC is already enabled */ - BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled disable - * adapter first */ - BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational. Enable IOC - * and if it still fails, - * contact support */ - BFA_STATUS_ADDR_MAP_FAILURE = 62, /* PCI base address not mapped - * in OS */ - BFA_STATUS_SAME_NAME = 63, /* Name exists! use a different - * name */ - BFA_STATUS_PENDING = 64, /* API completes asynchronously */ - BFA_STATUS_8G_SPD = 65, /* Speed setting not valid for - * 8G HBA */ - BFA_STATUS_4G_SPD = 66, /* Speed setting not valid for - * 4G HBA */ - BFA_STATUS_AD_IS_ENABLE = 67, /* Adapter is already enabled */ - BFA_STATUS_EINVAL_TOV = 68, /* Invalid path failover TOV */ - BFA_STATUS_EINVAL_QDEPTH = 69, /* Invalid queue depth value */ - BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version - * mismatch */ - BFA_STATUS_DIAG_BUSY = 71, /* diag busy */ - BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */ - BFA_STATUS_BEACON_OFF = 73, /* Port Beacon already off */ - BFA_STATUS_LBEACON_ON = 74, /* Link End-to-End Beacon already - * on */ - BFA_STATUS_LBEACON_OFF = 75, /* Link End-to-End Beacon already - * off */ - BFA_STATUS_PORT_NOT_INITED = 76, /* Port not initialized */ - BFA_STATUS_RPSC_ENABLED = 77, /* Target has a valid speed */ - BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */ - BFA_STATUS_BAD_FILE = 79, /* Not a valid Brocade Boot Code - * file */ - BFA_STATUS_RLIM_EN = 80, /* Target rate limiting is already - * enabled */ - BFA_STATUS_RLIM_DIS = 81, /* Target rate limiting is already - * disabled */ - BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */ - BFA_STATUS_ADAPTER_DISABLED = 83, /* Adapter is already disabled */ - BFA_STATUS_BIOS_DISABLED = 84, /* Bios is already disabled */ - BFA_STATUS_AUTH_ENABLED = 85, /* Authentication is already - * enabled */ - BFA_STATUS_AUTH_DISABLED = 86, /* Authentication is already - * disabled */ - BFA_STATUS_ERROR_TRL_ENABLED = 87, /* Target rate limiting is - * enabled */ - BFA_STATUS_ERROR_QOS_ENABLED = 88, /* QoS is enabled */ - BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device check or replace SFP */ - BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed contact - * support */ - BFA_STATUS_INVALID_DEVID = 91, /* Invalid device id provided */ - BFA_STATUS_QOS_ENABLED = 92, /* QOS is already enabled */ - BFA_STATUS_QOS_DISABLED = 93, /* QOS is already disabled */ - BFA_STATUS_INCORRECT_DRV_CONFIG = 94, /* Check configuration - * key/value pair */ - BFA_STATUS_REG_FAIL = 95, /* Can't read windows registry */ - BFA_STATUS_IM_INV_CODE = 96, /* Invalid IOCTL code */ - BFA_STATUS_IM_INV_VLAN = 97, /* Invalid VLAN ID */ - BFA_STATUS_IM_INV_ADAPT_NAME = 98, /* Invalid adapter name */ - BFA_STATUS_IM_LOW_RESOURCES = 99, /* Memory allocation failure in - * driver */ - BFA_STATUS_IM_VLANID_IS_PVID = 100, /* Given VLAN id same as PVID */ - BFA_STATUS_IM_VLANID_EXISTS = 101, /* Given VLAN id already exists */ - BFA_STATUS_IM_FW_UPDATE_FAIL = 102, /* Updating firmware with new - * VLAN ID failed */ - BFA_STATUS_PORTLOG_ENABLED = 103, /* Port Log is already enabled */ - BFA_STATUS_PORTLOG_DISABLED = 104, /* Port Log is already disabled */ - BFA_STATUS_FILE_NOT_FOUND = 105, /* 
Specified file could not be - * found */ - BFA_STATUS_QOS_FC_ONLY = 106, /* QOS can be enabled for FC mode - * only */ - BFA_STATUS_RLIM_FC_ONLY = 107, /* RATELIM can be enabled for FC mode - * only */ - BFA_STATUS_CT_SPD = 108, /* Invalid speed selection for Catapult. */ - BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */ - BFA_STATUS_CEE_NOT_DN = 110, /* eth port is not at down state, please - * bring down first */ - BFA_STATUS_10G_SPD = 111, /* Speed setting not valid for 10G CNA */ - BFA_STATUS_IM_INV_TEAM_NAME = 112, /* Invalid team name */ - BFA_STATUS_IM_DUP_TEAM_NAME = 113, /* Given team name already - * exists */ - BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part - * of another team */ - BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured. - * Delete all VLANs to become - * part of the team */ - BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured - * for adapters */ - BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds - * configured for adapters */ - BFA_STATUS_IM_MTU_MISMATCH = 118, /* Mismatching MTUs configured for - * adapters */ - BFA_STATUS_IM_RSS_MISMATCH = 119, /* Mismatching RSS parameters - * configured for adapters */ - BFA_STATUS_IM_HDS_MISMATCH = 120, /* Mismatching HDS parameters - * configured for adapters */ - BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, /* Mismatching offload - * parameters configured for - * adapters */ - BFA_STATUS_IM_PORT_PARAMS = 122, /* Error setting port parameters */ - BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, /* Port is not part of team */ - BFA_STATUS_IM_CANNOT_REM_PRI = 124, /* Primary adapter cannot be - * removed. Change primary before - * removing */ - BFA_STATUS_IM_MAX_PORTS_REACHED = 125, /* Exceeding maximum ports - * per team */ - BFA_STATUS_IM_LAST_PORT_DELETE = 126, /* Last port in team being - * deleted */ - BFA_STATUS_IM_NO_DRIVER = 127, /* IM driver is not installed */ - BFA_STATUS_IM_MAX_VLANS_REACHED = 128, /* Exceeding maximum VLANs - * per port */ - BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, /* Bios speed config not - * allowed for CNA */ - BFA_STATUS_NO_MINPORT_DRIVER = 130, /* Miniport driver is not - * loaded */ - BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */ - BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */ - BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed - * or loaded */ - BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */ - BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */ - BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */ - BFA_STATUS_IM_PVID_REMOVE = 137, /* Cannot remove port VLAN (PVID) */ - BFA_STATUS_IM_PVID_EDIT = 138, /* Cannot edit port VLAN (PVID) */ - BFA_STATUS_CNA_NO_BOOT = 139, /* Boot upload not allowed for CNA */ - BFA_STATUS_IM_PVID_NON_ZERO = 140, /* Port VLAN ID (PVID) is Set to - * Non-Zero Value */ - BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, /* Acquiring Network - * Subsystem Lock Failed.Please - * try after some time */ - BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsystem - * handle Failed. Please try - * after some time */ - BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */ - BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient - * permissions to execute the BCU - * application */ - BFA_STATUS_IM_INV_VLAN_NAME = 145, /* Invalid/Reserved VLAN name - * string. 
The name is not allowed - * for the normal VLAN */ - BFA_STATUS_CMD_NOTSUPP_CNA = 146, /* Command not supported for CNA */ - BFA_STATUS_IM_PASSTHRU_EDIT = 147, /* Can not edit passthrough VLAN - * id */ - BFA_STATUS_IM_BIND_FAILED = 148, /* IM Driver bind operation - * failed */ - BFA_STATUS_IM_UNBIND_FAILED = 149, /* IM Driver unbind operation - * failed */ - BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the - * team */ - BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exists */ - BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't - * exists */ - BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not - * allowed for the current - * Teaming mode */ - BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot - * configuration */ - BFA_STATUS_DEVID_MISSING = 155, /* Boot image is not for the adapter(s) - * installed */ - BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */ - BFA_STATUS_CREATE_FILE = 157, /* Failed to create temporary file */ - BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */ - BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */ - BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 160, /* Topology command not - * applicable to CNA */ - BFA_STATUS_BOOT_CODE_UPDATED = 161, /* reboot -- -r is needed after - * boot code updated */ - BFA_STATUS_BOOT_VERSION = 162, /* Boot code version not compatible with - * the driver installed */ - BFA_STATUS_CARDTYPE_MISSING = 163, /* Boot image is not for the - * adapter(s) installed */ - BFA_STATUS_INVALID_CARDTYPE = 164, /* Invalid card type provided */ - BFA_STATUS_MAX_VAL /* Unknown error code */ -}; -#define bfa_status_t enum bfa_status - -enum bfa_eproto_status { - BFA_EPROTO_BAD_ACCEPT = 0, - BFA_EPROTO_UNKNOWN_RSP = 1 -}; -#define bfa_eproto_status_t enum bfa_eproto_status - -#endif /* __BFA_DEFS_STATUS_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h b/drivers/scsi/bfa/include/defs/bfa_defs_tin.h deleted file mode 100644 index e05a2db7abed..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_tin.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_TIN_H__ -#define __BFA_DEFS_TIN_H__ - -#include -#include - -/** - * FCS tin states - */ -enum bfa_tin_state_e { - BFA_TIN_SM_OFFLINE = 0, /* tin is offline */ - BFA_TIN_SM_WOS_LOGIN = 1, /* Waiting PRLI ACC/RJT from ULP */ - BFA_TIN_SM_WFW_ONLINE = 2, /* Waiting ACK to PRLI ACC from FW */ - BFA_TIN_SM_ONLINE = 3, /* tin login is complete */ - BFA_TIN_SM_WIO_RELOGIN = 4, /* tin relogin is in progress */ - BFA_TIN_SM_WIO_LOGOUT = 5, /* Processing of PRLO req from - * Initiator is in progress - */ - BFA_TIN_SM_WOS_LOGOUT = 6, /* Processing of PRLO req from - * Initiator is in progress - */ - BFA_TIN_SM_WIO_CLEAN = 7, /* Waiting for IO cleanup before tin - * is offline. 
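Per the note at the top of bfa_defs_status.h, the user-visible error strings are generated from the single-line comments on each code. Below is a standalone sketch of a strerror-style lookup covering only a handful of the codes, with the messages taken from those comments; bfa_status_msg() is a hypothetical helper, not part of the driver.

#include <stdio.h>

/* A few entries copied from the deleted enum bfa_status; the driver's full
 * message table is generated from the header comments. */
enum {
    BFA_STATUS_OK       = 0,
    BFA_STATUS_FAILED   = 1,
    BFA_STATUS_EINVAL   = 2,
    BFA_STATUS_ENOMEM   = 3,
    BFA_STATUS_NOFLASH  = 8,
    BFA_STATUS_LINKDOWN = 28,
};

/* Hypothetical strerror-style helper covering only the codes above. */
static const char *bfa_status_msg(int status)
{
    switch (status) {
    case BFA_STATUS_OK:       return "Success";
    case BFA_STATUS_FAILED:   return "Operation failed";
    case BFA_STATUS_EINVAL:   return "Invalid params, check input parameters";
    case BFA_STATUS_ENOMEM:   return "Out of resources";
    case BFA_STATUS_NOFLASH:  return "Flash not present";
    case BFA_STATUS_LINKDOWN: return "Link is down, check or replace SFP/cable";
    default:                  return "Unknown error code";
    }
}

int main(void)
{
    printf("status 28: %s\n", bfa_status_msg(BFA_STATUS_LINKDOWN));
    return 0;
}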
This can be triggered - * by RPORT LOGO (rcvd/sent) or by - * PRLO (rcvd/sent) - */ -}; - -struct bfa_prli_req_s { - struct fchs_s fchs; - struct fc_prli_s prli_payload; -}; - -struct bfa_prlo_req_s { - struct fchs_s fchs; - struct fc_prlo_s prlo_payload; -}; - -void bfa_tin_send_login_rsp(void *bfa_tin, u32 login_rsp, - struct fc_ls_rjt_s rjt_payload); -void bfa_tin_send_logout_rsp(void *bfa_tin, u32 logout_rsp, - struct fc_ls_rjt_s rjt_payload); -/** - * FCS target port statistics - */ -struct bfa_tin_stats_s { - u32 onlines; /* ITN nexus onlines (PRLI done) */ - u32 offlines; /* ITN Nexus offlines */ - u32 prli_req_parse_err; /* prli req parsing errors */ - u32 prli_rsp_rjt; /* num prli rsp rejects sent */ - u32 prli_rsp_acc; /* num prli rsp accepts sent */ - u32 cleanup_comps; /* ITN cleanup completions */ -}; - -/** - * FCS tin attributes returned in queries - */ -struct bfa_tin_attr_s { - enum bfa_tin_state_e state; - u8 seq_retry; /* Sequence retry supported */ - u8 rsvd[3]; -}; - -/** - * BFA TIN async event data structure for BFAL - */ -enum bfa_tin_aen_event { - BFA_TIN_AEN_ONLINE = 1, /* Target online */ - BFA_TIN_AEN_OFFLINE = 2, /* Target offline */ - BFA_TIN_AEN_DISCONNECT = 3, /* Target disconnected */ -}; - -/** - * BFA TIN event data structure. - */ -struct bfa_tin_aen_data_s { - u16 vf_id; /* vf_id of the IT nexus */ - u16 rsvd[3]; - wwn_t lpwwn; /* WWN of logical port */ - wwn_t rpwwn; /* WWN of remote(target) port */ -}; - -/** - * Below APIs are needed from BFA driver - * Move these to BFA driver public header file? - */ -/* TIN rcvd new PRLI & gets bfad_tin_t ptr from driver this callback */ -void *bfad_tin_rcvd_login_req(void *bfad_tm_port, void *bfa_tin, - wwn_t rp_wwn, u32 rp_fcid, - struct bfa_prli_req_s prli_req); -/* TIN rcvd new PRLO */ -void bfad_tin_rcvd_logout_req(void *bfad_tin, wwn_t rp_wwn, u32 rp_fcid, - struct bfa_prlo_req_s prlo_req); -/* TIN is online and ready for IO */ -void bfad_tin_online(void *bfad_tin); -/* TIN is offline and BFA driver can shutdown its upper stack */ -void bfad_tin_offline(void *bfad_tin); -/* TIN does not need this BFA driver tin tag anymore, so can be freed */ -void bfad_tin_res_free(void *bfad_tin); - -#endif /* __BFA_DEFS_TIN_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h deleted file mode 100644 index ade763dbc8ce..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_DEFS_TSENSOR_H__ -#define __BFA_DEFS_TSENSOR_H__ - -#include -#include - -/** - * Temperature sensor status values - */ -enum bfa_tsensor_status { - BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unknown status */ - BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */ - BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below mininum */ - BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */ - BFA_TSENSOR_STATUS_ABOVE_MAX = 5, /* temperature above maximum */ -}; - -/** - * Temperature sensor attribute - */ -struct bfa_tsensor_attr_s { - enum bfa_tsensor_status status; /* temperature sensor status */ - u32 value; /* current temperature in celsius */ -}; - -#endif /* __BFA_DEFS_TSENSOR_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_types.h b/drivers/scsi/bfa/include/defs/bfa_defs_types.h deleted file mode 100644 index 4348332b107a..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_types.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_TYPES_H__ -#define __BFA_DEFS_TYPES_H__ - -#include - -enum bfa_boolean { - BFA_FALSE = 0, - BFA_TRUE = 1 -}; -#define bfa_boolean_t enum bfa_boolean - -#define BFA_STRING_32 32 - -#endif /* __BFA_DEFS_TYPES_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_version.h b/drivers/scsi/bfa/include/defs/bfa_defs_version.h deleted file mode 100644 index f8902a2c9aad..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_version.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#ifndef __BFA_DEFS_VERSION_H__ -#define __BFA_DEFS_VERSION_H__ - -#define BFA_VERSION_LEN 64 - -#endif diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h b/drivers/scsi/bfa/include/defs/bfa_defs_vf.h deleted file mode 100644 index 3235be5e9423..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_vf.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
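The temperature-sensor definitions above expose a status code plus a reading in Celsius. A standalone sketch with trimmed local copies and a hypothetical health check that treats only NOMINAL as healthy:

#include <stdio.h>
#include <stdint.h>

/* Trimmed local copies from the deleted bfa_defs_tsensor.h. */
enum bfa_tsensor_status {
    BFA_TSENSOR_STATUS_UNKNOWN   = 1,
    BFA_TSENSOR_STATUS_FAULTY    = 2,
    BFA_TSENSOR_STATUS_BELOW_MIN = 3,
    BFA_TSENSOR_STATUS_NOMINAL   = 4,
    BFA_TSENSOR_STATUS_ABOVE_MAX = 5,
};

struct tsensor_attr {
    enum bfa_tsensor_status status;
    uint32_t value;     /* current temperature in Celsius */
};

/* Hypothetical health check: only NOMINAL counts as healthy. */
static int tsensor_healthy(const struct tsensor_attr *attr)
{
    return attr->status == BFA_TSENSOR_STATUS_NOMINAL;
}

int main(void)
{
    struct tsensor_attr attr = { .status = BFA_TSENSOR_STATUS_NOMINAL, .value = 45 };
    printf("temp %uC healthy=%d\n", (unsigned)attr.value, tsensor_healthy(&attr));
    return 0;
}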
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_VF_H__ -#define __BFA_DEFS_VF_H__ - -#include -#include -#include - -/** - * VF states - */ -enum bfa_vf_state { - BFA_VF_UNINIT = 0, /* fabric is not yet initialized */ - BFA_VF_LINK_DOWN = 1, /* link is down */ - BFA_VF_FLOGI = 2, /* flogi is in progress */ - BFA_VF_AUTH = 3, /* authentication in progress */ - BFA_VF_NOFABRIC = 4, /* fabric is not present */ - BFA_VF_ONLINE = 5, /* login to fabric is complete */ - BFA_VF_EVFP = 6, /* EVFP is in progress */ - BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */ -}; - -/** - * VF statistics - */ -struct bfa_vf_stats_s { - u32 flogi_sent; /* Num FLOGIs sent */ - u32 flogi_rsp_err; /* FLOGI response errors */ - u32 flogi_acc_err; /* FLOGI accept errors */ - u32 flogi_accepts; /* FLOGI accepts received */ - u32 flogi_rejects; /* FLOGI rejects received */ - u32 flogi_unknown_rsp; /* Unknown responses for FLOGI */ - u32 flogi_alloc_wait; /* Allocation waits prior to - * sending FLOGI - */ - u32 flogi_rcvd; /* FLOGIs received */ - u32 flogi_rejected; /* Incoming FLOGIs rejected */ - u32 fabric_onlines; /* Internal fabric online - * notification sent to other - * modules - */ - u32 fabric_offlines; /* Internal fabric offline - * notification sent to other - * modules - */ - u32 resvd; -}; - -/** - * VF attributes returned in queries - */ -struct bfa_vf_attr_s { - enum bfa_vf_state state; /* VF state */ - u32 rsvd; - wwn_t fabric_name; /* fabric name */ -}; - -#endif /* __BFA_DEFS_VF_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h b/drivers/scsi/bfa/include/defs/bfa_defs_vport.h deleted file mode 100644 index 9f021f43b3b4..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_vport.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
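The VF statistics above track FLOGI outcomes in several counters (accepts, rejects, response errors, unknown responses). A standalone sketch that folds the non-accept counters into a single failure figure; the struct and helper are trimmed, hypothetical stand-ins, and the numbers in main() are examples.

#include <stdio.h>
#include <stdint.h>

/* Subset of the deleted struct bfa_vf_stats_s, with uint32_t standing in for u32. */
struct vf_flogi_stats {
    uint32_t flogi_sent;
    uint32_t flogi_accepts;
    uint32_t flogi_rejects;
    uint32_t flogi_rsp_err;
    uint32_t flogi_unknown_rsp;
};

/* Hypothetical summary helper: FLOGI attempts that did not end in an accept. */
static uint32_t vf_flogi_failures(const struct vf_flogi_stats *s)
{
    return s->flogi_rejects + s->flogi_rsp_err + s->flogi_unknown_rsp;
}

int main(void)
{
    struct vf_flogi_stats s = { .flogi_sent = 10, .flogi_accepts = 9,
                                .flogi_rejects = 1 };
    printf("sent=%u accepted=%u failed=%u\n",
           (unsigned)s.flogi_sent, (unsigned)s.flogi_accepts,
           (unsigned)vf_flogi_failures(&s));
    return 0;
}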
- */ - -#ifndef __BFA_DEFS_VPORT_H__ -#define __BFA_DEFS_VPORT_H__ - -#include -#include -#include - -/** - * VPORT states - */ -enum bfa_vport_state { - BFA_FCS_VPORT_UNINIT = 0, - BFA_FCS_VPORT_CREATED = 1, - BFA_FCS_VPORT_OFFLINE = 1, - BFA_FCS_VPORT_FDISC_SEND = 2, - BFA_FCS_VPORT_FDISC = 3, - BFA_FCS_VPORT_FDISC_RETRY = 4, - BFA_FCS_VPORT_ONLINE = 5, - BFA_FCS_VPORT_DELETING = 6, - BFA_FCS_VPORT_CLEANUP = 6, - BFA_FCS_VPORT_LOGO_SEND = 7, - BFA_FCS_VPORT_LOGO = 8, - BFA_FCS_VPORT_ERROR = 9, - BFA_FCS_VPORT_MAX_STATE, -}; - -/** - * vport statistics - */ -struct bfa_vport_stats_s { - struct bfa_port_stats_s port_stats; /* base class (port) stats */ - /* - * TODO - remove - */ - - u32 fdisc_sent; /* num fdisc sent */ - u32 fdisc_accepts; /* fdisc accepts */ - u32 fdisc_retries; /* fdisc retries */ - u32 fdisc_timeouts; /* fdisc timeouts */ - u32 fdisc_rsp_err; /* fdisc response error */ - u32 fdisc_acc_bad; /* bad fdisc accepts */ - u32 fdisc_rejects; /* fdisc rejects */ - u32 fdisc_unknown_rsp; - /* - *!< fdisc rsp unknown error - */ - u32 fdisc_alloc_wait;/* fdisc req (fcxp)alloc wait */ - - u32 logo_alloc_wait;/* logo req (fcxp) alloc wait */ - u32 logo_sent; /* logo sent */ - u32 logo_accepts; /* logo accepts */ - u32 logo_rejects; /* logo rejects */ - u32 logo_rsp_err; /* logo rsp errors */ - u32 logo_unknown_rsp; - /* logo rsp unknown errors */ - - u32 fab_no_npiv; /* fabric does not support npiv */ - - u32 fab_offline; /* offline events from fab SM */ - u32 fab_online; /* online events from fab SM */ - u32 fab_cleanup; /* cleanup request from fab SM */ - u32 rsvd; -}; - -/** - * BFA vport attribute returned in queries - */ -struct bfa_vport_attr_s { - struct bfa_port_attr_s port_attr; /* base class (port) attributes */ - enum bfa_vport_state vport_state; /* vport state */ - u32 rsvd; -}; - -#endif /* __BFA_DEFS_VPORT_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb.h b/drivers/scsi/bfa/include/fcb/bfa_fcb.h deleted file mode 100644 index 2963b0bc30e7..000000000000 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcb.h BFA FCS callback interfaces - */ - -#ifndef __BFA_FCB_H__ -#define __BFA_FCB_H__ - -/** - * fcb Main fcs callbacks - */ - -void bfa_fcb_exit(struct bfad_s *bfad); - - - -#endif /* __BFA_FCB_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h deleted file mode 100644 index 52585d3dd891..000000000000 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
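Note that the vport state enum above deliberately aliases CREATED/OFFLINE (both 1) and DELETING/CLEANUP (both 6), so any state-to-name mapping has to share labels for those pairs. A standalone sketch with a trimmed local copy and a hypothetical name helper:

#include <stdio.h>

/* Trimmed local copy of enum bfa_vport_state; the aliased values are kept as-is. */
enum bfa_vport_state {
    BFA_FCS_VPORT_UNINIT      = 0,
    BFA_FCS_VPORT_CREATED     = 1,
    BFA_FCS_VPORT_OFFLINE     = 1,
    BFA_FCS_VPORT_FDISC_SEND  = 2,
    BFA_FCS_VPORT_FDISC       = 3,
    BFA_FCS_VPORT_FDISC_RETRY = 4,
    BFA_FCS_VPORT_ONLINE      = 5,
    BFA_FCS_VPORT_DELETING    = 6,
    BFA_FCS_VPORT_CLEANUP     = 6,
    BFA_FCS_VPORT_LOGO_SEND   = 7,
    BFA_FCS_VPORT_LOGO        = 8,
    BFA_FCS_VPORT_ERROR       = 9,
};

/* Hypothetical display helper; aliased values share one label. */
static const char *vport_state_name(enum bfa_vport_state st)
{
    static const char *names[] = {
        "uninit", "created/offline", "fdisc-send", "fdisc",
        "fdisc-retry", "online", "deleting/cleanup", "logo-send",
        "logo", "error",
    };
    if ((unsigned)st >= sizeof(names) / sizeof(names[0]))
        return "unknown";
    return names[st];
}

int main(void)
{
    printf("%s\n", vport_state_name(BFA_FCS_VPORT_ONLINE));
    return 0;
}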
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** -* : bfad_fcpim.h - BFA FCS initiator mode remote port callbacks - */ - -#ifndef __BFAD_FCB_FCPIM_H__ -#define __BFAD_FCB_FCPIM_H__ - -struct bfad_itnim_s; - -/* - * RPIM callbacks - */ - -/** - * Memory allocation for remote port instance. Called before PRLI is - * initiated to the remote target port. - * - * @param[in] bfad - driver instance - * @param[out] itnim - FCS remote port (IM) instance - * @param[out] itnim_drv - driver remote port (IM) instance - * - * @return None - */ -void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, - struct bfad_itnim_s **itnim_drv); - -/** - * Free remote port (IM) instance. - * - * @param[in] bfad - driver instance - * @param[in] itnim_drv - driver remote port instance - * - * @return None - */ -void bfa_fcb_itnim_free(struct bfad_s *bfad, - struct bfad_itnim_s *itnim_drv); - -/** - * Notification of when login with a remote target device is complete. - * - * @param[in] itnim_drv - driver remote port instance - * - * @return None - */ -void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); - -/** - * Notification when login with the remote device is severed. - * - * @param[in] itnim_drv - driver remote port instance - * - * @return None - */ -void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); - -void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv); - -#endif /* __BFAD_FCB_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h deleted file mode 100644 index 5fd7f986fa32..000000000000 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb_port.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcb_port.h BFA FCS virtual port driver interfaces - */ - -#ifndef __BFA_FCB_PORT_H__ -#define __BFA_FCB_PORT_H__ - -#include -/** - * fcs_port_fcb FCS port driver interfaces - */ - -/* - * Forward declarations - */ -struct bfad_port_s; - -/* - * Callback functions from BFA FCS to driver - */ - -/** - * Call from FCS to driver module when a port is instantiated. The port - * can be a base port or a virtual port with in the base fabric or - * a virtual fabric. - * - * On this callback, driver is supposed to create scsi_host, scsi_tgt or - * network interfaces bases on ports personality/roles. 
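The bfa_fcb_itnim_* callbacks above form an alloc/online/offline/free lifecycle that the driver implements for FCS. The following is a user-space sketch of one plausible implementation shape, with stubbed types, libc allocation in place of kernel allocators, and hypothetical function names (itnim_alloc_cb and friends); embedding the FCS instance inside the driver object, as shown, is an assumption for illustration only.

#include <stdlib.h>

/* Stand-ins for the driver/FCS types named in the deleted bfa_fcb_fcpim.h. */
struct bfad_s;                                  /* opaque driver instance */
struct bfa_fcs_itnim_s { int unused; };         /* stub; the real FCS type is larger */
struct bfad_itnim_s {
    struct bfa_fcs_itnim_s fcs_itnim;           /* assumed embedding of the FCS instance */
    int online;
};

/* Allocate the driver's per-remote-port (IM) context before PRLI. */
static void itnim_alloc_cb(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
                           struct bfad_itnim_s **itnim_drv)
{
    (void)bfad;
    *itnim_drv = calloc(1, sizeof(**itnim_drv));
    if (*itnim_drv)
        *itnim = &(*itnim_drv)->fcs_itnim;
}

/* Free the driver context once FCS is done with it. */
static void itnim_free_cb(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
    (void)bfad;
    free(itnim_drv);
}

static void itnim_online_cb(struct bfad_itnim_s *itnim_drv)  { itnim_drv->online = 1; }
static void itnim_offline_cb(struct bfad_itnim_s *itnim_drv) { itnim_drv->online = 0; }

int main(void)
{
    struct bfa_fcs_itnim_s *itnim = NULL;
    struct bfad_itnim_s *drv = NULL;

    itnim_alloc_cb(NULL, &itnim, &drv);
    if (drv) {
        itnim_online_cb(drv);
        itnim_offline_cb(drv);
        itnim_free_cb(NULL, drv);
    }
    return 0;
}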
- * - * base port of base fabric: vf_drv == NULL && vp_drv == NULL - * vport of base fabric: vf_drv == NULL && vp_drv != NULL - * base port of VF: vf_drv != NULL && vp_drv == NULL - * vport of VF: vf_drv != NULL && vp_drv != NULL - * - * @param[in] bfad - driver instance - * @param[in] port - FCS port instance - * @param[in] roles - port roles: IM, TM, IP - * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF) - * @param[in] vp_drv - vport driver instance, NULL if base port - * - * @return None - */ -struct bfad_port_s *bfa_fcb_port_new(struct bfad_s *bfad, - struct bfa_fcs_port_s *port, - enum bfa_port_role roles, struct bfad_vf_s *vf_drv, - struct bfad_vport_s *vp_drv); - -/** - * Call from FCS to driver module when a port is deleted. The port - * can be a base port or a virtual port with in the base fabric or - * a virtual fabric. - * - * @param[in] bfad - driver instance - * @param[in] roles - port roles: IM, TM, IP - * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF) - * @param[in] vp_drv - vport driver instance, NULL if base port - * - * @return None - */ -void bfa_fcb_port_delete(struct bfad_s *bfad, enum bfa_port_role roles, - struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv); - -/** - * Notification when port transitions to ONLINE state. - * - * Online notification is a logical link up for the local port. This - * notification is sent after a successfull FLOGI, or a successful - * link initialization in proviate-loop or N2N topologies. - * - * @param[in] bfad - driver instance - * @param[in] roles - port roles: IM, TM, IP - * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF) - * @param[in] vp_drv - vport driver instance, NULL if base port - * - * @return None - */ -void bfa_fcb_port_online(struct bfad_s *bfad, enum bfa_port_role roles, - struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv); - -/** - * Notification when port transitions to OFFLINE state. - * - * Offline notification is a logical link down for the local port. - * - * @param[in] bfad - driver instance - * @param[in] roles - port roles: IM, TM, IP - * @param[in] vf_drv - VF driver instance, NULL if base fabric (no VF) - * @param[in] vp_drv - vport driver instance, NULL if base port - * - * @return None - */ -void bfa_fcb_port_offline(struct bfad_s *bfad, enum bfa_port_role roles, - struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv); - - -#endif /* __BFA_FCB_PORT_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h deleted file mode 100644 index e0261bb6d1c1..000000000000 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb_rport.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
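The comment above enumerates the four vf_drv/vp_drv combinations a port callout can see. A standalone sketch that turns that truth table into a label; port_kind() is a hypothetical helper and the driver types are opaque stubs.

#include <stdio.h>

/* Opaque stand-ins for the driver types referenced by bfa_fcb_port_new(). */
struct bfad_vf_s;
struct bfad_vport_s;

/* Decode the port kind from the two optional driver pointers. */
static const char *port_kind(const struct bfad_vf_s *vf_drv,
                             const struct bfad_vport_s *vp_drv)
{
    if (!vf_drv && !vp_drv)
        return "base port of base fabric";
    if (!vf_drv && vp_drv)
        return "vport of base fabric";
    if (vf_drv && !vp_drv)
        return "base port of VF";
    return "vport of VF";
}

int main(void)
{
    printf("%s\n", port_kind(NULL, NULL));
    return 0;
}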
- */ - -/** - * bfa_fcb_rport.h BFA FCS rport driver interfaces - */ - -#ifndef __BFA_FCB_RPORT_H__ -#define __BFA_FCB_RPORT_H__ - -/** - * fcs_rport_fcb Remote port driver interfaces - */ - - -struct bfad_rport_s; - -/* - * Callback functions from BFA FCS to driver - */ - -/** - * Completion callback for bfa_fcs_rport_add(). - * - * @param[in] rport_drv - driver instance of rport - * - * @return None - */ -void bfa_fcb_rport_add(struct bfad_rport_s *rport_drv); - -/** - * Completion callback for bfa_fcs_rport_remove(). - * - * @param[in] rport_drv - driver instance of rport - * - * @return None - */ -void bfa_fcb_rport_remove(struct bfad_rport_s *rport_drv); - -/** - * Call to allocate a rport instance. - * - * @param[in] bfad - driver instance - * @param[out] rport - BFA FCS instance of rport - * @param[out] rport_drv - driver instance of rport - * - * @retval BFA_STATUS_OK - successfully allocated - * @retval BFA_STATUS_ENOMEM - cannot allocate - */ -bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, - struct bfa_fcs_rport_s **rport, - struct bfad_rport_s **rport_drv); - -/** - * Call to free rport memory resources. - * - * @param[in] bfad - driver instance - * @param[in] rport_drv - driver instance of rport - * - * @return None - */ -void bfa_fcb_rport_free(struct bfad_s *bfad, struct bfad_rport_s **rport_drv); - - - -#endif /* __BFA_FCB_RPORT_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h deleted file mode 100644 index cfd3fac0a4e2..000000000000 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vf.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcb_vf.h BFA FCS virtual fabric driver interfaces - */ - -#ifndef __BFA_FCB_VF_H__ -#define __BFA_FCB_VF_H__ - -/** - * fcs_vf_fcb Virtual fabric driver intrefaces - */ - - -struct bfad_vf_s; - -/* - * Callback functions from BFA FCS to driver - */ - -/** - * Completion callback for bfa_fcs_vf_stop(). - * - * @param[in] vf_drv - driver instance of vf - * - * @return None - */ -void bfa_fcb_vf_stop(struct bfad_vf_s *vf_drv); - - - -#endif /* __BFA_FCB_VF_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h deleted file mode 100644 index cfd6ba7c47ec..000000000000 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb_vport.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
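bfa_fcb_rport_alloc() above is documented to return BFA_STATUS_OK or BFA_STATUS_ENOMEM. Below is a user-space sketch of that allocation/free contract with stubbed types and libc allocation; placing the FCS rport inside the driver rport is an assumption for illustration.

#include <stdlib.h>

/* The two status codes come from the deleted bfa_defs_status.h; the types are
 * trimmed stand-ins for the FCS/driver rport structures. */
enum { BFA_STATUS_OK = 0, BFA_STATUS_ENOMEM = 3 };

struct bfa_fcs_rport_s { int unused; };         /* stub; the real FCS type is larger */
struct bfad_rport_s { struct bfa_fcs_rport_s fcs_rport; };
struct bfad_s;                                  /* opaque driver instance */

/* Hand FCS an rport instance owned by the driver, or report BFA_STATUS_ENOMEM. */
static int rport_alloc_cb(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
                          struct bfad_rport_s **rport_drv)
{
    (void)bfad;
    *rport_drv = calloc(1, sizeof(**rport_drv));
    if (!*rport_drv)
        return BFA_STATUS_ENOMEM;
    *rport = &(*rport_drv)->fcs_rport;
    return BFA_STATUS_OK;
}

/* Release the driver rport; note the double pointer, as in the deleted prototype. */
static void rport_free_cb(struct bfad_s *bfad, struct bfad_rport_s **rport_drv)
{
    (void)bfad;
    free(*rport_drv);
    *rport_drv = NULL;
}

int main(void)
{
    struct bfa_fcs_rport_s *rport = NULL;
    struct bfad_rport_s *drv = NULL;

    if (rport_alloc_cb(NULL, &rport, &drv) == BFA_STATUS_OK)
        rport_free_cb(NULL, &drv);
    return 0;
}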
See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcb_vport.h BFA FCS virtual port driver interfaces - */ - -#ifndef __BFA_FCB_VPORT_H__ -#define __BFA_FCB_VPORT_H__ - -/** - * fcs_vport_fcb Virtual port driver interfaces - */ - - -struct bfad_vport_s; - -/* - * Callback functions from BFA FCS to driver - */ - -/** - * Completion callback for bfa_fcs_vport_delete(). - * - * @param[in] vport_drv - driver instance of vport - * - * @return None - */ -void bfa_fcb_vport_delete(struct bfad_vport_s *vport_drv); -void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s); - - - -#endif /* __BFA_FCB_VPORT_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h deleted file mode 100644 index 54e5b81ab2a3..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FCS_H__ -#define __BFA_FCS_H__ - -#include -#include -#include -#include -#include - -#define BFA_FCS_OS_STR_LEN 64 - -struct bfa_fcs_stats_s { - struct { - u32 untagged; /* untagged receive frames */ - u32 tagged; /* tagged receive frames */ - u32 vfid_unknown; /* VF id is unknown */ - } uf; -}; - -struct bfa_fcs_driver_info_s { - u8 version[BFA_VERSION_LEN]; /* Driver Version */ - u8 host_machine_name[BFA_FCS_OS_STR_LEN]; - u8 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */ - u8 host_os_patch[BFA_FCS_OS_STR_LEN];/* patch or service pack */ - u8 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */ -}; - -struct bfa_fcs_s { - struct bfa_s *bfa; /* corresponding BFA bfa instance */ - struct bfad_s *bfad; /* corresponding BDA driver instance */ - struct bfa_log_mod_s *logm; /* driver logging module instance */ - struct bfa_trc_mod_s *trcmod; /* tracing module */ - struct bfa_aen_s *aen; /* aen component */ - bfa_boolean_t vf_enabled; /* VF mode is enabled */ - bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */ - bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ - u16 port_vfid; /* port default VF ID */ - struct bfa_fcs_driver_info_s driver_info; - struct bfa_fcs_fabric_s fabric; /* base fabric state machine */ - struct bfa_fcs_stats_s stats; /* FCS statistics */ - struct bfa_wc_s wc; /* waiting counter */ -}; - -/* - * bfa fcs API functions - */ -void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, - struct bfad_s *bfad, bfa_boolean_t min_cfg); -void bfa_fcs_init(struct bfa_fcs_s *fcs); -void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, - struct bfa_fcs_driver_info_s *driver_info); -void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable); -void bfa_fcs_exit(struct bfa_fcs_s *fcs); -void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); -void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod); -void bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen); -void bfa_fcs_start(struct 
bfa_fcs_s *fcs); - -#endif /* __BFA_FCS_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h deleted file mode 100644 index 28c4c9ff08b3..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_auth.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FCS_AUTH_H__ -#define __BFA_FCS_AUTH_H__ - -struct bfa_fcs_s; - -#include -#include -#include -#include -#include -#include -#include -#include - -struct bfa_fcs_fabric_s; - - - -struct bfa_fcs_auth_s { - bfa_sm_t sm; /* state machine */ - bfa_boolean_t policy; /* authentication enabled/disabled */ - enum bfa_auth_status status; /* authentication status */ - enum auth_rjt_codes rjt_code; /* auth reject status */ - enum auth_rjt_code_exps rjt_code_exp; /* auth reject reason */ - enum bfa_auth_algo algo; /* Authentication algorithm */ - struct bfa_auth_stats_s stats; /* Statistics */ - enum auth_dh_gid group; /* DH(diffie-hellman) Group */ - enum bfa_auth_secretsource source; /* Secret source */ - char secret[BFA_AUTH_SECRET_STRING_LEN]; - /* secret string */ - u8 secret_len; - /* secret string length */ - u8 nretries; - /* number of retries */ - struct bfa_fcs_fabric_s *fabric;/* pointer to fabric */ - u8 sentcode; /* pointer to response data */ - u8 *response; /* pointer to response data */ - struct bfa_timer_s delay_timer; /* delay timer */ - struct bfa_fcxp_s *fcxp; /* pointer to fcxp */ - struct bfa_fcxp_wqe_s fcxp_wqe; -}; - -/** - * bfa fcs authentication public functions - */ -bfa_status_t bfa_fcs_auth_get_attr(struct bfa_fcs_s *port, - struct bfa_auth_attr_s *attr); -bfa_status_t bfa_fcs_auth_set_policy(struct bfa_fcs_s *port, - bfa_boolean_t policy); -enum bfa_auth_status bfa_fcs_auth_get_status(struct bfa_fcs_s *port); -bfa_status_t bfa_fcs_auth_set_algo(struct bfa_fcs_s *port, - enum bfa_auth_algo algo); -bfa_status_t bfa_fcs_auth_get_stats(struct bfa_fcs_s *port, - struct bfa_auth_stats_s *stats); -bfa_status_t bfa_fcs_auth_set_dh_group(struct bfa_fcs_s *port, int group); -bfa_status_t bfa_fcs_auth_set_secretstring(struct bfa_fcs_s *port, - char *secret); -bfa_status_t bfa_fcs_auth_set_secretstring_encrypt(struct bfa_fcs_s *port, - u32 secret[], u32 len); -bfa_status_t bfa_fcs_auth_set_secretsource(struct bfa_fcs_s *port, - enum bfa_auth_secretsource src); -bfa_status_t bfa_fcs_auth_reset_stats(struct bfa_fcs_s *port); -bfa_status_t bfa_fcs_auth_reinit(struct bfa_fcs_s *port); - -#endif /* __BFA_FCS_AUTH_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h deleted file mode 100644 index 08b79d5e46f3..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fabric.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_FCS_FABRIC_H__ -#define __BFA_FCS_FABRIC_H__ - -struct bfa_fcs_s; - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * forward declaration - */ -struct bfad_vf_s; - -enum bfa_fcs_fabric_type { - BFA_FCS_FABRIC_UNKNOWN = 0, - BFA_FCS_FABRIC_SWITCHED = 1, - BFA_FCS_FABRIC_PLOOP = 2, - BFA_FCS_FABRIC_N2N = 3, -}; - - -struct bfa_fcs_fabric_s { - struct list_head qe; /* queue element */ - bfa_sm_t sm; /* state machine */ - struct bfa_fcs_s *fcs; /* FCS instance */ - struct bfa_fcs_port_s bport; /* base logical port */ - enum bfa_fcs_fabric_type fab_type; /* fabric type */ - enum bfa_pport_type oper_type; /* current link topology */ - u8 is_vf; /* is virtual fabric? */ - u8 is_npiv; /* is NPIV supported ? */ - u8 is_auth; /* is Security/Auth supported ? */ - u16 bb_credit; /* BB credit from fabric */ - u16 vf_id; /* virtual fabric ID */ - u16 num_vports; /* num vports */ - u16 rsvd; - struct list_head vport_q; /* queue of virtual ports */ - struct list_head vf_q; /* queue of virtual fabrics */ - struct bfad_vf_s *vf_drv; /* driver vf structure */ - struct bfa_timer_s link_timer; /* Link Failure timer. Vport */ - wwn_t fabric_name; /* attached fabric name */ - bfa_boolean_t auth_reqd; /* authentication required */ - struct bfa_timer_s delay_timer; /* delay timer */ - union { - u16 swp_vfid;/* switch port VF id */ - } event_arg; - struct bfa_fcs_auth_s auth; /* authentication config */ - struct bfa_wc_s wc; /* wait counter for delete */ - struct bfa_vf_stats_s stats; /* fabric/vf stats */ - struct bfa_lps_s *lps; /* lport login services */ - u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached - * fabric's ip addr - */ -}; - -#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv) -#define bfa_fcs_fabric_is_switched(__f) \ - ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) - -/** - * The design calls for a single implementation of base fabric and vf. 
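The two macros above reduce fabric capability checks to simple field tests: is_npiv for NPIV support and fab_type == BFA_FCS_FABRIC_SWITCHED for a switched fabric. A standalone sketch applying the same checks to a trimmed struct; fabric_view and the two lowercase macros are local stand-ins, not the driver's names.

#include <stdio.h>

/* Trimmed local copies: only the two fields the macros below look at. */
enum bfa_fcs_fabric_type {
    BFA_FCS_FABRIC_UNKNOWN  = 0,
    BFA_FCS_FABRIC_SWITCHED = 1,
    BFA_FCS_FABRIC_PLOOP    = 2,
    BFA_FCS_FABRIC_N2N      = 3,
};

struct fabric_view {
    enum bfa_fcs_fabric_type fab_type;
    unsigned char is_npiv;
};

/* Same shape as the deleted helpers, applied to the trimmed struct. */
#define fabric_npiv_capable(__f)  ((__f)->is_npiv)
#define fabric_is_switched(__f)   ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)

int main(void)
{
    struct fabric_view f = { .fab_type = BFA_FCS_FABRIC_SWITCHED, .is_npiv = 1 };

    /* NPIV vports only make sense on a switched fabric that advertises NPIV. */
    if (fabric_is_switched(&f) && fabric_npiv_capable(&f))
        printf("fabric supports NPIV vports\n");
    return 0;
}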
- */ -#define bfa_fcs_vf_t struct bfa_fcs_fabric_s - -struct bfa_vf_event_s { - u32 undefined; -}; - -/** - * bfa fcs vf public functions - */ -bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id); -bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs); -bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, - u16 vf_id, struct bfa_port_cfg_s *port_cfg, - struct bfad_vf_s *vf_drv); -bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf); -void bfa_fcs_vf_start(bfa_fcs_vf_t *vf); -bfa_status_t bfa_fcs_vf_stop(bfa_fcs_vf_t *vf); -void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); -void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); -void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr); -void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, - struct bfa_vf_stats_s *vf_stats); -void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf); -void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); -bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); -struct bfad_vf_s *bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf); - -#endif /* __BFA_FCS_FABRIC_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h deleted file mode 100644 index 9a35ecf5cdf0..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fcpim.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_fcpim.h BFA FCS FCP Initiator Mode interfaces/defines. 
- */ - -#ifndef __BFA_FCS_FCPIM_H__ -#define __BFA_FCS_FCPIM_H__ - -#include -#include -#include -#include -#include -#include - -/* - * forward declarations - */ -struct bfad_itnim_s; - -struct bfa_fcs_itnim_s { - bfa_sm_t sm; /* state machine */ - struct bfa_fcs_rport_s *rport; /* parent remote rport */ - struct bfad_itnim_s *itnim_drv; /* driver peer instance */ - struct bfa_fcs_s *fcs; /* fcs instance */ - struct bfa_timer_s timer; /* timer functions */ - struct bfa_itnim_s *bfa_itnim; /* BFA itnim struct */ - u32 prli_retries; /* max prli retry attempts */ - bfa_boolean_t seq_rec; /* seq recovery support */ - bfa_boolean_t rec_support; /* REC supported */ - bfa_boolean_t conf_comp; /* FCP_CONF support */ - bfa_boolean_t task_retry_id; /* task retry id supp */ - struct bfa_fcxp_wqe_s fcxp_wqe; /* wait qelem for fcxp */ - struct bfa_fcxp_s *fcxp; /* FCXP in use */ - struct bfa_itnim_stats_s stats; /* itn statistics */ -}; - - -static inline struct bfad_port_s * -bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->port->bfad_port; -} - - -static inline struct bfa_fcs_port_s * -bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->port; -} - - -static inline wwn_t -bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->nwwn; -} - - -static inline wwn_t -bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->pwwn; -} - - -static inline u32 -bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->pid; -} - - -static inline u32 -bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->maxfrsize; -} - - -static inline enum fc_cos -bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->rport->fc_cos; -} - - -static inline struct bfad_itnim_s * -bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->itnim_drv; -} - - -static inline struct bfa_itnim_s * -bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim) -{ - return itnim->bfa_itnim; -} - -/** - * bfa fcs FCP Initiator mode API functions - */ -void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim, - struct bfa_itnim_attr_s *attr); -void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim, - struct bfa_itnim_stats_s *stats); -struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, - wwn_t rpwwn); -bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, - struct bfa_itnim_attr_s *attr); -bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn, - struct bfa_itnim_stats_s *stats); -bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, - wwn_t rpwwn); -#endif /* __BFA_FCS_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h deleted file mode 100644 index 4441fffc9c82..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_fdmi.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
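The inline accessors above all follow the same pointer chain, itnim -> rport -> field (or -> port). A standalone sketch of that pattern with trimmed stand-in structs, assuming wwn_t is a 64-bit WWN; the example WWN and FC address are made up.

#include <stdio.h>
#include <stdint.h>

/* Just enough of the itnim/rport relationship to mirror the deleted accessors. */
typedef uint64_t wwn_t;

struct rport_view {
    wwn_t pwwn;
    wwn_t nwwn;
    uint32_t pid;           /* FC address */
};

struct itnim_view {
    struct rport_view *rport;   /* parent remote port */
};

static wwn_t itnim_get_pwwn(const struct itnim_view *itnim)
{
    return itnim->rport->pwwn;      /* same chain as bfa_fcs_itnim_get_pwwn() */
}

static uint32_t itnim_get_fcid(const struct itnim_view *itnim)
{
    return itnim->rport->pid;       /* same chain as bfa_fcs_itnim_get_fcid() */
}

int main(void)
{
    struct rport_view rp = { .pwwn = 0x20000000c9000002ULL, .pid = 0x0a0100 };
    struct itnim_view it = { .rport = &rp };

    printf("pwwn=%016llx fcid=%06x\n",
           (unsigned long long)itnim_get_pwwn(&it),
           (unsigned)itnim_get_fcid(&it));
    return 0;
}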
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_fdmi.h BFA fcs fdmi module public interface - */ - -#ifndef __BFA_FCS_FDMI_H__ -#define __BFA_FCS_FDMI_H__ -#include -#include - -#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \ - FDMI_TRANS_SPEED_2G | \ - FDMI_TRANS_SPEED_4G | \ - FDMI_TRANS_SPEED_8G) - -/* -* HBA Attribute Block : BFA internal representation. Note : Some variable -* sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based - * on this the size has been reduced to 16 bytes from the standard's 64 bytes. - */ -struct bfa_fcs_fdmi_hba_attr_s { - wwn_t node_name; - u8 manufacturer[64]; - u8 serial_num[64]; - u8 model[16]; - u8 model_desc[256]; - u8 hw_version[8]; - u8 driver_version[8]; - u8 option_rom_ver[BFA_VERSION_LEN]; - u8 fw_version[8]; - u8 os_name[256]; - u32 max_ct_pyld; -}; - -/* - * Port Attribute Block - */ -struct bfa_fcs_fdmi_port_attr_s { - u8 supp_fc4_types[32]; /* supported FC4 types */ - u32 supp_speed; /* supported speed */ - u32 curr_speed; /* current Speed */ - u32 max_frm_size; /* max frame size */ - u8 os_device_name[256]; /* OS device Name */ - u8 host_name[256]; /* host name */ -}; - -#endif /* __BFA_FCS_FDMI_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h deleted file mode 100644 index ceaefd3060f4..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_port.h BFA fcs port module public interface - */ - -#ifndef __BFA_FCS_PORT_H__ -#define __BFA_FCS_PORT_H__ - -#include -#include -#include -#include -#include -#include -#include - -struct bfa_fcs_s; -struct bfa_fcs_fabric_s; - -/* - * Maximum Rports supported per port (physical/logical). 
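A sketch of filling the trimmed HBA attribute block above; the strings are placeholders (real values are queried from the IOC/adapter), the casts are needed because the fields are u8 arrays, and CT payload sizing and byte ordering are left out:

static void
fdmi_fill_hba_attr(struct bfa_fcs_fdmi_hba_attr_s *attr)
{
	memset(attr, 0, sizeof(*attr));
	/* placeholder identification strings, truncated to the field widths */
	strncpy((char *)attr->manufacturer, "Brocade",
		sizeof(attr->manufacturer) - 1);
	strncpy((char *)attr->model, "Brocade", sizeof(attr->model) - 1);
	attr->max_ct_pyld = 2048;	/* example CT payload size, in bytes */
}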
- */ -#define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */ - - -struct bfa_fcs_port_ns_s { - bfa_sm_t sm; /* state machine */ - struct bfa_timer_s timer; - struct bfa_fcs_port_s *port; /* parent port */ - struct bfa_fcxp_s *fcxp; - struct bfa_fcxp_wqe_s fcxp_wqe; -}; - - -struct bfa_fcs_port_scn_s { - bfa_sm_t sm; /* state machine */ - struct bfa_timer_s timer; - struct bfa_fcs_port_s *port; /* parent port */ - struct bfa_fcxp_s *fcxp; - struct bfa_fcxp_wqe_s fcxp_wqe; -}; - - -struct bfa_fcs_port_fdmi_s { - bfa_sm_t sm; /* state machine */ - struct bfa_timer_s timer; - struct bfa_fcs_port_ms_s *ms; /* parent ms */ - struct bfa_fcxp_s *fcxp; - struct bfa_fcxp_wqe_s fcxp_wqe; - u8 retry_cnt; /* retry count */ - u8 rsvd[3]; -}; - - -struct bfa_fcs_port_ms_s { - bfa_sm_t sm; /* state machine */ - struct bfa_timer_s timer; - struct bfa_fcs_port_s *port; /* parent port */ - struct bfa_fcxp_s *fcxp; - struct bfa_fcxp_wqe_s fcxp_wqe; - struct bfa_fcs_port_fdmi_s fdmi; /* FDMI component of MS */ - u8 retry_cnt; /* retry count */ - u8 rsvd[3]; -}; - - -struct bfa_fcs_port_fab_s { - struct bfa_fcs_port_ns_s ns; /* NS component of port */ - struct bfa_fcs_port_scn_s scn; /* scn component of port */ - struct bfa_fcs_port_ms_s ms; /* MS component of port */ -}; - - - -#define MAX_ALPA_COUNT 127 - -struct bfa_fcs_port_loop_s { - u8 num_alpa; /* Num of ALPA entries in the map */ - u8 alpa_pos_map[MAX_ALPA_COUNT]; /* ALPA Positional - *Map */ - struct bfa_fcs_port_s *port; /* parent port */ -}; - - - -struct bfa_fcs_port_n2n_s { - u32 rsvd; - u16 reply_oxid; /* ox_id from the req flogi to be - *used in flogi acc */ - wwn_t rem_port_wwn; /* Attached port's wwn */ -}; - - -union bfa_fcs_port_topo_u { - struct bfa_fcs_port_fab_s pfab; - struct bfa_fcs_port_loop_s ploop; - struct bfa_fcs_port_n2n_s pn2n; -}; - - -struct bfa_fcs_port_s { - struct list_head qe; /* used by port/vport */ - bfa_sm_t sm; /* state machine */ - struct bfa_fcs_fabric_s *fabric;/* parent fabric */ - struct bfa_port_cfg_s port_cfg;/* port configuration */ - struct bfa_timer_s link_timer; /* timer for link offline */ - u32 pid:24; /* FC address */ - u8 lp_tag; /* lport tag */ - u16 num_rports; /* Num of r-ports */ - struct list_head rport_q; /* queue of discovered r-ports */ - struct bfa_fcs_s *fcs; /* FCS instance */ - union bfa_fcs_port_topo_u port_topo; /* fabric/loop/n2n details */ - struct bfad_port_s *bfad_port; /* driver peer instance */ - struct bfa_fcs_vport_s *vport; /* NULL for base ports */ - struct bfa_fcxp_s *fcxp; - struct bfa_fcxp_wqe_s fcxp_wqe; - struct bfa_port_stats_s stats; - struct bfa_wc_s wc; /* waiting counter for events */ -}; - -#define bfa_fcs_lport_t struct bfa_fcs_port_s - -/** - * Symbolic Name related defines - * Total bytes 255. - * Physical Port's symbolic name 128 bytes. - * For Vports, Vport's symbolic name is appended to the Physical port's - * Symbolic Name. - * - * Physical Port's symbolic name Format : (Total 128 bytes) - * Adapter Model number/name : 12 bytes - * Driver Version : 10 bytes - * Host Machine Name : 30 bytes - * Host OS Info : 48 bytes - * Host OS PATCH Info : 16 bytes - * ( remaining 12 bytes reserved to be used for separator) - */ -#define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | " - -#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12 -#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10 -#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30 -#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 -#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 - -/** - * Get FC port ID for a logical port. 
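The budget above works out to 12 + 10 + 30 + 48 + 16 = 116 bytes of fields plus four 3-byte separators, i.e. 128 bytes total. A minimal sketch of assembling it with placeholder field values (real code derives them from adapter, driver and host queries):

static void
build_psym_name(char *buf, size_t buflen)
{
	snprintf(buf, buflen, "%.*s%s%.*s%s%.*s%s%.*s%s%.*s",
		BFA_FCS_PORT_SYMBNAME_MODEL_SZ, "Brocade-425",
		BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		BFA_FCS_PORT_SYMBNAME_VERSION_SZ, "2.1.0.0",
		BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ, "host-01",
		BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		BFA_FCS_PORT_SYMBNAME_OSINFO_SZ, "Linux 2.6.35",
		BFA_FCS_PORT_SYMBNAME_SEPARATOR,
		BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ, "ga-1");
}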
- */ -#define bfa_fcs_port_get_fcid(_lport) ((_lport)->pid) -#define bfa_fcs_port_get_pwwn(_lport) ((_lport)->port_cfg.pwwn) -#define bfa_fcs_port_get_nwwn(_lport) ((_lport)->port_cfg.nwwn) -#define bfa_fcs_port_get_psym_name(_lport) ((_lport)->port_cfg.sym_name) -#define bfa_fcs_port_is_initiator(_lport) \ - ((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_IM) -#define bfa_fcs_port_is_target(_lport) \ - ((_lport)->port_cfg.roles & BFA_PORT_ROLE_FCP_TM) -#define bfa_fcs_port_get_nrports(_lport) \ - ((_lport) ? (_lport)->num_rports : 0) - -static inline struct bfad_port_s * -bfa_fcs_port_get_drvport(struct bfa_fcs_port_s *port) -{ - return port->bfad_port; -} - - -#define bfa_fcs_port_get_opertype(_lport) ((_lport)->fabric->oper_type) - - -#define bfa_fcs_port_get_fabric_name(_lport) ((_lport)->fabric->fabric_name) - - -#define bfa_fcs_port_get_fabric_ipaddr(_lport) \ - ((_lport)->fabric->fabric_ip_addr) - -/** - * bfa fcs port public functions - */ -void bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, - struct bfa_port_cfg_s *port_cfg); -struct bfa_fcs_port_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); -void bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, - wwn_t rport_wwns[], int *nrports); - -wwn_t bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, - int index, int nrports, bfa_boolean_t bwwn); - -struct bfa_fcs_port_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, - u16 vf_id, wwn_t lpwwn); - -void bfa_fcs_port_get_info(struct bfa_fcs_port_s *port, - struct bfa_port_info_s *port_info); -void bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, - struct bfa_port_attr_s *port_attr); -void bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port, - struct bfa_port_stats_s *port_stats); -void bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port); -enum bfa_pport_speed bfa_fcs_port_get_rport_max_speed( - struct bfa_fcs_port_s *port); -void bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port); -void bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port); - -#endif /* __BFA_FCS_PORT_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h deleted file mode 100644 index 3027fc6c7722..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_rport.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
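A brief sketch of the accessor macros above in use (kernel printk assumed in scope):

static void
lport_print(struct bfa_fcs_port_s *port)
{
	if (bfa_fcs_port_is_initiator(port))
		pr_info("lport 0x%06x: FCP initiator, %d rports\n",
			bfa_fcs_port_get_fcid(port),
			bfa_fcs_port_get_nrports(port));
}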
- */ - -#ifndef __BFA_FCS_RPORT_H__ -#define __BFA_FCS_RPORT_H__ - -#include -#include -#include -#include - -#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */ -/* - * forward declarations - */ -struct bfad_rport_s; - -struct bfa_fcs_itnim_s; -struct bfa_fcs_tin_s; -struct bfa_fcs_iprp_s; - -/* Rport Features (RPF) */ -struct bfa_fcs_rpf_s { - bfa_sm_t sm; /* state machine */ - struct bfa_fcs_rport_s *rport; /* parent rport */ - struct bfa_timer_s timer; /* general purpose timer */ - struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ - struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */ - int rpsc_retries; /* max RPSC retry attempts */ - enum bfa_pport_speed rpsc_speed; /* Current Speed from RPSC. - * O if RPSC fails */ - enum bfa_pport_speed assigned_speed; /* Speed assigned by the user. - * will be used if RPSC is not - * supported by the rport */ -}; - -struct bfa_fcs_rport_s { - struct list_head qe; /* used by port/vport */ - struct bfa_fcs_port_s *port; /* parent FCS port */ - struct bfa_fcs_s *fcs; /* fcs instance */ - struct bfad_rport_s *rp_drv; /* driver peer instance */ - u32 pid; /* port ID of rport */ - u16 maxfrsize; /* maximum frame size */ - u16 reply_oxid; /* OX_ID of inbound requests */ - enum fc_cos fc_cos; /* FC classes of service supp */ - bfa_boolean_t cisc; /* CISC capable device */ - bfa_boolean_t prlo; /* processing prlo or LOGO */ - wwn_t pwwn; /* port wwn of rport */ - wwn_t nwwn; /* node wwn of rport */ - struct bfa_rport_symname_s psym_name; /* port symbolic name */ - bfa_sm_t sm; /* state machine */ - struct bfa_timer_s timer; /* general purpose timer */ - struct bfa_fcs_itnim_s *itnim; /* ITN initiator mode role */ - struct bfa_fcs_tin_s *tin; /* ITN initiator mode role */ - struct bfa_fcs_iprp_s *iprp; /* IP/FC role */ - struct bfa_rport_s *bfa_rport; /* BFA Rport */ - struct bfa_fcxp_s *fcxp; /* FCXP needed for discarding */ - int plogi_retries; /* max plogi retry attempts */ - int ns_retries; /* max NS query retry attempts */ - struct bfa_fcxp_wqe_s fcxp_wqe; /* fcxp wait queue element */ - struct bfa_rport_stats_s stats; /* rport stats */ - enum bfa_rport_function scsi_function; /* Initiator/Target */ - struct bfa_fcs_rpf_s rpf; /* Rport features module */ -}; - -static inline struct bfa_rport_s * -bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport) -{ - return rport->bfa_rport; -} - -/** - * bfa fcs rport API functions - */ -bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn, - struct bfa_fcs_rport_s *rport, - struct bfad_rport_s *rport_drv); -bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport); -void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, - struct bfa_rport_attr_s *attr); -void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, - struct bfa_rport_stats_s *stats); -void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport); -struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port, - wwn_t rpwwn); -struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( - struct bfa_fcs_port_s *port, wwn_t rnwwn); -void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); -void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, - enum bfa_pport_speed speed); -#endif /* __BFA_FCS_RPORT_H__ */ diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h deleted file mode 100644 index 0af262430860..000000000000 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_vport.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade 
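A sketch of the lookup/statistics API above: snapshot and then clear the counters of one remote port, found by its port WWN. struct bfa_rport_stats_s is defined in the driver's defs headers (not shown here); locking and error reporting are omitted.

static void
rport_sample_stats(struct bfa_fcs_port_s *port, wwn_t rpwwn)
{
	struct bfa_fcs_rport_s *rport;
	struct bfa_rport_stats_s stats;

	rport = bfa_fcs_rport_lookup(port, rpwwn);
	if (rport == NULL)
		return;

	bfa_fcs_rport_get_stats(rport, &stats);	/* copy out current counters */
	bfa_fcs_rport_clear_stats(rport);	/* then reset them */
}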
Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_vport.h BFA fcs vport module public interface - */ - -#ifndef __BFA_FCS_VPORT_H__ -#define __BFA_FCS_VPORT_H__ - -#include -#include -#include -#include -#include - -struct bfa_fcs_vport_s { - struct list_head qe; /* queue elem */ - bfa_sm_t sm; /* state machine */ - bfa_fcs_lport_t lport; /* logical port */ - struct bfa_timer_s timer; /* general purpose timer */ - struct bfad_vport_s *vport_drv; /* Driver private */ - struct bfa_vport_stats_s vport_stats; /* vport statistics */ - struct bfa_lps_s *lps; /* Lport login service */ - int fdisc_retries; -}; - -#define bfa_fcs_vport_get_port(vport) \ - ((struct bfa_fcs_port_s *)(&vport->port)) - -/** - * bfa fcs vport public functions - */ -bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, - struct bfa_fcs_s *fcs, u16 vf_id, - struct bfa_port_cfg_s *port_cfg, - struct bfad_vport_s *vport_drv); -bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, - struct bfa_fcs_s *fcs, uint16_t vf_id, - struct bfa_port_cfg_s *port_cfg, - struct bfad_vport_s *vport_drv); -bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport); -bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); -bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); -void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, - struct bfa_vport_attr_s *vport_attr); -void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, - struct bfa_vport_stats_s *vport_stats); -void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport); -struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, - u16 vf_id, wwn_t vpwwn); - -#endif /* __BFA_FCS_VPORT_H__ */ diff --git a/drivers/scsi/bfa/include/log/bfa_log_fcs.h b/drivers/scsi/bfa/include/log/bfa_log_fcs.h deleted file mode 100644 index b6f5df8827f8..000000000000 --- a/drivers/scsi/bfa/include/log/bfa_log_fcs.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/* - * messages define for FCS Module - */ -#ifndef __BFA_LOG_FCS_H__ -#define __BFA_LOG_FCS_H__ -#include -#define BFA_LOG_FCS_FABRIC_NOSWITCH \ - (((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 1) -#define BFA_LOG_FCS_FABRIC_ISOLATED \ - (((u32) BFA_LOG_FCS_ID << BFA_LOG_MODID_OFFSET) | 2) -#endif diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h deleted file mode 100644 index 5f8f5e30b9e8..000000000000 --- a/drivers/scsi/bfa/include/log/bfa_log_hal.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/* messages define for HAL Module */ -#ifndef __BFA_LOG_HAL_H__ -#define __BFA_LOG_HAL_H__ -#include -#define BFA_LOG_HAL_ASSERT \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 1) -#define BFA_LOG_HAL_HEARTBEAT_FAILURE \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 2) -#define BFA_LOG_HAL_FCPIM_PARM_INVALID \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3) -#define BFA_LOG_HAL_SM_ASSERT \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4) -#define BFA_LOG_HAL_DRIVER_ERROR \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5) -#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6) -#define BFA_LOG_HAL_MBOX_ERROR \ - (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7) -#endif diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h deleted file mode 100644 index 44bc89768bda..000000000000 --- a/drivers/scsi/bfa/include/log/bfa_log_linux.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
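Every message ID above is composed as (module_id << BFA_LOG_MODID_OFFSET) | message_number, with BFA_LOG_MODID_OFFSET coming from the driver's common log header (not shown here). As a sketch, hypothetical helpers for splitting an ID back apart, assuming message numbers fit below the offset:

/* hypothetical helpers, not part of the driver */
#define LOG_MSGID_MODULE(msgid)	((msgid) >> BFA_LOG_MODID_OFFSET)
#define LOG_MSGID_INDEX(msgid)	((msgid) & ((1u << BFA_LOG_MODID_OFFSET) - 1))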
- */ - -/* messages define for LINUX Module */ -#ifndef __BFA_LOG_LINUX_H__ -#define __BFA_LOG_LINUX_H__ -#include -#define BFA_LOG_LINUX_DEVICE_CLAIMED \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 1) -#define BFA_LOG_LINUX_HASH_INIT_FAILED \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 2) -#define BFA_LOG_LINUX_SYSFS_FAILED \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 3) -#define BFA_LOG_LINUX_MEM_ALLOC_FAILED \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 4) -#define BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 5) -#define BFA_LOG_LINUX_ITNIM_FREE \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 6) -#define BFA_LOG_LINUX_ITNIM_ONLINE \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 7) -#define BFA_LOG_LINUX_ITNIM_OFFLINE \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 8) -#define BFA_LOG_LINUX_SCSI_HOST_FREE \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 9) -#define BFA_LOG_LINUX_SCSI_ABORT \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10) -#define BFA_LOG_LINUX_SCSI_ABORT_COMP \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11) -#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12) -#define BFA_LOG_LINUX_BNA_STATE_MACHINE \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13) -#define BFA_LOG_LINUX_IOC_ERROR \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14) -#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15) -#define BFA_LOG_LINUX_RING_BUFFER_ERROR \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16) -#define BFA_LOG_LINUX_DRIVER_ERROR \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17) -#define BFA_LOG_LINUX_DRIVER_INFO \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18) -#define BFA_LOG_LINUX_DRIVER_DIAG \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19) -#define BFA_LOG_LINUX_DRIVER_AEN \ - (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 20) -#endif diff --git a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h b/drivers/scsi/bfa/include/log/bfa_log_wdrv.h deleted file mode 100644 index 809a95f7afe2..000000000000 --- a/drivers/scsi/bfa/include/log/bfa_log_wdrv.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/* - * messages define for WDRV Module - */ -#ifndef __BFA_LOG_WDRV_H__ -#define __BFA_LOG_WDRV_H__ -#include -#define BFA_LOG_WDRV_IOC_INIT_ERROR \ - (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 1) -#define BFA_LOG_WDRV_IOC_INTERNAL_ERROR \ - (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 2) -#define BFA_LOG_WDRV_IOC_START_ERROR \ - (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 3) -#define BFA_LOG_WDRV_IOC_STOP_ERROR \ - (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 4) -#define BFA_LOG_WDRV_INSUFFICIENT_RESOURCES \ - (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 5) -#define BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR \ - (((u32) BFA_LOG_WDRV_ID << BFA_LOG_MODID_OFFSET) | 6) -#endif diff --git a/drivers/scsi/bfa/include/protocol/ct.h b/drivers/scsi/bfa/include/protocol/ct.h deleted file mode 100644 index b82540a230c4..000000000000 --- a/drivers/scsi/bfa/include/protocol/ct.h +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __CT_H__ -#define __CT_H__ - -#include - -#pragma pack(1) - -struct ct_hdr_s{ - u32 rev_id:8; /* Revision of the CT */ - u32 in_id:24; /* Initiator Id */ - u32 gs_type:8; /* Generic service Type */ - u32 gs_sub_type:8; /* Generic service sub type */ - u32 options:8; /* options */ - u32 rsvrd:8; /* reserved */ - u32 cmd_rsp_code:16;/* ct command/response code */ - u32 max_res_size:16;/* maximum/residual size */ - u32 frag_id:8; /* fragment ID */ - u32 reason_code:8; /* reason code */ - u32 exp_code:8; /* explanation code */ - u32 vendor_unq:8; /* vendor unique */ -}; - -/* - * defines for the Revision - */ -enum { - CT_GS3_REVISION = 0x01, -}; - -/* - * defines for gs_type - */ -enum { - CT_GSTYPE_KEYSERVICE = 0xF7, - CT_GSTYPE_ALIASSERVICE = 0xF8, - CT_GSTYPE_MGMTSERVICE = 0xFA, - CT_GSTYPE_TIMESERVICE = 0xFB, - CT_GSTYPE_DIRSERVICE = 0xFC, -}; - -/* - * defines for gs_sub_type for gs type directory service - */ -enum { - CT_GSSUBTYPE_NAMESERVER = 0x02, -}; - -/* - * defines for gs_sub_type for gs type management service - */ -enum { - CT_GSSUBTYPE_CFGSERVER = 0x01, - CT_GSSUBTYPE_UNZONED_NS = 0x02, - CT_GSSUBTYPE_ZONESERVER = 0x03, - CT_GSSUBTYPE_LOCKSERVER = 0x04, - CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10, /* for FDMI */ -}; - -/* - * defines for CT response code field - */ -enum { - CT_RSP_REJECT = 0x8001, - CT_RSP_ACCEPT = 0x8002, -}; - -/* - * definitions for CT reason code - */ -enum { - CT_RSN_INV_CMD = 0x01, - CT_RSN_INV_VER = 0x02, - CT_RSN_LOGIC_ERR = 0x03, - CT_RSN_INV_SIZE = 0x04, - CT_RSN_LOGICAL_BUSY = 0x05, - CT_RSN_PROTO_ERR = 0x07, - CT_RSN_UNABLE_TO_PERF = 0x09, - CT_RSN_NOT_SUPP = 0x0B, - CT_RSN_SERVER_NOT_AVBL = 0x0D, - CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E, - CT_RSN_VENDOR_SPECIFIC = 0xFF, - -}; - -/* - * definitions for explanations code for Name server - */ -enum { - CT_NS_EXP_NOADDITIONAL = 0x00, - CT_NS_EXP_ID_NOT_REG = 0x01, - CT_NS_EXP_PN_NOT_REG = 0x02, - CT_NS_EXP_NN_NOT_REG = 0x03, - 
CT_NS_EXP_CS_NOT_REG = 0x04, - CT_NS_EXP_IPN_NOT_REG = 0x05, - CT_NS_EXP_IPA_NOT_REG = 0x06, - CT_NS_EXP_FT_NOT_REG = 0x07, - CT_NS_EXP_SPN_NOT_REG = 0x08, - CT_NS_EXP_SNN_NOT_REG = 0x09, - CT_NS_EXP_PT_NOT_REG = 0x0A, - CT_NS_EXP_IPP_NOT_REG = 0x0B, - CT_NS_EXP_FPN_NOT_REG = 0x0C, - CT_NS_EXP_HA_NOT_REG = 0x0D, - CT_NS_EXP_FD_NOT_REG = 0x0E, - CT_NS_EXP_FF_NOT_REG = 0x0F, - CT_NS_EXP_ACCESSDENIED = 0x10, - CT_NS_EXP_UNACCEPTABLE_ID = 0x11, - CT_NS_EXP_DATABASEEMPTY = 0x12, - CT_NS_EXP_NOT_REG_IN_SCOPE = 0x13, - CT_NS_EXP_DOM_ID_NOT_PRESENT = 0x14, - CT_NS_EXP_PORT_NUM_NOT_PRESENT = 0x15, - CT_NS_EXP_NO_DEVICE_ATTACHED = 0x16 -}; - -/* - * definitions for the explanation code for all servers - */ -enum { - CT_EXP_AUTH_EXCEPTION = 0xF1, - CT_EXP_DB_FULL = 0xF2, - CT_EXP_DB_EMPTY = 0xF3, - CT_EXP_PROCESSING_REQ = 0xF4, - CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5, - CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6 -}; - -/* - * Command codes for Name server - */ -enum { - GS_GID_PN = 0x0121, /* Get Id on port name */ - GS_GPN_ID = 0x0112, /* Get port name on ID */ - GS_GNN_ID = 0x0113, /* Get node name on ID */ - GS_GID_FT = 0x0171, /* Get Id on FC4 type */ - GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */ - GS_RFT_ID = 0x0217, /* Register fc4type on ID */ - GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */ - GS_RPN_ID = 0x0212, /* Register port name */ - GS_RNN_ID = 0x0213, /* Register node name */ - GS_RCS_ID = 0x0214, /* Register class of service */ - GS_RPT_ID = 0x021A, /* Register port type */ - GS_GA_NXT = 0x0100, /* Get all next */ - GS_RFF_ID = 0x021F, /* Register FC4 Feature */ -}; - -struct fcgs_id_req_s{ - u32 rsvd:8; - u32 dap:24; /* port identifier */ -}; -#define fcgs_gpnid_req_t struct fcgs_id_req_s -#define fcgs_gnnid_req_t struct fcgs_id_req_s -#define fcgs_gspnid_req_t struct fcgs_id_req_s - -struct fcgs_gidpn_req_s{ - wwn_t port_name; /* port wwn */ -}; - -struct fcgs_gidpn_resp_s{ - u32 rsvd:8; - u32 dap:24; /* port identifier */ -}; - -/** - * RFT_ID - */ -struct fcgs_rftid_req_s { - u32 rsvd:8; - u32 dap:24; /* port identifier */ - u32 fc4_type[8]; /* fc4 types */ -}; - -/** - * RFF_ID : Register FC4 features. 
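A sketch of a name-server GID_PN query built from the CT header and request layouts above; the illustrative helper leaves out on-wire byte ordering, max_res_size and the actual send path:

static void
ct_build_gidpn(struct ct_hdr_s *cthdr, struct fcgs_gidpn_req_s *gidpn,
		wwn_t port_name)
{
	memset(cthdr, 0, sizeof(*cthdr));
	cthdr->rev_id       = CT_GS3_REVISION;
	cthdr->gs_type      = CT_GSTYPE_DIRSERVICE;
	cthdr->gs_sub_type  = CT_GSSUBTYPE_NAMESERVER;
	cthdr->cmd_rsp_code = GS_GID_PN;	/* get port ID by port name */

	gidpn->port_name = port_name;		/* WWN being resolved */
}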
- */ - -#define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02 -#define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 - -struct fcgs_rffid_req_s{ - u32 rsvd:8; - u32 dap:24; /* port identifier */ - u32 rsvd1:16; - u32 fc4ftr_bits:8; /* fc4 feature bits */ - u32 fc4_type:8; /* corresponding FC4 Type */ -}; - -/** - * GID_FT Request - */ -struct fcgs_gidft_req_s{ - u8 reserved; - u8 domain_id; /* domain, 0 - all fabric */ - u8 area_id; /* area, 0 - whole domain */ - u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ -}; /* GID_FT Request */ - -/** - * GID_FT Response - */ -struct fcgs_gidft_resp_s { - u8 last:1; /* last port identifier flag */ - u8 reserved:7; - u32 pid:24; /* port identifier */ -}; /* GID_FT Response */ - -/** - * RSPN_ID - */ -struct fcgs_rspnid_req_s{ - u32 rsvd:8; - u32 dap:24; /* port identifier */ - u8 spn_len; /* symbolic port name length */ - u8 spn[256]; /* symbolic port name */ -}; - -/** - * RPN_ID - */ -struct fcgs_rpnid_req_s{ - u32 rsvd:8; - u32 port_id:24; - wwn_t port_name; -}; - -/** - * RNN_ID - */ -struct fcgs_rnnid_req_s{ - u32 rsvd:8; - u32 port_id:24; - wwn_t node_name; -}; - -/** - * RCS_ID - */ -struct fcgs_rcsid_req_s{ - u32 rsvd:8; - u32 port_id:24; - u32 cos; -}; - -/** - * RPT_ID - */ -struct fcgs_rptid_req_s{ - u32 rsvd:8; - u32 port_id:24; - u32 port_type:8; - u32 rsvd1:24; -}; - -/** - * GA_NXT Request - */ -struct fcgs_ganxt_req_s{ - u32 rsvd:8; - u32 port_id:24; -}; - -/** - * GA_NXT Response - */ -struct fcgs_ganxt_rsp_s{ - u32 port_type:8; /* Port Type */ - u32 port_id:24; /* Port Identifier */ - wwn_t port_name; /* Port Name */ - u8 spn_len; /* Length of Symbolic Port Name */ - char spn[255]; /* Symbolic Port Name */ - wwn_t node_name; /* Node Name */ - u8 snn_len; /* Length of Symbolic Node Name */ - char snn[255]; /* Symbolic Node Name */ - u8 ipa[8]; /* Initial Process Associator */ - u8 ip[16]; /* IP Address */ - u32 cos; /* Class of Service */ - u32 fc4types[8]; /* FC-4 TYPEs */ - wwn_t fabric_port_name; - /* Fabric Port Name */ - u32 rsvd:8; /* Reserved */ - u32 hard_addr:24; /* Hard Address */ -}; - -/* - * Fabric Config Server - */ - -/* - * Command codes for Fabric Configuration Server - */ -enum { - GS_FC_GFN_CMD = 0x0114, /* GS FC Get Fabric Name */ - GS_FC_GMAL_CMD = 0x0116, /* GS FC GMAL */ - GS_FC_TRACE_CMD = 0x0400, /* GS FC Trace Route */ - GS_FC_PING_CMD = 0x0401, /* GS FC Ping */ -}; - -/* - * Source or Destination Port Tags. - */ -enum { - GS_FTRACE_TAG_NPORT_ID = 1, - GS_FTRACE_TAG_NPORT_NAME = 2, -}; - -/* -* Port Value : Could be a Port id or wwn - */ -union fcgs_port_val_u{ - u32 nport_id; - wwn_t nport_wwn; -}; - -#define GS_FTRACE_MAX_HOP_COUNT 20 -#define GS_FTRACE_REVISION 1 - -/* - * Ftrace Related Structures. - */ - -/* - * STR (Switch Trace) Reject Reason Codes. From FC-SW. 
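A sketch of an RFF_ID request registering the FCP initiator feature for a port, using the layout and feature bits above (FC_TYPE_FCP comes from protocol/fc.h; byte ordering omitted):

static void
build_rffid(struct fcgs_rffid_req_s *rffid, u32 port_id)
{
	memset(rffid, 0, sizeof(*rffid));
	rffid->dap         = port_id;		/* 24-bit source port ID */
	rffid->fc4ftr_bits = FC_GS_FCP_FC4_FEATURE_INITIATOR;
	rffid->fc4_type    = FC_TYPE_FCP;	/* SCSI-FCP */
}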
- */ -enum { - GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0, - GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH, - GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH, - GS_FTRACE_STR_MAX_HOP_CNT_REACHED, - GS_FTRACE_STR_SRC_PORT_NOT_FOUND, - GS_FTRACE_STR_DST_PORT_NOT_FOUND, - GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE, - GS_FTRACE_STR_NO_ROUTE_BW_PORTS, - GS_FTRACE_STR_NO_ADDL_EXPLN, - GS_FTRACE_STR_FABRIC_BUSY, - GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS, - GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0, - GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff, -}; - -/* - * Ftrace Request - */ -struct fcgs_ftrace_req_s{ - u32 revision; - u16 src_port_tag; /* Source Port tag */ - u16 src_port_len; /* Source Port len */ - union fcgs_port_val_u src_port_val; /* Source Port value */ - u16 dst_port_tag; /* Destination Port tag */ - u16 dst_port_len; /* Destination Port len */ - union fcgs_port_val_u dst_port_val; /* Destination Port value */ - u32 token; - u8 vendor_id[8]; /* T10 Vendor Identifier */ - u8 vendor_info[8]; /* Vendor specific Info */ - u32 max_hop_cnt; /* Max Hop Count */ -}; - -/* - * Path info structure - */ -struct fcgs_ftrace_path_info_s{ - wwn_t switch_name; /* Switch WWN */ - u32 domain_id; - wwn_t ingress_port_name; /* Ingress ports wwn */ - u32 ingress_phys_port_num; /* Ingress ports physical port - * number - */ - wwn_t egress_port_name; /* Ingress ports wwn */ - u32 egress_phys_port_num; /* Ingress ports physical port - * number - */ -}; - -/* - * Ftrace Acc Response - */ -struct fcgs_ftrace_resp_s{ - u32 revision; - u32 token; - u8 vendor_id[8]; /* T10 Vendor Identifier */ - u8 vendor_info[8]; /* Vendor specific Info */ - u32 str_rej_reason_code; /* STR Reject Reason Code */ - u32 num_path_info_entries; /* No. of path info entries */ - /* - * path info entry/entries. - */ - struct fcgs_ftrace_path_info_s path_info[1]; - -}; - -/* -* Fabric Config Server : FCPing - */ - -/* - * FC Ping Request - */ -struct fcgs_fcping_req_s{ - u32 revision; - u16 port_tag; - u16 port_len; /* Port len */ - union fcgs_port_val_u port_val; /* Port value */ - u32 token; -}; - -/* - * FC Ping Response - */ -struct fcgs_fcping_resp_s{ - u32 token; -}; - -/* - * Command codes for zone server query. - */ -enum { - ZS_GZME = 0x0124, /* Get zone member extended */ -}; - -/* - * ZS GZME request - */ -#define ZS_GZME_ZNAMELEN 32 -struct zs_gzme_req_s{ - u8 znamelen; - u8 rsvd[3]; - u8 zname[ZS_GZME_ZNAMELEN]; -}; - -enum zs_mbr_type{ - ZS_MBR_TYPE_PWWN = 1, - ZS_MBR_TYPE_DOMPORT = 2, - ZS_MBR_TYPE_PORTID = 3, - ZS_MBR_TYPE_NWWN = 4, -}; - -struct zs_mbr_wwn_s{ - u8 mbr_type; - u8 rsvd[3]; - wwn_t wwn; -}; - -struct zs_query_resp_s{ - u32 nmbrs; /* number of zone members */ - struct zs_mbr_wwn_s mbr[1]; -}; - -/* - * GMAL Command ( Get ( interconnect Element) Management Address List) - * To retrieve the IP Address of a Switch. 
- */ - -#define CT_GMAL_RESP_PREFIX_TELNET "telnet://" -#define CT_GMAL_RESP_PREFIX_HTTP "http://" - -/* GMAL/GFN request */ -struct fcgs_req_s { - wwn_t wwn; /* PWWN/NWWN */ -}; - -#define fcgs_gmal_req_t struct fcgs_req_s -#define fcgs_gfn_req_t struct fcgs_req_s - -/* Accept Response to GMAL */ -struct fcgs_gmal_resp_s { - u32 ms_len; /* Num of entries */ - u8 ms_ma[256]; -}; - -struct fc_gmal_entry_s { - u8 len; - u8 prefix[7]; /* like "http://" */ - u8 ip_addr[248]; -}; - -#pragma pack() - -#endif diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h deleted file mode 100644 index 436dd7c5643a..000000000000 --- a/drivers/scsi/bfa/include/protocol/fc.h +++ /dev/null @@ -1,1111 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __FC_H__ -#define __FC_H__ - -#include - -#pragma pack(1) - -/* - * Fibre Channel Header Structure (FCHS) definition - */ -struct fchs_s { -#ifdef __BIGENDIAN - u32 routing:4; /* routing bits */ - u32 cat_info:4; /* category info */ -#else - u32 cat_info:4; /* category info */ - u32 routing:4; /* routing bits */ -#endif - u32 d_id:24; /* destination identifier */ - - u32 cs_ctl:8; /* class specific control */ - u32 s_id:24; /* source identifier */ - - u32 type:8; /* data structure type */ - u32 f_ctl:24; /* initial frame control */ - - u8 seq_id; /* sequence identifier */ - u8 df_ctl; /* data field control */ - u16 seq_cnt; /* sequence count */ - - u16 ox_id; /* originator exchange ID */ - u16 rx_id; /* responder exchange ID */ - - u32 ro; /* relative offset */ -}; - -#define FC_SOF_LEN 4 -#define FC_EOF_LEN 4 -#define FC_CRC_LEN 4 - -/* - * Fibre Channel BB_E Header Structure - */ -struct fcbbehs_s { - u16 ver_rsvd; - u32 rsvd[2]; - u32 rsvd__sof; -}; - -#define FC_SEQ_ID_MAX 256 - -/* - * routing bit definitions - */ -enum { - FC_RTG_FC4_DEV_DATA = 0x0, /* FC-4 Device Data */ - FC_RTG_EXT_LINK = 0x2, /* Extended Link Data */ - FC_RTG_FC4_LINK_DATA = 0x3, /* FC-4 Link Data */ - FC_RTG_VIDEO_DATA = 0x4, /* Video Data */ - FC_RTG_EXT_HDR = 0x5, /* VFT, IFR or Encapsuled */ - FC_RTG_BASIC_LINK = 0x8, /* Basic Link data */ - FC_RTG_LINK_CTRL = 0xC, /* Link Control */ -}; - -/* - * information category for extended link data and FC-4 Link Data - */ -enum { - FC_CAT_LD_REQUEST = 0x2, /* Request */ - FC_CAT_LD_REPLY = 0x3, /* Reply */ - FC_CAT_LD_DIAG = 0xF, /* for DIAG use only */ -}; - -/* - * information category for extended headers (VFT, IFR or encapsulation) - */ -enum { - FC_CAT_VFT_HDR = 0x0, /* Virtual fabric tagging header */ - FC_CAT_IFR_HDR = 0x1, /* Inter-Fabric routing header */ - FC_CAT_ENC_HDR = 0x2, /* Encapsulation header */ -}; - -/* - * information category for FC-4 device data - */ -enum { - FC_CAT_UNCATEG_INFO = 0x0, /* Uncategorized information */ - FC_CAT_SOLICIT_DATA = 0x1, /* Solicited Data */ - FC_CAT_UNSOLICIT_CTRL = 0x2, /* Unsolicited Control */ - FC_CAT_SOLICIT_CTRL = 0x3, /* Solicited Control */ - 
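A sketch of the header fields an ELS request frame carries, using only definitions from this file (FC_TYPE_ELS appears a little further down); F_CTL bits, sequence/exchange IDs and on-wire byte ordering are left out:

static void
fchs_init_els_req(struct fchs_s *fchs, u32 d_id, u32 s_id)
{
	memset(fchs, 0, sizeof(*fchs));
	fchs->routing  = FC_RTG_EXT_LINK;	/* extended link data */
	fchs->cat_info = FC_CAT_LD_REQUEST;	/* unsolicited request */
	fchs->d_id     = d_id;
	fchs->s_id     = s_id;
	fchs->type     = FC_TYPE_ELS;
}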
FC_CAT_UNSOLICIT_DATA = 0x4, /* Unsolicited Data */ - FC_CAT_DATA_DESC = 0x5, /* Data Descriptor */ - FC_CAT_UNSOLICIT_CMD = 0x6, /* Unsolicited Command */ - FC_CAT_CMD_STATUS = 0x7, /* Command Status */ -}; - -/* - * information category for Link Control - */ -enum { - FC_CAT_ACK_1 = 0x00, - FC_CAT_ACK_0_N = 0x01, - FC_CAT_P_RJT = 0x02, - FC_CAT_F_RJT = 0x03, - FC_CAT_P_BSY = 0x04, - FC_CAT_F_BSY_DATA = 0x05, - FC_CAT_F_BSY_LINK_CTL = 0x06, - FC_CAT_F_LCR = 0x07, - FC_CAT_NTY = 0x08, - FC_CAT_END = 0x09, -}; - -/* - * Type Field Definitions. FC-PH Section 18.5 pg. 165 - */ -enum { - FC_TYPE_BLS = 0x0, /* Basic Link Service */ - FC_TYPE_ELS = 0x1, /* Extended Link Service */ - FC_TYPE_IP = 0x5, /* IP */ - FC_TYPE_FCP = 0x8, /* SCSI-FCP */ - FC_TYPE_GPP = 0x9, /* SCSI_GPP */ - FC_TYPE_SERVICES = 0x20, /* Fibre Channel Services */ - FC_TYPE_FC_FSS = 0x22, /* Fabric Switch Services */ - FC_TYPE_FC_AL = 0x23, /* FC-AL */ - FC_TYPE_FC_SNMP = 0x24, /* FC-SNMP */ - FC_TYPE_MAX = 256, /* 256 FC-4 types */ -}; - -struct fc_fc4types_s{ - u8 bits[FC_TYPE_MAX / 8]; -}; - -/* - * Frame Control Definitions. FC-PH Table-45. pg. 168 - */ -enum { - FCTL_EC_ORIG = 0x000000, /* exchange originator */ - FCTL_EC_RESP = 0x800000, /* exchange responder */ - FCTL_SEQ_INI = 0x000000, /* sequence initiator */ - FCTL_SEQ_REC = 0x400000, /* sequence recipient */ - FCTL_FS_EXCH = 0x200000, /* first sequence of xchg */ - FCTL_LS_EXCH = 0x100000, /* last sequence of xchg */ - FCTL_END_SEQ = 0x080000, /* last frame of sequence */ - FCTL_SI_XFER = 0x010000, /* seq initiative transfer */ - FCTL_RO_PRESENT = 0x000008, /* relative offset present */ - FCTL_FILLBYTE_MASK = 0x000003 /* , fill byte mask */ -}; - -/* - * Fabric Well Known Addresses - */ -enum { - FC_MIN_WELL_KNOWN_ADDR = 0xFFFFF0, - FC_DOMAIN_CONTROLLER_MASK = 0xFFFC00, - FC_ALIAS_SERVER = 0xFFFFF8, - FC_MGMT_SERVER = 0xFFFFFA, - FC_TIME_SERVER = 0xFFFFFB, - FC_NAME_SERVER = 0xFFFFFC, - FC_FABRIC_CONTROLLER = 0xFFFFFD, - FC_FABRIC_PORT = 0xFFFFFE, - FC_BROADCAST_SERVER = 0xFFFFFF -}; - -/* - * domain/area/port defines - */ -#define FC_DOMAIN_MASK 0xFF0000 -#define FC_DOMAIN_SHIFT 16 -#define FC_AREA_MASK 0x00FF00 -#define FC_AREA_SHIFT 8 -#define FC_PORT_MASK 0x0000FF -#define FC_PORT_SHIFT 0 - -#define FC_GET_DOMAIN(p) (((p) & FC_DOMAIN_MASK) >> FC_DOMAIN_SHIFT) -#define FC_GET_AREA(p) (((p) & FC_AREA_MASK) >> FC_AREA_SHIFT) -#define FC_GET_PORT(p) (((p) & FC_PORT_MASK) >> FC_PORT_SHIFT) - -#define FC_DOMAIN_CTRLR(p) (FC_DOMAIN_CONTROLLER_MASK | (FC_GET_DOMAIN(p))) - -enum { - FC_RXID_ANY = 0xFFFFU, -}; - -/* - * generic ELS command - */ -struct fc_els_cmd_s{ - u32 els_code:8; /* ELS Command Code */ - u32 reserved:24; -}; - -/* - * ELS Command Codes. FC-PH Table-75. pg. 223 - */ -enum { - FC_ELS_LS_RJT = 0x1, /* Link Service Reject. */ - FC_ELS_ACC = 0x02, /* Accept */ - FC_ELS_PLOGI = 0x03, /* N_Port Login. */ - FC_ELS_FLOGI = 0x04, /* F_Port Login. */ - FC_ELS_LOGO = 0x05, /* Logout. */ - FC_ELS_ABTX = 0x06, /* Abort Exchange */ - FC_ELS_RES = 0x08, /* Read Exchange status */ - FC_ELS_RSS = 0x09, /* Read sequence status block */ - FC_ELS_RSI = 0x0A, /* Request Sequence Initiative */ - FC_ELS_ESTC = 0x0C, /* Estimate Credit. */ - FC_ELS_RTV = 0x0E, /* Read Timeout Value. */ - FC_ELS_RLS = 0x0F, /* Read Link Status. */ - FC_ELS_ECHO = 0x10, /* Echo */ - FC_ELS_TEST = 0x11, /* Test */ - FC_ELS_RRQ = 0x12, /* Reinstate Recovery Qualifier. 
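A worked example of the address helpers above, decoding an arbitrary 24-bit port identifier into its domain/area/port bytes:

static void
decode_pid_example(void)
{
	u32 pid    = 0x0a1b2c;			/* arbitrary example address */
	u32 domain = FC_GET_DOMAIN(pid);	/* 0x0a */
	u32 area   = FC_GET_AREA(pid);		/* 0x1b */
	u32 port   = FC_GET_PORT(pid);		/* 0x2c */

	pr_info("pid 0x%06x -> domain 0x%02x area 0x%02x port 0x%02x\n",
		pid, domain, area, port);
}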
*/ - FC_ELS_REC = 0x13, /* Add this for TAPE support in FCR */ - FC_ELS_PRLI = 0x20, /* Process Login */ - FC_ELS_PRLO = 0x21, /* Process Logout. */ - FC_ELS_SCN = 0x22, /* State Change Notification. */ - FC_ELS_TPRLO = 0x24, /* Third Party Process Logout. */ - FC_ELS_PDISC = 0x50, /* Discover N_Port Parameters. */ - FC_ELS_FDISC = 0x51, /* Discover F_Port Parameters. */ - FC_ELS_ADISC = 0x52, /* Discover Address. */ - FC_ELS_FAN = 0x60, /* Fabric Address Notification */ - FC_ELS_RSCN = 0x61, /* Reg State Change Notification */ - FC_ELS_SCR = 0x62, /* State Change Registration. */ - FC_ELS_RTIN = 0x77, /* Mangement server request */ - FC_ELS_RNID = 0x78, /* Mangement server request */ - FC_ELS_RLIR = 0x79, /* Registered Link Incident Record */ - - FC_ELS_RPSC = 0x7D, /* Report Port Speed Capabilities */ - FC_ELS_QSA = 0x7E, /* Query Security Attributes. Ref FC-SP */ - FC_ELS_E2E_LBEACON = 0x81, - /* End-to-End Link Beacon */ - FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */ - FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref - *FC-SP */ - -}; - -/* - * Version numbers for FC-PH standards, - * used in login to indicate what port - * supports. See FC-PH-X table 158. - */ -enum { - FC_PH_VER_4_3 = 0x09, - FC_PH_VER_PH_3 = 0x20, -}; - -/* - * PDU size defines - */ -enum { - FC_MIN_PDUSZ = 512, - FC_MAX_PDUSZ = 2112, -}; - -/* - * N_Port PLOGI Common Service Parameters. - * FC-PH-x. Figure-76. pg. 308. - */ -struct fc_plogi_csp_s{ - u8 verhi; /* FC-PH high version */ - u8 verlo; /* FC-PH low version */ - u16 bbcred; /* BB_Credit */ - -#ifdef __BIGENDIAN - u8 ciro:1, /* continuously increasing RO */ - rro:1, /* random relative offset */ - npiv_supp:1, /* NPIV supported */ - port_type:1, /* N_Port/F_port */ - altbbcred:1, /* alternate BB_Credit */ - resolution:1, /* ms/ns ED_TOV resolution */ - vvl_info:1, /* VVL Info included */ - reserved1:1; - - u8 hg_supp:1, - query_dbc:1, - security:1, - sync_cap:1, - r_t_tov:1, - dh_dup_supp:1, - cisc:1, /* continuously increasing seq count */ - payload:1; -#else - u8 reserved2:2, - resolution:1, /* ms/ns ED_TOV resolution */ - altbbcred:1, /* alternate BB_Credit */ - port_type:1, /* N_Port/F_port */ - npiv_supp:1, /* NPIV supported */ - rro:1, /* random relative offset */ - ciro:1; /* continuously increasing RO */ - - u8 payload:1, - cisc:1, /* continuously increasing seq count */ - dh_dup_supp:1, - r_t_tov:1, - sync_cap:1, - security:1, - query_dbc:1, - hg_supp:1; -#endif - - u16 rxsz; /* recieve data_field size */ - - u16 conseq; - u16 ro_bitmap; - - u32 e_d_tov; -}; - -/* - * N_Port PLOGI Class Specific Parameters. - * FC-PH-x. Figure 78. pg. 318. - */ -struct fc_plogi_clp_s{ -#ifdef __BIGENDIAN - u32 class_valid:1; - u32 intermix:1; /* class intermix supported if set =1. - * valid only for class1. Reserved for - * class2 & class3 - */ - u32 reserved1:2; - u32 sequential:1; - u32 reserved2:3; -#else - u32 reserved2:3; - u32 sequential:1; - u32 reserved1:2; - u32 intermix:1; /* class intermix supported if set =1. - * valid only for class1. 
Reserved for - * class2 & class3 - */ - u32 class_valid:1; -#endif - - u32 reserved3:24; - - u32 reserved4:16; - u32 rxsz:16; /* Receive data_field size */ - - u32 reserved5:8; - u32 conseq:8; - u32 e2e_credit:16; /* end to end credit */ - - u32 reserved7:8; - u32 ospx:8; - u32 reserved8:16; -}; - -#define FLOGI_VVL_BRCD 0x42524344 /* ASCII value for each character in - * string "BRCD" */ - -/* - * PLOGI els command and reply payload - */ -struct fc_logi_s{ - struct fc_els_cmd_s els_cmd; /* ELS command code */ - struct fc_plogi_csp_s csp; /* common service params */ - wwn_t port_name; - wwn_t node_name; - struct fc_plogi_clp_s class1; /* class 1 service parameters */ - struct fc_plogi_clp_s class2; /* class 2 service parameters */ - struct fc_plogi_clp_s class3; /* class 3 service parameters */ - struct fc_plogi_clp_s class4; /* class 4 service parameters */ - u8 vvl[16]; /* vendor version level */ -}; - -/* - * LOGO els command payload - */ -struct fc_logo_s{ - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 res1:8; - u32 nport_id:24; /* N_Port identifier of source */ - wwn_t orig_port_name; /* Port name of the LOGO originator */ -}; - -/* - * ADISC els command payload - */ -struct fc_adisc_s { - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 res1:8; - u32 orig_HA:24; /* originator hard address */ - wwn_t orig_port_name; /* originator port name */ - wwn_t orig_node_name; /* originator node name */ - u32 res2:8; - u32 nport_id:24; /* originator NPortID */ -}; - -/* - * Exchange status block - */ -struct fc_exch_status_blk_s{ - u32 oxid:16; - u32 rxid:16; - u32 res1:8; - u32 orig_np:24; /* originator NPortID */ - u32 res2:8; - u32 resp_np:24; /* responder NPortID */ - u32 es_bits; - u32 res3; - /* - * un modified section of the fields - */ -}; - -/* - * RES els command payload - */ -struct fc_res_s { - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 res1:8; - u32 nport_id:24; /* N_Port identifier of source */ - u32 oxid:16; - u32 rxid:16; - u8 assoc_hdr[32]; -}; - -/* - * RES els accept payload - */ -struct fc_res_acc_s{ - struct fc_els_cmd_s els_cmd; /* ELS command code */ - struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */ -}; - -/* - * REC els command payload - */ -struct fc_rec_s { - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 res1:8; - u32 nport_id:24; /* N_Port identifier of source */ - u32 oxid:16; - u32 rxid:16; -}; - -#define FC_REC_ESB_OWN_RSP 0x80000000 /* responder owns */ -#define FC_REC_ESB_SI 0x40000000 /* SI is owned */ -#define FC_REC_ESB_COMP 0x20000000 /* exchange is complete */ -#define FC_REC_ESB_ENDCOND_ABN 0x10000000 /* abnormal ending */ -#define FC_REC_ESB_RQACT 0x04000000 /* recovery qual active */ -#define FC_REC_ESB_ERRP_MSK 0x03000000 -#define FC_REC_ESB_OXID_INV 0x00800000 /* invalid OXID */ -#define FC_REC_ESB_RXID_INV 0x00400000 /* invalid RXID */ -#define FC_REC_ESB_PRIO_INUSE 0x00200000 - -/* - * REC els accept payload - */ -struct fc_rec_acc_s { - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 oxid:16; - u32 rxid:16; - u32 res1:8; - u32 orig_id:24; /* N_Port id of exchange originator */ - u32 res2:8; - u32 resp_id:24; /* N_Port id of exchange responder */ - u32 count; /* data transfer count */ - u32 e_stat; /* exchange status */ -}; - -/* - * RSI els payload - */ -struct fc_rsi_s { - struct fc_els_cmd_s els_cmd; - u32 res1:8; - u32 orig_sid:24; - u32 oxid:16; - u32 rxid:16; -}; - -/* - * structure for PRLI paramater pages, both request & response - * see FC-PH-X table 113 & 115 for 
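A sketch of filling the LOGO payload above for a port that is logging out (illustrative helper; byte ordering and the surrounding frame header are omitted):

static void
build_logo(struct fc_logo_s *logo, u32 s_id, wwn_t port_name)
{
	memset(logo, 0, sizeof(*logo));
	logo->els_cmd.els_code = FC_ELS_LOGO;
	logo->nport_id         = s_id;		/* 24-bit address of the originator */
	logo->orig_port_name   = port_name;
}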
explanation also FCP table 8 - */ -struct fc_prli_params_s{ - u32 reserved:16; -#ifdef __BIGENDIAN - u32 reserved1:5; - u32 rec_support:1; - u32 task_retry_id:1; - u32 retry:1; - - u32 confirm:1; - u32 doverlay:1; - u32 initiator:1; - u32 target:1; - u32 cdmix:1; - u32 drmix:1; - u32 rxrdisab:1; - u32 wxrdisab:1; -#else - u32 retry:1; - u32 task_retry_id:1; - u32 rec_support:1; - u32 reserved1:5; - - u32 wxrdisab:1; - u32 rxrdisab:1; - u32 drmix:1; - u32 cdmix:1; - u32 target:1; - u32 initiator:1; - u32 doverlay:1; - u32 confirm:1; -#endif -}; - -/* - * valid values for rspcode in PRLI ACC payload - */ -enum { - FC_PRLI_ACC_XQTD = 0x1, /* request executed */ - FC_PRLI_ACC_PREDEF_IMG = 0x5, /* predefined image - no prli needed */ -}; - -struct fc_prli_params_page_s{ - u32 type:8; - u32 codext:8; -#ifdef __BIGENDIAN - u32 origprocasv:1; - u32 rsppav:1; - u32 imagepair:1; - u32 reserved1:1; - u32 rspcode:4; -#else - u32 rspcode:4; - u32 reserved1:1; - u32 imagepair:1; - u32 rsppav:1; - u32 origprocasv:1; -#endif - u32 reserved2:8; - - u32 origprocas; - u32 rspprocas; - struct fc_prli_params_s servparams; -}; - -/* - * PRLI request and accept payload, FC-PH-X tables 112 & 114 - */ -struct fc_prli_s{ - u32 command:8; - u32 pglen:8; - u32 pagebytes:16; - struct fc_prli_params_page_s parampage; -}; - -/* - * PRLO logout params page - */ -struct fc_prlo_params_page_s{ - u32 type:8; - u32 type_ext:8; -#ifdef __BIGENDIAN - u32 opa_valid:1; /* originator process associator - * valid - */ - u32 rpa_valid:1; /* responder process associator valid */ - u32 res1:14; -#else - u32 res1:14; - u32 rpa_valid:1; /* responder process associator valid */ - u32 opa_valid:1; /* originator process associator - * valid - */ -#endif - u32 orig_process_assc; - u32 resp_process_assc; - - u32 res2; -}; - -/* - * PRLO els command payload - */ -struct fc_prlo_s{ - u32 command:8; - u32 page_len:8; - u32 payload_len:16; - struct fc_prlo_params_page_s prlo_params[1]; -}; - -/* - * PRLO Logout response parameter page - */ -struct fc_prlo_acc_params_page_s{ - u32 type:8; - u32 type_ext:8; - -#ifdef __BIGENDIAN - u32 opa_valid:1; /* originator process associator - * valid - */ - u32 rpa_valid:1; /* responder process associator valid */ - u32 res1:14; -#else - u32 res1:14; - u32 rpa_valid:1; /* responder process associator valid */ - u32 opa_valid:1; /* originator process associator - * valid - */ -#endif - u32 orig_process_assc; - u32 resp_process_assc; - - u32 fc4type_csp; -}; - -/* - * PRLO els command ACC payload - */ -struct fc_prlo_acc_s{ - u32 command:8; - u32 page_len:8; - u32 payload_len:16; - struct fc_prlo_acc_params_page_s prlo_acc_params[1]; -}; - -/* - * SCR els command payload - */ -enum { - FC_SCR_REG_FUNC_FABRIC_DETECTED = 0x01, - FC_SCR_REG_FUNC_N_PORT_DETECTED = 0x02, - FC_SCR_REG_FUNC_FULL = 0x03, - FC_SCR_REG_FUNC_CLEAR_REG = 0xFF, -}; - -/* SCR VU registrations */ -enum { - FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01 -}; - -struct fc_scr_s{ - u32 command:8; - u32 res:24; - u32 vu_reg_func:8; /* Vendor Unique Registrations */ - u32 res1:16; - u32 reg_func:8; -}; - -/* - * Information category for Basic link data - */ -enum { - FC_CAT_NOP = 0x0, - FC_CAT_ABTS = 0x1, - FC_CAT_RMC = 0x2, - FC_CAT_BA_ACC = 0x4, - FC_CAT_BA_RJT = 0x5, - FC_CAT_PRMT = 0x6, -}; - -/* - * LS_RJT els reply payload - */ -struct fc_ls_rjt_s { - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 res1:8; - u32 reason_code:8; /* Reason code for reject */ - u32 reason_code_expl:8; /* Reason code explanation */ - u32 vendor_unique:8; 
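A sketch of an SCR payload requesting full state change registration, built from the structure and register-function codes above:

static void
build_scr(struct fc_scr_s *scr)
{
	memset(scr, 0, sizeof(*scr));
	scr->command  = FC_ELS_SCR;		/* 0x62, from the ELS codes above */
	scr->reg_func = FC_SCR_REG_FUNC_FULL;	/* fabric- and N_Port-detected events */
}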
/* Vendor specific */ -}; - -/* - * LS_RJT reason codes - */ -enum { - FC_LS_RJT_RSN_INV_CMD_CODE = 0x01, - FC_LS_RJT_RSN_LOGICAL_ERROR = 0x03, - FC_LS_RJT_RSN_LOGICAL_BUSY = 0x05, - FC_LS_RJT_RSN_PROTOCOL_ERROR = 0x07, - FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD = 0x09, - FC_LS_RJT_RSN_CMD_NOT_SUPP = 0x0B, -}; - -/* - * LS_RJT reason code explanation - */ -enum { - FC_LS_RJT_EXP_NO_ADDL_INFO = 0x00, - FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS = 0x01, - FC_LS_RJT_EXP_SPARMS_ERR_INI_CTL = 0x03, - FC_LS_RJT_EXP_SPARMS_ERR_REC_CTL = 0x05, - FC_LS_RJT_EXP_SPARMS_ERR_RXSZ = 0x07, - FC_LS_RJT_EXP_SPARMS_ERR_CONSEQ = 0x09, - FC_LS_RJT_EXP_SPARMS_ERR_CREDIT = 0x0B, - FC_LS_RJT_EXP_INV_PORT_NAME = 0x0D, - FC_LS_RJT_EXP_INV_NODE_FABRIC_NAME = 0x0E, - FC_LS_RJT_EXP_INV_CSP = 0x0F, - FC_LS_RJT_EXP_INV_ASSOC_HDR = 0x11, - FC_LS_RJT_EXP_ASSOC_HDR_REQD = 0x13, - FC_LS_RJT_EXP_INV_ORIG_S_ID = 0x15, - FC_LS_RJT_EXP_INV_OXID_RXID_COMB = 0x17, - FC_LS_RJT_EXP_CMD_ALREADY_IN_PROG = 0x19, - FC_LS_RJT_EXP_LOGIN_REQUIRED = 0x1E, - FC_LS_RJT_EXP_INVALID_NPORT_ID = 0x1F, - FC_LS_RJT_EXP_INSUFF_RES = 0x29, - FC_LS_RJT_EXP_CMD_NOT_SUPP = 0x2C, - FC_LS_RJT_EXP_INV_PAYLOAD_LEN = 0x2D, -}; - -/* - * RRQ els command payload - */ -struct fc_rrq_s{ - struct fc_els_cmd_s els_cmd; /* ELS command code */ - u32 res1:8; - u32 s_id:24; /* exchange originator S_ID */ - - u32 ox_id:16; /* originator exchange ID */ - u32 rx_id:16; /* responder exchange ID */ - - u32 res2[8]; /* optional association header */ -}; - -/* - * ABTS BA_ACC reply payload - */ -struct fc_ba_acc_s{ - u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ - u32 seq_id:8; /* invalid for Abort Exchange */ - u32 res2:16; - u32 ox_id:16; /* OX_ID from ABTS frame */ - u32 rx_id:16; /* RX_ID from ABTS frame */ - u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */ - u32 high_seq_cnt:16;/* set to 0xFFFF for Abort Exchange */ -}; - -/* - * ABTS BA_RJT reject payload - */ -struct fc_ba_rjt_s{ - u32 res1:8; /* Reserved */ - u32 reason_code:8; /* reason code for reject */ - u32 reason_expl:8; /* reason code explanation */ - u32 vendor_unique:8;/* vendor unique reason code,set to 0 */ -}; - -/* - * TPRLO logout parameter page - */ -struct fc_tprlo_params_page_s{ - u32 type:8; - u32 type_ext:8; - -#ifdef __BIGENDIAN - u32 opa_valid:1; - u32 rpa_valid:1; - u32 tpo_nport_valid:1; - u32 global_process_logout:1; - u32 res1:12; -#else - u32 res1:12; - u32 global_process_logout:1; - u32 tpo_nport_valid:1; - u32 rpa_valid:1; - u32 opa_valid:1; -#endif - - u32 orig_process_assc; - u32 resp_process_assc; - - u32 res2:8; - u32 tpo_nport_id; -}; - -/* - * TPRLO ELS command payload - */ -struct fc_tprlo_s{ - u32 command:8; - u32 page_len:8; - u32 payload_len:16; - - struct fc_tprlo_params_page_s tprlo_params[1]; -}; - -enum fc_tprlo_type{ - FC_GLOBAL_LOGO = 1, - FC_TPR_LOGO -}; - -/* - * TPRLO els command ACC payload - */ -struct fc_tprlo_acc_s{ - u32 command:8; - u32 page_len:8; - u32 payload_len:16; - struct fc_prlo_acc_params_page_s tprlo_acc_params[1]; -}; - -/* - * RSCN els command req payload - */ -#define FC_RSCN_PGLEN 0x4 - -enum fc_rscn_format{ - FC_RSCN_FORMAT_PORTID = 0x0, - FC_RSCN_FORMAT_AREA = 0x1, - FC_RSCN_FORMAT_DOMAIN = 0x2, - FC_RSCN_FORMAT_FABRIC = 0x3, -}; - -struct fc_rscn_event_s{ - u32 format:2; - u32 qualifier:4; - u32 resvd:2; - u32 portid:24; -}; - -struct fc_rscn_pl_s{ - u8 command; - u8 pagelen; - u16 payldlen; - struct fc_rscn_event_s event[1]; -}; - -/* - * ECHO els command req payload - */ -struct fc_echo_s { - struct fc_els_cmd_s els_cmd; -}; - -/* - * RNID 
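A sketch of walking the RSCN payload defined above, assuming 4-byte event pages and that payldlen has already been converted to host byte order:

static void
rscn_walk(struct fc_rscn_pl_s *rscn)
{
	int num_events = (rscn->payldlen - 4) / FC_RSCN_PGLEN;
	int i;

	for (i = 0; i < num_events; i++)
		pr_info("RSCN: format %u, affected id 0x%06x\n",
			rscn->event[i].format, rscn->event[i].portid);
}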
els command - */ - -#define RNID_NODEID_DATA_FORMAT_COMMON 0x00 -#define RNID_NODEID_DATA_FORMAT_FCP3 0x08 -#define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF - -#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 -#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 -#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 -#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 -#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005 -#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009 -#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A -#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B -#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E -#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011 -#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002 -#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 -#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF - -/* - * RNID els command payload - */ -struct fc_rnid_cmd_s{ - struct fc_els_cmd_s els_cmd; - u32 node_id_data_format:8; - u32 reserved:24; -}; - -/* - * RNID els response payload - */ - -struct fc_rnid_common_id_data_s{ - wwn_t port_name; - wwn_t node_name; -}; - -struct fc_rnid_general_topology_data_s{ - u32 vendor_unique[4]; - u32 asso_type; - u32 phy_port_num; - u32 num_attached_nodes; - u32 node_mgmt:8; - u32 ip_version:8; - u32 udp_tcp_port_num:16; - u32 ip_address[4]; - u32 reserved:16; - u32 vendor_specific:16; -}; - -struct fc_rnid_acc_s{ - struct fc_els_cmd_s els_cmd; - u32 node_id_data_format:8; - u32 common_id_data_length:8; - u32 reserved:8; - u32 specific_id_data_length:8; - struct fc_rnid_common_id_data_s common_id_data; - struct fc_rnid_general_topology_data_s gen_topology_data; -}; - -#define RNID_ASSOCIATED_TYPE_UNKNOWN 0x00000001 -#define RNID_ASSOCIATED_TYPE_OTHER 0x00000002 -#define RNID_ASSOCIATED_TYPE_HUB 0x00000003 -#define RNID_ASSOCIATED_TYPE_SWITCH 0x00000004 -#define RNID_ASSOCIATED_TYPE_GATEWAY 0x00000005 -#define RNID_ASSOCIATED_TYPE_STORAGE_DEVICE 0x00000009 -#define RNID_ASSOCIATED_TYPE_HOST 0x0000000A -#define RNID_ASSOCIATED_TYPE_STORAGE_SUBSYSTEM 0x0000000B -#define RNID_ASSOCIATED_TYPE_STORAGE_ACCESS_DEVICE 0x0000000E -#define RNID_ASSOCIATED_TYPE_NAS_SERVER 0x00000011 -#define RNID_ASSOCIATED_TYPE_BRIDGE 0x00000002 -#define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE 0x00000003 -#define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE 0x000000FF - -enum fc_rpsc_speed_cap{ - RPSC_SPEED_CAP_1G = 0x8000, - RPSC_SPEED_CAP_2G = 0x4000, - RPSC_SPEED_CAP_4G = 0x2000, - RPSC_SPEED_CAP_10G = 0x1000, - RPSC_SPEED_CAP_8G = 0x0800, - RPSC_SPEED_CAP_16G = 0x0400, - - RPSC_SPEED_CAP_UNKNOWN = 0x0001, -}; - -enum fc_rpsc_op_speed_s{ - RPSC_OP_SPEED_1G = 0x8000, - RPSC_OP_SPEED_2G = 0x4000, - RPSC_OP_SPEED_4G = 0x2000, - RPSC_OP_SPEED_10G = 0x1000, - RPSC_OP_SPEED_8G = 0x0800, - RPSC_OP_SPEED_16G = 0x0400, - - RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */ -}; - -struct fc_rpsc_speed_info_s{ - u16 port_speed_cap; /*! see fc_rpsc_speed_cap_t */ - u16 port_op_speed; /*! see fc_rpsc_op_speed_t */ -}; - -enum link_e2e_beacon_subcmd{ - LINK_E2E_BEACON_ON = 1, - LINK_E2E_BEACON_OFF = 2 -}; - -enum beacon_type{ - BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */ - BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */ - BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */ -}; - -struct link_e2e_beacon_param_s { - u8 beacon_type; /* Beacon Type. See beacon_type_t */ - u8 beacon_frequency; - /* Beacon frequency. Number of blinks - * per 10 seconds - */ - u16 beacon_duration;/* Beacon duration (in Seconds). 
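A small sketch against the RPSC speed-info entry above: the operating-speed word is a bit map, with RPSC_OP_SPEED_NOT_EST meaning the link speed has not been established yet:

static int
rpsc_op_speed_known(const struct fc_rpsc_speed_info_s *si)
{
	return !(si->port_op_speed & RPSC_OP_SPEED_NOT_EST);
}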
The - * command operation should be - * terminated at the end of this - * timeout value. - * - * Ignored if diag_sub_cmd is - * LINK_E2E_BEACON_OFF. - * - * If 0, beaconing will continue till a - * BEACON OFF request is received - */ -}; - -/* - * Link E2E beacon request/good response format. For LS_RJTs use fc_ls_rjt_t - */ -struct link_e2e_beacon_req_s{ - u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests * - *or FC_ELS_ACC in good replies */ - u32 ls_sub_cmd; /*! See link_e2e_beacon_subcmd_t */ - struct link_e2e_beacon_param_s beacon_parm; -}; - -/** - * If RPSC request is sent to the Domain Controller, the request is for - * all the ports within that domain (TODO - I don't think FOS implements - * this...). - */ -struct fc_rpsc_cmd_s{ - struct fc_els_cmd_s els_cmd; -}; - -/* - * RPSC Acc - */ -struct fc_rpsc_acc_s{ - u32 command:8; - u32 rsvd:8; - u32 num_entries:16; - - struct fc_rpsc_speed_info_s speed_info[1]; -}; - -/** - * If RPSC2 request is sent to the Domain Controller, - */ -#define FC_BRCD_TOKEN 0x42524344 - -struct fc_rpsc2_cmd_s{ - struct fc_els_cmd_s els_cmd; - u32 token; - u16 resvd; - u16 num_pids; /* Number of pids in the request */ - struct { - u32 rsvd1:8; - u32 pid:24; /* port identifier */ - } pid_list[1]; -}; - -enum fc_rpsc2_port_type{ - RPSC2_PORT_TYPE_UNKNOWN = 0, - RPSC2_PORT_TYPE_NPORT = 1, - RPSC2_PORT_TYPE_NLPORT = 2, - RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, - RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, -}; - -/* - * RPSC2 portInfo entry structure - */ -struct fc_rpsc2_port_info_s{ - u32 pid; /* PID */ - u16 resvd1; - u16 index; /* port number / index */ - u8 resvd2; - u8 type; /* port type N/NL/... */ - u16 speed; /* port Operating Speed */ -}; - -/* - * RPSC2 Accept payload - */ -struct fc_rpsc2_acc_s{ - u8 els_cmd; - u8 resvd; - u16 num_pids; /* Number of pids in the request */ - struct fc_rpsc2_port_info_s port_info[1]; /* port information */ -}; - -/** - * bit fields so that multiple classes can be specified - */ -enum fc_cos{ - FC_CLASS_2 = 0x04, - FC_CLASS_3 = 0x08, - FC_CLASS_2_3 = 0x0C, -}; - -/* - * symbolic name - */ -struct fc_symname_s{ - u8 symname[FC_SYMNAME_MAX]; -}; - -struct fc_alpabm_s{ - u8 alpa_bm[FC_ALPA_MAX / 8]; -}; - -/* - * protocol default timeout values - */ -#define FC_ED_TOV 2 -#define FC_REC_TOV (FC_ED_TOV + 1) -#define FC_RA_TOV 10 -#define FC_ELS_TOV (2 * FC_RA_TOV) -#define FC_FCCT_TOV (3 * FC_RA_TOV) - -/* - * virtual fabric related defines - */ -#define FC_VF_ID_NULL 0 /* must not be used as VF_ID */ -#define FC_VF_ID_MIN 1 -#define FC_VF_ID_MAX 0xEFF -#define FC_VF_ID_CTL 0xFEF /* control VF_ID */ - -/** - * Virtual Fabric Tagging header format - * @caution This is defined only in BIG ENDIAN format. - */ -struct fc_vft_s{ - u32 r_ctl:8; - u32 ver:2; - u32 type:4; - u32 res_a:2; - u32 priority:3; - u32 vf_id:12; - u32 res_b:1; - u32 hopct:8; - u32 res_c:24; -}; - -#pragma pack() - -#endif diff --git a/drivers/scsi/bfa/include/protocol/fc_sp.h b/drivers/scsi/bfa/include/protocol/fc_sp.h deleted file mode 100644 index 55bb0b31d04b..000000000000 --- a/drivers/scsi/bfa/include/protocol/fc_sp.h +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __FC_SP_H__ -#define __FC_SP_H__ - -#include - -#pragma pack(1) - -enum auth_els_flags{ - FC_AUTH_ELS_MORE_FRAGS_FLAG = 0x80, /*! bit-7. More Fragments - * Follow - */ - FC_AUTH_ELS_CONCAT_FLAG = 0x40, /*! bit-6. Concatenation Flag */ - FC_AUTH_ELS_SEQ_NUM_FLAG = 0x01 /*! bit-0. Sequence Number */ -}; - -enum auth_msg_codes{ - FC_AUTH_MC_AUTH_RJT = 0x0A, /*! Auth Reject */ - FC_AUTH_MC_AUTH_NEG = 0x0B, /*! Auth Negotiate */ - FC_AUTH_MC_AUTH_DONE = 0x0C, /*! Auth Done */ - - FC_AUTH_MC_DHCHAP_CHAL = 0x10, /*! DHCHAP Challenge */ - FC_AUTH_MC_DHCHAP_REPLY = 0x11, /*! DHCHAP Reply */ - FC_AUTH_MC_DHCHAP_SUCC = 0x12, /*! DHCHAP Success */ - - FC_AUTH_MC_FCAP_REQ = 0x13, /*! FCAP Request */ - FC_AUTH_MC_FCAP_ACK = 0x14, /*! FCAP Acknowledge */ - FC_AUTH_MC_FCAP_CONF = 0x15, /*! FCAP Confirm */ - - FC_AUTH_MC_FCPAP_INIT = 0x16, /*! FCPAP Init */ - FC_AUTH_MC_FCPAP_ACC = 0x17, /*! FCPAP Accept */ - FC_AUTH_MC_FCPAP_COMP = 0x18, /*! FCPAP Complete */ - - FC_AUTH_MC_IKE_SA_INIT = 0x22, /*! IKE SA INIT */ - FC_AUTH_MC_IKE_SA_AUTH = 0x23, /*! IKE SA Auth */ - FC_AUTH_MC_IKE_CREATE_CHILD_SA = 0x24, /*! IKE Create Child SA */ - FC_AUTH_MC_IKE_INFO = 0x25, /*! IKE informational */ -}; - -enum auth_proto_version{ - FC_AUTH_PROTO_VER_1 = 1, /*! Protocol Version 1 */ -}; - -enum { - FC_AUTH_ELS_COMMAND_CODE = 0x90,/*! Authentication ELS Command code */ - FC_AUTH_PROTO_PARAM_LEN_SZ = 4, /*! Size of Proto Parameter Len Field */ - FC_AUTH_PROTO_PARAM_VAL_SZ = 4, /*! Size of Proto Parameter Val Field */ - FC_MAX_AUTH_SECRET_LEN = 256, - /*! Maximum secret string length */ - FC_AUTH_NUM_USABLE_PROTO_LEN_SZ = 4, - /*! Size of usable protocols field */ - FC_AUTH_RESP_VALUE_LEN_SZ = 4, - /*! Size of response value length */ - FC_MAX_CHAP_KEY_LEN = 256, /*! Maximum md5 digest length */ - FC_MAX_AUTH_RETRIES = 3, /*! Maximum number of retries */ - FC_MD5_DIGEST_LEN = 16, /*! MD5 digest length */ - FC_SHA1_DIGEST_LEN = 20, /*! SHA1 digest length */ - FC_MAX_DHG_SUPPORTED = 1, /*! Maximum DH Groups supported */ - FC_MAX_ALG_SUPPORTED = 1, /*! Maximum algorithms supported */ - FC_MAX_PROTO_SUPPORTED = 1, /*! Maximum protocols supported */ - FC_START_TXN_ID = 2, /*! Starting transaction ID */ -}; - -enum auth_proto_id{ - FC_AUTH_PROTO_DHCHAP = 0x00000001, - FC_AUTH_PROTO_FCAP = 0x00000002, - FC_AUTH_PROTO_FCPAP = 0x00000003, - FC_AUTH_PROTO_IKEv2 = 0x00000004, - FC_AUTH_PROTO_IKEv2_AUTH = 0x00000005, -}; - -struct auth_name_s{ - u16 name_tag; /*! Name Tag = 1 for Authentication */ - u16 name_len; /*! Name Length = 8 for Authentication - */ - wwn_t name; /*! Name. 
TODO - is this PWWN */ -}; - - -enum auth_hash_func{ - FC_AUTH_HASH_FUNC_MD5 = 0x00000005, - FC_AUTH_HASH_FUNC_SHA_1 = 0x00000006, -}; - -enum auth_dh_gid{ - FC_AUTH_DH_GID_0_DHG_NULL = 0x00000000, - FC_AUTH_DH_GID_1_DHG_1024 = 0x00000001, - FC_AUTH_DH_GID_2_DHG_1280 = 0x00000002, - FC_AUTH_DH_GID_3_DHG_1536 = 0x00000003, - FC_AUTH_DH_GID_4_DHG_2048 = 0x00000004, - FC_AUTH_DH_GID_6_DHG_3072 = 0x00000006, - FC_AUTH_DH_GID_7_DHG_4096 = 0x00000007, - FC_AUTH_DH_GID_8_DHG_6144 = 0x00000008, - FC_AUTH_DH_GID_9_DHG_8192 = 0x00000009, -}; - -struct auth_els_msg_s { - u8 auth_els_code; /* Authentication ELS Code (0x90) */ - u8 auth_els_flag; /* Authentication ELS Flags */ - u8 auth_msg_code; /* Authentication Message Code */ - u8 proto_version; /* Protocol Version */ - u32 msg_len; /* Message Length */ - u32 trans_id; /* Transaction Identifier (T_ID) */ - - /* Msg payload follows... */ -}; - - -enum auth_neg_param_tags { - FC_AUTH_NEG_DHCHAP_HASHLIST = 0x0001, - FC_AUTH_NEG_DHCHAP_DHG_ID_LIST = 0x0002, -}; - - -struct dhchap_param_format_s { - u16 tag; /*! Parameter Tag. See - * auth_neg_param_tags_t - */ - u16 word_cnt; - - /* followed by variable length parameter value... */ -}; - -struct auth_proto_params_s { - u32 proto_param_len; - u32 proto_id; - - /* - * Followed by variable length Protocol specific parameters. DH-CHAP - * uses dhchap_param_format_t - */ -}; - -struct auth_neg_msg_s { - struct auth_name_s auth_ini_name; - u32 usable_auth_protos; - struct auth_proto_params_s proto_params[1]; /*! (1..usable_auth_proto) - * protocol params - */ -}; - -struct auth_dh_val_s { - u32 dh_val_len; - u32 dh_val[1]; -}; - -struct auth_dhchap_chal_msg_s { - struct auth_els_msg_s hdr; - struct auth_name_s auth_responder_name; /* TODO VRK - is auth_name_t - * type OK? - */ - u32 hash_id; - u32 dh_grp_id; - u32 chal_val_len; - char chal_val[1]; - - /* ...followed by variable Challenge length/value and DH length/value */ -}; - - -enum auth_rjt_codes { - FC_AUTH_RJT_CODE_AUTH_FAILURE = 0x01, - FC_AUTH_RJT_CODE_LOGICAL_ERR = 0x02, -}; - -enum auth_rjt_code_exps { - FC_AUTH_CEXP_AUTH_MECH_NOT_USABLE = 0x01, - FC_AUTH_CEXP_DH_GROUP_NOT_USABLE = 0x02, - FC_AUTH_CEXP_HASH_FUNC_NOT_USABLE = 0x03, - FC_AUTH_CEXP_AUTH_XACT_STARTED = 0x04, - FC_AUTH_CEXP_AUTH_FAILED = 0x05, - FC_AUTH_CEXP_INCORRECT_PLD = 0x06, - FC_AUTH_CEXP_INCORRECT_PROTO_MSG = 0x07, - FC_AUTH_CEXP_RESTART_AUTH_PROTO = 0x08, - FC_AUTH_CEXP_AUTH_CONCAT_NOT_SUPP = 0x09, - FC_AUTH_CEXP_PROTO_VER_NOT_SUPP = 0x0A, -}; - -enum auth_status { - FC_AUTH_STATE_INPROGRESS = 0, /*! authentication in progress */ - FC_AUTH_STATE_FAILED = 1, /*! authentication failed */ - FC_AUTH_STATE_SUCCESS = 2 /*! authentication successful */ -}; - -struct auth_rjt_msg_s { - struct auth_els_msg_s hdr; - u8 reason_code; - u8 reason_code_exp; - u8 rsvd[2]; -}; - - -struct auth_dhchap_neg_msg_s { - struct auth_els_msg_s hdr; - struct auth_neg_msg_s nego; -}; - -struct auth_dhchap_reply_msg_s { - struct auth_els_msg_s hdr; - - /* - * followed by response value length & Value + DH Value Length & Value - */ -}; - -#pragma pack() - -#endif /* __FC_SP_H__ */ diff --git a/drivers/scsi/bfa/include/protocol/fcp.h b/drivers/scsi/bfa/include/protocol/fcp.h deleted file mode 100644 index 74ea63ce84b7..000000000000 --- a/drivers/scsi/bfa/include/protocol/fcp.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __FCPPROTO_H__ -#define __FCPPROTO_H__ - -#include -#include - -#pragma pack(1) - -enum { - FCP_RJT = 0x01000000, /* SRR reject */ - FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */ - FCP_SRR = 0x14000000, /* Sequence Retransmission Request */ -}; - -/* - * SRR FC-4 LS payload - */ -struct fc_srr_s{ - u32 ls_cmd; - u32 ox_id:16; /* ox-id */ - u32 rx_id:16; /* rx-id */ - u32 ro; /* relative offset */ - u32 r_ctl:8; /* R_CTL for I.U. */ - u32 res:24; -}; - - -/* - * FCP_CMND definitions - */ -#define FCP_CMND_CDB_LEN 16 -#define FCP_CMND_LUN_LEN 8 - -struct fcp_cmnd_s{ - lun_t lun; /* 64-bit LU number */ - u8 crn; /* command reference number */ -#ifdef __BIGENDIAN - u8 resvd:1, - priority:4, /* FCP-3: SAM-3 priority */ - taskattr:3; /* scsi task attribute */ -#else - u8 taskattr:3, /* scsi task attribute */ - priority:4, /* FCP-3: SAM-3 priority */ - resvd:1; -#endif - u8 tm_flags; /* task management flags */ -#ifdef __BIGENDIAN - u8 addl_cdb_len:6, /* additional CDB length words */ - iodir:2; /* read/write FCP_DATA IUs */ -#else - u8 iodir:2, /* read/write FCP_DATA IUs */ - addl_cdb_len:6; /* additional CDB length */ -#endif - struct scsi_cdb_s cdb; - - /* - * !!! additional cdb bytes follows here!!! - */ - u32 fcp_dl; /* bytes to be transferred */ -}; - -#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN) -#define fcp_cmnd_fcpdl(_cmnd) ((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len]) - -/* - * fcp_cmnd_t.iodir field values - */ -enum fcp_iodir{ - FCP_IODIR_NONE = 0, - FCP_IODIR_WRITE = 1, - FCP_IODIR_READ = 2, - FCP_IODIR_RW = 3, -}; - -/* - * Task attribute field - */ -enum { - FCP_TASK_ATTR_SIMPLE = 0, - FCP_TASK_ATTR_HOQ = 1, - FCP_TASK_ATTR_ORDERED = 2, - FCP_TASK_ATTR_ACA = 4, - FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */ -}; - -/* - * Task management flags field - only one bit shall be set - */ -enum fcp_tm_cmnd{ - FCP_TM_ABORT_TASK_SET = BIT(1), - FCP_TM_CLEAR_TASK_SET = BIT(2), - FCP_TM_LUN_RESET = BIT(4), - FCP_TM_TARGET_RESET = BIT(5), /* obsolete in FCP-3 */ - FCP_TM_CLEAR_ACA = BIT(6), -}; - -/* - * FCP_XFER_RDY IU defines - */ -struct fcp_xfer_rdy_s{ - u32 data_ro; - u32 burst_len; - u32 reserved; -}; - -/* - * FCP_RSP residue flags - */ -enum fcp_residue{ - FCP_NO_RESIDUE = 0, /* no residue */ - FCP_RESID_OVER = 1, /* more data left that was not sent */ - FCP_RESID_UNDER = 2, /* less data than requested */ -}; - -enum { - FCP_RSPINFO_GOOD = 0, - FCP_RSPINFO_DATALEN_MISMATCH = 1, - FCP_RSPINFO_CMND_INVALID = 2, - FCP_RSPINFO_ROLEN_MISMATCH = 3, - FCP_RSPINFO_TM_NOT_SUPP = 4, - FCP_RSPINFO_TM_FAILED = 5, -}; - -struct fcp_rspinfo_s{ - u32 res0:24; - u32 rsp_code:8; /* response code (as above) */ - u32 res1; -}; - -struct fcp_resp_s{ - u32 reserved[2]; /* 2 words reserved */ - u16 reserved2; -#ifdef __BIGENDIAN - u8 reserved3:3; - u8 fcp_conf_req:1; /* FCP_CONF is requested */ - u8 resid_flags:2; /* underflow/overflow */ - u8 sns_len_valid:1;/* sense len is valid */ - u8 rsp_len_valid:1;/* response len is valid */ -#else - u8 rsp_len_valid:1;/* response len is valid */ - u8 sns_len_valid:1;/* 
sense len is valid */ - u8 resid_flags:2; /* underflow/overflow */ - u8 fcp_conf_req:1; /* FCP_CONF is requested */ - u8 reserved3:3; -#endif - u8 scsi_status; /* one byte SCSI status */ - u32 residue; /* residual data bytes */ - u32 sns_len; /* length od sense info */ - u32 rsp_len; /* length of response info */ -}; - -#define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \ - (__fcprsp)->sns_len : 0) -#define fcp_rsplen(__fcprsp) ((__fcprsp)->rsp_len_valid ? \ - (__fcprsp)->rsp_len : 0) -#define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1)) -#define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \ - fcp_rsplen(__fcprsp)) - -struct fcp_cmnd_fr_s{ - struct fchs_s fchs; - struct fcp_cmnd_s fcp; -}; - -#pragma pack() - -#endif diff --git a/drivers/scsi/bfa/include/protocol/fdmi.h b/drivers/scsi/bfa/include/protocol/fdmi.h deleted file mode 100644 index 6c05c268c71b..000000000000 --- a/drivers/scsi/bfa/include/protocol/fdmi.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __FDMI_H__ -#define __FDMI_H__ - -#include -#include -#include - -#pragma pack(1) - -/* - * FDMI Command Codes - */ -#define FDMI_GRHL 0x0100 -#define FDMI_GHAT 0x0101 -#define FDMI_GRPL 0x0102 -#define FDMI_GPAT 0x0110 -#define FDMI_RHBA 0x0200 -#define FDMI_RHAT 0x0201 -#define FDMI_RPRT 0x0210 -#define FDMI_RPA 0x0211 -#define FDMI_DHBA 0x0300 -#define FDMI_DPRT 0x0310 - -/* - * FDMI reason codes - */ -#define FDMI_NO_ADDITIONAL_EXP 0x00 -#define FDMI_HBA_ALREADY_REG 0x10 -#define FDMI_HBA_ATTRIB_NOT_REG 0x11 -#define FDMI_HBA_ATTRIB_MULTIPLE 0x12 -#define FDMI_HBA_ATTRIB_LENGTH_INVALID 0x13 -#define FDMI_HBA_ATTRIB_NOT_PRESENT 0x14 -#define FDMI_PORT_ORIG_NOT_IN_LIST 0x15 -#define FDMI_PORT_HBA_NOT_IN_LIST 0x16 -#define FDMI_PORT_ATTRIB_NOT_REG 0x20 -#define FDMI_PORT_NOT_REG 0x21 -#define FDMI_PORT_ATTRIB_MULTIPLE 0x22 -#define FDMI_PORT_ATTRIB_LENGTH_INVALID 0x23 -#define FDMI_PORT_ALREADY_REGISTEREED 0x24 - -/* - * FDMI Transmission Speed Mask values - */ -#define FDMI_TRANS_SPEED_1G 0x00000001 -#define FDMI_TRANS_SPEED_2G 0x00000002 -#define FDMI_TRANS_SPEED_10G 0x00000004 -#define FDMI_TRANS_SPEED_4G 0x00000008 -#define FDMI_TRANS_SPEED_8G 0x00000010 -#define FDMI_TRANS_SPEED_16G 0x00000020 -#define FDMI_TRANS_SPEED_UNKNOWN 0x00008000 - -/* - * FDMI HBA attribute types - */ -enum fdmi_hba_attribute_type { - FDMI_HBA_ATTRIB_NODENAME = 1, /* 0x0001 */ - FDMI_HBA_ATTRIB_MANUFACTURER, /* 0x0002 */ - FDMI_HBA_ATTRIB_SERIALNUM, /* 0x0003 */ - FDMI_HBA_ATTRIB_MODEL, /* 0x0004 */ - FDMI_HBA_ATTRIB_MODEL_DESC, /* 0x0005 */ - FDMI_HBA_ATTRIB_HW_VERSION, /* 0x0006 */ - FDMI_HBA_ATTRIB_DRIVER_VERSION, /* 0x0007 */ - FDMI_HBA_ATTRIB_ROM_VERSION, /* 0x0008 */ - FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */ - FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */ - FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */ - - FDMI_HBA_ATTRIB_MAX_TYPE -}; - -/* - * FDMI Port attribute types - */ -enum fdmi_port_attribute_type 
{ - FDMI_PORT_ATTRIB_FC4_TYPES = 1, /* 0x0001 */ - FDMI_PORT_ATTRIB_SUPP_SPEED, /* 0x0002 */ - FDMI_PORT_ATTRIB_PORT_SPEED, /* 0x0003 */ - FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */ - FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */ - FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */ - - FDMI_PORT_ATTR_MAX_TYPE -}; - -/* - * FDMI attribute - */ -struct fdmi_attr_s { - u16 type; - u16 len; - u8 value[1]; -}; - -/* - * HBA Attribute Block - */ -struct fdmi_hba_attr_s { - u32 attr_count; /* # of attributes */ - struct fdmi_attr_s hba_attr; /* n attributes */ -}; - -/* - * Registered Port List - */ -struct fdmi_port_list_s { - u32 num_ports; /* number Of Port Entries */ - wwn_t port_entry; /* one or more */ -}; - -/* - * Port Attribute Block - */ -struct fdmi_port_attr_s { - u32 attr_count; /* # of attributes */ - struct fdmi_attr_s port_attr; /* n attributes */ -}; - -/* - * FDMI Register HBA Attributes - */ -struct fdmi_rhba_s { - wwn_t hba_id; /* HBA Identifier */ - struct fdmi_port_list_s port_list; /* Registered Port List */ - struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */ -}; - -/* - * FDMI Register Port - */ -struct fdmi_rprt_s { - wwn_t hba_id; /* HBA Identifier */ - wwn_t port_name; /* Port wwn */ - struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ -}; - -/* - * FDMI Register Port Attributes - */ -struct fdmi_rpa_s { - wwn_t port_name; /* port wwn */ - struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ -}; - -#pragma pack() - -#endif diff --git a/drivers/scsi/bfa/include/protocol/scsi.h b/drivers/scsi/bfa/include/protocol/scsi.h deleted file mode 100644 index b220e6b4f6e1..000000000000 --- a/drivers/scsi/bfa/include/protocol/scsi.h +++ /dev/null @@ -1,1648 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __SCSI_H__ -#define __SCSI_H__ - -#include - -#pragma pack(1) - -/* - * generic SCSI cdb definition - */ -#define SCSI_MAX_CDBLEN 16 -struct scsi_cdb_s{ - u8 scsi_cdb[SCSI_MAX_CDBLEN]; -}; - -/* - * scsi lun serial number definition - */ -#define SCSI_LUN_SN_LEN 32 -struct scsi_lun_sn_s{ - u8 lun_sn[SCSI_LUN_SN_LEN]; -}; - -/* - * SCSI Direct Access Commands - */ -enum { - SCSI_OP_TEST_UNIT_READY = 0x00, - SCSI_OP_REQUEST_SENSE = 0x03, - SCSI_OP_FORMAT_UNIT = 0x04, - SCSI_OP_READ6 = 0x08, - SCSI_OP_WRITE6 = 0x0A, - SCSI_OP_WRITE_FILEMARKS = 0x10, - SCSI_OP_INQUIRY = 0x12, - SCSI_OP_MODE_SELECT6 = 0x15, - SCSI_OP_RESERVE6 = 0x16, - SCSI_OP_RELEASE6 = 0x17, - SCSI_OP_MODE_SENSE6 = 0x1A, - SCSI_OP_START_STOP_UNIT = 0x1B, - SCSI_OP_SEND_DIAGNOSTIC = 0x1D, - SCSI_OP_READ_CAPACITY = 0x25, - SCSI_OP_READ10 = 0x28, - SCSI_OP_WRITE10 = 0x2A, - SCSI_OP_VERIFY10 = 0x2F, - SCSI_OP_READ_DEFECT_DATA = 0x37, - SCSI_OP_LOG_SELECT = 0x4C, - SCSI_OP_LOG_SENSE = 0x4D, - SCSI_OP_MODE_SELECT10 = 0x55, - SCSI_OP_RESERVE10 = 0x56, - SCSI_OP_RELEASE10 = 0x57, - SCSI_OP_MODE_SENSE10 = 0x5A, - SCSI_OP_PER_RESERVE_IN = 0x5E, - SCSI_OP_PER_RESERVE_OUR = 0x5E, - SCSI_OP_READ16 = 0x88, - SCSI_OP_WRITE16 = 0x8A, - SCSI_OP_VERIFY16 = 0x8F, - SCSI_OP_READ_CAPACITY16 = 0x9E, - SCSI_OP_REPORT_LUNS = 0xA0, - SCSI_OP_READ12 = 0xA8, - SCSI_OP_WRITE12 = 0xAA, - SCSI_OP_UNDEF = 0xFF, -}; - -/* - * SCSI START_STOP_UNIT command - */ -struct scsi_start_stop_unit_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 reserved1:4; - u8 immed:1; -#else - u8 immed:1; - u8 reserved1:4; - u8 lun:3; -#endif - u8 reserved2; - u8 reserved3; -#ifdef __BIGENDIAN - u8 power_conditions:4; - u8 reserved4:2; - u8 loEj:1; - u8 start:1; -#else - u8 start:1; - u8 loEj:1; - u8 reserved4:2; - u8 power_conditions:4; -#endif - u8 control; -}; - -/* - * SCSI SEND_DIAGNOSTIC command - */ -struct scsi_send_diagnostic_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 self_test_code:3; - u8 pf:1; - u8 reserved1:1; - u8 self_test:1; - u8 dev_offl:1; - u8 unit_offl:1; -#else - u8 unit_offl:1; - u8 dev_offl:1; - u8 self_test:1; - u8 reserved1:1; - u8 pf:1; - u8 self_test_code:3; -#endif - u8 reserved2; - - u8 param_list_length[2]; /* MSB first */ - u8 control; - -}; - -/* - * SCSI READ10/WRITE10 commands - */ -struct scsi_rw10_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 dpo:1; /* Disable Page Out */ - u8 fua:1; /* Force Unit Access */ - u8 reserved1:2; - u8 rel_adr:1; /* relative address */ -#else - u8 rel_adr:1; - u8 reserved1:2; - u8 fua:1; - u8 dpo:1; - u8 lun:3; -#endif - u8 lba0; /* logical block address - MSB */ - u8 lba1; - u8 lba2; - u8 lba3; /* LSB */ - u8 reserved3; - u8 xfer_length0; /* transfer length in blocks - MSB */ - u8 xfer_length1; /* LSB */ - u8 control; -}; - -#define SCSI_CDB10_GET_LBA(cdb) \ - (((cdb)->lba0 << 24) | ((cdb)->lba1 << 16) | \ - ((cdb)->lba2 << 8) | (cdb)->lba3) - -#define SCSI_CDB10_SET_LBA(cdb, lba) { \ - (cdb)->lba0 = lba >> 24; \ - (cdb)->lba1 = (lba >> 16) & 0xFF; \ - (cdb)->lba2 = (lba >> 8) & 0xFF; \ - (cdb)->lba3 = lba & 0xFF; \ -} - -#define SCSI_CDB10_GET_TL(cdb) \ - ((cdb)->xfer_length0 << 8 | (cdb)->xfer_length1) -#define SCSI_CDB10_SET_TL(cdb, tl) { \ - (cdb)->xfer_length0 = tl >> 8; \ - (cdb)->xfer_length1 = tl & 0xFF; \ -} - -/* - * SCSI READ6/WRITE6 commands - */ -struct scsi_rw6_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 lba0:5; /* MSb */ -#else - u8 lba0:5; /* MSb */ - u8 lun:3; -#endif - u8 lba1; - u8 lba2; /* LSB */ - u8 xfer_length; - u8 control; -}; - -#define 
SCSI_TAPE_CDB6_GET_TL(cdb) \ - (((cdb)->tl0 << 16) | ((cdb)->tl1 << 8) | (cdb)->tl2) - -#define SCSI_TAPE_CDB6_SET_TL(cdb, tl) { \ - (cdb)->tl0 = tl >> 16; \ - (cdb)->tl1 = (tl >> 8) & 0xFF; \ - (cdb)->tl2 = tl & 0xFF; \ -} - -/* - * SCSI sequential (TAPE) wrtie command - */ -struct scsi_tape_wr_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 rsvd:7; - u8 fixed:1; /* MSb */ -#else - u8 fixed:1; /* MSb */ - u8 rsvd:7; -#endif - u8 tl0; /* Msb */ - u8 tl1; - u8 tl2; /* Lsb */ - - u8 control; -}; - -#define SCSI_CDB6_GET_LBA(cdb) \ - (((cdb)->lba0 << 16) | ((cdb)->lba1 << 8) | (cdb)->lba2) - -#define SCSI_CDB6_SET_LBA(cdb, lba) { \ - (cdb)->lba0 = lba >> 16; \ - (cdb)->lba1 = (lba >> 8) & 0xFF; \ - (cdb)->lba2 = lba & 0xFF; \ -} - -#define SCSI_CDB6_GET_TL(cdb) ((cdb)->xfer_length) -#define SCSI_CDB6_SET_TL(cdb, tl) { \ - (cdb)->xfer_length = tl; \ -} - -/* - * SCSI sense data format - */ -struct scsi_sense_s{ -#ifdef __BIGENDIAN - u8 valid:1; - u8 rsp_code:7; -#else - u8 rsp_code:7; - u8 valid:1; -#endif - u8 seg_num; -#ifdef __BIGENDIAN - u8 file_mark:1; - u8 eom:1; /* end of media */ - u8 ili:1; /* incorrect length indicator */ - u8 reserved:1; - u8 sense_key:4; -#else - u8 sense_key:4; - u8 reserved:1; - u8 ili:1; /* incorrect length indicator */ - u8 eom:1; /* end of media */ - u8 file_mark:1; -#endif - u8 information[4]; /* device-type or command specific info - */ - u8 add_sense_length; - /* additional sense length */ - u8 command_info[4];/* command specific information - */ - u8 asc; /* additional sense code */ - u8 ascq; /* additional sense code qualifier */ - u8 fru_code; /* field replaceable unit code */ -#ifdef __BIGENDIAN - u8 sksv:1; /* sense key specific valid */ - u8 c_d:1; /* command/data bit */ - u8 res1:2; - u8 bpv:1; /* bit pointer valid */ - u8 bpointer:3; /* bit pointer */ -#else - u8 bpointer:3; /* bit pointer */ - u8 bpv:1; /* bit pointer valid */ - u8 res1:2; - u8 c_d:1; /* command/data bit */ - u8 sksv:1; /* sense key specific valid */ -#endif - u8 fpointer[2]; /* field pointer */ -}; - -#define SCSI_SENSE_CUR_ERR 0x70 -#define SCSI_SENSE_DEF_ERR 0x71 - -/* - * SCSI sense key values - */ -#define SCSI_SK_NO_SENSE 0x0 -#define SCSI_SK_REC_ERR 0x1 /* recovered error */ -#define SCSI_SK_NOT_READY 0x2 -#define SCSI_SK_MED_ERR 0x3 /* medium error */ -#define SCSI_SK_HW_ERR 0x4 /* hardware error */ -#define SCSI_SK_ILLEGAL_REQ 0x5 -#define SCSI_SK_UNIT_ATT 0x6 /* unit attention */ -#define SCSI_SK_DATA_PROTECT 0x7 -#define SCSI_SK_BLANK_CHECK 0x8 -#define SCSI_SK_VENDOR_SPEC 0x9 -#define SCSI_SK_COPY_ABORTED 0xA -#define SCSI_SK_ABORTED_CMND 0xB -#define SCSI_SK_VOL_OVERFLOW 0xD -#define SCSI_SK_MISCOMPARE 0xE - -/* - * SCSI additional sense codes - */ -#define SCSI_ASC_NO_ADD_SENSE 0x00 -#define SCSI_ASC_LUN_NOT_READY 0x04 -#define SCSI_ASC_LUN_COMMUNICATION 0x08 -#define SCSI_ASC_WRITE_ERROR 0x0C -#define SCSI_ASC_INVALID_CMND_CODE 0x20 -#define SCSI_ASC_BAD_LBA 0x21 -#define SCSI_ASC_INVALID_FIELD_IN_CDB 0x24 -#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25 -#define SCSI_ASC_LUN_WRITE_PROTECT 0x27 -#define SCSI_ASC_POWERON_BDR 0x29 /* power on reset, bus reset, - * bus device reset - */ -#define SCSI_ASC_PARAMS_CHANGED 0x2A -#define SCSI_ASC_CMND_CLEARED_BY_A_I 0x2F -#define SCSI_ASC_SAVING_PARAM_NOTSUPP 0x39 -#define SCSI_ASC_TOCC 0x3F /* target operating condtions - * changed - */ -#define SCSI_ASC_PARITY_ERROR 0x47 -#define SCSI_ASC_CMND_PHASE_ERROR 0x4A -#define SCSI_ASC_DATA_PHASE_ERROR 0x4B -#define SCSI_ASC_VENDOR_SPEC 0x7F - -/* - * SCSI additional sense code qualifiers - */ 
-#define SCSI_ASCQ_CAUSE_NOT_REPORT 0x00 -#define SCSI_ASCQ_BECOMING_READY 0x01 -#define SCSI_ASCQ_INIT_CMD_REQ 0x02 -#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04 -#define SCSI_ASCQ_OPERATION_IN_PROGRESS 0x07 -#define SCSI_ASCQ_SELF_TEST_IN_PROGRESS 0x09 -#define SCSI_ASCQ_WR_UNEXP_UNSOL_DATA 0x0C -#define SCSI_ASCQ_WR_NOTENG_UNSOL_DATA 0x0D - -#define SCSI_ASCQ_LBA_OUT_OF_RANGE 0x00 -#define SCSI_ASCQ_INVALID_ELEMENT_ADDR 0x01 - -#define SCSI_ASCQ_LUN_WRITE_PROTECTED 0x00 -#define SCSI_ASCQ_LUN_HW_WRITE_PROTECTED 0x01 -#define SCSI_ASCQ_LUN_SW_WRITE_PROTECTED 0x02 - -#define SCSI_ASCQ_POR 0x01 /* power on reset */ -#define SCSI_ASCQ_SBR 0x02 /* scsi bus reset */ -#define SCSI_ASCQ_BDR 0x03 /* bus device reset */ -#define SCSI_ASCQ_DIR 0x04 /* device internal reset */ - -#define SCSI_ASCQ_MODE_PARAMS_CHANGED 0x01 -#define SCSI_ASCQ_LOG_PARAMS_CHANGED 0x02 -#define SCSI_ASCQ_RESERVATIONS_PREEMPTED 0x03 -#define SCSI_ASCQ_RESERVATIONS_RELEASED 0x04 -#define SCSI_ASCQ_REGISTRATIONS_PREEMPTED 0x05 - -#define SCSI_ASCQ_MICROCODE_CHANGED 0x01 -#define SCSI_ASCQ_CHANGED_OPER_COND 0x02 -#define SCSI_ASCQ_INQ_CHANGED 0x03 /* inquiry data changed */ -#define SCSI_ASCQ_DI_CHANGED 0x05 /* device id changed */ -#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */ - -#define SCSI_ASCQ_DP_CRC_ERR 0x01 /* data phase crc error */ -#define SCSI_ASCQ_DP_SCSI_PARITY_ERR 0x02 /* data phase scsi parity error - */ -#define SCSI_ASCQ_IU_CRC_ERR 0x03 /* information unit crc error */ -#define SCSI_ASCQ_PROTO_SERV_CRC_ERR 0x05 - -#define SCSI_ASCQ_LUN_TIME_OUT 0x01 - -/* ------------------------------------------------------------ - * SCSI INQUIRY - * ------------------------------------------------------------*/ - -struct scsi_inquiry_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 reserved1:3; - u8 cmd_dt:1; - u8 evpd:1; -#else - u8 evpd:1; - u8 cmd_dt:1; - u8 reserved1:3; - u8 lun:3; -#endif - u8 page_code; - u8 reserved2; - u8 alloc_length; - u8 control; -}; - -struct scsi_inquiry_vendor_s{ - u8 vendor_id[8]; -}; - -struct scsi_inquiry_prodid_s{ - u8 product_id[16]; -}; - -struct scsi_inquiry_prodrev_s{ - u8 product_rev[4]; -}; - -struct scsi_inquiry_data_s{ -#ifdef __BIGENDIAN - u8 peripheral_qual:3; /* peripheral qualifier */ - u8 device_type:5; /* peripheral device type */ - - u8 rmb:1; /* removable medium bit */ - u8 device_type_mod:7; /* device type modifier */ - - u8 version; - - u8 aenc:1; /* async event notification capability - */ - u8 trm_iop:1; /* terminate I/O process */ - u8 norm_aca:1; /* normal ACA supported */ - u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */ - u8 rsp_data_format:4; - - u8 additional_len; - u8 sccs:1; - u8 reserved1:7; - - u8 reserved2:1; - u8 enc_serv:1; /* enclosure service component */ - u8 reserved3:1; - u8 multi_port:1; /* multi-port device */ - u8 m_chngr:1; /* device in medium transport element */ - u8 ack_req_q:1; /* SIP specific bit */ - u8 addr32:1; /* SIP specific bit */ - u8 addr16:1; /* SIP specific bit */ - - u8 rel_adr:1; /* relative address */ - u8 w_bus32:1; - u8 w_bus16:1; - u8 synchronous:1; - u8 linked_commands:1; - u8 trans_dis:1; - u8 cmd_queue:1; /* command queueing supported */ - u8 soft_reset:1; /* soft reset alternative (VS) */ -#else - u8 device_type:5; /* peripheral device type */ - u8 peripheral_qual:3; - /* peripheral qualifier */ - - u8 device_type_mod:7; - /* device type modifier */ - u8 rmb:1; /* removable medium bit */ - - u8 version; - - u8 rsp_data_format:4; - u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */ - u8 
norm_aca:1; /* normal ACA supported */ - u8 terminate_iop:1;/* terminate I/O process */ - u8 aenc:1; /* async event notification capability - */ - - u8 additional_len; - u8 reserved1:7; - u8 sccs:1; - - u8 addr16:1; /* SIP specific bit */ - u8 addr32:1; /* SIP specific bit */ - u8 ack_req_q:1; /* SIP specific bit */ - u8 m_chngr:1; /* device in medium transport element */ - u8 multi_port:1; /* multi-port device */ - u8 reserved3:1; /* TBD - Vendor Specific */ - u8 enc_serv:1; /* enclosure service component */ - u8 reserved2:1; - - u8 soft_seset:1; /* soft reset alternative (VS) */ - u8 cmd_queue:1; /* command queueing supported */ - u8 trans_dis:1; - u8 linked_commands:1; - u8 synchronous:1; - u8 w_bus16:1; - u8 w_bus32:1; - u8 rel_adr:1; /* relative address */ -#endif - struct scsi_inquiry_vendor_s vendor_id; - struct scsi_inquiry_prodid_s product_id; - struct scsi_inquiry_prodrev_s product_rev; - u8 vendor_specific[20]; - u8 reserved4[40]; -}; - -/* - * inquiry.peripheral_qual field values - */ -#define SCSI_DEVQUAL_DEFAULT 0 -#define SCSI_DEVQUAL_NOT_CONNECTED 1 -#define SCSI_DEVQUAL_NOT_SUPPORTED 3 - -/* - * inquiry.device_type field values - */ -#define SCSI_DEVICE_DIRECT_ACCESS 0x00 -#define SCSI_DEVICE_SEQ_ACCESS 0x01 -#define SCSI_DEVICE_ARRAY_CONTROLLER 0x0C -#define SCSI_DEVICE_UNKNOWN 0x1F - -/* - * inquiry.version - */ -#define SCSI_VERSION_ANSI_X3131 2 /* ANSI X3.131 SCSI-2 */ -#define SCSI_VERSION_SPC 3 /* SPC (SCSI-3), ANSI X3.301:1997 */ -#define SCSI_VERSION_SPC_2 4 /* SPC-2 */ - -/* - * response data format - */ -#define SCSI_RSP_DATA_FORMAT 2 /* SCSI-2 & SPC */ - -/* - * SCSI inquiry page codes - */ -#define SCSI_INQ_PAGE_VPD_PAGES 0x00 /* supported vpd pages */ -#define SCSI_INQ_PAGE_USN_PAGE 0x80 /* unit serial number page */ -#define SCSI_INQ_PAGE_DEV_IDENT 0x83 /* device indentification page - */ -#define SCSI_INQ_PAGES_MAX 3 - -/* - * supported vital product data pages - */ -struct scsi_inq_page_vpd_pages_s{ -#ifdef __BIGENDIAN - u8 peripheral_qual:3; - u8 device_type:5; -#else - u8 device_type:5; - u8 peripheral_qual:3; -#endif - u8 page_code; - u8 reserved; - u8 page_length; - u8 pages[SCSI_INQ_PAGES_MAX]; -}; - -/* - * Unit serial number page - */ -#define SCSI_INQ_USN_LEN 32 - -struct scsi_inq_usn_s{ - char usn[SCSI_INQ_USN_LEN]; -}; - -struct scsi_inq_page_usn_s{ -#ifdef __BIGENDIAN - u8 peripheral_qual:3; - u8 device_type:5; -#else - u8 device_type:5; - u8 peripheral_qual:3; -#endif - u8 page_code; - u8 reserved1; - u8 page_length; - struct scsi_inq_usn_s usn; -}; - -enum { - SCSI_INQ_DIP_CODE_BINARY = 1, /* identifier has binary value */ - SCSI_INQ_DIP_CODE_ASCII = 2, /* identifier has ascii value */ -}; - -enum { - SCSI_INQ_DIP_ASSOC_LUN = 0, /* id is associated with device */ - SCSI_INQ_DIP_ASSOC_PORT = 1, /* id is associated with port that - * received the request - */ -}; - -enum { - SCSI_INQ_ID_TYPE_VENDOR = 1, - SCSI_INQ_ID_TYPE_IEEE = 2, - SCSI_INQ_ID_TYPE_FC_FS = 3, - SCSI_INQ_ID_TYPE_OTHER = 4, -}; - -struct scsi_inq_dip_desc_s{ -#ifdef __BIGENDIAN - u8 res0:4; - u8 code_set:4; - u8 res1:2; - u8 association:2; - u8 id_type:4; -#else - u8 code_set:4; - u8 res0:4; - u8 id_type:4; - u8 association:2; - u8 res1:2; -#endif - u8 res2; - u8 id_len; - struct scsi_lun_sn_s id; -}; - -/* - * Device indentification page - */ -struct scsi_inq_page_dev_ident_s{ -#ifdef __BIGENDIAN - u8 peripheral_qual:3; - u8 device_type:5; -#else - u8 device_type:5; - u8 peripheral_qual:3; -#endif - u8 page_code; - u8 reserved1; - u8 page_length; - struct scsi_inq_dip_desc_s 
desc; -}; - -/* ------------------------------------------------------------ - * READ CAPACITY - * ------------------------------------------------------------ - */ - -struct scsi_read_capacity_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 reserved1:4; - u8 rel_adr:1; -#else - u8 rel_adr:1; - u8 reserved1:4; - u8 lun:3; -#endif - u8 lba0; /* MSB */ - u8 lba1; - u8 lba2; - u8 lba3; /* LSB */ - u8 reserved2; - u8 reserved3; -#ifdef __BIGENDIAN - u8 reserved4:7; - u8 pmi:1; /* partial medium indicator */ -#else - u8 pmi:1; /* partial medium indicator */ - u8 reserved4:7; -#endif - u8 control; -}; - -struct scsi_read_capacity_data_s{ - u32 max_lba; /* maximum LBA available */ - u32 block_length; /* in bytes */ -}; - -struct scsi_read_capacity16_data_s{ - u64 lba; /* maximum LBA available */ - u32 block_length; /* in bytes */ -#ifdef __BIGENDIAN - u8 reserved1:4, - p_type:3, - prot_en:1; - u8 reserved2:4, - lb_pbe:4; /* logical blocks per physical block - * exponent */ - u16 reserved3:2, - lba_align:14; /* lowest aligned logical block - * address */ -#else - u16 lba_align:14, /* lowest aligned logical block - * address */ - reserved3:2; - u8 lb_pbe:4, /* logical blocks per physical block - * exponent */ - reserved2:4; - u8 prot_en:1, - p_type:3, - reserved1:4; -#endif - u64 reserved4; - u64 reserved5; -}; - -/* ------------------------------------------------------------ - * REPORT LUNS command - * ------------------------------------------------------------ - */ - -struct scsi_report_luns_s{ - u8 opcode; /* A0h - REPORT LUNS opCode */ - u8 reserved1[5]; - u8 alloc_length[4];/* allocation length MSB first */ - u8 reserved2; - u8 control; -}; - -#define SCSI_REPORT_LUN_ALLOC_LENGTH(rl) \ - ((rl->alloc_length[0] << 24) | (rl->alloc_length[1] << 16) | \ - (rl->alloc_length[2] << 8) | (rl->alloc_length[3])) - -#define SCSI_REPORT_LUNS_SET_ALLOCLEN(rl, alloc_len) { \ - (rl)->alloc_length[0] = (alloc_len) >> 24; \ - (rl)->alloc_length[1] = ((alloc_len) >> 16) & 0xFF; \ - (rl)->alloc_length[2] = ((alloc_len) >> 8) & 0xFF; \ - (rl)->alloc_length[3] = (alloc_len) & 0xFF; \ -} - -struct scsi_report_luns_data_s{ - u32 lun_list_length; /* length of LUN list length */ - u32 reserved; - lun_t lun[1]; /* first LUN in lun list */ -}; - -/* ------------------------------------------------------------- - * SCSI mode parameters - * ----------------------------------------------------------- - */ -enum { - SCSI_DA_MEDIUM_DEF = 0, /* direct access default medium type */ - SCSI_DA_MEDIUM_SS = 1, /* direct access single sided */ - SCSI_DA_MEDIUM_DS = 2, /* direct access double sided */ -}; - -/* - * SCSI Mode Select(6) cdb - */ -struct scsi_mode_select6_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:3; - u8 pf:1; /* page format */ - u8 reserved2:3; - u8 sp:1; /* save pages if set to 1 */ -#else - u8 sp:1; /* save pages if set to 1 */ - u8 reserved2:3; - u8 pf:1; /* page format */ - u8 reserved1:3; -#endif - u8 reserved3[2]; - u8 alloc_len; - u8 control; -}; - -/* - * SCSI Mode Select(10) cdb - */ -struct scsi_mode_select10_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:3; - u8 pf:1; /* page format */ - u8 reserved2:3; - u8 sp:1; /* save pages if set to 1 */ -#else - u8 sp:1; /* save pages if set to 1 */ - u8 reserved2:3; - u8 pf:1; /* page format */ - u8 reserved1:3; -#endif - u8 reserved3[5]; - u8 alloc_len_msb; - u8 alloc_len_lsb; - u8 control; -}; - -/* - * SCSI Mode Sense(6) cdb - */ -struct scsi_mode_sense6_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:4; - u8 dbd:1; /* disable block 
discriptors if set to 1 */ - u8 reserved2:3; - - u8 pc:2; /* page control */ - u8 page_code:6; -#else - u8 reserved2:3; - u8 dbd:1; /* disable block descriptors if set to 1 */ - u8 reserved1:4; - - u8 page_code:6; - u8 pc:2; /* page control */ -#endif - u8 reserved3; - u8 alloc_len; - u8 control; -}; - -/* - * SCSI Mode Sense(10) cdb - */ -struct scsi_mode_sense10_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:3; - u8 LLBAA:1; /* long LBA accepted if set to 1 */ - u8 dbd:1; /* disable block descriptors if set - * to 1 - */ - u8 reserved2:3; - - u8 pc:2; /* page control */ - u8 page_code:6; -#else - u8 reserved2:3; - u8 dbd:1; /* disable block descriptors if set to - * 1 - */ - u8 LLBAA:1; /* long LBA accepted if set to 1 */ - u8 reserved1:3; - - u8 page_code:6; - u8 pc:2; /* page control */ -#endif - u8 reserved3[4]; - u8 alloc_len_msb; - u8 alloc_len_lsb; - u8 control; -}; - -#define SCSI_CDB10_GET_AL(cdb) \ - ((cdb)->alloc_len_msb << 8 | (cdb)->alloc_len_lsb) - -#define SCSI_CDB10_SET_AL(cdb, al) { \ - (cdb)->alloc_len_msb = al >> 8; \ - (cdb)->alloc_len_lsb = al & 0xFF; \ -} - -#define SCSI_CDB6_GET_AL(cdb) ((cdb)->alloc_len) - -#define SCSI_CDB6_SET_AL(cdb, al) { \ - (cdb)->alloc_len = al; \ -} - -/* - * page control field values - */ -#define SCSI_PC_CURRENT_VALUES 0x0 -#define SCSI_PC_CHANGEABLE_VALUES 0x1 -#define SCSI_PC_DEFAULT_VALUES 0x2 -#define SCSI_PC_SAVED_VALUES 0x3 - -/* - * SCSI mode page codes - */ -#define SCSI_MP_VENDOR_SPEC 0x00 -#define SCSI_MP_DISC_RECN 0x02 /* disconnect-reconnect page */ -#define SCSI_MP_FORMAT_DEVICE 0x03 -#define SCSI_MP_RDG 0x04 /* rigid disk geometry page */ -#define SCSI_MP_FDP 0x05 /* flexible disk page */ -#define SCSI_MP_CACHING 0x08 /* caching page */ -#define SCSI_MP_CONTROL 0x0A /* control mode page */ -#define SCSI_MP_MED_TYPES_SUP 0x0B /* medium types supported page */ -#define SCSI_MP_INFO_EXCP_CNTL 0x1C /* informational exception control */ -#define SCSI_MP_ALL 0x3F /* return all pages - mode sense only */ - -/* - * mode parameter header - */ -struct scsi_mode_param_header6_s{ - u8 mode_datalen; - u8 medium_type; - - /* - * device specific parameters expanded for direct access devices - */ -#ifdef __BIGENDIAN - u32 wp:1; /* write protected */ - u32 reserved1:2; - u32 dpofua:1; /* disable page out + force unit access - */ - u32 reserved2:4; -#else - u32 reserved2:4; - u32 dpofua:1; /* disable page out + force unit access - */ - u32 reserved1:2; - u32 wp:1; /* write protected */ -#endif - - u8 block_desclen; -}; - -struct scsi_mode_param_header10_s{ - u32 mode_datalen:16; - u32 medium_type:8; - - /* - * device specific parameters expanded for direct access devices - */ -#ifdef __BIGENDIAN - u32 wp:1; /* write protected */ - u32 reserved1:2; - u32 dpofua:1; /* disable page out + force unit access - */ - u32 reserved2:4; -#else - u32 reserved2:4; - u32 dpofua:1; /* disable page out + force unit access - */ - u32 reserved1:2; - u32 wp:1; /* write protected */ -#endif - -#ifdef __BIGENDIAN - u32 reserved3:7; - u32 longlba:1; -#else - u32 longlba:1; - u32 reserved3:7; -#endif - u32 reserved4:8; - u32 block_desclen:16; -}; - -/* - * mode parameter block descriptor - */ -struct scsi_mode_param_desc_s{ - u32 nblks; - u32 density_code:8; - u32 block_length:24; -}; - -/* - * Disconnect-reconnect mode page format - */ -struct scsi_mp_disc_recn_s{ -#ifdef __BIGENDIAN - u8 ps:1; - u8 reserved1:1; - u8 page_code:6; -#else - u8 page_code:6; - u8 reserved1:1; - u8 ps:1; -#endif - u8 page_len; - u8 buf_full_ratio; - u8 buf_empty_ratio; - - u8 
bil_msb; /* bus inactivity limit -MSB */ - u8 bil_lsb; /* bus inactivity limit -LSB */ - - u8 dtl_msb; /* disconnect time limit - MSB */ - u8 dtl_lsb; /* disconnect time limit - LSB */ - - u8 ctl_msb; /* connect time limit - MSB */ - u8 ctl_lsb; /* connect time limit - LSB */ - - u8 max_burst_len_msb; - u8 max_burst_len_lsb; -#ifdef __BIGENDIAN - u8 emdp:1; /* enable modify data pointers */ - u8 fa:3; /* fair arbitration */ - u8 dimm:1; /* disconnect immediate */ - u8 dtdc:3; /* data transfer disconnect control */ -#else - u8 dtdc:3; /* data transfer disconnect control */ - u8 dimm:1; /* disconnect immediate */ - u8 fa:3; /* fair arbitration */ - u8 emdp:1; /* enable modify data pointers */ -#endif - - u8 reserved3; - - u8 first_burst_len_msb; - u8 first_burst_len_lsb; -}; - -/* - * SCSI format device mode page - */ -struct scsi_mp_format_device_s{ -#ifdef __BIGENDIAN - u32 ps:1; - u32 reserved1:1; - u32 page_code:6; -#else - u32 page_code:6; - u32 reserved1:1; - u32 ps:1; -#endif - u32 page_len:8; - u32 tracks_per_zone:16; - - u32 a_sec_per_zone:16; - u32 a_tracks_per_zone:16; - - u32 a_tracks_per_lun:16; /* alternate tracks/lun-MSB */ - u32 sec_per_track:16; /* sectors/track-MSB */ - - u32 bytes_per_sector:16; - u32 interleave:16; - - u32 tsf:16; /* track skew factor-MSB */ - u32 csf:16; /* cylinder skew factor-MSB */ - -#ifdef __BIGENDIAN - u32 ssec:1; /* soft sector formatting */ - u32 hsec:1; /* hard sector formatting */ - u32 rmb:1; /* removable media */ - u32 surf:1; /* surface */ - u32 reserved2:4; -#else - u32 reserved2:4; - u32 surf:1; /* surface */ - u32 rmb:1; /* removable media */ - u32 hsec:1; /* hard sector formatting */ - u32 ssec:1; /* soft sector formatting */ -#endif - u32 reserved3:24; -}; - -/* - * SCSI rigid disk device geometry page - */ -struct scsi_mp_rigid_device_geometry_s{ -#ifdef __BIGENDIAN - u32 ps:1; - u32 reserved1:1; - u32 page_code:6; -#else - u32 page_code:6; - u32 reserved1:1; - u32 ps:1; -#endif - u32 page_len:8; - u32 num_cylinders0:8; - u32 num_cylinders1:8; - - u32 num_cylinders2:8; - u32 num_heads:8; - u32 scwp0:8; - u32 scwp1:8; - - u32 scwp2:8; - u32 scrwc0:8; - u32 scrwc1:8; - u32 scrwc2:8; - - u32 dsr:16; - u32 lscyl0:8; - u32 lscyl1:8; - - u32 lscyl2:8; -#ifdef __BIGENDIAN - u32 reserved2:6; - u32 rpl:2; /* rotational position locking */ -#else - u32 rpl:2; /* rotational position locking */ - u32 reserved2:6; -#endif - u32 rot_off:8; - u32 reserved3:8; - - u32 med_rot_rate:16; - u32 reserved4:16; -}; - -/* - * SCSI caching mode page - */ -struct scsi_mp_caching_s{ -#ifdef __BIGENDIAN - u8 ps:1; - u8 res1:1; - u8 page_code:6; -#else - u8 page_code:6; - u8 res1:1; - u8 ps:1; -#endif - u8 page_len; -#ifdef __BIGENDIAN - u8 ic:1; /* initiator control */ - u8 abpf:1; /* abort pre-fetch */ - u8 cap:1; /* caching analysis permitted */ - u8 disc:1; /* discontinuity */ - u8 size:1; /* size enable */ - u8 wce:1; /* write cache enable */ - u8 mf:1; /* multiplication factor */ - u8 rcd:1; /* read cache disable */ - - u8 drrp:4; /* demand read retention priority */ - u8 wrp:4; /* write retention priority */ -#else - u8 rcd:1; /* read cache disable */ - u8 mf:1; /* multiplication factor */ - u8 wce:1; /* write cache enable */ - u8 size:1; /* size enable */ - u8 disc:1; /* discontinuity */ - u8 cap:1; /* caching analysis permitted */ - u8 abpf:1; /* abort pre-fetch */ - u8 ic:1; /* initiator control */ - - u8 wrp:4; /* write retention priority */ - u8 drrp:4; /* demand read retention priority */ -#endif - u8 dptl[2];/* disable pre-fetch transfer length */ - 
u8 min_prefetch[2]; - u8 max_prefetch[2]; - u8 max_prefetch_limit[2]; -#ifdef __BIGENDIAN - u8 fsw:1; /* force sequential write */ - u8 lbcss:1;/* logical block cache segment size */ - u8 dra:1; /* disable read ahead */ - u8 vs:2; /* vendor specific */ - u8 res2:3; -#else - u8 res2:3; - u8 vs:2; /* vendor specific */ - u8 dra:1; /* disable read ahead */ - u8 lbcss:1;/* logical block cache segment size */ - u8 fsw:1; /* force sequential write */ -#endif - u8 num_cache_segs; - - u8 cache_seg_size[2]; - u8 res3; - u8 non_cache_seg_size[3]; -}; - -/* - * SCSI control mode page - */ -struct scsi_mp_control_page_s{ -#ifdef __BIGENDIAN -u8 ps:1; -u8 reserved1:1; -u8 page_code:6; -#else -u8 page_code:6; -u8 reserved1:1; -u8 ps:1; -#endif - u8 page_len; -#ifdef __BIGENDIAN - u8 tst:3; /* task set type */ - u8 reserved3:3; - u8 gltsd:1; /* global logging target save disable */ - u8 rlec:1; /* report log exception condition */ - - u8 qalgo_mod:4; /* queue alogorithm modifier */ - u8 reserved4:1; - u8 qerr:2; /* queue error management */ - u8 dque:1; /* disable queuing */ - - u8 reserved5:1; - u8 rac:1; /* report a check */ - u8 reserved6:2; - u8 swp:1; /* software write protect */ - u8 raerp:1; /* ready AER permission */ - u8 uaaerp:1; /* unit attenstion AER permission */ - u8 eaerp:1; /* error AER permission */ - - u8 reserved7:5; - u8 autoload_mod:3; -#else - u8 rlec:1; /* report log exception condition */ - u8 gltsd:1; /* global logging target save disable */ - u8 reserved3:3; - u8 tst:3; /* task set type */ - - u8 dque:1; /* disable queuing */ - u8 qerr:2; /* queue error management */ - u8 reserved4:1; - u8 qalgo_mod:4; /* queue alogorithm modifier */ - - u8 eaerp:1; /* error AER permission */ - u8 uaaerp:1; /* unit attenstion AER permission */ - u8 raerp:1; /* ready AER permission */ - u8 swp:1; /* software write protect */ - u8 reserved6:2; - u8 rac:1; /* report a check */ - u8 reserved5:1; - - u8 autoload_mod:3; - u8 reserved7:5; -#endif - u8 rahp_msb; /* ready AER holdoff period - MSB */ - u8 rahp_lsb; /* ready AER holdoff period - LSB */ - - u8 busy_timeout_period_msb; - u8 busy_timeout_period_lsb; - - u8 ext_selftest_compl_time_msb; - u8 ext_selftest_compl_time_lsb; -}; - -/* - * SCSI medium types supported mode page - */ -struct scsi_mp_medium_types_sup_s{ -#ifdef __BIGENDIAN - u8 ps:1; - u8 reserved1:1; - u8 page_code:6; -#else - u8 page_code:6; - u8 reserved1:1; - u8 ps:1; -#endif - u8 page_len; - - u8 reserved3[2]; - u8 med_type1_sup; /* medium type one supported */ - u8 med_type2_sup; /* medium type two supported */ - u8 med_type3_sup; /* medium type three supported */ - u8 med_type4_sup; /* medium type four supported */ -}; - -/* - * SCSI informational exception control mode page - */ -struct scsi_mp_info_excpt_cntl_s{ -#ifdef __BIGENDIAN - u8 ps:1; - u8 reserved1:1; - u8 page_code:6; -#else - u8 page_code:6; - u8 reserved1:1; - u8 ps:1; -#endif - u8 page_len; -#ifdef __BIGENDIAN - u8 perf:1; /* performance */ - u8 reserved3:1; - u8 ebf:1; /* enable background fucntion */ - u8 ewasc:1; /* enable warning */ - u8 dexcpt:1; /* disable exception control */ - u8 test:1; /* enable test device failure - * notification - */ - u8 reserved4:1; - u8 log_error:1; - - u8 reserved5:4; - u8 mrie:4; /* method of reporting info - * exceptions - */ -#else - u8 log_error:1; - u8 reserved4:1; - u8 test:1; /* enable test device failure - * notification - */ - u8 dexcpt:1; /* disable exception control */ - u8 ewasc:1; /* enable warning */ - u8 ebf:1; /* enable background fucntion */ - u8 reserved3:1; - u8 
perf:1; /* performance */ - - u8 mrie:4; /* method of reporting info - * exceptions - */ - u8 reserved5:4; -#endif - u8 interval_timer_msb; - u8 interval_timer_lsb; - - u8 report_count_msb; - u8 report_count_lsb; -}; - -/* - * Methods of reporting informational exceptions - */ -#define SCSI_MP_IEC_NO_REPORT 0x0 /* no reporting of exceptions */ -#define SCSI_MP_IEC_AER 0x1 /* async event reporting */ -#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attenstion */ -#define SCSI_MO_IEC_COND_REC_ERR 0x3 /* conditionally generate recovered - * error - */ -#define SCSI_MP_IEC_UNCOND_REC_ERR 0x4 /* unconditionally generate recovered - * error - */ -#define SCSI_MP_IEC_NO_SENSE 0x5 /* generate no sense */ -#define SCSI_MP_IEC_ON_REQUEST 0x6 /* only report exceptions on request */ - -/* - * SCSI flexible disk page - */ -struct scsi_mp_flexible_disk_s{ -#ifdef __BIGENDIAN - u8 ps:1; - u8 reserved1:1; - u8 page_code:6; -#else - u8 page_code:6; - u8 reserved1:1; - u8 ps:1; -#endif - u8 page_len; - - u8 transfer_rate_msb; - u8 transfer_rate_lsb; - - u8 num_heads; - u8 num_sectors; - - u8 bytes_per_sector_msb; - u8 bytes_per_sector_lsb; - - u8 num_cylinders_msb; - u8 num_cylinders_lsb; - - u8 sc_wpc_msb; /* starting cylinder-write - * precompensation msb - */ - u8 sc_wpc_lsb; /* starting cylinder-write - * precompensation lsb - */ - u8 sc_rwc_msb; /* starting cylinder-reduced write - * current msb - */ - u8 sc_rwc_lsb; /* starting cylinder-reduced write - * current lsb - */ - - u8 dev_step_rate_msb; - u8 dev_step_rate_lsb; - - u8 dev_step_pulse_width; - - u8 head_sd_msb; /* head settle delay msb */ - u8 head_sd_lsb; /* head settle delay lsb */ - - u8 motor_on_delay; - u8 motor_off_delay; -#ifdef __BIGENDIAN - u8 trdy:1; /* true ready bit */ - u8 ssn:1; /* start sector number bit */ - u8 mo:1; /* motor on bit */ - u8 reserved3:5; - - u8 reserved4:4; - u8 spc:4; /* step pulse per cylinder */ -#else - u8 reserved3:5; - u8 mo:1; /* motor on bit */ - u8 ssn:1; /* start sector number bit */ - u8 trdy:1; /* true ready bit */ - - u8 spc:4; /* step pulse per cylinder */ - u8 reserved4:4; -#endif - u8 write_comp; - u8 head_load_delay; - u8 head_unload_delay; -#ifdef __BIGENDIAN - u8 pin34:4; /* pin34 usage */ - u8 pin2:4; /* pin2 usage */ - - u8 pin4:4; /* pin4 usage */ - u8 pin1:4; /* pin1 usage */ -#else - u8 pin2:4; /* pin2 usage */ - u8 pin34:4; /* pin34 usage */ - - u8 pin1:4; /* pin1 usage */ - u8 pin4:4; /* pin4 usage */ -#endif - u8 med_rot_rate_msb; - u8 med_rot_rate_lsb; - - u8 reserved5[2]; -}; - -struct scsi_mode_page_format_data6_s{ - struct scsi_mode_param_header6_s mph; /* mode page header */ - struct scsi_mode_param_desc_s desc; /* block descriptor */ - struct scsi_mp_format_device_s format; /* format device data */ -}; - -struct scsi_mode_page_format_data10_s{ - struct scsi_mode_param_header10_s mph; /* mode page header */ - struct scsi_mode_param_desc_s desc; /* block descriptor */ - struct scsi_mp_format_device_s format; /* format device data */ -}; - -struct scsi_mode_page_rdg_data6_s{ - struct scsi_mode_param_header6_s mph; /* mode page header */ - struct scsi_mode_param_desc_s desc; /* block descriptor */ - struct scsi_mp_rigid_device_geometry_s rdg; - /* rigid geometry data */ -}; - -struct scsi_mode_page_rdg_data10_s{ - struct scsi_mode_param_header10_s mph; /* mode page header */ - struct scsi_mode_param_desc_s desc; /* block descriptor */ - struct scsi_mp_rigid_device_geometry_s rdg; - /* rigid geometry data */ -}; - -struct scsi_mode_page_cache6_s{ - struct scsi_mode_param_header6_s 
mph; /* mode page header */ - struct scsi_mode_param_desc_s desc; /* block descriptor */ - struct scsi_mp_caching_s cache; /* cache page data */ -}; - -struct scsi_mode_page_cache10_s{ - struct scsi_mode_param_header10_s mph; /* mode page header */ - struct scsi_mode_param_desc_s desc; /* block descriptor */ - struct scsi_mp_caching_s cache; /* cache page data */ -}; - -/* -------------------------------------------------------------- - * Format Unit command - * ------------------------------------------------------------ - */ - -/* - * Format Unit CDB - */ -struct scsi_format_unit_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 res1:3; - u8 fmtdata:1; /* if set, data out phase has format - * data - */ - u8 cmplst:1; /* if set, defect list is complete */ - u8 def_list:3; /* format of defect descriptor is - * fmtdata =1 - */ -#else - u8 def_list:3; /* format of defect descriptor is - * fmtdata = 1 - */ - u8 cmplst:1; /* if set, defect list is complete */ - u8 fmtdata:1; /* if set, data out phase has format - * data - */ - u8 res1:3; -#endif - u8 interleave_msb; - u8 interleave_lsb; - u8 vendor_spec; - u8 control; -}; - -/* - * h - */ -struct scsi_reserve6_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved:3; - u8 obsolete:4; - u8 extent:1; -#else - u8 extent:1; - u8 obsolete:4; - u8 reserved:3; -#endif - u8 reservation_id; - u16 param_list_len; - u8 control; -}; - -/* - * h - */ -struct scsi_release6_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:3; - u8 obsolete:4; - u8 extent:1; -#else - u8 extent:1; - u8 obsolete:4; - u8 reserved1:3; -#endif - u8 reservation_id; - u16 reserved2; - u8 control; -}; - -/* - * h - */ -struct scsi_reserve10_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:3; - u8 third_party:1; - u8 reserved2:2; - u8 long_id:1; - u8 extent:1; -#else - u8 extent:1; - u8 long_id:1; - u8 reserved2:2; - u8 third_party:1; - u8 reserved1:3; -#endif - u8 reservation_id; - u8 third_pty_dev_id; - u8 reserved3; - u8 reserved4; - u8 reserved5; - u16 param_list_len; - u8 control; -}; - -struct scsi_release10_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 reserved1:3; - u8 third_party:1; - u8 reserved2:2; - u8 long_id:1; - u8 extent:1; -#else - u8 extent:1; - u8 long_id:1; - u8 reserved2:2; - u8 third_party:1; - u8 reserved1:3; -#endif - u8 reservation_id; - u8 third_pty_dev_id; - u8 reserved3; - u8 reserved4; - u8 reserved5; - u16 param_list_len; - u8 control; -}; - -struct scsi_verify10_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 dpo:1; - u8 reserved:2; - u8 bytchk:1; - u8 reladdr:1; -#else - u8 reladdr:1; - u8 bytchk:1; - u8 reserved:2; - u8 dpo:1; - u8 lun:3; -#endif - u8 lba0; - u8 lba1; - u8 lba2; - u8 lba3; - u8 reserved1; - u8 verification_len0; - u8 verification_len1; - u8 control_byte; -}; - -struct scsi_request_sense_s{ - u8 opcode; -#ifdef __BIGENDIAN - u8 lun:3; - u8 reserved:5; -#else - u8 reserved:5; - u8 lun:3; -#endif - u8 reserved0; - u8 reserved1; - u8 alloc_len; - u8 control_byte; -}; - -/* ------------------------------------------------------------ - * SCSI status byte values - * ------------------------------------------------------------ - */ -#define SCSI_STATUS_GOOD 0x00 -#define SCSI_STATUS_CHECK_CONDITION 0x02 -#define SCSI_STATUS_CONDITION_MET 0x04 -#define SCSI_STATUS_BUSY 0x08 -#define SCSI_STATUS_INTERMEDIATE 0x10 -#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */ -#define SCSI_STATUS_RESERVATION_CONFLICT 0x18 -#define SCSI_STATUS_COMMAND_TERMINATED 0x22 -#define SCSI_STATUS_QUEUE_FULL 0x28 -#define SCSI_STATUS_ACA_ACTIVE 0x30 - -#define 
SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length
- * in CDBs
- */
-
-#define SCSI_OP_WRITE_VERIFY10 0x2E
-#define SCSI_OP_WRITE_VERIFY12 0xAE
-#define SCSI_OP_UNDEF 0xFF
-
-/*
- * SCSI WRITE-VERIFY(10) command
- */
-struct scsi_write_verify10_s{
- u8 opcode;
-#ifdef __BIGENDIAN
- u8 reserved1:3;
- u8 dpo:1; /* Disable Page Out */
- u8 reserved2:1;
- u8 ebp:1; /* erase by-pass */
- u8 bytchk:1; /* byte check */
- u8 rel_adr:1; /* relative address */
-#else
- u8 rel_adr:1; /* relative address */
- u8 bytchk:1; /* byte check */
- u8 ebp:1; /* erase by-pass */
- u8 reserved2:1;
- u8 dpo:1; /* Disable Page Out */
- u8 reserved1:3;
-#endif
- u8 lba0; /* logical block address - MSB */
- u8 lba1;
- u8 lba2;
- u8 lba3; /* LSB */
- u8 reserved3;
- u8 xfer_length0; /* transfer length in blocks - MSB */
- u8 xfer_length1; /* LSB */
- u8 control;
-};
-
-#pragma pack()
-
-#endif /* __SCSI_H__ */
diff --git a/drivers/scsi/bfa/include/protocol/types.h b/drivers/scsi/bfa/include/protocol/types.h
deleted file mode 100644
index 2875a6cced3b..000000000000
--- a/drivers/scsi/bfa/include/protocol/types.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-/**
- * types.h Protocol defined base types
- */
-
-#ifndef __TYPES_H__
-#define __TYPES_H__
-
-#include
-
-#define wwn_t u64
-#define lun_t u64
-
-#define WWN_NULL (0)
-#define FC_SYMNAME_MAX 256 /* max name server symbolic name size */
-#define FC_ALPA_MAX 128
-
-#pragma pack(1)
-
-#define MAC_ADDRLEN (6)
-struct mac_s { u8 mac[MAC_ADDRLEN]; };
-#define mac_t struct mac_s
-
-#pragma pack()
-
-#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
deleted file mode 100644
index f6342efb6a90..000000000000
--- a/drivers/scsi/bfa/loop.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-/**
- * port_loop.c vport private loop implementation.
- */
-#include
-#include
-#include "fcs_lport.h"
-#include "fcs_rport.h"
-#include "fcs_trcmod.h"
-#include "lport_priv.h"
-
-BFA_TRC_FILE(FCS, LOOP);
-
-/**
- * ALPA to LIXA bitmap mapping
- *
- * ALPA 0x00 (Word 0, Bit 30) is invalid for N_Ports. Also Word 0 Bit 31
- * is for L_bit (login required) and is filled as ALPA 0x00 here.
- */ -static const u8 port_loop_alpa_map[] = { - 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, /* Word 3 Bits 0..7 */ - 0xD9, 0xD6, 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, /* Word 3 Bits 8..15 */ - 0xCD, 0xCC, 0xCB, 0xCA, 0xC9, 0xC7, 0xC6, 0xC5, /* Word 3 Bits 16..23 */ - 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 0xB4, 0xB3, /* Word 3 Bits 24..31 */ - - 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, /* Word 2 Bits 0..7 */ - 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, /* Word 2 Bits 8..15 */ - 0x98, 0x97, 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, /* Word 2 Bits 16..23 */ - 0x80, 0x7C, 0x7A, 0x79, 0x76, 0x75, 0x74, 0x73, /* Word 2 Bits 24..31 */ - - 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A, 0x69, /* Word 1 Bits 0..7 */ - 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, /* Word 1 Bits 8..15 */ - 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, /* Word 1 Bits 16..23 */ - 0x4B, 0x4A, 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, /* Word 1 Bits 24..31 */ - - 0x3A, 0x39, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, /* Word 0 Bits 0..7 */ - 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x27, 0x26, /* Word 0 Bits 8..15 */ - 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, /* Word 0 Bits 16..23 */ - 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01, 0x00, 0x00, /* Word 0 Bits 24..31 */ -}; - -/* - * Local Functions - */ -static bfa_status_t bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, - u8 alpa); - -static void bfa_fcs_port_loop_plogi_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -/** - * Called by port to initializar in provate LOOP topology. - */ -void -bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port) -{ -} - -/** - * Called by port to notify transition to online state. - */ -void -bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port) -{ - - u8 num_alpa = port->port_topo.ploop.num_alpa; - u8 *alpa_pos_map = port->port_topo.ploop.alpa_pos_map; - struct bfa_fcs_rport_s *r_port; - int ii = 0; - - /* - * If the port role is Initiator Mode, create Rports. - */ - if (port->port_cfg.roles == BFA_PORT_ROLE_FCP_IM) { - /* - * Check if the ALPA positional bitmap is available. - * if not, we send PLOGI to all possible ALPAs. - */ - if (num_alpa > 0) { - for (ii = 0; ii < num_alpa; ii++) { - /* - * ignore ALPA of bfa port - */ - if (alpa_pos_map[ii] != port->pid) { - r_port = bfa_fcs_rport_create(port, - alpa_pos_map[ii]); - } - } - } else { - for (ii = 0; ii < MAX_ALPA_COUNT; ii++) { - /* - * ignore ALPA of bfa port - */ - if ((port_loop_alpa_map[ii] > 0) - && (port_loop_alpa_map[ii] != port->pid)) - bfa_fcs_port_loop_send_plogi(port, - port_loop_alpa_map[ii]); - /**TBD */ - } - } - } else { - /* - * TBD Target Mode ?? - */ - } - -} - -/** - * Called by port to notify transition to offline state. - */ -void -bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port) -{ - -} - -/** - * Called by port to notify a LIP on the loop. - */ -void -bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port) -{ -} - -/** - * Local Functions. 
- */ -static bfa_status_t -bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) -{ - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp = NULL; - int len; - - bfa_trc(port->fcs, alpa); - - fcxp = bfa_fcxp_alloc(NULL, port->fcs->bfa, 0, 0, NULL, NULL, NULL, - NULL); - bfa_assert(fcxp); - - len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, - bfa_fcs_port_loop_plogi_response, (void *)port, - FC_MAX_PDUSZ, FC_RA_TOV); - - return BFA_STATUS_OK; -} - -/** - * Called by fcxp to notify the Plogi response - */ -static void -bfa_fcs_port_loop_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_s *port = (struct bfa_fcs_port_s *) cbarg; - struct fc_logi_s *plogi_resp; - struct fc_els_cmd_s *els_cmd; - - bfa_trc(port->fcs, req_status); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - /* - * @todo - * This could mean that the device with this APLA does not - * exist on the loop. - */ - - return; - } - - els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); - plogi_resp = (struct fc_logi_s *) els_cmd; - - if (els_cmd->els_code == FC_ELS_ACC) { - bfa_fcs_rport_start(port, rsp_fchs, plogi_resp); - } else { - bfa_trc(port->fcs, plogi_resp->els_cmd.els_code); - bfa_assert(0); - } -} diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c deleted file mode 100644 index 72b3f508d0e9..000000000000 --- a/drivers/scsi/bfa/lport_api.c +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/** - * port_api.c BFA FCS port - */ - -#include -#include -#include -#include "fcs_rport.h" -#include "fcs_fabric.h" -#include "fcs_trcmod.h" -#include "fcs_vport.h" - -BFA_TRC_FILE(FCS, PORT_API); - - - -/** - * fcs_port_api BFA FCS port API - */ - -void -bfa_fcs_cfg_base_port(struct bfa_fcs_s *fcs, struct bfa_port_cfg_s *port_cfg) -{ -} - -struct bfa_fcs_port_s * -bfa_fcs_get_base_port(struct bfa_fcs_s *fcs) -{ - return &fcs->fabric.bport; -} - -wwn_t -bfa_fcs_port_get_rport(struct bfa_fcs_port_s *port, wwn_t wwn, int index, - int nrports, bfa_boolean_t bwwn) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - int i; - struct bfa_fcs_s *fcs; - - if (port == NULL || nrports == 0) - return (wwn_t) 0; - - fcs = port->fcs; - bfa_trc(fcs, (u32) nrports); - - i = 0; - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while ((qe != qh) && (i < nrports)) { - rport = (struct bfa_fcs_rport_s *)qe; - if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { - qe = bfa_q_next(qe); - bfa_trc(fcs, (u32) rport->pwwn); - bfa_trc(fcs, rport->pid); - bfa_trc(fcs, i); - continue; - } - - if (bwwn) { - if (!memcmp(&wwn, &rport->pwwn, 8)) - break; - } else { - if (i == index) - break; - } - - i++; - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, i); - if (rport) - return rport->pwwn; - else - return (wwn_t) 0; -} - -void -bfa_fcs_port_get_rports(struct bfa_fcs_port_s *port, wwn_t rport_wwns[], - int *nrports) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - int i; - struct bfa_fcs_s *fcs; - - if (port == NULL || rport_wwns == NULL || *nrports == 0) - return; - - fcs = port->fcs; - bfa_trc(fcs, (u32) *nrports); - - i = 0; - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while ((qe != qh) && (i < *nrports)) { - rport = (struct bfa_fcs_rport_s *)qe; - if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { - qe = bfa_q_next(qe); - bfa_trc(fcs, (u32) rport->pwwn); - bfa_trc(fcs, rport->pid); - bfa_trc(fcs, i); - continue; - } - - rport_wwns[i] = rport->pwwn; - - i++; - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, i); - *nrports = i; - return; -} - -/* - * Iterate's through all the rport's in the given port to - * determine the maximum operating speed. 
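The comment above describes the scan implemented just below in bfa_fcs_port_get_rport_max_speed. Here is a compact standalone sketch of the same idea, using hypothetical demo_* types and none of the driver's queue or trace plumbing; the early-exit condition is simplified to "at or above the physical port speed", so treat it as a shape of the logic rather than the exact rule.

#include <stddef.h>
#include <stdio.h>

enum demo_speed {
	DEMO_SPEED_UNKNOWN = 0,
	DEMO_SPEED_2G = 2,
	DEMO_SPEED_4G = 4,
	DEMO_SPEED_8G = 8,
};

struct demo_rport {
	int online;		/* offline entries are skipped            */
	enum demo_speed speed;	/* speed reported by the remote port, if any */
};

/*
 * Scan a set of remote ports and return the highest operating speed seen.
 * Unknown speeds fall back to a configured default, and the scan stops early
 * once a speed at or above the physical port speed is found.
 */
static enum demo_speed demo_max_rport_speed(const struct demo_rport *rp, size_t n,
					    enum demo_speed pport_speed,
					    enum demo_speed default_speed)
{
	enum demo_speed max = DEMO_SPEED_UNKNOWN;
	size_t i;

	for (i = 0; i < n; i++) {
		enum demo_speed s = rp[i].speed;

		if (!rp[i].online)
			continue;
		if (s == DEMO_SPEED_UNKNOWN)
			s = default_speed;
		if (s >= pport_speed)
			return s;
		if (s > max)
			max = s;
	}
	return max;
}

int main(void)
{
	const struct demo_rport rports[] = {
		{ 1, DEMO_SPEED_4G }, { 0, DEMO_SPEED_8G }, { 1, DEMO_SPEED_UNKNOWN },
	};

	printf("max speed: %d Gbps\n",
	       demo_max_rport_speed(rports, 3, DEMO_SPEED_8G, DEMO_SPEED_2G));
	return 0;
}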
- * - * To be used in TRL Functionality only - */ -enum bfa_pport_speed -bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port) -{ - struct list_head *qh, *qe; - struct bfa_fcs_rport_s *rport = NULL; - struct bfa_fcs_s *fcs; - enum bfa_pport_speed max_speed = 0; - struct bfa_pport_attr_s pport_attr; - enum bfa_pport_speed pport_speed, rport_speed; - bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa); - - if (port == NULL) - return 0; - - fcs = port->fcs; - - /* - * Get Physical port's current speed - */ - bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); - pport_speed = pport_attr.speed; - bfa_trc(fcs, pport_speed); - - qh = &port->rport_q; - qe = bfa_q_first(qh); - - while (qe != qh) { - rport = (struct bfa_fcs_rport_s *) qe; - if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) || - (bfa_fcs_rport_get_state(rport) == - BFA_RPORT_OFFLINE)) { - qe = bfa_q_next(qe); - continue; - } - - rport_speed = rport->rpf.rpsc_speed; - if ((trl_enabled) && (rport_speed == - BFA_PPORT_SPEED_UNKNOWN)) { - /* Use default ratelim speed setting */ - rport_speed = - bfa_fcport_get_ratelim_speed(port->fcs->bfa); - } - - if ((rport_speed == BFA_PPORT_SPEED_8GBPS) || - (rport_speed > pport_speed)) { - max_speed = rport_speed; - break; - } else if (rport_speed > max_speed) { - max_speed = rport_speed; - } - - qe = bfa_q_next(qe); - } - - bfa_trc(fcs, max_speed); - return max_speed; -} - -struct bfa_fcs_port_s * -bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) -{ - struct bfa_fcs_vport_s *vport; - bfa_fcs_vf_t *vf; - - bfa_assert(fcs != NULL); - - vf = bfa_fcs_vf_lookup(fcs, vf_id); - if (vf == NULL) { - bfa_trc(fcs, vf_id); - return NULL; - } - - if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn)) - return &vf->bport; - - vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn); - if (vport) - return &vport->lport; - - return NULL; -} - -/* - * API corresponding to VmWare's NPIV_VPORT_GETINFO. 
- */ -void -bfa_fcs_port_get_info(struct bfa_fcs_port_s *port, - struct bfa_port_info_s *port_info) -{ - - bfa_trc(port->fcs, port->fabric->fabric_name); - - if (port->vport == NULL) { - /* - * This is a Physical port - */ - port_info->port_type = BFA_PORT_TYPE_PHYSICAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_port_get_pwwn(port); - port_info->node_wwn = bfa_fcs_port_get_nwwn(port); - - port_info->max_vports_supp = - bfa_lps_get_max_vport(port->fcs->bfa); - port_info->num_vports_inuse = - bfa_fcs_fabric_vport_count(port->fabric); - port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; - port_info->num_rports_inuse = port->num_rports; - } else { - /* - * This is a virtual port - */ - port_info->port_type = BFA_PORT_TYPE_VIRTUAL; - - /* - * @todo : need to fix the state & reason - */ - port_info->port_state = 0; - port_info->offline_reason = 0; - - port_info->port_wwn = bfa_fcs_port_get_pwwn(port); - port_info->node_wwn = bfa_fcs_port_get_nwwn(port); - } -} - -void -bfa_fcs_port_get_stats(struct bfa_fcs_port_s *fcs_port, - struct bfa_port_stats_s *port_stats) -{ - bfa_os_memcpy(port_stats, &fcs_port->stats, - sizeof(struct bfa_port_stats_s)); - return; -} - -void -bfa_fcs_port_clear_stats(struct bfa_fcs_port_s *fcs_port) -{ - bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_port_stats_s)); - return; -} - -void -bfa_fcs_port_enable_ipfc_roles(struct bfa_fcs_port_s *fcs_port) -{ - fcs_port->port_cfg.roles |= BFA_PORT_ROLE_FCP_IPFC; - return; -} - -void -bfa_fcs_port_disable_ipfc_roles(struct bfa_fcs_port_s *fcs_port) -{ - fcs_port->port_cfg.roles &= ~BFA_PORT_ROLE_FCP_IPFC; - return; -} - - diff --git a/drivers/scsi/bfa/lport_priv.h b/drivers/scsi/bfa/lport_priv.h deleted file mode 100644 index dbae370a599a..000000000000 --- a/drivers/scsi/bfa/lport_priv.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __VP_PRIV_H__ -#define __VP_PRIV_H__ - -#include -#include - -/* - * Functions exported by vps - */ -void bfa_fcs_vport_init(struct bfa_fcs_vport_s *vport); - -/* - * Functions exported by vps - */ -void bfa_fcs_vps_online(struct bfa_fcs_port_s *port); -void bfa_fcs_vps_offline(struct bfa_fcs_port_s *port); -void bfa_fcs_vps_lip(struct bfa_fcs_port_s *port); - -/* - * Functions exported by port_fab - */ -void bfa_fcs_port_fab_init(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_fab_online(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_fab_offline(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_fab_rx_frame(struct bfa_fcs_port_s *port, - u8 *rx_frame, u32 len); - -/* - * Functions exported by VP-NS. 
- */ -void bfa_fcs_port_ns_init(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_ns_online(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port); - -/* - * Functions exported by VP-SCN - */ -void bfa_fcs_port_scn_init(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_scn_online(struct bfa_fcs_port_s *vport); -void bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port, - struct fchs_s *rx_frame, u32 len); - -/* - * Functions exported by VP-N2N - */ - -void bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port); -void bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port); -void bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port); -void bfa_fcs_port_n2n_rx_frame(struct bfa_fcs_port_s *port, - u8 *rx_frame, u32 len); - -/* - * Functions exported by VP-LOOP - */ -void bfa_fcs_port_loop_init(struct bfa_fcs_port_s *port); -void bfa_fcs_port_loop_online(struct bfa_fcs_port_s *port); -void bfa_fcs_port_loop_offline(struct bfa_fcs_port_s *port); -void bfa_fcs_port_loop_lip(struct bfa_fcs_port_s *port); -void bfa_fcs_port_loop_rx_frame(struct bfa_fcs_port_s *port, - u8 *rx_frame, u32 len); - -#endif /* __VP_PRIV_H__ */ diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c deleted file mode 100644 index 1d579ef26122..000000000000 --- a/drivers/scsi/bfa/ms.c +++ /dev/null @@ -1,759 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - - -#include -#include -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "lport_priv.h" - -BFA_TRC_FILE(FCS, MS); - -#define BFA_FCS_MS_CMD_MAX_RETRIES 2 -/* - * forward declarations - */ -static void bfa_fcs_port_ms_send_plogi(void *ms_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ms_timeout(void *arg); -static void bfa_fcs_port_ms_plogi_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); - -static void bfa_fcs_port_ms_send_gmal(void *ms_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ms_gmal_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_ms_send_gfn(void *ms_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ms_gfn_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -/** - * fcs_ms_sm FCS MS state machine - */ - -/** - * MS State Machine events - */ -enum port_ms_event { - MSSM_EVENT_PORT_ONLINE = 1, - MSSM_EVENT_PORT_OFFLINE = 2, - MSSM_EVENT_RSP_OK = 3, - MSSM_EVENT_RSP_ERROR = 4, - MSSM_EVENT_TIMEOUT = 5, - MSSM_EVENT_FCXP_SENT = 6, - MSSM_EVENT_PORT_FABRIC_RSCN = 7 -}; - -static void bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -static void bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event); -/** - * Start in offline state - awaiting NS to send start. 
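The MS handlers that follow all share one structure: the current state is a function pointer, each handler is a switch on the event enum, and bfa_sm_set_state()/bfa_sm_send_event() move the machine along, with bfa_sm_fault() catching unexpected events. A minimal freestanding illustration of that pattern, using hypothetical demo_* names instead of the driver's macros (unexpected events are simply ignored here rather than faulted):

#include <stdio.h>

enum demo_event { DEMO_EV_ONLINE = 1, DEMO_EV_OFFLINE, DEMO_EV_RSP_OK };

struct demo_sm;
typedef void (*demo_state_fn)(struct demo_sm *sm, enum demo_event ev);

struct demo_sm {
	demo_state_fn state;	/* current state is just a function pointer */
};

static void demo_sm_offline(struct demo_sm *sm, enum demo_event ev);
static void demo_sm_login(struct demo_sm *sm, enum demo_event ev);

static void demo_sm_offline(struct demo_sm *sm, enum demo_event ev)
{
	switch (ev) {
	case DEMO_EV_ONLINE:
		sm->state = demo_sm_login;	/* plays the bfa_sm_set_state() role */
		break;
	default:
		break;				/* events that do not apply are ignored */
	}
}

static void demo_sm_login(struct demo_sm *sm, enum demo_event ev)
{
	switch (ev) {
	case DEMO_EV_RSP_OK:
		printf("login complete\n");
		break;
	case DEMO_EV_OFFLINE:
		sm->state = demo_sm_offline;
		break;
	default:
		break;
	}
}

int main(void)
{
	struct demo_sm sm = { demo_sm_offline };

	sm.state(&sm, DEMO_EV_ONLINE);	/* plays the bfa_sm_send_event() role */
	sm.state(&sm, DEMO_EV_RSP_OK);
	return 0;
}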
- */ -static void -bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_PORT_ONLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending); - bfa_fcs_port_ms_send_plogi(ms, NULL); - break; - - case MSSM_EVENT_PORT_OFFLINE: - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_FCXP_SENT: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), - &ms->fcxp_wqe); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry); - ms->port->stats.ms_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer, - bfa_fcs_port_ms_timeout, ms, - BFA_FCS_RETRY_TIMEOUT); - break; - - case MSSM_EVENT_RSP_OK: - /* - * since plogi is done, now invoke MS related sub-modules - */ - bfa_fcs_port_fdmi_online(ms); - - /** - * if this is a Vport, go to online state. - */ - if (ms->port->vport) { - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online); - break; - } - - /* - * For a base port we need to get the - * switch's IP address. - */ - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending); - bfa_fcs_port_ms_send_gmal(ms, NULL); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_fcxp_discard(ms->fcxp); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. 
Re-send - */ - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending); - bfa_fcs_port_ms_send_plogi(ms, NULL); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_timer_stop(&ms->timer); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - break; - - case MSSM_EVENT_PORT_FABRIC_RSCN: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending); - ms->retry_cnt = 0; - bfa_fcs_port_ms_send_gfn(ms, NULL); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_FCXP_SENT: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), - &ms->fcxp_wqe); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_retry); - ms->port->stats.ms_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), - &ms->timer, bfa_fcs_port_ms_timeout, ms, - BFA_FCS_RETRY_TIMEOUT); - } else { - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending); - bfa_fcs_port_ms_send_gfn(ms, NULL); - ms->retry_cnt = 0; - } - break; - - case MSSM_EVENT_RSP_OK: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending); - bfa_fcs_port_ms_send_gfn(ms, NULL); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_fcxp_discard(ms->fcxp); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. Re-send - */ - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending); - bfa_fcs_port_ms_send_gmal(ms, NULL); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_timer_stop(&ms->timer); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -/** - * ms_pvt MS local functions - */ - -static void -bfa_fcs_port_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ms_s *ms = ms_cbarg; - struct bfa_fcs_port_s *port = ms->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->pid); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, - bfa_fcs_port_ms_send_gmal, ms); - return; - } - ms->fcxp = fcxp; - - len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_fcs_port_get_fcid(port), - bfa_lps_get_peer_nwwn(port->fabric->lps)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gmal_response, - (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV); - - bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_port_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg; - struct bfa_fcs_port_s *port = ms->port; - struct ct_hdr_s *cthdr = NULL; - struct fcgs_gmal_resp_s *gmal_resp; - struct fc_gmal_entry_s *gmal_entry; - u32 num_entries; - u8 *rsp_str; - - bfa_trc(port->fcs, req_status); - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); - num_entries = bfa_os_ntohl(gmal_resp->ms_len); - if (num_entries == 0) { - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - return; - } - /* - * The response could contain multiple Entries. - * Entries for SNMP interface, etc. - * We look for the entry with a telnet prefix. - * First "http://" entry refers to IP addr - */ - - gmal_entry = (struct fc_gmal_entry_s *)gmal_resp->ms_ma; - while (num_entries > 0) { - if (strncmp - (gmal_entry->prefix, CT_GMAL_RESP_PREFIX_HTTP, - sizeof(gmal_entry->prefix)) == 0) { - - /* - * if the IP address is terminating with a '/', - * remove it. *Byte 0 consists of the length - * of the string. 
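The trimming described above (match the http prefix, then strip a trailing '/' from the length-prefixed address) is easy to lose in the pointer arithmetic, so here is a small standalone sketch with a simplified, hypothetical entry layout. It is not the CT wire format, just the same string handling.

#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified layout for one management-address entry. */
struct demo_ma_entry {
	unsigned char len;	/* length of the address text           */
	char prefix[8];		/* e.g. "http://"                        */
	char addr[64];		/* address text, not NUL-terminated      */
};

/*
 * Pick the first entry carrying an "http://" prefix, strip a trailing '/'
 * from the address if present, and copy it out as a NUL-terminated string.
 * Returns 0 on success, -1 if no matching entry was found.
 */
static int demo_pick_http_addr(const struct demo_ma_entry *e, int n,
			       char *out, size_t out_sz)
{
	int i;

	for (i = 0; i < n; i++) {
		size_t len = e[i].len;

		if (strncmp(e[i].prefix, "http://", 7) != 0)
			continue;
		if (len && e[i].addr[len - 1] == '/')
			len--;			/* drop the trailing slash */
		if (len >= out_sz)
			len = out_sz - 1;
		memcpy(out, e[i].addr, len);
		out[len] = '\0';
		return 0;
	}
	return -1;
}

int main(void)
{
	const struct demo_ma_entry entries[] = {
		{ 11, "http://", "192.0.2.10/" },
	};
	char ip[32];

	if (demo_pick_http_addr(entries, 1, ip, sizeof(ip)) == 0)
		printf("fabric management IP: %s\n", ip);
	return 0;
}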
- */ - rsp_str = &(gmal_entry->prefix[0]); - if (rsp_str[gmal_entry->len - 1] == '/') - rsp_str[gmal_entry->len - 1] = 0; - /* - * copy IP Address to fabric - */ - strncpy(bfa_fcs_port_get_fabric_ipaddr(port), - gmal_entry->ip_addr, - BFA_FCS_FABRIC_IPADDR_SZ); - break; - } else { - --num_entries; - ++gmal_entry; - } - } - - bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); - return; - } - - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); -} - -static void -bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_FCXP_SENT: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port), - &ms->fcxp_wqe); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) { - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry); - ms->port->stats.ms_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), - &ms->timer, bfa_fcs_port_ms_timeout, ms, - BFA_FCS_RETRY_TIMEOUT); - } else { - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online); - ms->retry_cnt = 0; - } - break; - - case MSSM_EVENT_RSP_OK: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_fcxp_discard(ms->fcxp); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -static void -bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms, - enum port_ms_event event) -{ - bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); - bfa_trc(ms->port->fcs, event); - - switch (event) { - case MSSM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. Re-send - */ - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending); - bfa_fcs_port_ms_send_gfn(ms, NULL); - break; - - case MSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - bfa_timer_stop(&ms->timer); - break; - - default: - bfa_sm_fault(ms->port->fcs, event); - } -} - -/** - * ms_pvt MS local functions - */ - -static void -bfa_fcs_port_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ms_s *ms = ms_cbarg; - struct bfa_fcs_port_s *port = ms->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->pid); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, - bfa_fcs_port_ms_send_gfn, ms); - return; - } - ms->fcxp = fcxp; - - len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_fcs_port_get_fcid(port), - bfa_lps_get_peer_nwwn(port->fabric->lps)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response, - (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV); - - bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_port_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg; - struct bfa_fcs_port_s *port = ms->port; - struct ct_hdr_s *cthdr = NULL; - wwn_t *gfn_resp; - - bfa_trc(port->fcs, req_status); - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - gfn_resp = (wwn_t *) (cthdr + 1); - /* - * check if it has actually changed - */ - if ((memcmp - ((void *)&bfa_fcs_port_get_fabric_name(port), gfn_resp, - sizeof(wwn_t)) != 0)) - bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); - return; - } - - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); -} - -/** - * ms_pvt MS local functions - */ - -static void -bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ms_s *ms = ms_cbarg; - struct bfa_fcs_port_s *port = ms->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->pid); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - port->stats.ms_plogi_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, - bfa_fcs_port_ms_send_plogi, ms); - return; - } - ms->fcxp = fcxp; - - len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_os_hton3b(FC_MGMT_SERVER), - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response, - (void *)ms, FC_MAX_PDUSZ, FC_ELS_TOV); - - port->stats.ms_plogi_sent++; - bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_port_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg; - - struct bfa_fcs_port_s *port = ms->port; - struct fc_els_cmd_s *els_cmd; - struct fc_ls_rjt_s *ls_rjt; - - bfa_trc(port->fcs, req_status); - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - port->stats.ms_plogi_rsp_err++; - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - return; - } - - els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); - - switch (els_cmd->els_code) { - - case FC_ELS_ACC: - if (rsp_len < sizeof(struct fc_logi_s)) { - bfa_trc(port->fcs, rsp_len); - port->stats.ms_plogi_acc_err++; - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - break; - } - port->stats.ms_plogi_accepts++; - bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK); - break; - - case FC_ELS_LS_RJT: - ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); - - bfa_trc(port->fcs, ls_rjt->reason_code); - bfa_trc(port->fcs, ls_rjt->reason_code_expl); - - port->stats.ms_rejects++; - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - break; - - default: - port->stats.ms_plogi_unknown_rsp++; - bfa_trc(port->fcs, els_cmd->els_code); - bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); - } -} - -static void -bfa_fcs_port_ms_timeout(void *arg) -{ - struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)arg; - - ms->port->stats.ms_timeouts++; - bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT); -} - - -void -bfa_fcs_port_ms_init(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); - - ms->port = port; - bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - - /* - * Invoke init routines of sub modules. - */ - bfa_fcs_port_fdmi_init(ms); -} - -void -bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); - - ms->port = port; - bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); - bfa_fcs_port_fdmi_offline(ms); -} - -void -bfa_fcs_port_ms_online(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); - - ms->port = port; - bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE); -} - -void -bfa_fcs_port_ms_fabric_rscn(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port); - - /* - * @todo. 
Handle this only when in Online state - */ - if (bfa_sm_cmp_state(ms, bfa_fcs_port_ms_sm_online)) - bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); -} diff --git a/drivers/scsi/bfa/n2n.c b/drivers/scsi/bfa/n2n.c deleted file mode 100644 index 735456824346..000000000000 --- a/drivers/scsi/bfa/n2n.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * n2n.c n2n implementation. - */ -#include -#include -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "fcs_trcmod.h" -#include "lport_priv.h" - -BFA_TRC_FILE(FCS, N2N); - -/** - * Called by fcs/port to initialize N2N topology. - */ -void -bfa_fcs_port_n2n_init(struct bfa_fcs_port_s *port) -{ -} - -/** - * Called by fcs/port to notify transition to online state. - */ -void -bfa_fcs_port_n2n_online(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n; - struct bfa_port_cfg_s *pcfg = &port->port_cfg; - struct bfa_fcs_rport_s *rport; - - bfa_trc(port->fcs, pcfg->pwwn); - - /* - * If our PWWN is > than that of the r-port, we have to initiate PLOGI - * and assign an Address. if not, we need to wait for its PLOGI. - * - * If our PWWN is < than that of the remote port, it will send a PLOGI - * with the PIDs assigned. The rport state machine take care of this - * incoming PLOGI. - */ - if (memcmp - ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, - sizeof(wwn_t)) > 0) { - port->pid = N2N_LOCAL_PID; - /** - * First, check if we know the device by pwwn. - */ - rport = bfa_fcs_port_get_rport_by_pwwn(port, - n2n_port->rem_port_wwn); - if (rport) { - bfa_trc(port->fcs, rport->pid); - bfa_trc(port->fcs, rport->pwwn); - rport->pid = N2N_REMOTE_PID; - bfa_fcs_rport_online(rport); - return; - } - - /* - * In n2n there can be only one rport. Delete the old one whose - * pid should be zero, because it is offline. - */ - if (port->num_rports > 0) { - rport = bfa_fcs_port_get_rport_by_pid(port, 0); - bfa_assert(rport != NULL); - if (rport) { - bfa_trc(port->fcs, rport->pwwn); - bfa_fcs_rport_delete(rport); - } - } - bfa_fcs_rport_create(port, N2N_REMOTE_PID); - } -} - -/** - * Called by fcs/port to notify transition to offline state. - */ -void -bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n; - - bfa_trc(port->fcs, port->pid); - port->pid = 0; - n2n_port->rem_port_wwn = 0; - n2n_port->reply_oxid = 0; -} - - diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c deleted file mode 100644 index ae0edcc86ed5..000000000000 --- a/drivers/scsi/bfa/ns.c +++ /dev/null @@ -1,1242 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * @page ns_sm_info VPORT NS State Machine - * - * @section ns_sm_interactions VPORT NS State Machine Interactions - * - * @section ns_sm VPORT NS State Machine - * img ns_sm.jpg - */ -#include -#include -#include -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "fcs.h" -#include "lport_priv.h" - -BFA_TRC_FILE(FCS, NS); - -/* - * forward declarations - */ -static void bfa_fcs_port_ns_send_plogi(void *ns_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_ns_timeout(void *arg); -static void bfa_fcs_port_ns_plogi_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_ns_rspn_id_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_ns_rft_id_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_ns_rff_id_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_ns_gid_ft_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port, - u32 *pid_buf, - u32 n_pids); - -static void bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port); -/** - * fcs_ns_sm FCS nameserver interface state machine - */ - -/** - * VPort NS State Machine events - */ -enum vport_ns_event { - NSSM_EVENT_PORT_ONLINE = 1, - NSSM_EVENT_PORT_OFFLINE = 2, - NSSM_EVENT_PLOGI_SENT = 3, - NSSM_EVENT_RSP_OK = 4, - NSSM_EVENT_RSP_ERROR = 5, - NSSM_EVENT_TIMEOUT = 6, - NSSM_EVENT_NS_QUERY = 7, - NSSM_EVENT_RSPNID_SENT = 8, - NSSM_EVENT_RFTID_SENT = 9, - NSSM_EVENT_RFFID_SENT = 10, - NSSM_EVENT_GIDFT_SENT = 11, -}; - -static void bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s 
*ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -static void bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event); -/** - * Start in offline state - awaiting linkup - */ -static void -bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_PORT_ONLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending); - bfa_fcs_port_ns_send_plogi(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_PLOGI_SENT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), - &ns->fcxp_wqe); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_retry); - ns->port->stats.ns_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, - bfa_fcs_port_ns_timeout, ns, - BFA_FCS_RETRY_TIMEOUT); - break; - - case NSSM_EVENT_RSP_OK: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id); - bfa_fcs_port_ns_send_rspn_id(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_discard(ns->fcxp); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. 
Re-send - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending); - bfa_fcs_port_ns_send_plogi(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_timer_stop(&ns->timer); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RSPNID_SENT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), - &ns->fcxp_wqe); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rspn_id_retry); - ns->port->stats.ns_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, - bfa_fcs_port_ns_timeout, ns, - BFA_FCS_RETRY_TIMEOUT); - break; - - case NSSM_EVENT_RSP_OK: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id); - bfa_fcs_port_ns_send_rft_id(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_fcxp_discard(ns->fcxp); - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_TIMEOUT: - /* - * Retry Timer Expired. 
Re-send - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id); - bfa_fcs_port_ns_send_rspn_id(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_timer_stop(&ns->timer); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RFTID_SENT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), - &ns->fcxp_wqe); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RSP_OK: - /* - * Now move to register FC4 Features - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id); - bfa_fcs_port_ns_send_rff_id(ns, NULL); - break; - - case NSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rft_id_retry); - ns->port->stats.ns_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, - bfa_fcs_port_ns_timeout, ns, - BFA_FCS_RETRY_TIMEOUT); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_discard(ns->fcxp); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_TIMEOUT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rft_id); - bfa_fcs_port_ns_send_rft_id(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_timer_stop(&ns->timer); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RFFID_SENT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), - &ns->fcxp_wqe); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RSP_OK: - - /* - * If min cfg mode is enabled, we donot initiate rport - * discovery with the fabric. Instead, we will retrieve the - * boot targets from HAL/FW. - */ - if (__fcs_min_cfg(ns->port->fcs)) { - bfa_fcs_port_ns_boot_target_disc(ns->port); - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online); - return; - } - - /* - * If the port role is Initiator Mode issue NS query. - * If it is Target Mode, skip this and go to online. 
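Condensed, the decision the RFF_ID handler makes here is: min-cfg mode skips fabric discovery entirely and pulls boot targets from firmware, initiator-mode ports continue with a GID_FT name-server query, and target-mode ports go straight to online (the management-server machine is started in either of the latter cases). A hedged standalone sketch of that branch, with hypothetical demo_* helpers standing in for the real next steps:

#include <stdio.h>

enum demo_role { DEMO_ROLE_INITIATOR, DEMO_ROLE_TARGET };

/* Hypothetical stand-ins for the actions the real handler triggers. */
static void demo_query_name_server(void)     { printf("send GID_FT\n"); }
static void demo_discover_boot_targets(void) { printf("use boot targets from FW\n"); }
static void demo_go_online(void)             { printf("NS online\n"); }

/*
 * Post-registration branch: in min-cfg mode skip fabric discovery and use
 * boot targets; otherwise initiators query the name server while targets
 * simply transition to the online state.
 */
static void demo_after_rff_id(int min_cfg, enum demo_role role)
{
	if (min_cfg) {
		demo_discover_boot_targets();
		demo_go_online();
		return;
	}

	if (role == DEMO_ROLE_INITIATOR)
		demo_query_name_server();
	else
		demo_go_online();
}

int main(void)
{
	demo_after_rff_id(0, DEMO_ROLE_INITIATOR);
	return 0;
}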
- */ - if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft); - bfa_fcs_port_ns_send_gid_ft(ns, NULL); - } else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) { - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online); - } - /* - * kick off mgmt srvr state machine - */ - bfa_fcs_port_ms_online(ns->port); - break; - - case NSSM_EVENT_RSP_ERROR: - /* - * Start timer for a delayed retry - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id_retry); - ns->port->stats.ns_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, - bfa_fcs_port_ns_timeout, ns, - BFA_FCS_RETRY_TIMEOUT); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_discard(ns->fcxp); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_TIMEOUT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rff_id); - bfa_fcs_port_ns_send_rff_id(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_timer_stop(&ns->timer); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} -static void -bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_GIDFT_SENT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port), - &ns->fcxp_wqe); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_RSP_OK: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online); - break; - - case NSSM_EVENT_RSP_ERROR: - /* - * TBD: for certain reject codes, we don't need to retry - */ - /* - * Start timer for a delayed retry - */ - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft_retry); - ns->port->stats.ns_retries++; - bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, - bfa_fcs_port_ns_timeout, ns, - BFA_FCS_RETRY_TIMEOUT); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_fcxp_discard(ns->fcxp); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_TIMEOUT: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft); - bfa_fcs_port_ns_send_gid_ft(ns, NULL); - break; - - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); - bfa_timer_stop(&ns->timer); - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - -static void -bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns, - enum vport_ns_event event) -{ - bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); - bfa_trc(ns->port->fcs, event); - - switch (event) { - case NSSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(ns, 
bfa_fcs_port_ns_sm_offline); - break; - - case NSSM_EVENT_NS_QUERY: - /* - * If the port role is Initiator Mode issue NS query. - * If it is Target Mode, skip this and go to online. - */ - if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft); - bfa_fcs_port_ns_send_gid_ft(ns, NULL); - }; - break; - - default: - bfa_sm_fault(ns->port->fcs, event); - } -} - - - -/** - * ns_pvt Nameserver local functions - */ - -static void -bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ns_s *ns = ns_cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->pid); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - port->stats.ns_plogi_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, - bfa_fcs_port_ns_send_plogi, ns); - return; - } - ns->fcxp = fcxp; - - len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_os_hton3b(FC_NAME_SERVER), - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response, - (void *)ns, FC_MAX_PDUSZ, FC_ELS_TOV); - port->stats.ns_plogi_sent++; - - bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT); -} - -static void -bfa_fcs_port_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg; - struct bfa_fcs_port_s *port = ns->port; - /* struct fc_logi_s *plogi_resp; */ - struct fc_els_cmd_s *els_cmd; - struct fc_ls_rjt_s *ls_rjt; - - bfa_trc(port->fcs, req_status); - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - port->stats.ns_plogi_rsp_err++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - return; - } - - els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); - - switch (els_cmd->els_code) { - - case FC_ELS_ACC: - if (rsp_len < sizeof(struct fc_logi_s)) { - bfa_trc(port->fcs, rsp_len); - port->stats.ns_plogi_acc_err++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - break; - } - port->stats.ns_plogi_accepts++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - break; - - case FC_ELS_LS_RJT: - ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); - - bfa_trc(port->fcs, ls_rjt->reason_code); - bfa_trc(port->fcs, ls_rjt->reason_code_expl); - - port->stats.ns_rejects++; - - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - break; - - default: - port->stats.ns_plogi_unknown_rsp++; - bfa_trc(port->fcs, els_cmd->els_code); - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - } -} - -/** - * Register the symbolic port name. - */ -static void -bfa_fcs_port_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ns_s *ns = ns_cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - u8 symbl[256]; - u8 *psymbl = &symbl[0]; - - bfa_os_memset(symbl, 0, sizeof(symbl)); - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - port->stats.ns_rspnid_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, - bfa_fcs_port_ns_send_rspn_id, ns); - return; - } - ns->fcxp = fcxp; - - /* - * for V-Port, form a Port Symbolic Name - */ - if (port->vport) { - /**For Vports, - * we append the vport's port symbolic name to that of the base port. - */ - - strncpy((char *)psymbl, - (char *) - &(bfa_fcs_port_get_psym_name - (bfa_fcs_get_base_port(port->fcs))), - strlen((char *) - &bfa_fcs_port_get_psym_name(bfa_fcs_get_base_port - (port->fcs)))); - - /* - * Ensure we have a null terminating string. - */ - ((char *) - psymbl)[strlen((char *) - &bfa_fcs_port_get_psym_name - (bfa_fcs_get_base_port(port->fcs)))] = 0; - - strncat((char *)psymbl, - (char *)&(bfa_fcs_port_get_psym_name(port)), - strlen((char *)&bfa_fcs_port_get_psym_name(port))); - } else { - psymbl = (u8 *) &(bfa_fcs_port_get_psym_name(port)); - } - - len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_fcs_port_get_fcid(port), 0, psymbl); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rspn_id_response, - (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); - - port->stats.ns_rspnid_sent++; - - bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT); -} - -static void -bfa_fcs_port_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct ct_hdr_s *cthdr = NULL; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - port->stats.ns_rspnid_rsp_err++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - port->stats.ns_rspnid_accepts++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - return; - } - - port->stats.ns_rspnid_rejects++; - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); -} - -/** - * Register FC4-Types - * TBD, Need to retrieve this from the OS driver, in case IPFC is enabled ? - */ -static void -bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ns_s *ns = ns_cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - port->stats.ns_rftid_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, - bfa_fcs_port_ns_send_rft_id, ns); - return; - } - ns->fcxp = fcxp; - - len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.roles); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response, - (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); - - port->stats.ns_rftid_sent++; - bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT); -} - -static void -bfa_fcs_port_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct ct_hdr_s *cthdr = NULL; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - port->stats.ns_rftid_rsp_err++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - port->stats.ns_rftid_accepts++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - return; - } - - port->stats.ns_rftid_rejects++; - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); -} - -/** -* Register FC4-Features : Should be done after RFT_ID - */ -static void -bfa_fcs_port_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ns_s *ns = ns_cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - u8 fc4_ftrs = 0; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - port->stats.ns_rffid_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, - bfa_fcs_port_ns_send_rff_id, ns); - return; - } - ns->fcxp = fcxp; - - if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) - fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR; - else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) - fc4_ftrs = FC_GS_FCP_FC4_FEATURE_TARGET; - - len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_fcs_port_get_fcid(port), 0, FC_TYPE_FCP, - fc4_ftrs); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rff_id_response, - (void *)ns, FC_MAX_PDUSZ, FC_FCCT_TOV); - - port->stats.ns_rffid_sent++; - bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT); -} - -static void -bfa_fcs_port_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct ct_hdr_s *cthdr = NULL; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - port->stats.ns_rffid_rsp_err++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - port->stats.ns_rffid_accepts++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - return; - } - - port->stats.ns_rffid_rejects++; - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - - if (cthdr->reason_code == CT_RSN_NOT_SUPP) { - /* - * if this command is not supported, we don't retry - */ - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - } else { - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - } -} - -/** - * Query Fabric for FC4-Types Devices. - * -* TBD : Need to use a local (FCS private) response buffer, since the response - * can be larger than 2K. - */ -static void -bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_ns_s *ns = ns_cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->pid); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - port->stats.ns_gidft_alloc_wait++; - bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, - bfa_fcs_port_ns_send_gid_ft, ns); - return; - } - ns->fcxp = fcxp; - - /* - * This query is only initiated for FCP initiator mode. 
- */ - len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), ns->port->pid, - FC_TYPE_FCP); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response, - (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa), - FC_FCCT_TOV); - - port->stats.ns_gidft_sent++; - - bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT); -} - -static void -bfa_fcs_port_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg; - struct bfa_fcs_port_s *port = ns->port; - struct ct_hdr_s *cthdr = NULL; - u32 n_pids; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - port->stats.ns_gidft_rsp_err++; - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - return; - } - - if (resid_len != 0) { - /* - * TBD : we will need to allocate a larger buffer & retry the - * command - */ - bfa_trc(port->fcs, rsp_len); - bfa_trc(port->fcs, resid_len); - return; - } - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - switch (cthdr->cmd_rsp_code) { - - case CT_RSP_ACCEPT: - - port->stats.ns_gidft_accepts++; - n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32)); - bfa_trc(port->fcs, n_pids); - bfa_fcs_port_ns_process_gidft_pids(port, - (u32 *) (cthdr + 1), - n_pids); - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - break; - - case CT_RSP_REJECT: - - /* - * Check the reason code & explanation. - * There may not have been any FC4 devices in the fabric - */ - port->stats.ns_gidft_rejects++; - bfa_trc(port->fcs, cthdr->reason_code); - bfa_trc(port->fcs, cthdr->exp_code); - - if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF) - && (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) { - - bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK); - } else { - /* - * for all other errors, retry - */ - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - } - break; - - default: - port->stats.ns_gidft_unknown_rsp++; - bfa_trc(port->fcs, cthdr->cmd_rsp_code); - bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); - } -} - -/** - * This routine will be called by bfa_timer on timer timeouts. - * - * param[in] port - pointer to bfa_fcs_port_t. - * - * return - * void - * -* Special Considerations: - * - * note - */ -static void -bfa_fcs_port_ns_timeout(void *arg) -{ - struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)arg; - - ns->port->stats.ns_timeouts++; - bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT); -} - -/* - * Process the PID list in GID_FT response - */ -static void -bfa_fcs_port_ns_process_gidft_pids(struct bfa_fcs_port_s *port, - u32 *pid_buf, u32 n_pids) -{ - struct fcgs_gidft_resp_s *gidft_entry; - struct bfa_fcs_rport_s *rport; - u32 ii; - - for (ii = 0; ii < n_pids; ii++) { - gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii]; - - if (gidft_entry->pid == port->pid) - continue; - - /* - * Check if this rport already exists - */ - rport = bfa_fcs_port_get_rport_by_pid(port, gidft_entry->pid); - if (rport == NULL) { - /* - * this is a new device. create rport - */ - rport = bfa_fcs_rport_create(port, gidft_entry->pid); - } else { - /* - * this rport already exists - */ - bfa_fcs_rport_scn(rport); - } - - bfa_trc(port->fcs, gidft_entry->pid); - - /* - * if the last entry bit is set, bail out. 
- */ - if (gidft_entry->last) - return; - } -} - -/** - * fcs_ns_public FCS nameserver public interfaces - */ - -/* - * Functions called by port/fab. - * These will send relevant Events to the ns state machine. - */ -void -bfa_fcs_port_ns_init(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); - - ns->port = port; - bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); -} - -void -bfa_fcs_port_ns_offline(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); - - ns->port = port; - bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE); -} - -void -bfa_fcs_port_ns_online(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); - - ns->port = port; - bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE); -} - -void -bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); - - bfa_trc(port->fcs, port->pid); - bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); -} - -static void -bfa_fcs_port_ns_boot_target_disc(struct bfa_fcs_port_s *port) -{ - - struct bfa_fcs_rport_s *rport; - u8 nwwns; - wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; - int ii; - - bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns); - - for (ii = 0; ii < nwwns; ++ii) { - rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); - bfa_assert(rport); - } -} - - diff --git a/drivers/scsi/bfa/plog.c b/drivers/scsi/bfa/plog.c deleted file mode 100644 index fcb8864d3276..000000000000 --- a/drivers/scsi/bfa/plog.c +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#include -#include -#include - -static int -plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec) -{ - if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) - && (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING)) - return 1; - - if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) - && (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ)) - return 1; - - return 0; -} - -static void -bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) -{ - u16 tail; - struct bfa_plog_rec_s *pl_recp; - - if (plog->plog_enabled == 0) - return; - - if (plkd_validate_logrec(pl_rec)) { - bfa_assert(0); - return; - } - - tail = plog->tail; - - pl_recp = &(plog->plog_recs[tail]); - - bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); - - pl_recp->tv = BFA_TRC_TS(plog); - BFA_PL_LOG_REC_INCR(plog->tail); - - if (plog->head == plog->tail) - BFA_PL_LOG_REC_INCR(plog->head); -} - -void -bfa_plog_init(struct bfa_plog_s *plog) -{ - bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s)); - - bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); - plog->head = plog->tail = 0; - plog->plog_enabled = 1; -} - -void -bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, - u16 misc, char *log_str) -{ - struct bfa_plog_rec_s lp; - - if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); - lp.mid = mid; - lp.eid = event; - lp.log_type = BFA_PL_LOG_TYPE_STRING; - lp.misc = misc; - strncpy(lp.log_entry.string_log, log_str, - BFA_PL_STRING_LOG_SZ - 1); - lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; - bfa_plog_add(plog, &lp); - } -} - -void -bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, - u16 misc, u32 *intarr, u32 num_ints) -{ - struct bfa_plog_rec_s lp; - u32 i; - - if (num_ints > BFA_PL_INT_LOG_SZ) - num_ints = BFA_PL_INT_LOG_SZ; - - if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); - lp.mid = mid; - lp.eid = event; - lp.log_type = BFA_PL_LOG_TYPE_INT; - lp.misc = misc; - - for (i = 0; i < num_ints; i++) - bfa_os_assign(lp.log_entry.int_log[i], - intarr[i]); - - lp.log_num_ints = (u8) num_ints; - - bfa_plog_add(plog, &lp); - } -} - -void -bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, - u16 misc, struct fchs_s *fchdr) -{ - struct bfa_plog_rec_s lp; - u32 *tmp_int = (u32 *) fchdr; - u32 ints[BFA_PL_INT_LOG_SZ]; - - if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); - - ints[0] = tmp_int[0]; - ints[1] = tmp_int[1]; - ints[2] = tmp_int[4]; - - bfa_plog_intarr(plog, mid, event, misc, ints, 3); - } -} - -void -bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, - enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr, - u32 pld_w0) -{ - struct bfa_plog_rec_s lp; - u32 *tmp_int = (u32 *) fchdr; - u32 ints[BFA_PL_INT_LOG_SZ]; - - if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); - - ints[0] = tmp_int[0]; - ints[1] = tmp_int[1]; - ints[2] = tmp_int[4]; - ints[3] = pld_w0; - - bfa_plog_intarr(plog, mid, event, misc, ints, 4); - } -} - -void -bfa_plog_clear(struct bfa_plog_s *plog) -{ - plog->head = plog->tail = 0; -} - -void -bfa_plog_enable(struct bfa_plog_s *plog) -{ - plog->plog_enabled = 1; -} - -void -bfa_plog_disable(struct bfa_plog_s *plog) -{ - plog->plog_enabled = 0; -} - -bfa_boolean_t -bfa_plog_get_setting(struct bfa_plog_s *plog) -{ - return (bfa_boolean_t)plog->plog_enabled; -} diff --git a/drivers/scsi/bfa/rport.c 
b/drivers/scsi/bfa/rport.c deleted file mode 100644 index 9b4c2c9a644b..000000000000 --- a/drivers/scsi/bfa/rport.c +++ /dev/null @@ -1,2676 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * rport.c Remote port implementation. - */ - -#include -#include -#include -#include "fcbuild.h" -#include "fcs_vport.h" -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "fcs_fcpim.h" -#include "fcs_fcptm.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "fcs.h" -#include -#include - -BFA_TRC_FILE(FCS, RPORT); - -/* In millisecs */ -static u32 bfa_fcs_rport_del_timeout = - BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; - -/* - * forward declarations - */ -static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, - wwn_t pwwn, u32 rpid); -static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); -static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); -static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport); -static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport); -static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, - struct fc_logi_s *plogi); -static void bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport); -static void bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport); -static void bfa_fcs_rport_timeout(void *arg); -static void bfa_fcs_rport_send_plogi(void *rport_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_rport_plogi_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_rport_send_adisc(void *rport_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_rport_adisc_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_rport_send_gidpn(void *rport_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_rport_gidpn_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_rport_send_logo(void *rport_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg); -static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, - struct fchs_s *rx_fchs, u16 len); -static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, - struct fchs_s *rx_fchs, u8 reason_code, - u8 reason_code_expl); -static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, - struct fchs_s *rx_fchs, u16 len); -static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); -/** - * fcs_rport_sm FCS rport state machine events - */ - -enum rport_event { - RPSM_EVENT_PLOGI_SEND = 1, /* new 
rport; start with PLOGI */ - RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ - RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ - RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ - RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ - RPSM_EVENT_FCXP_SENT = 6, /* Frame from has been sent */ - RPSM_EVENT_DELETE = 7, /* RPORT delete request */ - RPSM_EVENT_SCN = 8, /* state change notification */ - RPSM_EVENT_ACCEPTED = 9,/* Good response from remote device */ - RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ - RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ - RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ - RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ - RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ - RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ - RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ - RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ -}; - -static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, - enum rport_event event); -static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, - enum rport_event event); - -static struct bfa_sm_table_s rport_sm_table[] = { - {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, - {BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI}, - {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE}, - {BFA_SM(bfa_fcs_rport_sm_plogi_retry), 
BFA_RPORT_PLOGI_RETRY}, - {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI}, - {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE}, - {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, - {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, - {BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY}, - {BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC}, - {BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC}, - {BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV}, - {BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO}, - {BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE}, - {BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE}, - {BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV}, - {BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO}, - {BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO}, - {BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE}, - {BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC}, - {BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC}, - {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, -}; - -/** - * Beginning state. - */ -static void -bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_PLOGI_SEND: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); - rport->plogi_retries = 0; - bfa_fcs_rport_send_plogi(rport, NULL); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_fcs_rport_hal_online(rport); - break; - - case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_ADDRESS_DISC: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * PLOGI is being sent. - */ -static void -bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_SCN: - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * PLOGI is being sent. 
- */ -static void -bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_fcs_rport_hal_online(rport); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_SCN: - /** - * Ignore, SCN is possibly online notification. - */ - break; - - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_HCB_OFFLINE: - /** - * Ignore BFA callback, on a PLOGI receive we call bfa offline. - */ - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * PLOGI is sent. - */ -static void -bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_SCN: - bfa_timer_stop(&rport->timer); - /* - * !! fall through !! - */ - - case RPSM_EVENT_TIMEOUT: - if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { - rport->plogi_retries++; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); - bfa_fcs_rport_send_plogi(rport, NULL); - } else { - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - } - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PRLO_RCVD: - case RPSM_EVENT_LOGO_RCVD: - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_timer_stop(&rport->timer); - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_timer_stop(&rport->timer); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_hal_online(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * PLOGI is sent. 
- */ -static void -bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_ACCEPTED: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - rport->plogi_retries = 0; - bfa_fcs_rport_hal_online(rport); - break; - - case RPSM_EVENT_LOGO_RCVD: - bfa_fcs_rport_send_logo_acc(rport); - /* - * !! fall through !! - */ - case RPSM_EVENT_PRLO_RCVD: - if (rport->prlo == BFA_TRUE) - bfa_fcs_rport_send_prlo_acc(rport); - - bfa_fcxp_discard(rport->fcxp); - /* - * !! fall through !! - */ - case RPSM_EVENT_FAILED: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - BFA_FCS_RETRY_TIMEOUT); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_fcxp_discard(rport->fcxp); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_fcxp_discard(rport->fcxp); - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_SCN: - /** - * Ignore SCN - wait for PLOGI response. - */ - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_hal_online(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * PLOGI is complete. Awaiting BFA rport online callback. FC-4s - * are offline. - */ -static void -bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_HCB_ONLINE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); - bfa_fcs_rport_online_action(rport); - break; - - case RPSM_EVENT_PRLO_RCVD: - break; - - case RPSM_EVENT_LOGO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); - bfa_rport_offline(rport->bfa_rport); - break; - - case RPSM_EVENT_LOGO_IMP: - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); - bfa_rport_offline(rport->bfa_rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_rport_offline(rport->bfa_rport); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); - bfa_rport_offline(rport->bfa_rport); - break; - - case RPSM_EVENT_SCN: - /** - * @todo - * Ignore SCN - PLOGI just completed, FC-4 login should detect - * device failures. - */ - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is ONLINE. FC-4s active. 
- */ -static void -bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_SCN: - /** - * Pause FC-4 activity till rport is authenticated. - * In switched fabrics, check presence of device in nameserver - * first. - */ - bfa_fcs_rport_fc4_pause(rport); - - if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsquery_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - } else { - bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); - bfa_fcs_rport_send_adisc(rport, NULL); - } - break; - - case RPSM_EVENT_PLOGI_RCVD: - case RPSM_EVENT_LOGO_IMP: - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_PLOGI_COMP: - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * An SCN event is received in ONLINE state. NS query is being sent - * prior to ADISC authentication with rport. FC-4s are paused. - */ -static void -bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_SCN: - /** - * ignore SCN, wait for response to query itself - */ - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_PLOGI_RCVD: - case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * An SCN event is received in ONLINE state. NS query is sent to rport. - * FC-4s are paused. 
- */ -static void -bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_ACCEPTED: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending); - bfa_fcs_rport_send_adisc(rport, NULL); - break; - - case RPSM_EVENT_FAILED: - rport->ns_retries++; - if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsquery_sending); - bfa_fcs_rport_send_gidpn(rport, NULL); - } else { - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcs_rport_offline_action(rport); - } - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_SCN: - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_PLOGI_COMP: - case RPSM_EVENT_ADDRESS_CHANGE: - case RPSM_EVENT_PLOGI_RCVD: - case RPSM_EVENT_LOGO_IMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_offline_action(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * An SCN event is received in ONLINE state. ADISC is being sent for - * authenticating with rport. FC-4s are paused. - */ -static void -bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_LOGO_IMP: - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_SCN: - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_offline_action(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * An SCN event is received in ONLINE state. ADISC is to rport. - * FC-4s are paused. - */ -static void -bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_ACCEPTED: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); - bfa_fcs_rport_fc4_resume(rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - /** - * Too complex to cleanup FC-4 & rport and then acc to PLOGI. - * At least go offline when a PLOGI is received. - */ - bfa_fcxp_discard(rport->fcxp); - /* - * !!! fall through !!! 
- */ - - case RPSM_EVENT_FAILED: - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_SCN: - /** - * already processing RSCN - */ - break; - - case RPSM_EVENT_LOGO_IMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_offline_action(rport); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_offline_action(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport has sent LOGO. Awaiting FC-4 offline completion callback. - */ -static void -bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FC4_OFFLINE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); - bfa_rport_offline(rport->bfa_rport); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - case RPSM_EVENT_ADDRESS_CHANGE: - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * LOGO needs to be sent to rport. Awaiting FC-4 offline completion - * callback. - */ -static void -bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FC4_OFFLINE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); - bfa_rport_offline(rport->bfa_rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is going offline. Awaiting FC-4 offline completion callback. - */ -static void -bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FC4_OFFLINE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); - bfa_rport_offline(rport->bfa_rport); - break; - - case RPSM_EVENT_SCN: - case RPSM_EVENT_LOGO_IMP: - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - case RPSM_EVENT_ADDRESS_CHANGE: - /** - * rport is already going offline. - * SCN - ignore and wait till transitioning to offline state - */ - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is offline. FC-4s are offline. Awaiting BFA rport offline - * callback. 
- */ -static void -bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_HCB_OFFLINE: - case RPSM_EVENT_ADDRESS_CHANGE: - if (bfa_fcs_port_is_online(rport->port)) { - if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - } else { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_plogi_sending); - rport->plogi_retries = 0; - bfa_fcs_rport_send_plogi(rport, NULL); - } - } else { - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - } - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_SCN: - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - /** - * Ignore, already offline. - */ - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is offline. FC-4s are offline. Awaiting BFA rport offline - * callback to send LOGO accept. - */ -static void -bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_HCB_OFFLINE: - case RPSM_EVENT_ADDRESS_CHANGE: - if (rport->pid && (rport->prlo == BFA_TRUE)) - bfa_fcs_rport_send_prlo_acc(rport); - if (rport->pid && (rport->prlo == BFA_FALSE)) - bfa_fcs_rport_send_logo_acc(rport); - - /* - * If the lport is online and if the rport is not a well known - * address port, we try to re-discover the r-port. - */ - if (bfa_fcs_port_is_online(rport->port) - && (!BFA_FCS_PID_IS_WKA(rport->pid))) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - } else { - /* - * if it is not a well known address, reset the pid to - * - */ - if (!BFA_FCS_PID_IS_WKA(rport->pid)) - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - } - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); - break; - - case RPSM_EVENT_LOGO_IMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - /** - * Ignore - already processing a LOGO. - */ - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is being deleted. FC-4s are offline. Awaiting BFA rport offline - * callback to send LOGO. - */ -static void -bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_HCB_OFFLINE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending); - bfa_fcs_rport_send_logo(rport, NULL); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - case RPSM_EVENT_ADDRESS_CHANGE: - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is being deleted. FC-4s are offline. LOGO is being sent. 
- */ -static void -bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FCXP_SENT: - /* - * Once LOGO is sent, we donot wait for the response - */ - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_SCN: - case RPSM_EVENT_ADDRESS_CHANGE: - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_free(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport is offline. FC-4s are offline. BFA rport is offline. - * Timer active to delete stale rport. - */ -static void -bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_TIMEOUT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_SCN: - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - bfa_timer_stop(&rport->timer); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - case RPSM_EVENT_LOGO_IMP: - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_hal_online(rport); - break; - - case RPSM_EVENT_PLOGI_SEND: - bfa_timer_stop(&rport->timer); - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); - rport->plogi_retries = 0; - bfa_fcs_rport_send_plogi(rport, NULL); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport address has changed. Nameserver discovery request is being sent. 
- */ -static void -bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_SCN: - case RPSM_EVENT_LOGO_RCVD: - case RPSM_EVENT_PRLO_RCVD: - case RPSM_EVENT_PLOGI_SEND: - break; - - case RPSM_EVENT_ADDRESS_CHANGE: - rport->ns_retries = 0; /* reset the retry count */ - break; - - case RPSM_EVENT_LOGO_IMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); - bfa_fcs_rport_hal_online(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Nameserver discovery failed. Waiting for timeout to retry. - */ -static void -bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_TIMEOUT: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_SCN: - case RPSM_EVENT_ADDRESS_CHANGE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); - bfa_timer_stop(&rport->timer); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_timer_stop(&rport->timer); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_LOGO_RCVD: - bfa_fcs_rport_send_logo_acc(rport); - break; - - case RPSM_EVENT_PRLO_RCVD: - bfa_fcs_rport_send_prlo_acc(rport); - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_timer_stop(&rport->timer); - bfa_fcs_rport_hal_online(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -/** - * Rport address has changed. Nameserver discovery request is sent. 
- */ -static void -bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, - enum rport_event event) -{ - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPSM_EVENT_ACCEPTED: - case RPSM_EVENT_ADDRESS_CHANGE: - if (rport->pid) { - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending); - bfa_fcs_rport_send_plogi(rport, NULL); - } else { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); - } - break; - - case RPSM_EVENT_FAILED: - rport->ns_retries++; - if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsdisc_sending); - bfa_fcs_rport_send_gidpn(rport, NULL); - } else { - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - } - break; - - case RPSM_EVENT_DELETE: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_free(rport); - break; - - case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_send_plogiacc(rport, NULL); - break; - - case RPSM_EVENT_LOGO_IMP: - rport->pid = 0; - bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); - bfa_fcxp_discard(rport->fcxp); - bfa_timer_start(rport->fcs->bfa, &rport->timer, - bfa_fcs_rport_timeout, rport, - bfa_fcs_rport_del_timeout); - break; - - case RPSM_EVENT_PRLO_RCVD: - bfa_fcs_rport_send_prlo_acc(rport); - break; - - case RPSM_EVENT_SCN: - /** - * ignore, wait for NS query response - */ - break; - - case RPSM_EVENT_LOGO_RCVD: - /** - * Not logged-in yet. Accept LOGO. - */ - bfa_fcs_rport_send_logo_acc(rport); - break; - - case RPSM_EVENT_PLOGI_COMP: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); - bfa_fcxp_discard(rport->fcxp); - bfa_fcs_rport_hal_online(rport); - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - - - -/** - * fcs_rport_private FCS RPORT private functions - */ - -static void -bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_rport_s *rport = rport_cbarg; - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(rport->fcs, rport->pwwn); - - fcxp = fcxp_alloced ?
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, - bfa_fcs_rport_send_plogi, rport); - return; - } - rport->fcxp = fcxp; - - len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, - (void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV); - - rport->stats.plogis++; - bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - struct fc_logi_s *plogi_rsp; - struct fc_ls_rjt_s *ls_rjt; - struct bfa_fcs_rport_s *twin; - struct list_head *qe; - - bfa_trc(rport->fcs, rport->pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(rport->fcs, req_status); - rport->stats.plogi_failed++; - bfa_sm_send_event(rport, RPSM_EVENT_FAILED); - return; - } - - plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); - - /** - * Check for failure first. - */ - if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { - ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); - - bfa_trc(rport->fcs, ls_rjt->reason_code); - bfa_trc(rport->fcs, ls_rjt->reason_code_expl); - - rport->stats.plogi_rejects++; - bfa_sm_send_event(rport, RPSM_EVENT_FAILED); - return; - } - - /** - * PLOGI is complete. Make sure this device is not one of the known - * device with a new FC port address. - */ - list_for_each(qe, &rport->port->rport_q) { - twin = (struct bfa_fcs_rport_s *)qe; - if (twin == rport) - continue; - if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) { - bfa_trc(rport->fcs, twin->pid); - bfa_trc(rport->fcs, rport->pid); - - /* - * Update plogi stats in twin - */ - twin->stats.plogis += rport->stats.plogis; - twin->stats.plogi_rejects += rport->stats.plogi_rejects; - twin->stats.plogi_timeouts += - rport->stats.plogi_timeouts; - twin->stats.plogi_failed += rport->stats.plogi_failed; - twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; - twin->stats.plogi_accs++; - - bfa_fcs_rport_delete(rport); - - bfa_fcs_rport_update(twin, plogi_rsp); - twin->pid = rsp_fchs->s_id; - bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP); - return; - } - } - - /** - * Normal login path -- no evil twins. - */ - rport->stats.plogi_accs++; - bfa_fcs_rport_update(rport, plogi_rsp); - bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); -} - -static void -bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_rport_s *rport = rport_cbarg; - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->reply_oxid); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, - bfa_fcs_rport_send_plogiacc, rport); - return; - } - rport->fcxp = fcxp; - - len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, - bfa_fcs_port_get_fcid(port), rport->reply_oxid, - port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_fcport_get_maxfrsize(port->fcs->bfa)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); - - bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_rport_s *rport = rport_cbarg; - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(rport->fcs, rport->pwwn); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, - bfa_fcs_rport_send_adisc, rport); - return; - } - rport->fcxp = fcxp; - - len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, - bfa_fcs_port_get_fcid(port), 0, - port->port_cfg.pwwn, port->port_cfg.nwwn); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response, - rport, FC_MAX_PDUSZ, FC_ELS_TOV); - - rport->stats.adisc_sent++; - bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - void *pld = bfa_fcxp_get_rspbuf(fcxp); - struct fc_ls_rjt_s *ls_rjt; - - if (req_status != BFA_STATUS_OK) { - bfa_trc(rport->fcs, req_status); - rport->stats.adisc_failed++; - bfa_sm_send_event(rport, RPSM_EVENT_FAILED); - return; - } - - if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn, - rport->nwwn) == FC_PARSE_OK) { - rport->stats.adisc_accs++; - bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); - return; - } - - rport->stats.adisc_rejects++; - ls_rjt = pld; - bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code); - bfa_trc(rport->fcs, ls_rjt->reason_code); - bfa_trc(rport->fcs, ls_rjt->reason_code_expl); - bfa_sm_send_event(rport, RPSM_EVENT_FAILED); -} - -static void -bfa_fcs_rport_send_gidpn(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_rport_s *rport = rport_cbarg; - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int len; - - bfa_trc(rport->fcs, rport->pid); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, - bfa_fcs_rport_send_gidpn, rport); - return; - } - rport->fcxp = fcxp; - - len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_fcs_port_get_fcid(port), 0, rport->pwwn); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_rport_gidpn_response, - (void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV); - - bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); -} - -static void -bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - struct bfa_fcs_rport_s *twin; - struct list_head *qe; - struct ct_hdr_s *cthdr; - struct fcgs_gidpn_resp_s *gidpn_rsp; - - bfa_trc(rport->fcs, rport->pwwn); - - cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); - - if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { - /* - * Check if the pid is the same as before. - */ - gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1); - - if (gidpn_rsp->dap == rport->pid) { - /* - * Device is online - */ - bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); - } else { - /* - * Device's PID has changed. We need to cleanup and - * re-login. If there is another device with the the - * newly discovered pid, send an scn notice so that its - * new pid can be discovered. - */ - list_for_each(qe, &rport->port->rport_q) { - twin = (struct bfa_fcs_rport_s *)qe; - if (twin == rport) - continue; - if (gidpn_rsp->dap == twin->pid) { - bfa_trc(rport->fcs, twin->pid); - bfa_trc(rport->fcs, rport->pid); - - twin->pid = 0; - bfa_sm_send_event(twin, - RPSM_EVENT_ADDRESS_CHANGE); - } - } - rport->pid = gidpn_rsp->dap; - bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE); - } - return; - } - - /* - * Reject Response - */ - switch (cthdr->reason_code) { - case CT_RSN_LOGICAL_BUSY: - /* - * Need to retry - */ - bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); - break; - - case CT_RSN_UNABLE_TO_PERF: - /* - * device doesn't exist : Start timer to cleanup this later. - */ - bfa_sm_send_event(rport, RPSM_EVENT_FAILED); - break; - - default: - bfa_sm_send_event(rport, RPSM_EVENT_FAILED); - break; - } -} - -/** - * Called to send a logout to the rport. - */ -static void -bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_rport_s *rport = rport_cbarg; - struct bfa_fcs_port_s *port; - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - u16 len; - - bfa_trc(rport->fcs, rport->pid); - - port = rport->port; - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, - bfa_fcs_rport_send_logo, rport); - return; - } - rport->fcxp = fcxp; - - len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, - bfa_fcs_port_get_fcid(port), 0, - bfa_fcs_port_get_pwwn(port)); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, NULL, rport, FC_MAX_PDUSZ, - FC_ELS_TOV); - - rport->stats.logos++; - bfa_fcxp_discard(rport->fcxp); - bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); -} - -/** - * Send ACC for a LOGO received. 
- */ -static void -bfa_fcs_rport_send_logo_acc(void *rport_cbarg) -{ - struct bfa_fcs_rport_s *rport = rport_cbarg; - struct bfa_fcs_port_s *port; - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - u16 len; - - bfa_trc(rport->fcs, rport->pid); - - port = rport->port; - - fcxp = bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) - return; - - rport->stats.logo_rcvd++; - len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, - bfa_fcs_port_get_fcid(port), rport->reply_oxid); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); -} - -/** - * This routine will be called by bfa_timer on timer timeouts. - * - * param[in] rport - pointer to bfa_fcs_port_ns_t. - * param[out] rport_status - pointer to return vport status in - * - * return - * void - * -* Special Considerations: - * - * note - */ -static void -bfa_fcs_rport_timeout(void *arg) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)arg; - - rport->stats.plogi_timeouts++; - bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT); -} - -static void -bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport, - struct fchs_s *rx_fchs, u16 len) -{ - struct bfa_fcxp_s *fcxp; - struct fchs_s fchs; - struct bfa_fcs_port_s *port = rport->port; - struct fc_prli_s *prli; - - bfa_trc(port->fcs, rx_fchs->s_id); - bfa_trc(port->fcs, rx_fchs->d_id); - - rport->stats.prli_rcvd++; - - if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) { - /* - * Target Mode : Let the fcptm handle it - */ - bfa_fcs_tin_rx_prli(rport->tin, rx_fchs, len); - return; - } - - /* - * We are either in Initiator or ipfc Mode - */ - prli = (struct fc_prli_s *) (rx_fchs + 1); - - if (prli->parampage.servparams.initiator) { - bfa_trc(rport->fcs, prli->parampage.type); - rport->scsi_function = BFA_RPORT_INITIATOR; - bfa_fcs_itnim_is_initiator(rport->itnim); - } else { - /* - * @todo: PRLI from a target ? 
- */ - bfa_trc(port->fcs, rx_fchs->s_id); - rport->scsi_function = BFA_RPORT_TARGET; - } - - fcxp = bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) - return; - - len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, - port->port_cfg.roles); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); -} - -static void -bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport, - struct fchs_s *rx_fchs, u16 len) -{ - struct bfa_fcxp_s *fcxp; - struct fchs_s fchs; - struct bfa_fcs_port_s *port = rport->port; - struct fc_rpsc_speed_info_s speeds; - struct bfa_pport_attr_s pport_attr; - - bfa_trc(port->fcs, rx_fchs->s_id); - bfa_trc(port->fcs, rx_fchs->d_id); - - rport->stats.rpsc_rcvd++; - speeds.port_speed_cap = - RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G | - RPSC_SPEED_CAP_8G; - - /* - * get curent speed from pport attributes from BFA - */ - bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); - - speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); - - fcxp = bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) - return; - - len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, - &speeds); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); -} - -static void -bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, - struct fchs_s *rx_fchs, u16 len) -{ - struct bfa_fcxp_s *fcxp; - struct fchs_s fchs; - struct bfa_fcs_port_s *port = rport->port; - struct fc_adisc_s *adisc; - - bfa_trc(port->fcs, rx_fchs->s_id); - bfa_trc(port->fcs, rx_fchs->d_id); - - rport->stats.adisc_rcvd++; - - if (BFA_FCS_VPORT_IS_TARGET_MODE(port)) { - /* - * @todo : Target Mode handling - */ - bfa_trc(port->fcs, rx_fchs->d_id); - bfa_assert(0); - return; - } - - adisc = (struct fc_adisc_s *) (rx_fchs + 1); - - /* - * Accept if the itnim for this rport is online. 
Else reject the ADISC - */ - if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { - - fcxp = bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) - return; - - len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), - rx_fchs->ox_id, port->port_cfg.pwwn, - port->port_cfg.nwwn); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, - BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, - FC_MAX_PDUSZ, 0); - } else { - rport->stats.adisc_rejected++; - bfa_fcs_rport_send_ls_rjt(rport, rx_fchs, - FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, - FC_LS_RJT_EXP_LOGIN_REQUIRED); - } - -} - -static void -bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_port_s *port = rport->port; - struct bfa_rport_info_s rport_info; - - rport_info.pid = rport->pid; - rport_info.local_pid = port->pid; - rport_info.lp_tag = port->lp_tag; - rport_info.vf_id = port->fabric->vf_id; - rport_info.vf_en = port->fabric->is_vf; - rport_info.fc_class = rport->fc_cos; - rport_info.cisc = rport->cisc; - rport_info.max_frmsz = rport->maxfrsize; - bfa_rport_online(rport->bfa_rport, &rport_info); -} - -static void -bfa_fcs_rport_fc4_pause(struct bfa_fcs_rport_s *rport) -{ - if (bfa_fcs_port_is_initiator(rport->port)) - bfa_fcs_itnim_pause(rport->itnim); - - if (bfa_fcs_port_is_target(rport->port)) - bfa_fcs_tin_pause(rport->tin); -} - -static void -bfa_fcs_rport_fc4_resume(struct bfa_fcs_rport_s *rport) -{ - if (bfa_fcs_port_is_initiator(rport->port)) - bfa_fcs_itnim_resume(rport->itnim); - - if (bfa_fcs_port_is_target(rport->port)) - bfa_fcs_tin_resume(rport->tin); -} - -static struct bfa_fcs_rport_s * -bfa_fcs_rport_alloc(struct bfa_fcs_port_s *port, wwn_t pwwn, u32 rpid) -{ - struct bfa_fcs_s *fcs = port->fcs; - struct bfa_fcs_rport_s *rport; - struct bfad_rport_s *rport_drv; - - /** - * allocate rport - */ - if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) - != BFA_STATUS_OK) { - bfa_trc(fcs, rpid); - return NULL; - } - - /* - * Initialize r-port - */ - rport->port = port; - rport->fcs = fcs; - rport->rp_drv = rport_drv; - rport->pid = rpid; - rport->pwwn = pwwn; - - /** - * allocate BFA rport - */ - rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); - if (!rport->bfa_rport) { - bfa_trc(fcs, rpid); - kfree(rport_drv); - return NULL; - } - - /** - * allocate FC-4s - */ - bfa_assert(bfa_fcs_port_is_initiator(port) ^ - bfa_fcs_port_is_target(port)); - - if (bfa_fcs_port_is_initiator(port)) { - rport->itnim = bfa_fcs_itnim_create(rport); - if (!rport->itnim) { - bfa_trc(fcs, rpid); - bfa_rport_delete(rport->bfa_rport); - kfree(rport_drv); - return NULL; - } - } - - if (bfa_fcs_port_is_target(port)) { - rport->tin = bfa_fcs_tin_create(rport); - if (!rport->tin) { - bfa_trc(fcs, rpid); - bfa_rport_delete(rport->bfa_rport); - kfree(rport_drv); - return NULL; - } - } - - bfa_fcs_port_add_rport(port, rport); - - bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); - - /* - * Initialize the Rport Features(RPF) Sub Module - */ - if (!BFA_FCS_PID_IS_WKA(rport->pid)) - bfa_fcs_rpf_init(rport); - - return rport; -} - - -static void -bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_port_s *port = rport->port; - - /** - * - delete FC-4s - * - delete BFA rport - * - remove from queue of rports - */ - if (bfa_fcs_port_is_initiator(port)) - bfa_fcs_itnim_delete(rport->itnim); - - if (bfa_fcs_port_is_target(port)) - bfa_fcs_tin_delete(rport->tin); - - bfa_rport_delete(rport->bfa_rport); - bfa_fcs_port_del_rport(port, rport); - 
kfree(rport->rp_drv); -} - -static void -bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport, - enum bfa_rport_aen_event event, - struct bfa_rport_aen_data_s *data) -{ - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = rport->fcs->logm; - wwn_t lpwwn = bfa_fcs_port_get_pwwn(rport->port); - wwn_t rpwwn = rport->pwwn; - char lpwwn_ptr[BFA_STRING_32]; - char rpwwn_ptr[BFA_STRING_32]; - char *prio_str[] = { "unknown", "high", "medium", "low" }; - - wwn2str(lpwwn_ptr, lpwwn); - wwn2str(rpwwn_ptr, rpwwn); - - switch (event) { - case BFA_RPORT_AEN_ONLINE: - case BFA_RPORT_AEN_OFFLINE: - case BFA_RPORT_AEN_DISCONNECT: - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event), - rpwwn_ptr, lpwwn_ptr); - break; - case BFA_RPORT_AEN_QOS_PRIO: - aen_data.rport.priv.qos = data->priv.qos; - bfa_log(logmod, BFA_AEN_RPORT_QOS_PRIO, - prio_str[aen_data.rport.priv.qos.qos_priority], - rpwwn_ptr, lpwwn_ptr); - break; - case BFA_RPORT_AEN_QOS_FLOWID: - aen_data.rport.priv.qos = data->priv.qos; - bfa_log(logmod, BFA_AEN_RPORT_QOS_FLOWID, - aen_data.rport.priv.qos.qos_flow_id, rpwwn_ptr, - lpwwn_ptr); - break; - default: - break; - } - - aen_data.rport.vf_id = rport->port->fabric->vf_id; - aen_data.rport.ppwwn = - bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(rport->fcs)); - aen_data.rport.lpwwn = lpwwn; - aen_data.rport.rpwwn = rpwwn; -} - -static void -bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_port_s *port = rport->port; - - rport->stats.onlines++; - - if (bfa_fcs_port_is_initiator(port)) { - bfa_fcs_itnim_rport_online(rport->itnim); - if (!BFA_FCS_PID_IS_WKA(rport->pid)) - bfa_fcs_rpf_rport_online(rport); - }; - - if (bfa_fcs_port_is_target(port)) - bfa_fcs_tin_rport_online(rport->tin); - - /* - * Don't post events for well known addresses - */ - if (!BFA_FCS_PID_IS_WKA(rport->pid)) - bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL); -} - -static void -bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_port_s *port = rport->port; - - rport->stats.offlines++; - - /* - * Don't post events for well known addresses - */ - if (!BFA_FCS_PID_IS_WKA(rport->pid)) { - if (bfa_fcs_port_is_online(rport->port) == BFA_TRUE) { - bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_DISCONNECT, - NULL); - } else { - bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_OFFLINE, - NULL); - } - } - - if (bfa_fcs_port_is_initiator(port)) { - bfa_fcs_itnim_rport_offline(rport->itnim); - if (!BFA_FCS_PID_IS_WKA(rport->pid)) - bfa_fcs_rpf_rport_offline(rport); - } - - if (bfa_fcs_port_is_target(port)) - bfa_fcs_tin_rport_offline(rport->tin); -} - -/** - * Update rport parameters from PLOGI or PLOGI accept. - */ -static void -bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) -{ - struct bfa_fcs_port_s *port = rport->port; - - /** - * - port name - * - node name - */ - rport->pwwn = plogi->port_name; - rport->nwwn = plogi->node_name; - - /** - * - class of service - */ - rport->fc_cos = 0; - if (plogi->class3.class_valid) - rport->fc_cos = FC_CLASS_3; - - if (plogi->class2.class_valid) - rport->fc_cos |= FC_CLASS_2; - - /** - * - CISC - * - MAX receive frame size - */ - rport->cisc = plogi->csp.cisc; - rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz); - - bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); - bfa_trc(port->fcs, port->fabric->bb_credit); - /** - * Direct Attach P2P mode : - * This is to handle a bug (233476) in IBM targets in Direct Attach - * Mode. 
Basically, in FLOGI Accept the target would have erroneously - * set the BB Credit to the value used in the FLOGI sent by the HBA. - * It uses the correct value (its own BB credit) in PLOGI. - */ - if ((!bfa_fcs_fabric_is_switched(port->fabric)) - && (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { - - bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); - bfa_trc(port->fcs, port->fabric->bb_credit); - - port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); - bfa_fcport_set_tx_bbcredit(port->fcs->bfa, - port->fabric->bb_credit); - } - -} - -/** - * Called to handle LOGO received from an existing remote port. - */ -static void -bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) -{ - rport->reply_oxid = fchs->ox_id; - bfa_trc(rport->fcs, rport->reply_oxid); - - rport->prlo = BFA_FALSE; - rport->stats.logo_rcvd++; - bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD); -} - - - -/** - * fcs_rport_public FCS rport public interfaces - */ - -/** - * Called by bport/vport to create a remote port instance for a discovered - * remote device. - * - * @param[in] port - base port or vport - * @param[in] rpid - remote port ID - * - * @return None - */ -struct bfa_fcs_rport_s * -bfa_fcs_rport_create(struct bfa_fcs_port_s *port, u32 rpid) -{ - struct bfa_fcs_rport_s *rport; - - bfa_trc(port->fcs, rpid); - rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid); - if (!rport) - return NULL; - - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); - return rport; -} - -/** - * Called to create a rport for which only the wwn is known. - * - * @param[in] port - base port - * @param[in] rpwwn - remote port wwn - * - * @return None - */ -struct bfa_fcs_rport_s * -bfa_fcs_rport_create_by_wwn(struct bfa_fcs_port_s *port, wwn_t rpwwn) -{ - struct bfa_fcs_rport_s *rport; - - bfa_trc(port->fcs, rpwwn); - rport = bfa_fcs_rport_alloc(port, rpwwn, 0); - if (!rport) - return NULL; - - bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); - return rport; -} - -/** - * Called by bport in private loop topology to indicate that a - * rport has been discovered and plogi has been completed. - * - * @param[in] port - base port or vport - * @param[in] rpid - remote port ID - */ -void -bfa_fcs_rport_start(struct bfa_fcs_port_s *port, struct fchs_s *fchs, - struct fc_logi_s *plogi) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id); - if (!rport) - return; - - bfa_fcs_rport_update(rport, plogi); - - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); -} - -/** - * Called by bport/vport to handle PLOGI received from a new remote port. - * If an existing rport does a plogi, it will be handled separately. - */ -void -bfa_fcs_rport_plogi_create(struct bfa_fcs_port_s *port, struct fchs_s *fchs, - struct fc_logi_s *plogi) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id); - if (!rport) - return; - - bfa_fcs_rport_update(rport, plogi); - - rport->reply_oxid = fchs->ox_id; - bfa_trc(rport->fcs, rport->reply_oxid); - - rport->stats.plogi_rcvd++; - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); -} - -static int -wwn_compare(wwn_t wwn1, wwn_t wwn2) -{ - u8 *b1 = (u8 *) &wwn1; - u8 *b2 = (u8 *) &wwn2; - int i; - - for (i = 0; i < sizeof(wwn_t); i++) { - if (b1[i] < b2[i]) - return -1; - if (b1[i] > b2[i]) - return 1; - } - return 0; -} - -/** - * Called by bport/vport to handle PLOGI received from an existing - * remote port. 
- */ -void -bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, - struct fc_logi_s *plogi) -{ - /** - * @todo Handle P2P and initiator-initiator. - */ - - bfa_fcs_rport_update(rport, plogi); - - rport->reply_oxid = rx_fchs->ox_id; - bfa_trc(rport->fcs, rport->reply_oxid); - - /** - * In Switched fabric topology, - * PLOGI to each other. If our pwwn is smaller, ignore it, - * if it is not a well known address. - * If the link topology is N2N, - * this Plogi should be accepted. - */ - if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) - && (bfa_fcs_fabric_is_switched(rport->port->fabric)) - && (!BFA_FCS_PID_IS_WKA(rport->pid))) { - bfa_trc(rport->fcs, rport->pid); - return; - } - - rport->stats.plogi_rcvd++; - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); -} - -/** - * Called by bport/vport to delete a remote port instance. - * -* Rport delete is called under the following conditions: - * - vport is deleted - * - vf is deleted - * - explicit request from OS to delete rport (vmware) - */ -void -bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport) -{ - bfa_sm_send_event(rport, RPSM_EVENT_DELETE); -} - -/** - * Called by bport/vport to when a target goes offline. - * - */ -void -bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport) -{ - bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); -} - -/** - * Called by bport in n2n when a target (attached port) becomes online. - * - */ -void -bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport) -{ - bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); -} - -/** - * Called by bport/vport to notify SCN for the remote port - */ -void -bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) -{ - - rport->stats.rscns++; - bfa_sm_send_event(rport, RPSM_EVENT_SCN); -} - -/** - * Called by fcpim to notify that the ITN cleanup is done. - */ -void -bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport) -{ - bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); -} - -/** - * Called by fcptm to notify that the ITN cleanup is done. - */ -void -bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport) -{ - bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); -} - -/** - * This routine BFA callback for bfa_rport_online() call. - * - * param[in] cb_arg - rport struct. - * - * return - * void - * -* Special Considerations: - * - * note - */ -void -bfa_cb_rport_online(void *cbarg) -{ - - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - - bfa_trc(rport->fcs, rport->pwwn); - bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); -} - -/** - * This routine BFA callback for bfa_rport_offline() call. 
- * - * param[in] rport - - * - * return - * void - * - * Special Considerations: - * - * note - */ -void -bfa_cb_rport_offline(void *cbarg) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - - bfa_trc(rport->fcs, rport->pwwn); - bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); -} - -/** - * This routine is a static BFA callback when there is a QoS flow_id - * change notification - * - * @param[in] rport - - * - * @return void - * - * Special Considerations: - * - * @note - */ -void -bfa_cb_rport_qos_scn_flowid(void *cbarg, - struct bfa_rport_qos_attr_s old_qos_attr, - struct bfa_rport_qos_attr_s new_qos_attr) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - struct bfa_rport_aen_data_s aen_data; - - bfa_trc(rport->fcs, rport->pwwn); - aen_data.priv.qos = new_qos_attr; - bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data); -} - -/** - * This routine is a static BFA callback when there is a QoS priority - * change notification - * - * @param[in] rport - - * - * @return void - * - * Special Considerations: - * - * @note - */ -void -bfa_cb_rport_qos_scn_prio(void *cbarg, struct bfa_rport_qos_attr_s old_qos_attr, - struct bfa_rport_qos_attr_s new_qos_attr) -{ - struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *)cbarg; - struct bfa_rport_aen_data_s aen_data; - - bfa_trc(rport->fcs, rport->pwwn); - aen_data.priv.qos = new_qos_attr; - bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data); -} - -/** - * Called to process any unsolicted frames from this remote port - */ -void -bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport) -{ - bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); -} - -/** - * Called to process any unsolicted frames from this remote port - */ -void -bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, - u16 len) -{ - struct bfa_fcs_port_s *port = rport->port; - struct fc_els_cmd_s *els_cmd; - - bfa_trc(rport->fcs, fchs->s_id); - bfa_trc(rport->fcs, fchs->d_id); - bfa_trc(rport->fcs, fchs->type); - - if (fchs->type != FC_TYPE_ELS) - return; - - els_cmd = (struct fc_els_cmd_s *) (fchs + 1); - - bfa_trc(rport->fcs, els_cmd->els_code); - - switch (els_cmd->els_code) { - case FC_ELS_LOGO: - bfa_fcs_rport_process_logo(rport, fchs); - break; - - case FC_ELS_ADISC: - bfa_fcs_rport_process_adisc(rport, fchs, len); - break; - - case FC_ELS_PRLO: - if (bfa_fcs_port_is_initiator(port)) - bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len); - - if (bfa_fcs_port_is_target(port)) - bfa_fcs_fcptm_uf_recv(rport->tin, fchs, len); - break; - - case FC_ELS_PRLI: - bfa_fcs_rport_process_prli(rport, fchs, len); - break; - - case FC_ELS_RPSC: - bfa_fcs_rport_process_rpsc(rport, fchs, len); - break; - - default: - bfa_fcs_rport_send_ls_rjt(rport, fchs, - FC_LS_RJT_RSN_CMD_NOT_SUPP, - FC_LS_RJT_EXP_NO_ADDL_INFO); - break; - } -} - -/* Send best case acc to prlo */ -static void -bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int len; - - bfa_trc(rport->fcs, rport->pid); - - fcxp = bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) - return; - - len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - rport->pid, bfa_fcs_port_get_fcid(port), - rport->reply_oxid, 0); - - bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, - port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, - NULL, NULL, FC_MAX_PDUSZ, 0); -} - -/* - * Send a LS reject - */ -static void -bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s 
*rport, struct fchs_s *rx_fchs, - u8 reason_code, u8 reason_code_expl) -{ - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - int len; - - bfa_trc(rport->fcs, rx_fchs->s_id); - - fcxp = bfa_fcs_fcxp_alloc(rport->fcs); - if (!fcxp) - return; - - len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id, - reason_code, reason_code_expl); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); -} - -/** - * Return state of rport. - */ -int -bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) -{ - return bfa_sm_to_state(rport_sm_table, rport->sm); -} - -/** - * Called by the Driver to set rport delete/ageout timeout - * - * param[in] rport timeout value in seconds. - * - * return None - */ -void -bfa_fcs_rport_set_del_timeout(u8 rport_tmo) -{ - /* - * convert to Millisecs - */ - if (rport_tmo > 0) - bfa_fcs_rport_del_timeout = rport_tmo * 1000; -} - -void -bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, uint16_t ox_id) -{ - bfa_trc(rport->fcs, rport->pid); - - rport->prlo = BFA_TRUE; - rport->reply_oxid = ox_id; - bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD); -} diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c deleted file mode 100644 index 15e0c470afd9..000000000000 --- a/drivers/scsi/bfa/rport_api.c +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include -#include -#include "fcs_vport.h" -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "fcs_trcmod.h" - -BFA_TRC_FILE(FCS, RPORT_API); - -/** - * rport_api.c Remote port implementation. - */ - -/** - * fcs_rport_api FCS rport API. - */ - -/** - * Direct API to add a target by port wwn. This interface is used, for - * example, by bios when target pwwn is known from boot lun configuration. - */ -bfa_status_t -bfa_fcs_rport_add(struct bfa_fcs_port_s *port, wwn_t *pwwn, - struct bfa_fcs_rport_s *rport, - struct bfad_rport_s *rport_drv) -{ - bfa_trc(port->fcs, *pwwn); - - return BFA_STATUS_OK; -} - -/** - * Direct API to remove a target and its associated resources. This - * interface is used, for example, by vmware driver to remove target - * ports from the target list for a VM. - */ -bfa_status_t -bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in) -{ - - struct bfa_fcs_rport_s *rport; - - bfa_trc(rport_in->fcs, rport_in->pwwn); - - rport = bfa_fcs_port_get_rport_by_pwwn(rport_in->port, rport_in->pwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - bfa_trc(rport_in->fcs, rport_in->pid); - return BFA_STATUS_UNKNOWN_RWWN; - } - - /* - * TBD if this remote port is online, send a logo - */ - return BFA_STATUS_OK; - -} - -/** - * Remote device status for display/debug. 
- */ -void -bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, - struct bfa_rport_attr_s *rport_attr) -{ - struct bfa_rport_qos_attr_s qos_attr; - struct bfa_fcs_port_s *port = rport->port; - enum bfa_pport_speed rport_speed = rport->rpf.rpsc_speed; - - bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); - - rport_attr->pid = rport->pid; - rport_attr->pwwn = rport->pwwn; - rport_attr->nwwn = rport->nwwn; - rport_attr->cos_supported = rport->fc_cos; - rport_attr->df_sz = rport->maxfrsize; - rport_attr->state = bfa_fcs_rport_get_state(rport); - rport_attr->fc_cos = rport->fc_cos; - rport_attr->cisc = rport->cisc; - rport_attr->scsi_function = rport->scsi_function; - rport_attr->curr_speed = rport->rpf.rpsc_speed; - rport_attr->assigned_speed = rport->rpf.assigned_speed; - - bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr); - rport_attr->qos_attr = qos_attr; - - rport_attr->trl_enforced = BFA_FALSE; - - if (bfa_fcport_is_ratelim(port->fcs->bfa)) { - if (rport_speed == BFA_PPORT_SPEED_UNKNOWN) { - /* Use default ratelim speed setting */ - rport_speed = - bfa_fcport_get_ratelim_speed(rport->fcs->bfa); - } - if (rport_speed < bfa_fcs_port_get_rport_max_speed(port)) - rport_attr->trl_enforced = BFA_TRUE; - } - - /* - * TODO - * rport->symname - */ -} - -/** - * Per remote device statistics. - */ -void -bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, - struct bfa_rport_stats_s *stats) -{ - *stats = rport->stats; -} - -void -bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport) -{ - bfa_os_memset((char *)&rport->stats, 0, - sizeof(struct bfa_rport_stats_s)); -} - -struct bfa_fcs_rport_s * -bfa_fcs_rport_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_port_get_rport_by_pwwn(port, rpwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - } - - return rport; -} - -struct bfa_fcs_rport_s * -bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_port_s *port, wwn_t rnwwn) -{ - struct bfa_fcs_rport_s *rport; - - rport = bfa_fcs_port_get_rport_by_nwwn(port, rnwwn); - if (rport == NULL) { - /* - * TBD Error handling - */ - } - - return rport; -} - -/* - * This API is to set the Rport's speed. Should be used when RPSC is not - * supported by the rport. - */ -void -bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, - enum bfa_pport_speed speed) -{ - rport->rpf.assigned_speed = speed; - - /* Set this speed in f/w only if the RPSC speed is not available */ - if (rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) - bfa_rport_speed(rport->bfa_rport, speed); -} - - diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c deleted file mode 100644 index f2a9361ce9a4..000000000000 --- a/drivers/scsi/bfa/rport_ftrs.c +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * rport_ftrs.c Remote port features (RPF) implementation. 
- */ - -#include -#include -#include "fcbuild.h" -#include "fcs_rport.h" -#include "fcs_lport.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "fcs.h" - -BFA_TRC_FILE(FCS, RPORT_FTRS); - -#define BFA_FCS_RPF_RETRIES (3) -#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */ - -static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_rpf_rpsc2_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_rpf_timeout(void *arg); - -/** - * fcs_rport_ftrs_sm FCS rport state machine events - */ - -enum rpf_event { - RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */ - RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */ - RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */ - RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */ - RPFSM_EVENT_RPSC_COMP = 5, - RPFSM_EVENT_RPSC_FAIL = 6, - RPFSM_EVENT_RPSC_ERROR = 7, -}; - -static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, - enum rpf_event event); -static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, - enum rpf_event event); -static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, - enum rpf_event event); -static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, - enum rpf_event event); -static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, - enum rpf_event event); -static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, - enum rpf_event event); - -static void -bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) -{ - struct bfa_fcs_rport_s *rport = rpf->rport; - struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric; - - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPFSM_EVENT_RPORT_ONLINE: - /* Send RPSC2 to a Brocade fabric only. 
*/ - if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && - ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) || - (bfa_fcs_fabric_get_switch_oui(fabric) == - BFA_FCS_BRCD_SWITCH_OUI))) { - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); - rpf->rpsc_retries = 0; - bfa_fcs_rpf_send_rpsc2(rpf, NULL); - } - break; - - case RPFSM_EVENT_RPORT_OFFLINE: - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -static void -bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) -{ - struct bfa_fcs_rport_s *rport = rpf->rport; - - bfa_trc(rport->fcs, event); - - switch (event) { - case RPFSM_EVENT_FCXP_SENT: - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc); - break; - - case RPFSM_EVENT_RPORT_OFFLINE: - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); - bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rpf->fcxp_wqe); - rpf->rpsc_retries = 0; - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -static void -bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) -{ - struct bfa_fcs_rport_s *rport = rpf->rport; - - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPFSM_EVENT_RPSC_COMP: - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); - /* Update speed info in f/w via BFA */ - if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) - bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); - else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) - bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); - break; - - case RPFSM_EVENT_RPSC_FAIL: - /* RPSC not supported by rport */ - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); - break; - - case RPFSM_EVENT_RPSC_ERROR: - /* need to retry...delayed a bit. */ - if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) { - bfa_timer_start(rport->fcs->bfa, &rpf->timer, - bfa_fcs_rpf_timeout, rpf, - BFA_FCS_RPF_RETRY_TIMEOUT); - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry); - } else { - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); - } - break; - - case RPFSM_EVENT_RPORT_OFFLINE: - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); - bfa_fcxp_discard(rpf->fcxp); - rpf->rpsc_retries = 0; - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -static void -bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) -{ - struct bfa_fcs_rport_s *rport = rpf->rport; - - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPFSM_EVENT_TIMEOUT: - /* re-send the RPSC */ - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); - bfa_fcs_rpf_send_rpsc2(rpf, NULL); - break; - - case RPFSM_EVENT_RPORT_OFFLINE: - bfa_timer_stop(&rpf->timer); - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); - rpf->rpsc_retries = 0; - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -static void -bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) -{ - struct bfa_fcs_rport_s *rport = rpf->rport; - - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPFSM_EVENT_RPORT_OFFLINE: - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); - rpf->rpsc_retries = 0; - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} - -static void -bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) -{ - struct bfa_fcs_rport_s *rport = rpf->rport; - - bfa_trc(rport->fcs, rport->pwwn); - bfa_trc(rport->fcs, rport->pid); - bfa_trc(rport->fcs, event); - - switch (event) { - case RPFSM_EVENT_RPORT_ONLINE: - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); - 
bfa_fcs_rpf_send_rpsc2(rpf, NULL); - break; - - case RPFSM_EVENT_RPORT_OFFLINE: - break; - - default: - bfa_sm_fault(rport->fcs, event); - } -} -/** - * Called when Rport is created. - */ -void bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport) -{ - struct bfa_fcs_rpf_s *rpf = &rport->rpf; - - bfa_trc(rport->fcs, rport->pid); - rpf->rport = rport; - - bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); -} - -/** - * Called when Rport becomes online - */ -void bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport) -{ - bfa_trc(rport->fcs, rport->pid); - - if (__fcs_min_cfg(rport->port->fcs)) - return; - - if (bfa_fcs_fabric_is_switched(rport->port->fabric)) - bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); -} - -/** - * Called when Rport becomes offline - */ -void bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport) -{ - bfa_trc(rport->fcs, rport->pid); - - if (__fcs_min_cfg(rport->port->fcs)) - return; - - rport->rpf.rpsc_speed = 0; - bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE); -} - -static void -bfa_fcs_rpf_timeout(void *arg) -{ - struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg; - struct bfa_fcs_rport_s *rport = rpf->rport; - - bfa_trc(rport->fcs, rport->pid); - bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT); -} - -static void -bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg; - struct bfa_fcs_rport_s *rport = rpf->rport; - struct bfa_fcs_port_s *port = rport->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(rport->fcs, rport->pwwn); - - fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe, - bfa_fcs_rpf_send_rpsc2, rpf); - return; - } - rpf->fcxp = fcxp; - - len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, - bfa_fcs_port_get_fcid(port), &rport->pid, 1); - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response, - rpf, FC_MAX_PDUSZ, FC_ELS_TOV); - rport->stats.rpsc_sent++; - bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT); - -} - -static void -bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, - bfa_status_t req_status, u32 rsp_len, - u32 resid_len, struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg; - struct bfa_fcs_rport_s *rport = rpf->rport; - struct fc_ls_rjt_s *ls_rjt; - struct fc_rpsc2_acc_s *rpsc2_acc; - u16 num_ents; - - bfa_trc(rport->fcs, req_status); - - if (req_status != BFA_STATUS_OK) { - bfa_trc(rport->fcs, req_status); - if (req_status == BFA_STATUS_ETIMER) - rport->stats.rpsc_failed++; - bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); - return; - } - - rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); - if (rpsc2_acc->els_cmd == FC_ELS_ACC) { - rport->stats.rpsc_accs++; - num_ents = bfa_os_ntohs(rpsc2_acc->num_pids); - bfa_trc(rport->fcs, num_ents); - if (num_ents > 0) { - bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); - bfa_trc(rport->fcs, - bfa_os_ntohs(rpsc2_acc->port_info[0].pid)); - bfa_trc(rport->fcs, - bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); - bfa_trc(rport->fcs, - bfa_os_ntohs(rpsc2_acc->port_info[0].index)); - bfa_trc(rport->fcs, - rpsc2_acc->port_info[0].type); - - if (rpsc2_acc->port_info[0].speed == 0) { - bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); - return; - } - - rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( - 
bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); - - bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); - } - } else { - ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); - bfa_trc(rport->fcs, ls_rjt->reason_code); - bfa_trc(rport->fcs, ls_rjt->reason_code_expl); - rport->stats.rpsc_rejects++; - if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) - bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL); - else - bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR); - } -} diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c deleted file mode 100644 index 8a60129e6307..000000000000 --- a/drivers/scsi/bfa/scn.c +++ /dev/null @@ -1,482 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include "fcs_lport.h" -#include "fcs_rport.h" -#include "fcs_ms.h" -#include "fcs_trcmod.h" -#include "fcs_fcxp.h" -#include "fcs.h" -#include "lport_priv.h" - -BFA_TRC_FILE(FCS, SCN); - -#define FC_QOS_RSCN_EVENT 0x0c -#define FC_FABRIC_NAME_RSCN_EVENT 0x0d - -/* - * forward declarations - */ -static void bfa_fcs_port_scn_send_scr(void *scn_cbarg, - struct bfa_fcxp_s *fcxp_alloced); -static void bfa_fcs_port_scn_scr_response(void *fcsarg, - struct bfa_fcxp_s *fcxp, - void *cbarg, - bfa_status_t req_status, - u32 rsp_len, - u32 resid_len, - struct fchs_s *rsp_fchs); -static void bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port, - struct fchs_s *rx_fchs); -static void bfa_fcs_port_scn_timeout(void *arg); - -/** - * fcs_scm_sm FCS SCN state machine - */ - -/** - * VPort SCN State Machine events - */ -enum port_scn_event { - SCNSM_EVENT_PORT_ONLINE = 1, - SCNSM_EVENT_PORT_OFFLINE = 2, - SCNSM_EVENT_RSP_OK = 3, - SCNSM_EVENT_RSP_ERROR = 4, - SCNSM_EVENT_TIMEOUT = 5, - SCNSM_EVENT_SCR_SENT = 6, -}; - -static void bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event); -static void bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event); -static void bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event); -static void bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event); -static void bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event); - -/** - * Starting state - awaiting link up. 
- */ -static void -bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event) -{ - switch (event) { - case SCNSM_EVENT_PORT_ONLINE: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr); - bfa_fcs_port_scn_send_scr(scn, NULL); - break; - - case SCNSM_EVENT_PORT_OFFLINE: - break; - - default: - bfa_sm_fault(scn->port->fcs, event); - } -} - -static void -bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event) -{ - switch (event) { - case SCNSM_EVENT_SCR_SENT: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr); - break; - - case SCNSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline); - bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe); - break; - - default: - bfa_sm_fault(scn->port->fcs, event); - } -} - -static void -bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event) -{ - struct bfa_fcs_port_s *port = scn->port; - - switch (event) { - case SCNSM_EVENT_RSP_OK: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_online); - break; - - case SCNSM_EVENT_RSP_ERROR: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr_retry); - bfa_timer_start(port->fcs->bfa, &scn->timer, - bfa_fcs_port_scn_timeout, scn, - BFA_FCS_RETRY_TIMEOUT); - break; - - case SCNSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline); - bfa_fcxp_discard(scn->fcxp); - break; - - default: - bfa_sm_fault(scn->port->fcs, event); - } -} - -static void -bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event) -{ - switch (event) { - case SCNSM_EVENT_TIMEOUT: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr); - bfa_fcs_port_scn_send_scr(scn, NULL); - break; - - case SCNSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline); - bfa_timer_stop(&scn->timer); - break; - - default: - bfa_sm_fault(scn->port->fcs, event); - } -} - -static void -bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn, - enum port_scn_event event) -{ - switch (event) { - case SCNSM_EVENT_PORT_OFFLINE: - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline); - break; - - default: - bfa_sm_fault(scn->port->fcs, event); - } -} - - - -/** - * fcs_scn_private FCS SCN private functions - */ - -/** - * This routine will be called to send a SCR command. - */ -static void -bfa_fcs_port_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced) -{ - struct bfa_fcs_port_scn_s *scn = scn_cbarg; - struct bfa_fcs_port_s *port = scn->port; - struct fchs_s fchs; - int len; - struct bfa_fcxp_s *fcxp; - - bfa_trc(port->fcs, port->pid); - bfa_trc(port->fcs, port->port_cfg.pwwn); - - fcxp = fcxp_alloced ? 
fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) { - bfa_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe, - bfa_fcs_port_scn_send_scr, scn); - return; - } - scn->fcxp = fcxp; - - /* - * Handle VU registrations for Base port only - */ - if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { - len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), - bfa_lps_is_brcd_fabric(port->fabric->lps), - port->pid, 0); - } else { - len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), BFA_FALSE, - port->pid, 0); - } - - bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, - FC_CLASS_3, len, &fchs, bfa_fcs_port_scn_scr_response, - (void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV); - - bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT); -} - -static void -bfa_fcs_port_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp, - void *cbarg, bfa_status_t req_status, - u32 rsp_len, u32 resid_len, - struct fchs_s *rsp_fchs) -{ - struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)cbarg; - struct bfa_fcs_port_s *port = scn->port; - struct fc_els_cmd_s *els_cmd; - struct fc_ls_rjt_s *ls_rjt; - - bfa_trc(port->fcs, port->port_cfg.pwwn); - - /* - * Sanity Checks - */ - if (req_status != BFA_STATUS_OK) { - bfa_trc(port->fcs, req_status); - bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); - return; - } - - els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); - - switch (els_cmd->els_code) { - - case FC_ELS_ACC: - bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK); - break; - - case FC_ELS_LS_RJT: - - ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); - - bfa_trc(port->fcs, ls_rjt->reason_code); - bfa_trc(port->fcs, ls_rjt->reason_code_expl); - - bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); - break; - - default: - bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR); - } -} - -/* - * Send a LS Accept - */ -static void -bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port, - struct fchs_s *rx_fchs) -{ - struct fchs_s fchs; - struct bfa_fcxp_s *fcxp; - struct bfa_rport_s *bfa_rport = NULL; - int len; - - bfa_trc(port->fcs, rx_fchs->s_id); - - fcxp = bfa_fcs_fcxp_alloc(port->fcs); - if (!fcxp) - return; - - len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id, - bfa_fcs_port_get_fcid(port), rx_fchs->ox_id); - - bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag, - BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, - FC_MAX_PDUSZ, 0); -} - -/** - * This routine will be called by bfa_timer on timer timeouts. - * - * param[in] vport - pointer to bfa_fcs_port_t. 
- * param[out] vport_status - pointer to return vport status in - * - * return - * void - * -* Special Considerations: - * - * note - */ -static void -bfa_fcs_port_scn_timeout(void *arg) -{ - struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)arg; - - bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT); -} - - - -/** - * fcs_scn_public FCS state change notification public interfaces - */ - -/* - * Functions called by port/fab - */ -void -bfa_fcs_port_scn_init(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); - - scn->port = port; - bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline); -} - -void -bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); - - scn->port = port; - bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE); -} - -void -bfa_fcs_port_scn_online(struct bfa_fcs_port_s *port) -{ - struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port); - - scn->port = port; - bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE); -} - -static void -bfa_fcs_port_scn_portid_rscn(struct bfa_fcs_port_s *port, u32 rpid) -{ - struct bfa_fcs_rport_s *rport; - - bfa_trc(port->fcs, rpid); - - /** - * If this is an unknown device, then it just came online. - * Otherwise let rport handle the RSCN event. - */ - rport = bfa_fcs_port_get_rport_by_pid(port, rpid); - if (rport == NULL) { - /* - * If min cfg mode is enabled, we donot need to - * discover any new rports. - */ - if (!__fcs_min_cfg(port->fcs)) - rport = bfa_fcs_rport_create(port, rpid); - } else { - bfa_fcs_rport_scn(rport); - } -} - -/** - * rscn format based PID comparison - */ -#define __fc_pid_match(__c0, __c1, __fmt) \ - (((__fmt) == FC_RSCN_FORMAT_FABRIC) || \ - (((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \ - ((__c0)[0] == (__c1)[0])) || \ - (((__fmt) == FC_RSCN_FORMAT_AREA) && \ - ((__c0)[0] == (__c1)[0]) && \ - ((__c0)[1] == (__c1)[1]))) - -static void -bfa_fcs_port_scn_multiport_rscn(struct bfa_fcs_port_s *port, - enum fc_rscn_format format, u32 rscn_pid) -{ - struct bfa_fcs_rport_s *rport; - struct list_head *qe, *qe_next; - u8 *c0, *c1; - - bfa_trc(port->fcs, format); - bfa_trc(port->fcs, rscn_pid); - - c0 = (u8 *) &rscn_pid; - - list_for_each_safe(qe, qe_next, &port->rport_q) { - rport = (struct bfa_fcs_rport_s *)qe; - c1 = (u8 *) &rport->pid; - if (__fc_pid_match(c0, c1, format)) - bfa_fcs_rport_scn(rport); - } -} - -void -bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port, struct fchs_s *fchs, - u32 len) -{ - struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1); - int num_entries; - u32 rscn_pid; - bfa_boolean_t nsquery = BFA_FALSE; - int i = 0; - - num_entries = - (bfa_os_ntohs(rscn->payldlen) - - sizeof(u32)) / sizeof(rscn->event[0]); - - bfa_trc(port->fcs, num_entries); - - port->stats.num_rscn++; - - bfa_fcs_port_scn_send_ls_acc(port, fchs); - - for (i = 0; i < num_entries; i++) { - rscn_pid = rscn->event[i].portid; - - bfa_trc(port->fcs, rscn->event[i].format); - bfa_trc(port->fcs, rscn_pid); - - switch (rscn->event[i].format) { - case FC_RSCN_FORMAT_PORTID: - if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) { - /* - * Ignore this event. f/w would have processed - * it - */ - bfa_trc(port->fcs, rscn_pid); - } else { - port->stats.num_portid_rscn++; - bfa_fcs_port_scn_portid_rscn(port, rscn_pid); - } - break; - - case FC_RSCN_FORMAT_FABRIC: - if (rscn->event[i].qualifier == - FC_FABRIC_NAME_RSCN_EVENT) { - bfa_fcs_port_ms_fabric_rscn(port); - break; - } - /* - * !!!!!!!!! 
Fall Through !!!!!!!!!!!!! - */ - - case FC_RSCN_FORMAT_AREA: - case FC_RSCN_FORMAT_DOMAIN: - nsquery = BFA_TRUE; - bfa_fcs_port_scn_multiport_rscn(port, - rscn->event[i].format, - rscn_pid); - break; - - default: - bfa_assert(0); - nsquery = BFA_TRUE; - } - } - - /** - * If any of area, domain or fabric RSCN is received, do a fresh discovery - * to find new devices. - */ - if (nsquery) - bfa_fcs_port_ns_query(port); -} - - diff --git a/drivers/scsi/bfa/vfapi.c b/drivers/scsi/bfa/vfapi.c deleted file mode 100644 index 391a4790bebd..000000000000 --- a/drivers/scsi/bfa/vfapi.c +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * vfapi.c Fabric module implementation. - */ - -#include "fcs_fabric.h" -#include "fcs_trcmod.h" - -BFA_TRC_FILE(FCS, VFAPI); - -/** - * fcs_vf_api virtual fabrics API - */ - -/** - * Enable VF mode. - * - * @param[in] fcs fcs module instance - * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL - * to use standard default vf_id of 1. - * - * @retval BFA_STATUS_OK vf mode is enabled - * @retval BFA_STATUS_BUSY Port is active. Port must be disabled - * before VF mode can be enabled. - */ -bfa_status_t -bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id) -{ - return BFA_STATUS_OK; -} - -/** - * Disable VF mode. - * - * @param[in] fcs fcs module instance - * - * @retval BFA_STATUS_OK vf mode is disabled - * @retval BFA_STATUS_BUSY VFs are present and being used. All - * VFs must be deleted before disabling - * VF mode. - */ -bfa_status_t -bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs) -{ - return BFA_STATUS_OK; -} - -/** - * Create a new VF instance. - * - * A new VF is created using the given VF configuration. A VF is identified - * by VF id. No duplicate VF creation is allowed with the same VF id. Once - * a VF is created, VF is automatically started after link initialization - * and EVFP exchange is completed. - * - * param[in] vf - FCS vf data structure. Memory is - * allocated by caller (driver) - * param[in] fcs - FCS module - * param[in] vf_cfg - VF configuration - * param[in] vf_drv - Opaque handle back to the driver's - * virtual vf structure - * - * retval BFA_STATUS_OK VF creation is successful - * retval BFA_STATUS_FAILED VF creation failed - * retval BFA_STATUS_EEXIST A VF exists with the given vf_id - */ -bfa_status_t -bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id, - struct bfa_port_cfg_s *port_cfg, struct bfad_vf_s *vf_drv) -{ - bfa_trc(fcs, vf_id); - return BFA_STATUS_OK; -} - -/** - * Use this function to delete a BFA VF object. VF object should - * be stopped before this function call. - * - * param[in] vf - pointer to bfa_vf_t. 
- * - * retval BFA_STATUS_OK On vf deletion success - * retval BFA_STATUS_BUSY VF is not in a stopped state - * retval BFA_STATUS_INPROGRESS VF deletion in in progress - */ -bfa_status_t -bfa_fcs_vf_delete(bfa_fcs_vf_t *vf) -{ - bfa_trc(vf->fcs, vf->vf_id); - return BFA_STATUS_OK; -} - -/** - * Start participation in VF. This triggers login to the virtual fabric. - * - * param[in] vf - pointer to bfa_vf_t. - * - * return None - */ -void -bfa_fcs_vf_start(bfa_fcs_vf_t *vf) -{ - bfa_trc(vf->fcs, vf->vf_id); -} - -/** - * Logout with the virtual fabric. - * - * param[in] vf - pointer to bfa_vf_t. - * - * retval BFA_STATUS_OK On success. - * retval BFA_STATUS_INPROGRESS VF is being stopped. - */ -bfa_status_t -bfa_fcs_vf_stop(bfa_fcs_vf_t *vf) -{ - bfa_trc(vf->fcs, vf->vf_id); - return BFA_STATUS_OK; -} - -/** - * Returns attributes of the given VF. - * - * param[in] vf pointer to bfa_vf_t. - * param[out] vf_attr vf attributes returned - * - * return None - */ -void -bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr) -{ - bfa_trc(vf->fcs, vf->vf_id); -} - -/** - * Return statistics associated with the given vf. - * - * param[in] vf pointer to bfa_vf_t. - * param[out] vf_stats vf statistics returned - * - * @return None - */ -void -bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats) -{ - bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s)); - return; -} - -void -/** - * clear statistics associated with the given vf. - * - * param[in] vf pointer to bfa_vf_t. - * - * @return None - */ -bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf) -{ - bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s)); - return; -} - -/** - * Returns FCS vf structure for a given vf_id. - * - * param[in] vf_id - VF_ID - * - * return - * If lookup succeeds, retuns fcs vf object, otherwise returns NULL - */ -bfa_fcs_vf_t * -bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) -{ - bfa_trc(fcs, vf_id); - if (vf_id == FC_VF_ID_NULL) - return &fcs->fabric; - - /** - * @todo vf support - */ - - return NULL; -} - -/** - * Returns driver VF structure for a given FCS vf. - * - * param[in] vf - pointer to bfa_vf_t - * - * return Driver VF structure - */ -struct bfad_vf_s * -bfa_fcs_vf_get_drv_vf(bfa_fcs_vf_t *vf) -{ - bfa_assert(vf); - bfa_trc(vf->fcs, vf->vf_id); - return vf->vf_drv; -} - -/** - * Return the list of VFs configured. - * - * param[in] fcs fcs module instance - * param[out] vf_ids returned list of vf_ids - * param[in,out] nvfs in:size of vf_ids array, - * out:total elements present, - * actual elements returned is limited by the size - * - * return Driver VF structure - */ -void -bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) -{ - bfa_trc(fcs, *nvfs); -} - -/** - * Return the list of all VFs visible from fabric. - * - * param[in] fcs fcs module instance - * param[out] vf_ids returned list of vf_ids - * param[in,out] nvfs in:size of vf_ids array, - * out:total elements present, - * actual elements returned is limited by the size - * - * return Driver VF structure - */ -void -bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) -{ - bfa_trc(fcs, *nvfs); -} - -/** - * Return the list of local logical ports present in the given VF. 
- * - * param[in] vf vf for which logical ports are returned - * param[out] lpwwn returned logical port wwn list - * param[in,out] nlports in:size of lpwwn list; - * out:total elements present, - * actual elements returned is limited by the size - * - */ -void -bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) -{ - struct list_head *qe; - struct bfa_fcs_vport_s *vport; - int i; - struct bfa_fcs_s *fcs; - - if (vf == NULL || lpwwn == NULL || *nlports == 0) - return; - - fcs = vf->fcs; - - bfa_trc(fcs, vf->vf_id); - bfa_trc(fcs, (u32) *nlports); - - i = 0; - lpwwn[i++] = vf->bport.port_cfg.pwwn; - - list_for_each(qe, &vf->vport_q) { - if (i >= *nlports) - break; - - vport = (struct bfa_fcs_vport_s *) qe; - lpwwn[i++] = vport->lport.port_cfg.pwwn; - } - - bfa_trc(fcs, i); - *nlports = i; - return; -} - - diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c deleted file mode 100644 index b378ec79d386..000000000000 --- a/drivers/scsi/bfa/vport.c +++ /dev/null @@ -1,903 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -/** - * bfa_fcs_vport.c FCS virtual port state machine - */ - -#include -#include -#include -#include "fcs_fabric.h" -#include "fcs_lport.h" -#include "fcs_vport.h" -#include "fcs_trcmod.h" -#include "fcs.h" -#include - -BFA_TRC_FILE(FCS, VPORT); - -#define __vport_fcs(__vp) ((__vp)->lport.fcs) -#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn) -#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn) -#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa) -#define __vport_fcid(__vp) ((__vp)->lport.pid) -#define __vport_fabric(__vp) ((__vp)->lport.fabric) -#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id) - -#define BFA_FCS_VPORT_MAX_RETRIES 5 -/* - * Forward declarations - */ -static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport); -static void bfa_fcs_vport_timeout(void *vport_arg); -static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); -static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); - -/** - * fcs_vport_sm FCS virtual port state machine - */ - -/** - * VPort State Machine events - */ -enum bfa_fcs_vport_event { - BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */ - BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */ - BFA_FCS_VPORT_SM_START = 3, /* vport start request */ - BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */ - BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */ - BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */ - BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */ - BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */ - BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */ - BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */ - BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */ - BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error */ - BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */ -}; - -static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, - 
enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); -static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event); - -static struct bfa_sm_table_s vport_sm_table[] = { - {BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT}, - {BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED}, - {BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE}, - {BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC}, - {BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY}, - {BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE}, - {BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING}, - {BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP}, - {BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO}, - {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} -}; - -/** - * Beginning state. - */ -static void -bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_CREATE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_created); - bfa_fcs_fabric_addvport(__vport_fabric(vport), vport); - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * Created state - a start event is required to start up the state machine. - */ -static void -bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_START: - if (bfa_fcs_fabric_is_online(__vport_fabric(vport)) - && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) { - bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); - bfa_fcs_vport_do_fdisc(vport); - } else { - /** - * Fabric is offline or not NPIV capable, stay in - * offline state. - */ - vport->vport_stats.fab_no_npiv++; - bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); - } - break; - - case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - bfa_fcs_port_delete(&vport->lport); - break; - - case BFA_FCS_VPORT_SM_ONLINE: - case BFA_FCS_VPORT_SM_OFFLINE: - /** - * Ignore ONLINE/OFFLINE events from fabric till vport is started. - */ - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * Offline state - awaiting ONLINE event from fabric SM. 
- */ -static void -bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - bfa_fcs_port_delete(&vport->lport); - break; - - case BFA_FCS_VPORT_SM_ONLINE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); - vport->fdisc_retries = 0; - bfa_fcs_vport_do_fdisc(vport); - break; - - case BFA_FCS_VPORT_SM_OFFLINE: - /* - * This can happen if the vport couldn't be initialzied due - * the fact that the npiv was not enabled on the switch. In - * that case we will put the vport in offline state. However, - * the link can go down and cause the this event to be sent when - * we are already offline. Ignore it. - */ - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * FDISC is sent and awaiting reply from fabric. - */ -static void -bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - bfa_lps_discard(vport->lps); - bfa_fcs_port_delete(&vport->lport); - break; - - case BFA_FCS_VPORT_SM_OFFLINE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); - bfa_lps_discard(vport->lps); - break; - - case BFA_FCS_VPORT_SM_RSP_OK: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_online); - bfa_fcs_port_online(&vport->lport); - break; - - case BFA_FCS_VPORT_SM_RSP_ERROR: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry); - bfa_timer_start(__vport_bfa(vport), &vport->timer, - bfa_fcs_vport_timeout, vport, - BFA_FCS_RETRY_TIMEOUT); - break; - - case BFA_FCS_VPORT_SM_RSP_FAILED: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); - break; - - case BFA_FCS_VPORT_SM_RSP_DUP_WWN: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_error); - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * FDISC attempt failed - a timer is active to retry FDISC. - */ -static void -bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - bfa_timer_stop(&vport->timer); - bfa_fcs_port_delete(&vport->lport); - break; - - case BFA_FCS_VPORT_SM_OFFLINE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); - bfa_timer_stop(&vport->timer); - break; - - case BFA_FCS_VPORT_SM_TIMEOUT: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); - vport->vport_stats.fdisc_retries++; - vport->fdisc_retries++; - bfa_fcs_vport_do_fdisc(vport); - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * Vport is online (FDISC is complete). 
- */ -static void -bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting); - bfa_fcs_port_delete(&vport->lport); - break; - - case BFA_FCS_VPORT_SM_OFFLINE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); - bfa_lps_discard(vport->lps); - bfa_fcs_port_offline(&vport->lport); - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * Vport is being deleted - awaiting lport delete completion to send - * LOGO to fabric. - */ -static void -bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELETE: - break; - - case BFA_FCS_VPORT_SM_DELCOMP: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo); - bfa_fcs_vport_do_logo(vport); - break; - - case BFA_FCS_VPORT_SM_OFFLINE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * Error State. - * This state will be set when the Vport Creation fails due to errors like - * Dup WWN. In this state only operation allowed is a Vport Delete. - */ -static void -bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELETE: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); - bfa_fcs_port_delete(&vport->lport); - - break; - - default: - bfa_trc(__vport_fcs(vport), event); - } -} - -/** - * Lport cleanup is in progress since vport is being deleted. Fabric is - * offline, so no LOGO is needed to complete vport deletion. - */ -static void -bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_DELCOMP: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); - bfa_fcs_vport_free(vport); - break; - - case BFA_FCS_VPORT_SM_DELETE: - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - -/** - * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup - * is done. - */ -static void -bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, - enum bfa_fcs_vport_event event) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), event); - - switch (event) { - case BFA_FCS_VPORT_SM_OFFLINE: - bfa_lps_discard(vport->lps); - /* - * !!! fall through !!! 
- */ - - case BFA_FCS_VPORT_SM_RSP_OK: - case BFA_FCS_VPORT_SM_RSP_ERROR: - bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); - bfa_fcs_vport_free(vport); - break; - - case BFA_FCS_VPORT_SM_DELETE: - break; - - default: - bfa_sm_fault(__vport_fcs(vport), event); - } -} - - - -/** - * fcs_vport_private FCS virtual port private functions - */ - -/** - * Send AEN notification - */ -static void -bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event) -{ - union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = port->fcs->logm; - enum bfa_port_role role = port->port_cfg.roles; - wwn_t lpwwn = bfa_fcs_port_get_pwwn(port); - char lpwwn_ptr[BFA_STRING_32]; - char *role_str[BFA_PORT_ROLE_FCP_MAX / 2 + 1] = - { "Initiator", "Target", "IPFC" }; - - wwn2str(lpwwn_ptr, lpwwn); - - bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); - - bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr, - role_str[role/2]); - - aen_data.lport.vf_id = port->fabric->vf_id; - aen_data.lport.roles = role; - aen_data.lport.ppwwn = - bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(port->fcs)); - aen_data.lport.lpwwn = lpwwn; -} - -/** - * This routine will be called to send a FDISC command. - */ -static void -bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) -{ - bfa_lps_fdisc(vport->lps, vport, - bfa_fcport_get_maxfrsize(__vport_bfa(vport)), - __vport_pwwn(vport), __vport_nwwn(vport)); - vport->vport_stats.fdisc_sent++; -} - -static void -bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) -{ - u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps); - u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps); - - bfa_trc(__vport_fcs(vport), lsrjt_rsn); - bfa_trc(__vport_fcs(vport), lsrjt_expl); - - /* - * For certain reason codes, we don't want to retry. - */ - switch (bfa_lps_get_lsrjt_expl(vport->lps)) { - case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ - case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ - if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - else { - bfa_fcs_vport_aen_post(&vport->lport, - BFA_LPORT_AEN_NPIV_DUP_WWN); - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN); - } - break; - - case FC_LS_RJT_EXP_INSUFF_RES: - /* - * This means max logins per port/switch setting on the - * switch was exceeded. - */ - if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - else { - bfa_fcs_vport_aen_post(&vport->lport, - BFA_LPORT_AEN_NPIV_FABRIC_MAX); - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); - } - break; - - default: - if (vport->fdisc_retries == 0) /* Print only once */ - bfa_fcs_vport_aen_post(&vport->lport, - BFA_LPORT_AEN_NPIV_UNKNOWN); - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - } -} - -/** - * Called to send a logout to the fabric. Used when a V-Port is - * deleted/stopped. - */ -static void -bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport) -{ - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - - vport->vport_stats.logo_sent++; - bfa_lps_fdisclogo(vport->lps); -} - -/** - * This routine will be called by bfa_timer on timer timeouts. - * - * param[in] vport - pointer to bfa_fcs_vport_t. 
- * param[out] vport_status - pointer to return vport status in - * - * return - * void - * -* Special Considerations: - * - * note - */ -static void -bfa_fcs_vport_timeout(void *vport_arg) -{ - struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *)vport_arg; - - vport->vport_stats.fdisc_timeouts++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT); -} - -static void -bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) -{ - bfa_fcs_fabric_delvport(__vport_fabric(vport), vport); - bfa_fcb_vport_delete(vport->vport_drv); - bfa_lps_delete(vport->lps); -} - - - -/** - * fcs_vport_public FCS virtual port public interfaces - */ - -/** - * Online notification from fabric SM. - */ -void -bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_online++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); -} - -/** - * Offline notification from fabric SM. - */ -void -bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_offline++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); -} - -/** - * Cleanup notification from fabric SM on link timer expiry. - */ -void -bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) -{ - vport->vport_stats.fab_cleanup++; -} - -/** - * delete notification from fabric SM. To be invoked from within FCS. - */ -void -bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport) -{ - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); -} - -/** - * Delete completion callback from associated lport - */ -void -bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) -{ - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP); -} - -/** - * fcs_vport_api Virtual port API - */ - -/** - * Use this function to instantiate a new FCS vport object. This - * function will not trigger any HW initialization process (which will be - * done in vport_start() call) - * - * param[in] vport - pointer to bfa_fcs_vport_t. This space - * needs to be allocated by the driver. - * param[in] fcs - FCS instance - * param[in] vport_cfg - vport configuration - * param[in] vf_id - VF_ID if vport is created within a VF. - * FC_VF_ID_NULL to specify base fabric. - * param[in] vport_drv - Opaque handle back to the driver's vport - * structure - * - * retval BFA_STATUS_OK - on success. - * retval BFA_STATUS_FAILED - on failure. - */ -bfa_status_t -bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, - u16 vf_id, struct bfa_port_cfg_s *vport_cfg, - struct bfad_vport_s *vport_drv) -{ - if (vport_cfg->pwwn == 0) - return BFA_STATUS_INVALID_WWN; - - if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn) - return BFA_STATUS_VPORT_WWN_BP; - - if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL) - return BFA_STATUS_VPORT_EXISTS; - - if (bfa_fcs_fabric_vport_count(&fcs->fabric) == - bfa_lps_get_max_vport(fcs->bfa)) - return BFA_STATUS_VPORT_MAX; - - vport->lps = bfa_lps_alloc(fcs->bfa); - if (!vport->lps) - return BFA_STATUS_VPORT_MAX; - - vport->vport_drv = vport_drv; - vport_cfg->preboot_vp = BFA_FALSE; - bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); - - bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport); - bfa_fcs_lport_init(&vport->lport, vport_cfg); - - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); - - return BFA_STATUS_OK; -} - -/** - * Use this function to instantiate a new FCS PBC vport object. This - * function will not trigger any HW initialization process (which will be - * done in vport_start() call) - * - * param[in] vport - pointer to bfa_fcs_vport_t. 
This space - * needs to be allocated by the driver. - * param[in] fcs - FCS instance - * param[in] vport_cfg - vport configuration - * param[in] vf_id - VF_ID if vport is created within a VF. - * FC_VF_ID_NULL to specify base fabric. - * param[in] vport_drv - Opaque handle back to the driver's vport - * structure - * - * retval BFA_STATUS_OK - on success. - * retval BFA_STATUS_FAILED - on failure. - */ -bfa_status_t -bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, - uint16_t vf_id, struct bfa_port_cfg_s *vport_cfg, - struct bfad_vport_s *vport_drv) -{ - bfa_status_t rc; - - rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv); - vport->lport.port_cfg.preboot_vp = BFA_TRUE; - - return rc; -} - -/** - * Use this function initialize the vport. - * - * @param[in] vport - pointer to bfa_fcs_vport_t. - * - * @returns None - */ -bfa_status_t -bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport) -{ - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START); - - return BFA_STATUS_OK; -} - -/** - * Use this function quiese the vport object. This function will return - * immediately, when the vport is actually stopped, the - * bfa_drv_vport_stop_cb() will be called. - * - * param[in] vport - pointer to bfa_fcs_vport_t. - * - * return None - */ -bfa_status_t -bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport) -{ - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP); - - return BFA_STATUS_OK; -} - -/** - * Use this function to delete a vport object. Fabric object should - * be stopped before this function call. - * - * Donot invoke this from within FCS - * - * param[in] vport - pointer to bfa_fcs_vport_t. - * - * return None - */ -bfa_status_t -bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport) -{ - if (vport->lport.port_cfg.preboot_vp) - return BFA_STATUS_PBC; - - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); - - return BFA_STATUS_OK; -} - -/** - * Use this function to get vport's current status info. - * - * param[in] vport pointer to bfa_fcs_vport_t. - * param[out] attr pointer to return vport attributes - * - * return None - */ -void -bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, - struct bfa_vport_attr_s *attr) -{ - if (vport == NULL || attr == NULL) - return; - - bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s)); - - bfa_fcs_port_get_attr(&vport->lport, &attr->port_attr); - attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); -} - -/** - * Use this function to get vport's statistics. - * - * param[in] vport pointer to bfa_fcs_vport_t. - * param[out] stats pointer to return vport statistics in - * - * return None - */ -void -bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, - struct bfa_vport_stats_s *stats) -{ - *stats = vport->vport_stats; -} - -/** - * Use this function to clear vport's statistics. - * - * param[in] vport pointer to bfa_fcs_vport_t. - * - * return None - */ -void -bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport) -{ - bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); -} - -/** - * Lookup a virtual port. Excludes base port from lookup. 
- */ -struct bfa_fcs_vport_s * -bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn) -{ - struct bfa_fcs_vport_s *vport; - struct bfa_fcs_fabric_s *fabric; - - bfa_trc(fcs, vf_id); - bfa_trc(fcs, vpwwn); - - fabric = bfa_fcs_vf_lookup(fcs, vf_id); - if (!fabric) { - bfa_trc(fcs, vf_id); - return NULL; - } - - vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn); - return vport; -} - -/** - * FDISC Response - */ -void -bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) -{ - struct bfa_fcs_vport_s *vport = uarg; - - bfa_trc(__vport_fcs(vport), __vport_pwwn(vport)); - bfa_trc(__vport_fcs(vport), status); - - switch (status) { - case BFA_STATUS_OK: - /* - * Initialize the V-Port fields - */ - __vport_fcid(vport) = bfa_lps_get_pid(vport->lps); - vport->vport_stats.fdisc_accepts++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); - break; - - case BFA_STATUS_INVALID_MAC: - /* - * Only for CNA - */ - vport->vport_stats.fdisc_acc_bad++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - - break; - - case BFA_STATUS_EPROTOCOL: - switch (bfa_lps_get_extstatus(vport->lps)) { - case BFA_EPROTO_BAD_ACCEPT: - vport->vport_stats.fdisc_acc_bad++; - break; - - case BFA_EPROTO_UNKNOWN_RSP: - vport->vport_stats.fdisc_unknown_rsp++; - break; - - default: - break; - } - - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - break; - - case BFA_STATUS_FABRIC_RJT: - vport->vport_stats.fdisc_rejects++; - bfa_fcs_vport_fdisc_rejected(vport); - break; - - default: - vport->vport_stats.fdisc_rsp_err++; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); - } -} - -/** - * LOGO response - */ -void -bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg) -{ - struct bfa_fcs_vport_s *vport = uarg; - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); -} - -/** - * Received clear virtual link - */ -void -bfa_cb_lps_cvl_event(void *bfad, void *uarg) -{ - struct bfa_fcs_vport_s *vport = uarg; - - /* Send an Offline followed by an ONLINE */ - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); -} -- cgit v1.2.3 From 045d3fe766b01921e24e2d4178e011b3b09ad4d6 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 10 Sep 2010 01:22:07 -0400 Subject: [SCSI] sd: Update thin provisioning support Add support for the Thin Provisioning VPD page and use the TPU and TPWS bits to switch between UNMAP and WRITE SAME(16) for discards. If no TP VPD page is present we fall back to old scheme where the max descriptor count combined with the max lba count are used trigger UNMAP. Signed-off-by: Martin K. Petersen Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 46 +++++++++++++++++++++++++++++++++++++++++----- drivers/scsi/sd.h | 3 +++ 2 files changed, 44 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8c9b275f71a7..0c4f89cfb7dc 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2039,14 +2039,24 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) lba_count = get_unaligned_be32(&buffer[20]); desc_count = get_unaligned_be32(&buffer[24]); - if (lba_count) { - q->limits.max_discard_sectors = - lba_count * sector_sz >> 9; - - if (desc_count) + if (lba_count && desc_count) { + if (sdkp->tpvpd && !sdkp->tpu) + sdkp->unmap = 0; + else sdkp->unmap = 1; } + if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) { + sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \ + "enabled but neither TPU, nor TPWS are " \ + "set. 
Disabling discard!\n"); + goto out; + } + + if (lba_count) + q->limits.max_discard_sectors = + lba_count * sector_sz >> 9; + granularity = get_unaligned_be32(&buffer[28]); if (granularity) @@ -2087,6 +2097,31 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) kfree(buffer); } +/** + * sd_read_thin_provisioning - Query thin provisioning VPD page + * @disk: disk to query + */ +static void sd_read_thin_provisioning(struct scsi_disk *sdkp) +{ + unsigned char *buffer; + const int vpd_len = 8; + + if (sdkp->thin_provisioning == 0) + return; + + buffer = kmalloc(vpd_len, GFP_KERNEL); + + if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) + goto out; + + sdkp->tpvpd = 1; + sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */ + sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ + + out: + kfree(buffer); +} + static int sd_try_extended_inquiry(struct scsi_device *sdp) { /* @@ -2138,6 +2173,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_capacity(sdkp, buffer); if (sd_try_extended_inquiry(sdp)) { + sd_read_thin_provisioning(sdkp); sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 315ce9d96b1f..a40730ee465c 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -63,6 +63,9 @@ struct scsi_disk { unsigned first_scan : 1; unsigned thin_provisioning : 1; unsigned unmap : 1; + unsigned tpws : 1; + unsigned tpu : 1; + unsigned tpvpd : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) -- cgit v1.2.3 From f0ad30d3d2dc924decc0e10b1ff6dc32525a5d99 Mon Sep 17 00:00:00 2001 From: David Milburn Date: Fri, 3 Sep 2010 17:13:03 -0500 Subject: [SCSI] libsas: fix NCQ mixing with non-NCQ Some cards (like mvsas) have issue troubles if non-NCQ commands are mixed with NCQ ones. Fix this by using the libata default NCQ check routine which waits until all NCQ commands are complete before issuing a non-NCQ one. The impact to cards (like aic94xx) which don't need this logic should be minimal Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_ata.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 042153cbbde1..ddbade7beec9 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -347,6 +347,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in, static struct ata_port_operations sas_sata_ops = { .phy_reset = sas_ata_phy_reset, .post_internal_cmd = sas_ata_post_internal, + .qc_defer = ata_std_qc_defer, .qc_prep = ata_noop_qc_prep, .qc_issue = sas_ata_qc_issue, .qc_fill_rtf = sas_ata_qc_fill_rtf, -- cgit v1.2.3 From a6751ccb9ba85180c84135cc921eea11d83d5689 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 14 Sep 2010 14:12:54 +0200 Subject: [SCSI] qla4xxx: fix build on PPC We use read/write[bslq] but do not include linux/io.h. This causes build failures on PPC. Include that file. Signed-off-by: Jiri Slaby Cc: Stable Tree Acked-by: Vikas Chaudhary Signed-off-by: James Bottomley --- drivers/scsi/qla4xxx/ql4_nx.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 5d4a3822382d..449256f2c5f8 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -5,6 +5,7 @@ * See LICENSE.qla4xxx for copyright and licensing details. 
*/ #include +#include #include #include "ql4_def.h" #include "ql4_glbl.h" -- cgit v1.2.3 From 485868208ee833628d49f81b4609001749d92d56 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 15 Sep 2010 16:52:28 -0500 Subject: [SCSI] fnic: prep for fc host dev loss tmo support This removes the driver's get_host_def_dev_loss_tmo callback and just has the driver set the dev loss using the fc class fc_host_dev_loss_tmo macro like is done for other fc params. This also adds a set rport dev loss function so the fc class host dev loss tmp sysfs support being added in the fc class patch can update rports. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/fnic/fnic_main.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index df91a61591b2..bb63f1a1f808 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -110,12 +110,12 @@ static struct scsi_host_template fnic_host_template = { }; static void -fnic_get_host_def_loss_tmo(struct Scsi_Host *shost) +fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) { - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); - - fc_host_def_dev_loss_tmo(shost) = fnic->config.port_down_timeout / 1000; + if (timeout) + rport->dev_loss_tmo = timeout; + else + rport->dev_loss_tmo = 1; } static void fnic_get_host_speed(struct Scsi_Host *shost); @@ -145,9 +145,9 @@ static struct fc_function_template fnic_fc_functions = { .show_starget_port_name = 1, .show_starget_port_id = 1, .show_rport_dev_loss_tmo = 1, + .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, .issue_fc_host_lip = fnic_reset, .get_fc_host_stats = fnic_get_stats, - .get_host_def_dev_loss_tmo = fnic_get_host_def_loss_tmo, .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), .terminate_rport_io = fnic_terminate_rport_io, .bsg_request = fc_lport_bsg_request, @@ -712,6 +712,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, goto err_out_free_exch_mgr; } fc_host_maxframe_size(lp->host) = lp->mfs; + fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; sprintf(fc_host_symbolic_name(lp->host), DRV_NAME " v" DRV_VERSION " over %s", fnic->name); -- cgit v1.2.3 From a5110f2983bdf931d9819f88505775b16de8b99e Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 15 Sep 2010 16:52:29 -0500 Subject: [SCSI] ibmvfc: prep for fc host dev loss tmo support This removes the driver's get_host_def_dev_loss_tmo callback and just has the driver set the dev loss using the fc class fc_host_dev_loss_tmo macro like is done for other fc params. This patch also removes the module dev loss param. To override the value the fc host sysfs value being added in the fc class patch can be used instead of the driver module param. 
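The pattern this series of prep patches converges on can be summarized with a minimal sketch (the probe function name and the 30-second value below are illustrative only, not taken from any one driver): the LLD seeds the per-host default once at probe time via the fc_host_dev_loss_tmo() macro, exactly as it already does for other fc_host parameters, and the transport class then copies that value into each rport it creates and lets the new fc_host sysfs attribute override it at runtime.

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

/*
 * Minimal sketch, assuming a driver-specific default of 30 seconds.
 * Real drivers derive the value from their own tunables instead
 * (port_down_retry_count, cfg_devloss_tmo, IBMVFC_DEV_LOSS_TMO, ...);
 * if the driver sets nothing, the fc class default (the dev_loss_tmo
 * module parameter, 60 seconds) set up in fc_host_setup() is used.
 */
static void example_lld_init_host_attrs(struct Scsi_Host *shost)
{
	/* Seed the per-host default once, like other fc_host params. */
	fc_host_dev_loss_tmo(shost) = 30;
}

/*
 * The transport class then picks the value up for every new rport in
 * fc_rport_create():
 *
 *	rport->dev_loss_tmo = fc_host->dev_loss_tmo;
 *
 * and a write to the new fc_host dev_loss_tmo sysfs file updates both
 * the host default and all existing rports, replacing the old
 * get_host_def_dev_loss_tmo callback and per-driver module parameters.
 */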
Signed-off-by: Mike Christie Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index b1512e9d1b9d..f033bf39690f 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -50,7 +50,6 @@ static unsigned int max_lun = IBMVFC_MAX_LUN; static unsigned int max_targets = IBMVFC_MAX_TARGETS; static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT; static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; -static unsigned int dev_loss_tmo = IBMVFC_DEV_LOSS_TMO; static unsigned int ibmvfc_debug = IBMVFC_DEBUG; static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; static LIST_HEAD(ibmvfc_head); @@ -84,11 +83,6 @@ MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. " module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable driver debug information. " "[Default=" __stringify(IBMVFC_DEBUG) "]"); -module_param_named(dev_loss_tmo, dev_loss_tmo, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dev_loss_tmo, "Maximum number of seconds that the FC " - "transport should insulate the loss of a remote port. Once this " - "value is exceeded, the scsi target is removed. " - "[Default=" __stringify(IBMVFC_DEV_LOSS_TMO) "]"); module_param_named(log_level, log_level, uint, 0); MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); @@ -1030,11 +1024,6 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost) spin_unlock_irqrestore(shost->host_lock, flags); } -static void ibmvfc_set_host_def_dev_loss_tmo(struct Scsi_Host *shost) -{ - fc_host_def_dev_loss_tmo(shost) = dev_loss_tmo; -} - /** * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout * @rport: rport struct @@ -4772,6 +4761,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) if ((rc = scsi_add_host(shost, dev))) goto release_event_pool; + fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO; + if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj, &ibmvfc_trace_attr))) { dev_err(dev, "Failed to create trace file. rc=%d\n", rc); @@ -4917,8 +4908,6 @@ static struct fc_function_template ibmvfc_transport_functions = { .get_host_speed = ibmvfc_get_host_speed, .show_host_speed = 1, - .get_host_def_dev_loss_tmo = ibmvfc_set_host_def_dev_loss_tmo, - .issue_fc_host_lip = ibmvfc_issue_fc_host_lip, .terminate_rport_io = ibmvfc_terminate_rport_io, -- cgit v1.2.3 From d2b5f10e5b93633a40d9263383b914f06019f00b Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 15 Sep 2010 16:52:30 -0500 Subject: [SCSI] qla2xxx: prep for fc host dev loss tmo support This removes the driver's get_host_def_dev_loss_tmo callback and just has the driver set the dev loss using the fc class fc_host_dev_loss_tmo macro like is done for other fc params. 
Signed-off-by: Mike Christie Acked-by: Andrew Vasquez Signed-off-by: James Bottomley --- drivers/scsi/qla2xxx/qla_attr.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index c9781050c9ed..029fe6b9d463 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1529,15 +1529,6 @@ qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) rport->dev_loss_tmo = 1; } -static void -qla2x00_get_host_def_loss_tmo(struct Scsi_Host *shost) -{ - scsi_qla_host_t *vha = shost_priv(shost); - struct qla_hw_data *ha = vha->hw; - - fc_host_def_dev_loss_tmo(shost) = ha->port_down_retry_count; -} - static void qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) { @@ -1785,6 +1776,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) } /* initialize attributes */ + fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); fc_host_supported_classes(vha->host) = @@ -1912,7 +1904,6 @@ struct fc_function_template qla2xxx_transport_functions = { .show_host_fabric_name = 1, .get_host_port_state = qla2x00_get_host_port_state, .show_host_port_state = 1, - .get_host_def_dev_loss_tmo = qla2x00_get_host_def_loss_tmo, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, @@ -1959,7 +1950,6 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .show_host_fabric_name = 1, .get_host_port_state = qla2x00_get_host_port_state, .show_host_port_state = 1, - .get_host_def_dev_loss_tmo = qla2x00_get_host_def_loss_tmo, .dd_fcrport_size = sizeof(struct fc_port *), .show_rport_supported_classes = 1, @@ -1988,6 +1978,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; u32 speed = FC_PORTSPEED_UNKNOWN; + fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); fc_host_supported_classes(vha->host) = FC_COS_CLASS3; -- cgit v1.2.3 From 0af5d708aae3aef1f98a1c689007b92db2c10277 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 15 Sep 2010 16:52:31 -0500 Subject: [SCSI] lpfc: prep for fc host dev loss tmo support This removes the driver's get_host_def_dev_loss_tmo callback and just has the driver set the dev loss using the fc class fc_host_dev_loss_tmo macro like is done for other fc params. It also adds compat support for the driver's existing dev loss and nodev sysfs and modparams. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 16 ++++++---------- drivers/scsi/lpfc/lpfc_init.c | 2 ++ 2 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 87c2b6b858f7..f6efc6fe86d7 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -2159,6 +2159,11 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val) if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) { vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; + /* + * For compat: set the fc_host dev loss so new rports + * will get the value. 
+ */ + fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; lpfc_update_rport_devloss_tmo(vport); return 0; } @@ -2208,6 +2213,7 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val) vport->cfg_nodev_tmo = val; vport->cfg_devloss_tmo = val; vport->dev_loss_tmo_changed = 1; + fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val; lpfc_update_rport_devloss_tmo(vport); return 0; } @@ -4370,14 +4376,6 @@ lpfc_get_starget_port_name(struct scsi_target *starget) ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0; } -static void -lpfc_get_host_def_loss_tmo(struct Scsi_Host *shost) -{ - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - - fc_host_def_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; -} - /** * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo * @rport: fc rport address. @@ -4486,7 +4484,6 @@ struct fc_function_template lpfc_transport_functions = { .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, - .get_host_def_dev_loss_tmo = lpfc_get_host_def_loss_tmo, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. @@ -4554,7 +4551,6 @@ struct fc_function_template lpfc_vport_transport_functions = { .get_host_fabric_name = lpfc_get_host_fabric_name, .show_host_fabric_name = 1, - .get_host_def_dev_loss_tmo = lpfc_get_host_def_loss_tmo, /* * The LPFC driver treats linkdown handling as target loss events * so there are no sysfs handlers for link_down_tmo. diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index da9ba06ad583..87a4d09a6641 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2817,6 +2817,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb; + fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo; + /* This value is also unchanging */ memset(fc_host_active_fc4s(shost), 0, sizeof(fc_host_active_fc4s(shost))); -- cgit v1.2.3 From 43ca910a9c90566308f39f51ac03a55f94a5f83c Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Wed, 15 Sep 2010 16:52:32 -0500 Subject: [SCSI] fc class: add fc host dev loss sysfs file This adds a fc host dev loss sysfs file. Instead of calling into the driver using the get_host_def_dev_loss_tmo callback, we allow drivers to init the dev loss like is done for other fc host params, and then the fc class will handle updating the value if the user writes to the new sysfs file. Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 124 +++++++++++++++++++++++++++------------ include/scsi/scsi_transport_fc.h | 7 +-- 2 files changed, 91 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 78486d540652..998c01be3234 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -52,6 +52,25 @@ static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *); static void fc_bsg_remove(struct request_queue *); static void fc_bsg_goose_queue(struct fc_rport *); +/* + * Module Parameters + */ + +/* + * dev_loss_tmo: the default number of seconds that the FC transport + * should insulate the loss of a remote port. + * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. 
+ */ +static unsigned int fc_dev_loss_tmo = 60; /* seconds */ + +module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(dev_loss_tmo, + "Maximum number of seconds that the FC transport should" + " insulate the loss of a remote port. Once this value is" + " exceeded, the scsi target is removed. Value should be" + " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if" + " fast_io_fail_tmo is not set."); + /* * Redefine so that we can have same named attributes in the * sdev/starget/host objects. @@ -408,6 +427,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, if (!fc_host->work_q) return -ENOMEM; + fc_host->dev_loss_tmo = fc_dev_loss_tmo; snprintf(fc_host->devloss_work_q_name, sizeof(fc_host->devloss_work_q_name), "fc_dl_%d", shost->host_no); @@ -461,25 +481,6 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class, NULL, NULL); -/* - * Module Parameters - */ - -/* - * dev_loss_tmo: the default number of seconds that the FC transport - * should insulate the loss of a remote port. - * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. - */ -static unsigned int fc_dev_loss_tmo = 60; /* seconds */ - -module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(dev_loss_tmo, - "Maximum number of seconds that the FC transport should" - " insulate the loss of a remote port. Once this value is" - " exceeded, the scsi target is removed. Value should be" - " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if" - " fast_io_fail_tmo is not set."); - /* * Netlink Infrastructure */ @@ -830,24 +831,32 @@ static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO, /* * dev_loss_tmo attribute */ -fc_rport_show_function(dev_loss_tmo, "%d\n", 20, ) -static ssize_t -store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int fc_str_to_dev_loss(const char *buf, unsigned long *val) +{ + char *cp; + + *val = simple_strtoul(buf, &cp, 0); + if ((*cp && (*cp != '\n')) || (*val < 0)) + return -EINVAL; + /* + * Check for overflow; dev_loss_tmo is u32 + */ + if (*val > UINT_MAX) + return -EINVAL; + + return 0; +} + +static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport, + unsigned long val) { - unsigned long val; - struct fc_rport *rport = transport_class_to_rport(dev); struct Scsi_Host *shost = rport_to_shost(rport); struct fc_internal *i = to_fc_internal(shost->transportt); - char *cp; + if ((rport->port_state == FC_PORTSTATE_BLOCKED) || (rport->port_state == FC_PORTSTATE_DELETED) || (rport->port_state == FC_PORTSTATE_NOTPRESENT)) return -EBUSY; - val = simple_strtoul(buf, &cp, 0); - if ((*cp && (*cp != '\n')) || (val < 0)) - return -EINVAL; - /* * Check for overflow; dev_loss_tmo is u32 */ @@ -863,6 +872,25 @@ store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, return -EINVAL; i->f->set_rport_dev_loss_tmo(rport, val); + return 0; +} + +fc_rport_show_function(dev_loss_tmo, "%d\n", 20, ) +static ssize_t +store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fc_rport *rport = transport_class_to_rport(dev); + unsigned long val; + int rc; + + rc = fc_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + rc = fc_rport_set_dev_loss_tmo(rport, val); + if (rc) + return rc; return count; } static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR, @@ -1608,8 +1636,35 @@ store_fc_private_host_issue_lip(struct device *dev, static FC_DEVICE_ATTR(host, 
issue_lip, S_IWUSR, NULL, store_fc_private_host_issue_lip); -fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); +static ssize_t +store_fc_private_host_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = transport_class_to_shost(dev); + struct fc_host_attrs *fc_host = shost_to_fc_host(shost); + struct fc_rport *rport; + unsigned long val, flags; + int rc; + + rc = fc_str_to_dev_loss(buf, &val); + if (rc) + return rc; + + fc_host_dev_loss_tmo(shost) = val; + spin_lock_irqsave(shost->host_lock, flags); + list_for_each_entry(rport, &fc_host->rports, peers) + fc_rport_set_dev_loss_tmo(rport, val); + spin_unlock_irqrestore(shost->host_lock, flags); + return count; +} +fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, ); +static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR, + show_fc_host_dev_loss_tmo, + store_fc_private_host_dev_loss_tmo); + +fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20); /* * Host Statistics Management @@ -2165,6 +2220,7 @@ fc_attach_transport(struct fc_function_template *ft) SETUP_HOST_ATTRIBUTE_RW(system_hostname); /* Transport-managed attributes */ + SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo); SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); if (ft->issue_fc_host_lip) SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); @@ -2525,11 +2581,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel, rport->maxframe_size = -1; rport->supported_classes = FC_COS_UNSPECIFIED; - if (fci->f->get_host_def_dev_loss_tmo) { - fci->f->get_host_def_dev_loss_tmo(shost); - rport->dev_loss_tmo = fc_host_def_dev_loss_tmo(shost); - } else - rport->dev_loss_tmo = fc_dev_loss_tmo; + rport->dev_loss_tmo = fc_host->dev_loss_tmo; memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 9f98fca9b763..59816fe31e68 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -496,7 +496,7 @@ struct fc_host_attrs { u64 fabric_name; char symbolic_name[FC_SYMBOLIC_NAME_SIZE]; char system_hostname[FC_SYMBOLIC_NAME_SIZE]; - u32 def_dev_loss_tmo; + u32 dev_loss_tmo; /* Private (Transport-managed) Attributes */ enum fc_tgtid_binding_type tgtid_bind_type; @@ -581,8 +581,8 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name) #define fc_host_devloss_work_q(x) \ (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) -#define fc_host_def_dev_loss_tmo(x) \ - (((struct fc_host_attrs *)(x)->shost_data)->def_dev_loss_tmo) +#define fc_host_dev_loss_tmo(x) \ + (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo) struct fc_bsg_buffer { @@ -643,7 +643,6 @@ struct fc_function_template { void (*get_host_fabric_name)(struct Scsi_Host *); void (*get_host_symbolic_name)(struct Scsi_Host *); void (*set_host_system_hostname)(struct Scsi_Host *); - void (*get_host_def_dev_loss_tmo)(struct Scsi_Host *); struct fc_host_statistics * (*get_fc_host_stats)(struct Scsi_Host *); void (*reset_fc_host_stats)(struct Scsi_Host *); -- cgit v1.2.3 From 1a03ae0f556a931aa3747b70e44b78308f5b0590 Mon Sep 17 00:00:00 2001 From: Michael Reed Date: Mon, 20 Sep 2010 11:20:22 -0500 Subject: [SCSI] sd name space exhaustion causes system hang Following a site power outage which re-enabled all the ports on my FC switches, my system subsequently booted with far too many luns! 
I had let it run hoping it would make multi-user. It didn't. :( It hung solid after exhausting the last sd device, sdzzz, and attempting to create sdaaaa and beyond. I was unable to get a dump. Discovered using a 2.6.32.13 based system. correct this by detecting when the last index is utilized and failing the sd probe of the device. Patch applies to scsi-misc-2.6. Signed-off-by: Michael Reed Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 0c4f89cfb7dc..50f1fe605303 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2288,11 +2288,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie) index = sdkp->index; dev = &sdp->sdev_gendev; - if (index < SD_MAX_DISKS) { - gd->major = sd_major((index & 0xf0) >> 4); - gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); - gd->minors = SD_MINORS; - } + gd->major = sd_major((index & 0xf0) >> 4); + gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); + gd->minors = SD_MINORS; + gd->fops = &sd_fops; gd->private_data = &sdkp->driver; gd->queue = sdkp->device->request_queue; @@ -2382,6 +2381,12 @@ static int sd_probe(struct device *dev) if (error) goto out_put; + if (index >= SD_MAX_DISKS) { + error = -ENODEV; + sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n"); + goto out_free_index; + } + error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); if (error) goto out_free_index; -- cgit v1.2.3 From d24099df9e379b150c164da354b8c6fdafc73257 Mon Sep 17 00:00:00 2001 From: Brian King Date: Tue, 21 Sep 2010 10:17:11 -0500 Subject: [SCSI] ibmvfc: Handle Virtual I/O Server reboot If a Virtual I/O server is rebooted, the client fibre channel sees a transport event on its CRQ, which causes it to attempt to reconnect to the CRQ. For a period of time during the VIOS reboot, the client's attempts to register the CRQ will return H_CLOSED, indicating the server side is not currently registered. The ibmvfc driver was not handling this well and was taking the virtual adapter offline. Fix this by re-enabling our interrupt and waiting for the event on our CRQ indicating the server is back, at which point we can reconnect. 
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index f033bf39690f..00d08b25425f 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4302,8 +4302,10 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) spin_unlock_irqrestore(vhost->host->host_lock, flags); rc = ibmvfc_reset_crq(vhost); spin_lock_irqsave(vhost->host->host_lock, flags); - if (rc || (rc = ibmvfc_send_crq_init(vhost)) || - (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { + if (rc == H_CLOSED) + vio_enable_interrupts(to_vio_dev(vhost->dev)); + else if (rc || (rc = ibmvfc_send_crq_init(vhost)) || + (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) { ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc); } -- cgit v1.2.3 From 24d3f95a5b6082ca4aba89071ca6259e15d3e564 Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Thu, 23 Sep 2010 16:43:23 -0700 Subject: [SCSI] cxgbi: rename alloc_cpl to alloc_wr Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 18 +++++++++--------- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 28 ++++++++++++++-------------- drivers/scsi/cxgbi/libcxgbi.h | 5 ++--- 3 files changed, 25 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index a01c1e238938..a2c207f57d61 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -320,7 +320,7 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) "csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n", csk, csk->state, csk->flags, csk->tid, credits, dack); - skb = alloc_cpl(sizeof(*req), 0, GFP_ATOMIC); + skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); if (!skb) { pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); return 0; @@ -572,7 +572,7 @@ static void act_open_retry_timer(unsigned long data) cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); - skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); if (!skb) cxgbi_sock_fail_act_open(csk, -ENOMEM); else { @@ -881,16 +881,16 @@ static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx) */ static int alloc_cpls(struct cxgbi_sock *csk) { - csk->cpl_close = alloc_cpl(sizeof(struct cpl_close_con_req), 0, + csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, GFP_KERNEL); if (!csk->cpl_close) return -ENOMEM; - csk->cpl_abort_req = alloc_cpl(sizeof(struct cpl_abort_req), 0, + csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, GFP_KERNEL); if (!csk->cpl_abort_req) goto free_cpl_skbs; - csk->cpl_abort_rpl = alloc_cpl(sizeof(struct cpl_abort_rpl), 0, + csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, GFP_KERNEL); if (!csk->cpl_abort_rpl) goto free_cpl_skbs; @@ -972,7 +972,7 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); - skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); if (!skb) goto rel_resource; skb->sk = (struct sock *)csk; @@ -1141,7 +1141,7 @@ static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); for (i = 0; i < 
cnt; i++) { - struct sk_buff *skb = alloc_cpl(sizeof(struct ulp_mem_io) + + struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + PPOD_SIZE, 0, gfp); if (skb) { ddp->gl_skb[idx + i] = skb; @@ -1156,7 +1156,7 @@ static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, int pg_idx, bool reply) { - struct sk_buff *skb = alloc_cpl(sizeof(struct cpl_set_tcb_field), 0, + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); struct cpl_set_tcb_field *req; u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; @@ -1193,7 +1193,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, int hcrc, int dcrc, int reply) { - struct sk_buff *skb = alloc_cpl(sizeof(struct cpl_set_tcb_field), 0, + struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, GFP_KERNEL); struct cpl_set_tcb_field *req; u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index b375a683a6b0..1056d97e686e 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -332,7 +332,7 @@ static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) "csk 0x%p,%u,0x%lx,%u, credit %u.\n", csk, csk->state, csk->flags, csk->tid, credits); - skb = alloc_cpl(sizeof(*req), 0, GFP_ATOMIC); + skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC); if (!skb) { pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); return 0; @@ -388,7 +388,7 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) int flowclen, i; flowclen = 80; - skb = alloc_cpl(flowclen, 0, GFP_ATOMIC); + skb = alloc_wr(flowclen, 0, GFP_ATOMIC); flowc = (struct fw_flowc_wr *)skb->head; flowc->op_to_nparams = htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); @@ -651,7 +651,7 @@ static void csk_act_open_retry_timer(unsigned long data) cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); - skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); if (!skb) cxgbi_sock_fail_act_open(csk, -ENOMEM); else { @@ -1073,18 +1073,18 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) static int alloc_cpls(struct cxgbi_sock *csk) { - csk->cpl_close = alloc_cpl(sizeof(struct cpl_close_con_req), - 0, GFP_NOIO); + csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), + 0, GFP_KERNEL); if (!csk->cpl_close) return -ENOMEM; - csk->cpl_abort_req = alloc_cpl(sizeof(struct cpl_abort_req), - 0, GFP_NOIO); + csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), + 0, GFP_KERNEL); if (!csk->cpl_abort_req) goto free_cpls; - csk->cpl_abort_rpl = alloc_cpl(sizeof(struct cpl_abort_rpl), - 0, GFP_NOIO); + csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), + 0, GFP_KERNEL); if (!csk->cpl_abort_rpl) goto free_cpls; return 0; @@ -1158,7 +1158,7 @@ static int init_act_open(struct cxgbi_sock *csk) } cxgbi_sock_get(csk); - skb = alloc_cpl(sizeof(struct cpl_act_open_req), 0, GFP_NOIO); + skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); if (!skb) goto rel_resource; skb->sk = (struct sock *)csk; @@ -1268,7 +1268,7 @@ static int ddp_ppod_write_sgl(struct cxgbi_device *cdev, unsigned int port_id, dlen = PPOD_SIZE * npods; pm_addr = idx * PPOD_SIZE + ddp->llimit; - skb = alloc_cpl(sizeof(*req) + sizeof(*sgl), dlen, GFP_ATOMIC); + skb = alloc_wr(sizeof(*req) + sizeof(*sgl), dlen, GFP_ATOMIC); if (!skb) 
{ pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", cdev, idx, npods); @@ -1339,7 +1339,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, if (!pg_idx) return 0; - skb = alloc_cpl(sizeof(*req), 0, GFP_KERNEL); + skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -1373,7 +1373,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, val = TCB_ULP_RAW(val); val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI); - skb = alloc_cpl(sizeof(*req), 0, GFP_KERNEL); + skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -1516,7 +1516,7 @@ static int t4_uld_rx_handler(void *handle, const __be64 *rsp, if (pgl == NULL) { unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; - skb = alloc_cpl(len, 0, GFP_ATOMIC); + skb = alloc_wr(len, 0, GFP_ATOMIC); if (!skb) goto nomem; skb_copy_to_linear_data(skb, &rsp[1], len); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 40551f3be5dc..2f2485b0f9ec 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -410,16 +410,15 @@ static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win) return wscale; } -static inline struct sk_buff *alloc_cpl(int cpl_len, int dlen, gfp_t gfp) +static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp) { - int wrlen = roundup(cpl_len, 16); struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp); if (skb) { __skb_put(skb, wrlen); memset(skb->head, 0, wrlen + dlen); } else - pr_info("alloc cpl skb %u+%u, OOM.\n", cpl_len, dlen); + pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen); return skb; } -- cgit v1.2.3 From e3d2ad8cb2775e4201446489efd1cf26c5bbce5c Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Thu, 23 Sep 2010 16:43:23 -0700 Subject: [SCSI] libcxgbi: pdu read fixes Fixed the locking and releasing skb in the case of error in the pdu read path, and added define iscsi_task_cxgbi_data to access the private data inside the iscsi_task. Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/libcxgbi.c | 79 ++++++++++++++++++++++++++++--------------- drivers/scsi/cxgbi/libcxgbi.h | 2 ++ 2 files changed, 53 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index db9d08a831d0..6cfce2726ea3 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -593,11 +593,11 @@ static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) csk, csk->state, csk->flags, csk->user_data); if (csk->state != CTP_ESTABLISHED) { - read_lock(&csk->callback_lock); + read_lock_bh(&csk->callback_lock); if (csk->user_data) iscsi_conn_failure(csk->user_data, ISCSI_ERR_CONN_FAILED); - read_unlock(&csk->callback_lock); + read_unlock_bh(&csk->callback_lock); } } @@ -1712,12 +1712,10 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", csk, conn, conn ? conn->id : 0xFF, conn ? 
conn->suspend_rx : 0xFF); - read_unlock(&csk->callback_lock); return; } while (!err) { - read_lock(&csk->callback_lock); skb = skb_peek(&csk->receive_queue); if (!skb || !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { @@ -1725,11 +1723,9 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) log_debug(1 << CXGBI_DBG_PDU_RX, "skb 0x%p, NOT ready 0x%lx.\n", skb, cxgbi_skcb_flags(skb)); - read_unlock(&csk->callback_lock); break; } __skb_unlink(skb, &csk->receive_queue); - read_unlock(&csk->callback_lock); read += cxgbi_skcb_rx_pdulen(skb); log_debug(1 << CXGBI_DBG_PDU_RX, @@ -1739,37 +1735,66 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { err = skb_read_pdu_bhs(conn, skb); - if (err < 0) - break; + if (err < 0) { + pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + goto skb_done; + } err = skb_read_pdu_data(conn, skb, skb, err + cdev->skb_rx_extra); + if (err < 0) + pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); } else { err = skb_read_pdu_bhs(conn, skb); - if (err < 0) - break; + if (err < 0) { + pr_err("bhs, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + goto skb_done; + } + if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { struct sk_buff *dskb; - read_lock(&csk->callback_lock); dskb = skb_peek(&csk->receive_queue); if (!dskb) { - read_unlock(&csk->callback_lock); - pr_err("csk 0x%p, NO data.\n", csk); - err = -EAGAIN; - break; + pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," + " plen %u, NO data.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb)); + err = -EIO; + goto skb_done; } __skb_unlink(dskb, &csk->receive_queue); - read_unlock(&csk->callback_lock); err = skb_read_pdu_data(conn, skb, dskb, 0); + if (err < 0) + pr_err("data, csk 0x%p, skb 0x%p,%u, " + "f 0x%lx, plen %u, dskb 0x%p," + "%u.\n", + csk, skb, skb->len, + cxgbi_skcb_flags(skb), + cxgbi_skcb_rx_pdulen(skb), + dskb, dskb->len); __kfree_skb(dskb); } else err = skb_read_pdu_data(conn, skb, skb, 0); } +skb_done: + __kfree_skb(skb); + if (err < 0) break; - - __kfree_skb(skb); } log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); @@ -1780,7 +1805,8 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) } if (err < 0) { - pr_info("csk 0x%p, 0x%p, rx failed %d.\n", csk, conn, err); + pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", + csk, conn, err, read); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); } } @@ -1861,7 +1887,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) struct cxgbi_device *cdev = cconn->chba->cdev; struct iscsi_conn *conn = task->conn; struct iscsi_tcp_task *tcp_task = task->dd_data; - struct cxgbi_task_data *tdata = task->dd_data + sizeof(*tcp_task); + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct scsi_cmnd *sc = task->sc; int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; @@ -1916,8 +1942,7 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, unsigned int count) { struct iscsi_conn *conn = task->conn; - struct iscsi_tcp_task *tcp_task = task->dd_data; - struct cxgbi_task_data *tdata = tcp_task->dd_data; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct sk_buff *skb = tdata->skb; unsigned int datalen = count; int i, padlen = iscsi_padding(count); @@ -2019,8 
+2044,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) { struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; struct cxgbi_conn *cconn = tcp_conn->dd_data; - struct iscsi_tcp_task *tcp_task = task->dd_data; - struct cxgbi_task_data *tdata = tcp_task->dd_data; + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct sk_buff *skb = tdata->skb; unsigned int datalen; int err; @@ -2072,8 +2096,7 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); void cxgbi_cleanup_task(struct iscsi_task *task) { - struct cxgbi_task_data *tdata = task->dd_data + - sizeof(struct iscsi_tcp_task); + struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); log_debug(1 << CXGBI_DBG_ISCSI, "task 0x%p, skb 0x%p, itt 0x%x.\n", @@ -2290,12 +2313,12 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, /* calculate the tag idx bits needed for this conn based on cmds_max */ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; - write_lock(&csk->callback_lock); + write_lock_bh(&csk->callback_lock); csk->user_data = conn; cconn->chba = cep->chba; cconn->cep = cep; cep->cconn = cconn; - write_unlock(&csk->callback_lock); + write_unlock_bh(&csk->callback_lock); cxgbi_conn_max_xmit_dlength(conn); cxgbi_conn_max_recv_dlength(conn); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 2f2485b0f9ec..48e6d623f76d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -592,6 +592,8 @@ struct cxgbi_task_data { unsigned int count; unsigned int sgoffset; }; +#define iscsi_task_cxgbi_data(task) \ + ((task)->dd_data + sizeof(struct iscsi_tcp_task)) static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) { -- cgit v1.2.3 From 0b3d8947972bfd2dd6d55c8009427ad2941ef038 Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Thu, 23 Sep 2010 16:43:23 -0700 Subject: [SCSI] cxgb3i: fixed connection over vlan Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 47 ++++++++++++++++++++++++++++++++------ drivers/scsi/cxgbi/libcxgbi.c | 37 +++++++----------------------- drivers/scsi/cxgbi/libcxgbi.h | 1 + 3 files changed, 49 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index a2c207f57d61..a129a170b47b 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -589,9 +589,10 @@ static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) struct cxgbi_sock *csk = ctx; struct cpl_act_open_rpl *rpl = cplhdr(skb); - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,%u,0x%lx,%u, status %u.\n", - csk, csk->state, csk->flags, csk->atid, rpl->status); + pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n", + csk, csk->state, csk->flags, csk->atid, rpl->status, + &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), + &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); if (rpl->status != CPL_ERR_TCAM_FULL && rpl->status != CPL_ERR_CONN_EXIST && @@ -662,8 +663,7 @@ static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, switch (abort_reason) { case CPL_ERR_BAD_SYN: /* fall through */ case CPL_ERR_CONN_RESET: - return csk->state > CTP_ESTABLISHED ? - -EPIPE : -ECONNRESET; + return csk->state > CTP_ESTABLISHED ? 
-EPIPE : -ECONNRESET; case CPL_ERR_XMIT_TIMEDOUT: case CPL_ERR_PERSIST_TIMEDOUT: case CPL_ERR_FINWAIT2_TIMEDOUT: @@ -945,17 +945,44 @@ static void release_offload_resources(struct cxgbi_sock *csk) csk->cdev = NULL; } +static void update_address(struct cxgbi_hba *chba) +{ + if (chba->ipv4addr) { + if (chba->vdev && + chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) { + cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr); + cxgb3i_set_private_ipv4addr(chba->ndev, 0); + pr_info("%s set %pI4.\n", + chba->vdev->name, &chba->ipv4addr); + } else if (chba->ipv4addr != + cxgb3i_get_private_ipv4addr(chba->ndev)) { + cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr); + pr_info("%s set %pI4.\n", + chba->ndev->name, &chba->ipv4addr); + } + } else if (cxgb3i_get_private_ipv4addr(chba->ndev)) { + if (chba->vdev) + cxgb3i_set_private_ipv4addr(chba->vdev, 0); + cxgb3i_set_private_ipv4addr(chba->ndev, 0); + } +} + static int init_act_open(struct cxgbi_sock *csk) { struct dst_entry *dst = csk->dst; struct cxgbi_device *cdev = csk->cdev; struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev; struct net_device *ndev = cdev->ports[csk->port_id]; + struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; struct sk_buff *skb = NULL; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); + update_address(chba); + if (chba->ipv4addr) + csk->saddr.sin_addr.s_addr = chba->ipv4addr; + csk->rss_qid = 0; csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev); if (!csk->l2t) { @@ -984,6 +1011,12 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_reset_wr_list(csk); csk->err = 0; + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n", + csk, csk->state, csk->flags, + &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), + &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); + cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); send_act_open_req(csk, skb, csk->l2t); return 0; @@ -1143,9 +1176,9 @@ static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, for (i = 0; i < cnt; i++) { struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + PPOD_SIZE, 0, gfp); - if (skb) { + if (skb) ddp->gl_skb[idx + i] = skb; - } else { + else { ddp_free_gl_skb(ddp, idx, i); return -ENOMEM; } diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 6cfce2726ea3..be5661707dfa 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -195,16 +195,22 @@ EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, int *port) { + struct net_device *vdev = NULL; struct cxgbi_device *cdev, *tmp; int i; - if (ndev->priv_flags & IFF_802_1Q_VLAN) + if (ndev->priv_flags & IFF_802_1Q_VLAN) { + vdev = ndev; ndev = vlan_dev_real_dev(ndev); + log_debug(1 << CXGBI_DBG_DEV, + "vlan dev %s -> %s.\n", vdev->name, ndev->name); + } mutex_lock(&cdev_mutex); list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { for (i = 0; i < cdev->nports; i++) { if (ndev == cdev->ports[i]) { + cdev->hbas[i]->vdev = vdev; mutex_unlock(&cdev_mutex); if (port) *port = i; @@ -218,24 +224,6 @@ static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, return NULL; } -struct cxgbi_hba *cxgbi_hba_find_by_netdev(struct net_device *dev, - struct cxgbi_device *cdev) -{ - int i; - - if (dev->priv_flags & IFF_802_1Q_VLAN) - dev = vlan_dev_real_dev(dev); - - for (i = 0; i < cdev->nports; i++) { - if (cdev->hbas[i]->ndev == dev) - 
return cdev->hbas[i]; - } - log_debug(1 << CXGBI_DBG_DEV, - "ndev 0x%p, %s, cdev 0x%p, NO match found.\n", - dev, dev->name, cdev); - return NULL; -} - void cxgbi_hbas_remove(struct cxgbi_device *cdev) { int i; @@ -532,12 +520,6 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) dst->neighbour->dev->name, ndev->name, mtu); } - if (ndev->priv_flags & IFF_802_1Q_VLAN) { - ndev = vlan_dev_real_dev(ndev); - pr_info("rt dev %s, vlan -> %s.\n", - dst->neighbour->dev->name, ndev->name); - } - cdev = cxgbi_device_find_by_netdev(ndev, &port); if (!cdev) { pr_info("dst %pI4, %s, NOT cxgbi device.\n", @@ -561,10 +543,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk->dst = dst; csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; csk->daddr.sin_port = daddr->sin_port; - if (cdev->hbas[port]->ipv4addr) - csk->saddr.sin_addr.s_addr = cdev->hbas[port]->ipv4addr; - else - csk->saddr.sin_addr.s_addr = rt->rt_src; + csk->saddr.sin_addr.s_addr = rt->rt_src; return csk; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 48e6d623f76d..43025b71e295 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -500,6 +500,7 @@ void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *); struct cxgbi_hba { struct net_device *ndev; + struct net_device *vdev; /* vlan dev */ struct Scsi_Host *shost; struct cxgbi_device *cdev; __be32 ipv4addr; -- cgit v1.2.3 From e27d6169c79e3c75edc74a14424a7856e7ff487c Mon Sep 17 00:00:00 2001 From: "kxie@chelsio.com" Date: Thu, 23 Sep 2010 16:43:23 -0700 Subject: [SCSI] cxgb4i: connection and ddp setting update Update cxgb4i connection setting and pagepod programming. Signed-off-by: Karen Xie Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 128 +++++++++++++++++++------------------ drivers/scsi/cxgbi/cxgb4i/cxgb4i.h | 5 ++ drivers/scsi/cxgbi/libcxgbi.h | 10 --- 3 files changed, 71 insertions(+), 72 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 1056d97e686e..99f2b8c5dd63 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -34,8 +34,8 @@ static unsigned int dbg_level; #define DRV_MODULE_NAME "cxgb4i" #define DRV_MODULE_DESC "Chelsio T4 iSCSI Driver" -#define DRV_MODULE_VERSION "0.9.0" -#define DRV_MODULE_RELDATE "May 2010" +#define DRV_MODULE_VERSION "0.9.1" +#define DRV_MODULE_RELDATE "Aug. 
2010" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME @@ -396,7 +396,7 @@ static inline void send_tx_flowc_wr(struct cxgbi_sock *csk) htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) | FW_WR_FLOWID(csk->tid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; - flowc->mnemval[0].val = htonl(0); + flowc->mnemval[0].val = htonl(csk->cdev->pfvf); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = htonl(csk->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; @@ -568,6 +568,12 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) goto rel_skb; } + if (csk->atid != atid) { + pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n", + atid, csk, csk->state, csk->flags, csk->tid, csk->atid); + goto rel_skb; + } + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n", csk, csk->state, csk->flags, tid, atid, rcv_isn); @@ -681,9 +687,10 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) goto rel_skb; } - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,%u,0x%lx, status %u, atid %u, tid %u.\n", - csk, csk->state, csk->flags, status, atid, tid); + pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n", + &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), + &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), + atid, tid, status, csk, csk->state, csk->flags); if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && @@ -846,7 +853,6 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) unsigned int tid = GET_TID(cpl); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct tid_info *t = lldi->tids; - struct sk_buff *lskb; csk = lookup_tid(t, tid); if (unlikely(!csk)) { @@ -872,6 +878,8 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) } cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq); + cxgbi_skcb_flags(skb) = 0; + skb_reset_transport_header(skb); __skb_pull(skb, sizeof(*cpl)); __pskb_trim(skb, ntohs(cpl->len)); @@ -884,17 +892,16 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", csk, csk->state, csk->flags, csk->tid, skb); csk->skb_ulp_lhdr = skb; - lskb = csk->skb_ulp_lhdr; - cxgbi_skcb_set_flag(lskb, SKCBF_RX_HDR); + cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); - if (cxgbi_skcb_tcp_seq(lskb) != csk->rcv_nxt) { + if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", - csk->tid, cxgbi_skcb_tcp_seq(lskb), + csk->tid, cxgbi_skcb_tcp_seq(skb), csk->rcv_nxt); goto abort_conn; } - bhs = lskb->data; + bhs = skb->data; hlen = ntohs(cpl->len); dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; @@ -918,9 +925,9 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) ntohl(*((unsigned int *)(bhs + 24)))); } else { - lskb = csk->skb_ulp_lhdr; - cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); + struct sk_buff *lskb = csk->skb_ulp_lhdr; + cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n", csk, csk->state, csk->flags, skb, lskb); @@ -979,7 +986,6 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev, lskb = csk->skb_ulp_lhdr; csk->skb_ulp_lhdr = NULL; - cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc); if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb)) @@ -987,15 +993,13 @@ static void 
do_rx_data_ddp(struct cxgbi_device *cdev, csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) { - log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad.\n", - csk, lskb, status); + pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n", + csk, lskb, status, cxgbi_skcb_flags(lskb)); cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR); } if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) { - log_debug(1 << CXGBI_DBG_PDU_RX, - "csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad.\n", - csk, lskb, status); + pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n", + csk, lskb, status, cxgbi_skcb_flags(lskb)); cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR); } if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) { @@ -1015,6 +1019,7 @@ static void do_rx_data_ddp(struct cxgbi_device *cdev, "csk 0x%p, lskb 0x%p, f 0x%lx.\n", csk, lskb, cxgbi_skcb_flags(lskb)); + cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS); cxgbi_conn_pdu_ready(csk); spin_unlock_bh(&csk->lock); goto rel_skb; @@ -1234,41 +1239,41 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev) /* * functions to program the pagepod in h/w */ +#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req, - unsigned int dlen, unsigned int pm_addr) + unsigned int wr_len, unsigned int dlen, + unsigned int pm_addr) { - struct ulptx_sgl *sgl; - unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + - sizeof(struct ulptx_sgl), 16); + struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); INIT_ULPTX_WR(req, wr_len, 0, 0); - req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE)); + req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23)); req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); - sgl = (struct ulptx_sgl *)(req + 1); - sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(1)); - sgl->len0 = htonl(dlen); + + idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM)); + idata->len = htonl(dlen); } -static int ddp_ppod_write_sgl(struct cxgbi_device *cdev, unsigned int port_id, +static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, struct cxgbi_pagepod_hdr *hdr, unsigned int idx, unsigned int npods, struct cxgbi_gather_list *gl, unsigned int gl_pidx) { struct cxgbi_ddp_info *ddp = cdev->ddp; - unsigned int dlen, pm_addr; struct sk_buff *skb; struct ulp_mem_io *req; - struct ulptx_sgl *sgl; + struct ulptx_idata *idata; struct cxgbi_pagepod *ppod; + unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit; + unsigned int dlen = PPOD_SIZE * npods; + unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) + + sizeof(struct ulptx_idata) + dlen, 16); unsigned int i; - dlen = PPOD_SIZE * npods; - pm_addr = idx * PPOD_SIZE + ddp->llimit; - - skb = alloc_wr(sizeof(*req) + sizeof(*sgl), dlen, GFP_ATOMIC); + skb = alloc_wr(wr_len, 0, GFP_ATOMIC); if (!skb) { pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n", cdev, idx, npods); @@ -1277,10 +1282,9 @@ static int ddp_ppod_write_sgl(struct cxgbi_device *cdev, unsigned int port_id, req = (struct ulp_mem_io *)skb->head; set_queue(skb, CPL_PRIORITY_CONTROL, NULL); - ulp_mem_io_set_hdr(req, dlen, pm_addr); - sgl = (struct ulptx_sgl *)(req + 1); - ppod = (struct cxgbi_pagepod *)(sgl + 1); - sgl->addr0 = cpu_to_be64(virt_to_phys(ppod)); + ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr); + idata = (struct ulptx_idata *)(req + 1); + ppod = (struct cxgbi_pagepod *)(idata + 
1); for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) { if (!hdr && !gl) @@ -1302,9 +1306,9 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, for (i = 0; i < npods; i += cnt, idx += cnt) { cnt = npods - i; - if (cnt > ULPMEM_DSGL_MAX_NPPODS) - cnt = ULPMEM_DSGL_MAX_NPPODS; - err = ddp_ppod_write_sgl(csk->cdev, csk->port_id, hdr, + if (cnt > ULPMEM_IDATA_MAX_NPPODS) + cnt = ULPMEM_IDATA_MAX_NPPODS; + err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr, idx, cnt, gl, 4 * i); if (err < 0) break; @@ -1320,9 +1324,9 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, for (i = 0; i < npods; i += cnt, idx += cnt) { cnt = npods - i; - if (cnt > ULPMEM_DSGL_MAX_NPPODS) - cnt = ULPMEM_DSGL_MAX_NPPODS; - err = ddp_ppod_write_sgl(chba->cdev, chba->port_id, NULL, + if (cnt > ULPMEM_IDATA_MAX_NPPODS) + cnt = ULPMEM_IDATA_MAX_NPPODS; + err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL, idx, cnt, NULL, 0); if (err < 0) break; @@ -1334,26 +1338,22 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, { struct sk_buff *skb; struct cpl_set_tcb_field *req; - u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0; - if (!pg_idx) + if (!pg_idx || pg_idx >= DDP_PGIDX_MAX) return 0; skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); if (!skb) return -ENOMEM; - /* set up ulp submode and page size */ - val = (val & 0x03) << 2; - val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI); - + /* set up ulp page size */ req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, csk->tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); - req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW)); - req->mask = cpu_to_be64(TCB_ULP_TYPE(TCB_ULP_TYPE_MASK)); - req->val = cpu_to_be64(val); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 8); + req->val = cpu_to_be64(pg_idx << 8); set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, @@ -1368,10 +1368,9 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, { struct sk_buff *skb; struct cpl_set_tcb_field *req; - u64 val = (hcrc ? ULP_CRC_HEADER : 0) | (dcrc ? ULP_CRC_DATA : 0); - val = TCB_ULP_RAW(val); - val |= TCB_ULP_TYPE(ULP2_MODE_ISCSI); + if (!hcrc && !dcrc) + return 0; skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL); if (!skb) @@ -1379,14 +1378,15 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, csk->hcrc_len = (hcrc ? 4 : 0); csk->dcrc_len = (dcrc ? 4 : 0); - /* set up ulp submode and page size */ + /* set up ulp submode */ req = (struct cpl_set_tcb_field *)skb->head; INIT_TP_WR(req, tid); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid)); - req->word_cookie = htons(TCB_WORD(W_TCB_ULP_RAW)); - req->mask = cpu_to_be64(TCB_ULP_RAW(TCB_ULP_RAW_MASK)); - req->val = cpu_to_be64(val); + req->word_cookie = htons(0); + req->mask = cpu_to_be64(0x3 << 4); + req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | + (dcrc ? 
ULP_CRC_DATA : 0)) << 4); set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, @@ -1477,6 +1477,10 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi) cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr); cdev->itp = &cxgb4i_iscsi_transport; + cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8; + pr_info("cdev 0x%p,%s, pfvf %u.\n", + cdev, lldi->ports[0]->name, cdev->pfvf); + rc = cxgb4i_ddp_init(cdev); if (rc) { pr_info("t4 0x%p ddp init failed.\n", cdev); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h index 342263b1f542..1096026ba241 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h @@ -23,6 +23,11 @@ #define CXGB4I_TX_HEADER_LEN \ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr)) +struct ulptx_idata { + __be32 cmd_more; + __be32 len; +}; + struct cpl_rx_data_ddp { union opcode_tid ot; __be16 urg; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 43025b71e295..c57d59db000c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -162,16 +162,6 @@ struct cxgbi_ddp_info { #define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) #define PPOD_VALID_FLAG PPOD_VALID(1U) -#define W_TCB_ULP_TYPE 0 -#define TCB_ULP_TYPE_SHIFT 0 -#define TCB_ULP_TYPE_MASK 0xfULL -#define TCB_ULP_TYPE(x) ((x) << TCB_ULP_TYPE_SHIFT) - -#define W_TCB_ULP_RAW 0 -#define TCB_ULP_RAW_SHIFT 4 -#define TCB_ULP_RAW_MASK 0xffULL -#define TCB_ULP_RAW(x) ((x) << TCB_ULP_RAW_SHIFT) - /* * sge_opaque_hdr - * Opaque version of structure the SGE stores at skb->head of TX_DATA packets -- cgit v1.2.3 From 69723d178da97b09ae8996f60fbf2f0cf68d6d61 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Fri, 24 Sep 2010 15:57:04 +0200 Subject: [SCSI] scsi_dh_alua: Handle all states correctly For ALUA we should be handling all states, independent of whether the mode is explicit or implicit. For 'Transitioning' we should retry for a certain amount of time; after that we're setting the port to 'Standby' and return SCSI_DH_RETRY to signal upper layers a retry is in order here. Signed-off-by: Hannes Reinecke Acked-by: Mike Snitzer Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_alua.c | 65 +++++++++++++++++------------- 1 file changed, 38 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 1a970a76b1b9..6b729324b8d3 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -1,7 +1,7 @@ /* * Generic SCSI-3 ALUA SCSI Device Handler * - * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH. + * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH. * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify @@ -20,17 +20,19 @@ * */ #include +#include #include #include #include #define ALUA_DH_NAME "alua" -#define ALUA_DH_VER "1.2" +#define ALUA_DH_VER "1.3" #define TPGS_STATE_OPTIMIZED 0x0 #define TPGS_STATE_NONOPTIMIZED 0x1 #define TPGS_STATE_STANDBY 0x2 #define TPGS_STATE_UNAVAILABLE 0x3 +#define TPGS_STATE_LBA_DEPENDENT 0x4 #define TPGS_STATE_OFFLINE 0xe #define TPGS_STATE_TRANSITIONING 0xf @@ -39,6 +41,7 @@ #define TPGS_SUPPORT_NONOPTIMIZED 0x02 #define TPGS_SUPPORT_STANDBY 0x04 #define TPGS_SUPPORT_UNAVAILABLE 0x08 +#define TPGS_SUPPORT_LBA_DEPENDENT 0x10 #define TPGS_SUPPORT_OFFLINE 0x40 #define TPGS_SUPPORT_TRANSITION 0x80 @@ -460,6 +463,8 @@ static char print_alua_state(int state) return 'S'; case TPGS_STATE_UNAVAILABLE: return 'U'; + case TPGS_STATE_LBA_DEPENDENT: + return 'L'; case TPGS_STATE_OFFLINE: return 'O'; case TPGS_STATE_TRANSITIONING: @@ -542,7 +547,9 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) int len, k, off, valid_states = 0; char *ucp; unsigned err; + unsigned long expiry, interval = 10; + expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT); retry: err = submit_rtpg(sdev, h); @@ -553,7 +560,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) return SCSI_DH_IO; err = alua_check_sense(sdev, &sense_hdr); - if (err == ADD_TO_MLQUEUE) + if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry)) goto retry; sdev_printk(KERN_INFO, sdev, "%s: rtpg sense code %02x/%02x/%02x\n", @@ -587,38 +594,37 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) } sdev_printk(KERN_INFO, sdev, - "%s: port group %02x state %c supports %c%c%c%c%c%c\n", + "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, h->group_id, print_alua_state(h->state), valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', + valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', valid_states&TPGS_SUPPORT_STANDBY?'S':'s', valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); - if (h->tpgs & TPGS_MODE_EXPLICIT) { - switch (h->state) { - case TPGS_STATE_TRANSITIONING: + switch (h->state) { + case TPGS_STATE_TRANSITIONING: + if (time_before(jiffies, expiry)) { /* State transition, retry */ + interval *= 10; + msleep(interval); goto retry; - break; - case TPGS_STATE_OFFLINE: - /* Path is offline, fail */ - err = SCSI_DH_DEV_OFFLINED; - break; - default: - break; } - } else { - /* Only Implicit ALUA support */ - if (h->state == TPGS_STATE_OPTIMIZED || - h->state == TPGS_STATE_NONOPTIMIZED || - h->state == TPGS_STATE_STANDBY) - /* Useable path if active */ - err = SCSI_DH_OK; - else - /* Path unuseable for unavailable/offline */ - err = SCSI_DH_DEV_OFFLINED; + /* Transitioning time exceeded, set port to standby */ + err = SCSI_DH_RETRY; + h->state = TPGS_STATE_STANDBY; + break; + case TPGS_STATE_OFFLINE: + case TPGS_STATE_UNAVAILABLE: + /* Path unuseable for unavailable/offline */ + err = SCSI_DH_DEV_OFFLINED; + break; + default: + /* Useable path if active */ + err = SCSI_DH_OK; + break; } return err; } @@ -672,7 +678,9 @@ static int alua_activate(struct scsi_device *sdev, goto out; } - if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED) { + if (h->tpgs & TPGS_MODE_EXPLICIT && + h->state != TPGS_STATE_OPTIMIZED && + h->state != TPGS_STATE_LBA_DEPENDENT) { h->callback_fn = fn; h->callback_data = data; err = submit_stpg(h); 
@@ -698,8 +706,11 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) struct alua_dh_data *h = get_alua_data(sdev); int ret = BLKPREP_OK; - if (h->state != TPGS_STATE_OPTIMIZED && - h->state != TPGS_STATE_NONOPTIMIZED) { + if (h->state == TPGS_STATE_TRANSITIONING) + ret = BLKPREP_DEFER; + else if (h->state != TPGS_STATE_OPTIMIZED && + h->state != TPGS_STATE_NONOPTIMIZED && + h->state != TPGS_STATE_LBA_DEPENDENT) { ret = BLKPREP_KILL; req->cmd_flags |= REQ_QUIET; } -- cgit v1.2.3 From 4ebcfb929b6be63bacdba34f6aae0f5b51432d2c Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 24 Sep 2010 22:07:51 +0200 Subject: [SCSI] aha152x: enable PCMCIA on 64bit It was disabled when x86_64 was introduced, but it is reported to be working on 64bit by two different people, so let's enable it back again. Signed-off-by: Jiri Slaby Cc: "Juergen E. Fischer" Signed-off-by: James Bottomley --- drivers/scsi/pcmcia/Kconfig | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig index 53857c6b6d4d..ecc855c550aa 100644 --- a/drivers/scsi/pcmcia/Kconfig +++ b/drivers/scsi/pcmcia/Kconfig @@ -11,7 +11,6 @@ if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m config PCMCIA_AHA152X tristate "Adaptec AHA152X PCMCIA support" - depends on !64BIT select SCSI_SPI_ATTRS help Say Y here if you intend to attach this type of PCMCIA SCSI host -- cgit v1.2.3 From 7262026f0eded76b935a12b9b15af05516a8610a Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Mon, 27 Sep 2010 10:45:28 -0700 Subject: [SCSI] ipr: fix array error logging The structure definitions for reporting array errors did not have the correct size for the Array WWID field. This patch fixes those definitions. It also fixes part of the output formatting that did not have newlines and fixes size calculations. 
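The size-calculation part of this fix hinges on the difference between sizeof() and the kernel's ARRAY_SIZE() when bounding a loop over array entries: sizeof() yields the array's total size in bytes, while ARRAY_SIZE() yields the number of elements, so capping num_entries with sizeof() allows the loop to run far past the end of error->array_member. The hunks below change exactly that bound in ipr_log_enhanced_array_error() and ipr_log_sis64_array_error(). As a minimal stand-alone illustration (not part of the patch), the user-space sketch below shows the two values diverging; the struct demo_entry type and the array length of 18 are invented for the example, and ARRAY_SIZE is defined locally to mirror the kernel macro.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_entry {		/* hypothetical entry, 16 bytes */
	unsigned int wwid[4];
};

int main(void)
{
	struct demo_entry array_member[18];

	/* sizeof() counts bytes (18 * 16 = 288), not entries. */
	printf("sizeof(array_member)     = %zu\n", sizeof(array_member));
	/* ARRAY_SIZE() counts entries (18) - the correct loop bound. */
	printf("ARRAY_SIZE(array_member) = %zu\n", ARRAY_SIZE(array_member));
	return 0;
}

Bounding by the element count rather than the byte count is what keeps the min_t() clamp meaningful when the adapter reports an implausibly large num_entries.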
Signed-off-by: Wayne Boyer Acked-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 10 +++++----- drivers/scsi/ipr.h | 9 +++++++-- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 3b28e8728131..df9a12c8b373 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1671,7 +1671,7 @@ static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg, array_entry = error->array_member; num_entries = min_t(u32, be32_to_cpu(error->num_entries), - sizeof(error->array_member)); + ARRAY_SIZE(error->array_member)); for (i = 0; i < num_entries; i++, array_entry++) { if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) @@ -2152,8 +2152,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, ipr_err_separator; array_entry = error->array_member; - num_entries = min_t(u32, be32_to_cpu(error->num_entries), - sizeof(error->array_member)); + num_entries = min_t(u32, error->num_entries, + ARRAY_SIZE(error->array_member)); for (i = 0; i < num_entries; i++, array_entry++) { @@ -2167,10 +2167,10 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, ipr_err("Array Member %d:\n", i); ipr_log_ext_vpd(&array_entry->vpd); - ipr_err("Current Location: %s", + ipr_err("Current Location: %s\n", ipr_format_res_path(array_entry->res_path, buffer, sizeof(buffer))); - ipr_err("Expected Location: %s", + ipr_err("Expected Location: %s\n", ipr_format_res_path(array_entry->expected_res_path, buffer, sizeof(buffer))); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 67cae67378ad..aa8bb2f2c6ee 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -319,6 +319,11 @@ struct ipr_ext_vpd { __be32 wwid[2]; }__attribute__((packed)); +struct ipr_ext_vpd64 { + struct ipr_vpd vpd; + __be32 wwid[4]; +}__attribute__((packed)); + struct ipr_std_inq_data { u8 peri_qual_dev_type; #define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5) @@ -395,7 +400,7 @@ struct ipr_config_table_entry64 { __be64 res_path; struct ipr_std_inq_data std_inq_data; u8 reserved2[4]; - __be64 reserved3[2]; // description text + __be64 reserved3[2]; u8 reserved4[8]; }__attribute__ ((packed, aligned (8))); @@ -914,7 +919,7 @@ struct ipr_hostrcb_type_24_error { u8 array_id; u8 last_res_path[8]; u8 protection_level[8]; - struct ipr_ext_vpd array_vpd; + struct ipr_ext_vpd64 array_vpd; u8 description[16]; u8 reserved2[3]; u8 num_entries; -- cgit v1.2.3 From bf5eefb007e7c5498a41af2dd65d957ae9793a63 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 28 Sep 2010 10:11:06 +0200 Subject: [SCSI] zfcp: Remove scsi_cmnd->serial_number from debug traces With the change that drivers have to explicitly request the serial number for SCSI commands, this field should not be part of the zfcp traces. It is not worth the effort to request the serial number only for tracing purposes, so simply remove this field from the debug traces. 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 4 ---- drivers/s390/scsi/zfcp_dbf.h | 2 -- 2 files changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 2224caa8b92d..2cdd6b28ff7f 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -154,7 +154,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; if (scsi_cmnd) { response->u.fcp.cmnd = (unsigned long)scsi_cmnd; - response->u.fcp.serial = scsi_cmnd->serial_number; response->u.fcp.data_dir = qtcb->bottom.io.data_direction; } @@ -330,7 +329,6 @@ static void zfcp_dbf_hba_view_response(char **p, break; zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); - zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial); *p += sprintf(*p, "\n"); break; @@ -881,7 +879,6 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, } rec->scsi_result = scsi_cmnd->result; rec->scsi_cmnd = (unsigned long)scsi_cmnd; - rec->scsi_serial = scsi_cmnd->serial_number; memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, min((int)scsi_cmnd->cmd_len, ZFCP_DBF_SCSI_OPCODE)); @@ -950,7 +947,6 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); - zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial); zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, 0, ZFCP_DBF_SCSI_OPCODE); zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 6a48c197b45d..04081b1b62b4 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -110,7 +110,6 @@ struct zfcp_dbf_hba_record_response { union { struct { u64 cmnd; - u64 serial; u32 data_dir; } fcp; struct { @@ -206,7 +205,6 @@ struct zfcp_dbf_scsi_record { u32 scsi_lun; u32 scsi_result; u64 scsi_cmnd; - u64 scsi_serial; #define ZFCP_DBF_SCSI_OPCODE 16 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; u8 scsi_retries; -- cgit v1.2.3 From d439d286f573afab8c164dbc953ce1d214585a40 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 29 Sep 2010 11:18:45 -0400 Subject: [SCSI] lpfc 8.3.17: Code Cleanup and Locking fixes - Move Unload flag earlier in vport deletei to stop ELS traffic - Replaced some unnecessary spin_lock_irqsave with spin_lock_irq - Fixed circular spinlock dependency between low-level driver and SCSI midlayer - Remove duplicate code from lpfc_els_retry routine - Make all error values negative Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_bsg.c | 81 ++++++++++++++++--------------- drivers/scsi/lpfc/lpfc_els.c | 2 + drivers/scsi/lpfc/lpfc_hbadisc.c | 34 ++++++------- drivers/scsi/lpfc/lpfc_init.c | 34 ++++++------- drivers/scsi/lpfc/lpfc_sli.c | 100 +++++++++++++++++++++++---------------- drivers/scsi/lpfc/lpfc_vport.c | 8 ++-- 6 files changed, 142 insertions(+), 117 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 49d0cf99c24c..10cfc64782ad 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -259,6 +259,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) 
struct bsg_job_data *dd_data; uint32_t creg_val; int rc = 0; + int iocb_stat; /* in case no data is transferred */ job->reply->reply_payload_rcv_len = 0; @@ -373,14 +374,13 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) readl(phba->HCregaddr); /* flush */ } - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - - if (rc == IOCB_SUCCESS) + iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); + if (iocb_stat == IOCB_SUCCESS) return 0; /* done for now */ - else if (rc == IOCB_BUSY) - rc = EAGAIN; + else if (iocb_stat == IOCB_BUSY) + rc = -EAGAIN; else - rc = EIO; + rc = -EIO; /* iocb failed so cleanup */ @@ -631,9 +631,9 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) if (rc == IOCB_SUCCESS) return 0; /* done for now */ else if (rc == IOCB_BUSY) - rc = EAGAIN; + rc = -EAGAIN; else - rc = EIO; + rc = -EIO; pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, job->request_payload.sg_cnt, DMA_TO_DEVICE); @@ -1299,7 +1299,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, /* Allocate buffer for command iocb */ ctiocb = lpfc_sli_get_iocbq(phba); if (!ctiocb) { - rc = ENOMEM; + rc = -ENOMEM; goto no_ctiocb; } @@ -1649,17 +1649,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) - return ENOMEM; + return -ENOMEM; status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, (uint8_t *)&phba->pport->fc_sparam, mbox, 0); if (status) { mempool_free(mbox, phba->mbox_mem_pool); - return ENOMEM; + return -ENOMEM; } dmabuff = (struct lpfc_dmabuf *) mbox->context1; mbox->context1 = NULL; + mbox->context2 = NULL; status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { @@ -1667,7 +1668,7 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) kfree(dmabuff); if (status != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); - return ENODEV; + return -ENODEV; } *rpi = mbox->u.mb.un.varWords[0]; @@ -1693,7 +1694,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) /* Allocate mboxq structure */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox == NULL) - return ENOMEM; + return -ENOMEM; lpfc_unreg_login(phba, 0, rpi, mbox); status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); @@ -1701,7 +1702,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) { if (status != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); - return EIO; + return -EIO; } mempool_free(mbox, phba->mbox_mem_pool); @@ -1730,6 +1731,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, struct ulp_bde64 *bpl = NULL; struct lpfc_sli_ct_request *ctreq = NULL; int ret_val = 0; + int time_left; + int iocb_stat; unsigned long flags; *txxri = 0; @@ -1737,7 +1740,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, SLI_CT_ELX_LOOPBACK); if (!evt) - return ENOMEM; + return -ENOMEM; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_add(&evt->node, &phba->ct_ev_waiters); @@ -1770,7 +1773,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, if (cmdiocbq == NULL || rspiocbq == NULL || dmabuf == NULL || bpl == NULL || ctreq == NULL || dmabuf->virt == NULL) { - ret_val = ENOMEM; + ret_val = -ENOMEM; goto err_get_xri_exit; } @@ -1806,24 +1809,24 @@ static int lpfcdiag_loop_get_xri(struct 
lpfc_hba *phba, uint16_t rpi, cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; - ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, + iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if (ret_val) + if (iocb_stat) { + ret_val = -EIO; goto err_get_xri_exit; - + } *txxri = rsp->ulpContext; evt->waiting = 1; evt->wait_time_stamp = jiffies; - ret_val = wait_event_interruptible_timeout( + time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); if (list_empty(&evt->events_to_see)) - ret_val = (ret_val) ? EINTR : ETIMEDOUT; + ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { - ret_val = IOCB_SUCCESS; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_move(evt->events_to_see.prev, &evt->events_to_get); spin_unlock_irqrestore(&phba->ct_ev_lock, flags); @@ -1845,7 +1848,7 @@ err_get_xri_exit: kfree(dmabuf); } - if (cmdiocbq && (ret_val != IOCB_TIMEDOUT)) + if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT)) lpfc_sli_release_iocbq(phba, cmdiocbq); if (rspiocbq) lpfc_sli_release_iocbq(phba, rspiocbq); @@ -1959,6 +1962,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, uint32_t num_bde; struct lpfc_dmabufext *rxbuffer = NULL; int ret_val = 0; + int iocb_stat; int i = 0; cmdiocbq = lpfc_sli_get_iocbq(phba); @@ -1973,7 +1977,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, } if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { - ret_val = ENOMEM; + ret_val = -ENOMEM; goto err_post_rxbufs_exit; } @@ -2022,16 +2026,16 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, cmd->ulpClass = CLASS3; cmd->ulpContext = rxxri; - ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - - if (ret_val == IOCB_ERROR) { + iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, + 0); + if (iocb_stat == IOCB_ERROR) { diag_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[0]); if (mp[1]) diag_cmd_data_free(phba, (struct lpfc_dmabufext *)mp[1]); dmp = list_entry(next, struct lpfc_dmabuf, list); - ret_val = EIO; + ret_val = -EIO; goto err_post_rxbufs_exit; } @@ -2045,7 +2049,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { dmp = list_entry(next, struct lpfc_dmabuf, list); - ret_val = EIO; + ret_val = -EIO; goto err_post_rxbufs_exit; } @@ -2111,6 +2115,8 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) uint32_t num_bde; uint8_t *ptr = NULL, *rx_databuf = NULL; int rc = 0; + int time_left; + int iocb_stat; unsigned long flags; void *dataout = NULL; uint32_t total_mem; @@ -2185,22 +2191,18 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) ptr, size); rc = lpfcdiag_loop_self_reg(phba, &rpi); - if (rc) { - rc = -ENOMEM; + if (rc) goto loopback_test_exit; - } rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri); if (rc) { lpfcdiag_loop_self_unreg(phba, rpi); - rc = -ENOMEM; goto loopback_test_exit; } rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size); if (rc) { lpfcdiag_loop_self_unreg(phba, rpi); - rc = -ENOMEM; goto loopback_test_exit; } @@ -2290,21 +2292,22 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; - rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, - (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); + iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, + 
rspiocbq, (phba->fc_ratov * 2) + + LPFC_DRVR_TIMEOUT); - if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { + if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) { rc = -EIO; goto err_loopback_test_exit; } evt->waiting = 1; - rc = wait_event_interruptible_timeout( + time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); evt->waiting = 0; if (list_empty(&evt->events_to_see)) - rc = (rc) ? -EINTR : -ETIMEDOUT; + rc = (time_left) ? -EINTR : -ETIMEDOUT; else { spin_lock_irqsave(&phba->ct_ev_lock, flags); list_move(evt->events_to_see.prev, &evt->events_to_get); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8d09191c327e..e6ca12f6c6cb 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3250,6 +3250,8 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); pmb->context1 = NULL; + pmb->context2 = NULL; + lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 1f62ea8c165d..c3d7174e3469 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1015,7 +1015,6 @@ static void lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; - unsigned long flags; if (mboxq->u.mb.mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, @@ -1029,18 +1028,18 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Start FCoE discovery by sending a FLOGI. */ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); /* Set the FCFI registered flag */ - spin_lock_irqsave(&phba->hbalock, flags); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REGISTERED; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); /* If there is a pending FCoE event, restart FCF table scan. */ if (lpfc_check_pending_fcoe_event(phba, 1)) { mempool_free(mboxq, phba->mbox_mem_pool); return; } - spin_lock_irqsave(&phba->hbalock, flags); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); if (vport->port_state != LPFC_FLOGI) lpfc_initial_flogi(vport); @@ -1240,14 +1239,13 @@ lpfc_register_fcf(struct lpfc_hba *phba) { LPFC_MBOXQ_t *fcf_mbxq; int rc; - unsigned long flags; - spin_lock_irqsave(&phba->hbalock, flags); + spin_lock_irq(&phba->hbalock); /* If the FCF is not availabe do nothing. 
*/ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); return; } @@ -1255,19 +1253,19 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); if (phba->pport->port_state != LPFC_FLOGI) lpfc_initial_flogi(phba->pport); return; } - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!fcf_mbxq) { - spin_lock_irqsave(&phba->hbalock, flags); + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); return; } @@ -1276,9 +1274,9 @@ lpfc_register_fcf(struct lpfc_hba *phba) fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - spin_lock_irqsave(&phba->hbalock, flags); + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irq(&phba->hbalock); mempool_free(fcf_mbxq, phba->mbox_mem_pool); } @@ -2851,6 +2849,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct Scsi_Host *shost = lpfc_shost_from_vport(vport); pmb->context1 = NULL; + pmb->context2 = NULL; if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; @@ -3149,6 +3148,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp = (struct lpfc_nodelist *) pmb->context2; pmb->context1 = NULL; pmb->context2 = NULL; + if (mb->mbxStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0258 Register Fabric login error: 0x%x\n", @@ -3218,6 +3218,9 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; + pmb->context1 = NULL; + pmb->context2 = NULL; + if (mb->mbxStatus) { out: lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, @@ -3249,8 +3252,6 @@ out: return; } - pmb->context1 = NULL; - ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; @@ -4784,6 +4785,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; pmb->context1 = NULL; + pmb->context2 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; ndlp->nlp_flag |= NLP_RPI_VALID; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 87a4d09a6641..699c9cf2dad2 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4727,8 +4727,8 @@ out_free_mem: * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) @@ -5421,7 +5421,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) * * Return codes * 0 - successful - * ENOMEM - could not allocated memory. + * -ENOMEM - could not allocated memory. 
**/ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) @@ -5520,8 +5520,8 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ static int lpfc_sli4_read_config(struct lpfc_hba *phba) @@ -5624,8 +5624,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ static int lpfc_setup_endian_order(struct lpfc_hba *phba) @@ -5673,8 +5673,8 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ static int lpfc_sli4_queue_create(struct lpfc_hba *phba) @@ -5968,8 +5968,8 @@ out_error: * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ static void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) @@ -6032,8 +6032,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_queue_setup(struct lpfc_hba *phba) @@ -6277,8 +6277,8 @@ out_error: * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ void lpfc_sli4_queue_unset(struct lpfc_hba *phba) @@ -6483,8 +6483,8 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) * * Return codes * 0 - successful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No availble memory + * -EIO - The mailbox failed to complete successfully. **/ int lpfc_pci_function_reset(struct lpfc_hba *phba) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 23a47e536858..bbbd8ba5c1a7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1730,10 +1730,11 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { + struct lpfc_vport *vport = pmb->vport; struct lpfc_dmabuf *mp; + struct lpfc_nodelist *ndlp; uint16_t rpi, vpi; int rc; - struct lpfc_vport *vport = pmb->vport; mp = (struct lpfc_dmabuf *) (pmb->context1); @@ -1774,6 +1775,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; } + if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { + ndlp = (struct lpfc_nodelist *)pmb->context2; + lpfc_nlp_put(ndlp); + pmb->context2 = NULL; + } + if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, pmb); else @@ -4186,7 +4193,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, * * Return codes * 0 - successful - * ENOMEM - could not allocated memory. + * -ENOMEM - could not allocated memory. 
**/ static int lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, @@ -9724,8 +9731,8 @@ out_fail: * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. **/ uint32_t lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) @@ -9840,8 +9847,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. **/ uint32_t lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, @@ -10011,8 +10018,8 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. **/ int32_t lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, @@ -10146,8 +10153,8 @@ out: * command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. **/ uint32_t lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, @@ -10234,8 +10241,8 @@ out: * mailbox command to finish before continuing. * * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. + * memory this function will return -ENOMEM. If the queue create mailbox command + * fails this function will return -ENXIO. **/ uint32_t lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, @@ -10403,7 +10410,7 @@ out: * The @eq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. + * command fails this function will return -ENXIO. **/ uint32_t lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) @@ -10458,7 +10465,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) * The @cq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. + * command fails this function will return -ENXIO. **/ uint32_t lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) @@ -10511,7 +10518,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) * The @mq struct is used to get the queue ID of the queue to destroy. 
* * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. + * command fails this function will return -ENXIO. **/ uint32_t lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) @@ -10564,7 +10571,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) * The @wq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. + * command fails this function will return -ENXIO. **/ uint32_t lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) @@ -10616,7 +10623,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) * The @rq struct is used to get the queue ID of the queue to destroy. * * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. + * command fails this function will return -ENXIO. **/ uint32_t lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, @@ -11819,7 +11826,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, * * Return codes * 0 - successful - * EIO - The mailbox failed to complete successfully. + * -EIO - The mailbox failed to complete successfully. * When this error occurs, the driver is not guaranteed * to have any rpi regions posted to the device and * must either attempt to repost the regions or take a @@ -11857,8 +11864,8 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) * * Return codes * 0 - successful - * ENOMEM - No available memory - * EIO - The mailbox failed to complete successfully. + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. **/ int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) @@ -12805,8 +12812,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) LPFC_MBOXQ_t *mb, *nextmb; struct lpfc_dmabuf *mp; struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *act_mbx_ndlp = NULL; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + LIST_HEAD(mbox_cmd_list); + /* Clean up internally queued mailbox commands with the vport */ spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if (mb->vport != vport) @@ -12816,6 +12826,28 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) (mb->u.mb.mbxCommand != MBX_REG_VPI)) continue; + list_del(&mb->list); + list_add_tail(&mb->list, &mbox_cmd_list); + } + /* Clean up active mailbox command with the vport */ + mb = phba->sli.mbox_active; + if (mb && (mb->vport == vport)) { + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || + (mb->u.mb.mbxCommand == MBX_REG_VPI)) + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { + act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; + /* Put reference count for delayed processing */ + act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); + /* Unregister the RPI when mailbox complete */ + mb->mbox_flag |= LPFC_MBX_IMED_UNREG; + } + } + spin_unlock_irq(&phba->hbalock); + + /* Release the cleaned-up mailbox commands */ + while (!list_empty(&mbox_cmd_list)) { + list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { if (phba->sli_rev == LPFC_SLI_REV4) __lpfc_sli4_free_rpi(phba, @@ -12826,36 +12858,24 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) kfree(mp); } ndlp = (struct lpfc_nodelist *) mb->context2; + mb->context2 = NULL; if (ndlp) { spin_lock(shost->host_lock); 
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; spin_unlock(shost->host_lock); lpfc_nlp_put(ndlp); - mb->context2 = NULL; } } - list_del(&mb->list); mempool_free(mb, phba->mbox_mem_pool); } - mb = phba->sli.mbox_active; - if (mb && (mb->vport == vport)) { - if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || - (mb->u.mb.mbxCommand == MBX_REG_VPI)) - mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { - ndlp = (struct lpfc_nodelist *) mb->context2; - if (ndlp) { - spin_lock(shost->host_lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(shost->host_lock); - lpfc_nlp_put(ndlp); - mb->context2 = NULL; - } - /* Unregister the RPI when mailbox complete */ - mb->mbox_flag |= LPFC_MBX_IMED_UNREG; - } + + /* Release the ndlp with the cleaned-up active mailbox command */ + if (act_mbx_ndlp) { + spin_lock(shost->host_lock); + act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; + spin_unlock(shost->host_lock); + lpfc_nlp_put(act_mbx_ndlp); } - spin_unlock_irq(&phba->hbalock); } /** diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 1655507a682c..a5281ce893d0 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -580,7 +580,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport) "static vport.\n"); return VPORT_ERROR; } - + spin_lock_irq(&phba->hbalock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(&phba->hbalock); /* * If we are not unloading the driver then prevent the vport_delete * from happening until after this vport's discovery is finished. @@ -618,10 +620,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) scsi_host_put(shost); return VPORT_INVAL; } - spin_lock_irq(&phba->hbalock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(&phba->hbalock); - lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); -- cgit v1.2.3 From dcf2a4e0792e837d6133506444a033a95cbc9616 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 29 Sep 2010 11:18:53 -0400 Subject: [SCSI] lpfc 8.3.17: SLI Additions and Fixes - Added driver support for management application to pass down two security specific mailbox commands (MBX_SECURITY_MGMT and MBX_AUTH_PORT) - Added driver support for handling FIPS zeroization trap of host ERATT ER8, performing selective reset and bringing the device up. - Added code to detect INIT_LINK mailbox command completion returning status MBXERR_SEC_NO_PERMISSION. - Increased the wait timeout on host status register HS_FFRDY and HS_MBRDY being set. - Remove the port offline code from the Heartbeat TMO handler. 
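For reference, the MBXERR_SEC_NO_PERMISSION detection described above boils down to a single
status check after the INIT_LINK mailbox completes. The sketch below is illustrative only (the
wrapper name example_check_init_link_status is hypothetical); the real hunks in lpfc_attr.c and
lpfc_sli.c follow in the diff.

/* Illustrative sketch: after INIT_LINK completes, warn when the port
 * still requires SLI authentication.  MBXERR_SEC_NO_PERMISSION (0xF02)
 * is the mailbox status introduced by this patch.
 */
static void
example_check_init_link_status(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
                               int mbxstatus)
{
        if ((mbxstatus == MBX_SUCCESS) &&
            (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                                "SLI authentication is required for "
                                "INIT_LINK but has not been done yet\n");
}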
Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_attr.c | 10 ++++++++ drivers/scsi/lpfc/lpfc_hw.h | 6 ++++- drivers/scsi/lpfc/lpfc_init.c | 45 ++++++++++++++++++----------------- drivers/scsi/lpfc/lpfc_sli.c | 55 ++++++++++++++++++++++++++++++------------- 4 files changed, 78 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f6efc6fe86d7..f681eea57730 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -586,6 +586,11 @@ lpfc_issue_lip(struct Scsi_Host *shost) phba->cfg_link_speed); mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); + if ((mbxstatus == MBX_SUCCESS) && + (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "2859 SLI authentication is required " + "for INIT_LINK but has not done yet\n"); } lpfc_set_loopback_flag(phba); @@ -3782,6 +3787,11 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj, case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: break; + case MBX_SECURITY_MGMT: + case MBX_AUTH_PORT: + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + return -EPERM; + break; case MBX_READ_SPARM64: case MBX_READ_LA: case MBX_READ_LA64: diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 1676f61291e7..a631647051d9 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1380,6 +1380,9 @@ typedef struct { /* FireFly BIU registers */ #define MBX_INIT_VFI 0xA3 #define MBX_INIT_VPI 0xA4 +#define MBX_AUTH_PORT 0xF8 +#define MBX_SECURITY_MGMT 0xF9 + /* IOCB Commands */ #define CMD_RCV_SEQUENCE_CX 0x01 @@ -1502,7 +1505,8 @@ typedef struct { /* FireFly BIU registers */ #define MBXERR_DMA_ERROR 15 #define MBXERR_ERROR 16 #define MBXERR_LINK_DOWN 0x33 -#define MBX_NOT_FINISHED 255 +#define MBXERR_SEC_NO_PERMISSION 0xF02 +#define MBX_NOT_FINISHED 255 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ #define MBX_TIMEOUT 0xfffffe /* time-out expired waiting for */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 699c9cf2dad2..053eaef09005 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1076,21 +1076,16 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) } else { /* * If heart beat timeout called with hb_outstanding set - * we need to take the HBA offline. + * we need to give the hb mailbox cmd a chance to + * complete or TMO. 
*/ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0459 Adapter heartbeat failure, " - "taking this port offline.\n"); - - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; - spin_unlock_irq(&phba->hbalock); - - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_unblock_mgmt_io(phba); - phba->link_state = LPFC_HBA_ERROR; - lpfc_hba_down_post(phba); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0459 Adapter heartbeat still out" + "standing:last compl time was %d ms.\n", + jiffies_to_msecs(jiffies + - phba->last_completion_time)); + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); } } } @@ -1277,13 +1272,21 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) if (phba->hba_flag & DEFER_ERATT) lpfc_handle_deferred_eratt(phba); - if (phba->work_hs & HS_FFER6) { - /* Re-establishing Link */ - lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, - "1301 Re-establishing Link " - "Data: x%x x%x x%x\n", - phba->work_hs, - phba->work_status[0], phba->work_status[1]); + if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) { + if (phba->work_hs & HS_FFER6) + /* Re-establishing Link */ + lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + "1301 Re-establishing Link " + "Data: x%x x%x x%x\n", + phba->work_hs, phba->work_status[0], + phba->work_status[1]); + if (phba->work_hs & HS_FFER8) + /* Device Zeroization */ + lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, + "2861 Host Authentication device " + "zeroization Data:x%x x%x x%x\n", + phba->work_hs, phba->work_status[0], + phba->work_status[1]); spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_ACTIVE; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index bbbd8ba5c1a7..34dd87f542c2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1677,6 +1677,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_RESUME_RPI: case MBX_READ_EVENT_LOG_STATUS: case MBX_READ_EVENT_LOG: + case MBX_SECURITY_MGMT: + case MBX_AUTH_PORT: ret = mbxCommand; break; default: @@ -1781,6 +1783,13 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context2 = NULL; } + /* Check security permission status on INIT_LINK mailbox command */ + if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && + (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "2860 SLI authentication is required " + "for INIT_LINK but has not done yet\n"); + if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, pmb); else @@ -3658,11 +3667,15 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) i = 0; while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { - /* Check every 100ms for 5 retries, then every 500ms for 5, then - * every 2.5 sec for 5, then reset board and every 2.5 sec for - * 4. + /* Check every 10ms for 10 retries, then every 100ms for 90 + * retries, then every 1 sec for 50 retires for a total of + * ~60 seconds before reset the board again and check every + * 1 sec for 50 retries. The up to 60 seconds before the + * board ready is required by the Falcon FIPS zeroization + * complete, and any reset the board in between shall cause + * restart of zeroization, further delay the board ready. 
*/ - if (i++ >= 20) { + if (i++ >= 200) { /* Adapter failed to init, timeout, status reg */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -3690,16 +3703,15 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba) return -EIO; } - if (i <= 5) { + if (i <= 10) msleep(10); - } else if (i <= 10) { - msleep(500); - } else { - msleep(2500); - } + else if (i <= 100) + msleep(100); + else + msleep(1000); - if (i == 15) { - /* Do post */ + if (i == 150) { + /* Do post */ phba->pport->port_state = LPFC_VPORT_UNKNOWN; lpfc_sli_brdrestart(phba); } @@ -5950,6 +5962,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, uint8_t command_type = ELS_COMMAND_NON_FIP; uint8_t cmnd; uint16_t xritag; + uint16_t abrt_iotag; + struct lpfc_iocbq *abrtiocbq; struct ulp_bde64 *bpl = NULL; uint32_t els_id = ELS_ID_DEFAULT; int numBdes, i; @@ -6162,9 +6176,17 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, case CMD_ABORT_XRI_CX: /* words 0-2 memcpy should be 0 rserved */ /* port will send abts */ - if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; + if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { + abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; + fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; + } else + fip = 0; + + if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) /* - * The link is down so the fw does not need to send abts + * The link is down, or the command was ELS_FIP + * so the fw does not need to send abts * on the wire. */ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); @@ -7895,7 +7917,7 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) /* Check if there is a deferred error condition is active */ if ((HS_FFER1 & phba->work_hs) && ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | - HS_FFER6 | HS_FFER7) & phba->work_hs)) { + HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { phba->hba_flag |= DEFER_ERATT; /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); @@ -8211,7 +8233,8 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) */ if ((HS_FFER1 & phba->work_hs) && ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | - HS_FFER6 | HS_FFER7) & phba->work_hs)) { + HS_FFER6 | HS_FFER7 | HS_FFER8) & + phba->work_hs)) { phba->hba_flag |= DEFER_ERATT; /* Clear all interrupt enable conditions */ writel(0, phba->HCregaddr); -- cgit v1.2.3 From 515e0aa21ec399ddcf783140a596312e83d20a80 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 29 Sep 2010 11:19:00 -0400 Subject: [SCSI] lpfc 8.3.17: BSG fixes - Add support for bsg MBX_SLI4_CONFIG. - Multiply linkup timeout in loopback test code by 100. - Set iocb_stat to 0 in the lpfcdiag_loop_get_xri function. 
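The bsg MBX_SLI4_CONFIG support listed above centers on reading the receive length from the
first SGE of the non-embedded command and bounds-checking it before any receive DMA buffer is
built. A minimal sketch of that validation is shown here; the helper name
example_sli4_config_rcv_len is hypothetical, while the types and the MAILBOX_EXT_SIZE limit are
the ones used in the lpfc_bsg.c hunk below.

/* Illustrative sketch: pull the receive length out of the first SGE of
 * a non-embedded MBX_SLI4_CONFIG mailbox and reject lengths that are
 * zero or larger than the mailbox extension area.
 */
static int
example_sli4_config_rcv_len(MAILBOX_t *mb, uint32_t *rcv_len)
{
        struct lpfc_mbx_nembed_cmd *nembed_sge =
                (struct lpfc_mbx_nembed_cmd *)&mb->un.varWords[0];
        uint32_t receive_length = nembed_sge->sge[0].length;

        /* receive length cannot exceed the mailbox extension size */
        if ((receive_length == 0) || (receive_length > MAILBOX_EXT_SIZE))
                return -ERANGE;

        *rcv_len = receive_length;
        return 0;
}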
Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_bsg.c | 68 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 10cfc64782ad..f5d60b55f53a 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1518,7 +1518,7 @@ lpfc_bsg_diag_mode(struct fc_bsg_job *job) loopback_mode = (struct diag_mode_set *) job->request->rqst_data.h_vendor.vendor_cmd; link_flags = loopback_mode->type; - timeout = loopback_mode->timeout; + timeout = loopback_mode->timeout * 100; if ((phba->link_state == LPFC_HBA_ERROR) || (psli->sli_flag & LPFC_BLOCK_MGMT_IO) || @@ -1732,7 +1732,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, struct lpfc_sli_ct_request *ctreq = NULL; int ret_val = 0; int time_left; - int iocb_stat; + int iocb_stat = 0; unsigned long flags; *txxri = 0; @@ -2473,6 +2473,17 @@ lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) to += sizeof(MAILBOX_t); size = pmboxq->u.mb.un.varWords[5]; memcpy(to, from, size); + } else if ((phba->sli_rev == LPFC_SLI_REV4) && + (pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) { + struct lpfc_mbx_nembed_cmd *nembed_sge = + (struct lpfc_mbx_nembed_cmd *) + &pmboxq->u.mb.un.varWords[0]; + + from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. + virt; + to += sizeof(MAILBOX_t); + size = nembed_sge->sge[0].length; + memcpy(to, from, size); } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) { from = (uint8_t *)dd_data->context_un. mbox.dmp->dma.virt; @@ -2914,6 +2925,59 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, from += sizeof(MAILBOX_t); memcpy((uint8_t *)dmp->dma.virt, from, bde->tus.f.bdeSize); + } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) { + struct lpfc_mbx_nembed_cmd *nembed_sge; + struct mbox_header *header; + uint32_t receive_length; + + /* rebuild the command for sli4 using our own buffers + * like we do for biu diags + */ + header = (struct mbox_header *)&pmb->un.varWords[0]; + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &pmb->un.varWords[0]; + receive_length = nembed_sge->sge[0].length; + + /* receive length cannot be greater than mailbox + * extension size + */ + if ((receive_length == 0) || + (receive_length > MAILBOX_EXT_SIZE)) { + rc = -ERANGE; + goto job_done; + } + + rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!rxbmp) { + rc = -ENOMEM; + goto job_done; + } + + rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); + if (!rxbmp->virt) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&rxbmp->list); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, + 0); + if (!dmp) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&dmp->dma.list); + nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys); + nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys); + /* copy the transmit data found in the mailbox + * extension area + */ + from = (uint8_t *)mb; + from += sizeof(MAILBOX_t); + memcpy((uint8_t *)dmp->dma.virt, from, + header->cfg_mhdr.payload_length); } } -- cgit v1.2.3 From eee8877ee5e8a879f78034001b5e7c04db005ab2 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 29 Sep 2010 11:19:08 -0400 Subject: [SCSI] lpfc 8.3.17: SCSI fixes - Fail I/Os with incomplete data that complete with SCSI Check condition. - Complete aborted I/Os with host_scribble equal to NULL with success. 
- Initialize context1 field of iocbq in the new_scsi_buf routines. Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_scsi.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 6e331c73170e..3a658953486c 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -599,6 +599,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpClass = CLASS3; psb->status = IOSTAT_SUCCESS; /* Put it back into the SCSI buffer list */ + psb->cur_iocbq.context1 = psb; lpfc_release_scsi_buf_s3(phba, psb); } @@ -849,6 +850,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpBdeCount = 1; iocb->ulpLe = 1; iocb->ulpClass = CLASS3; + psb->cur_iocbq.context1 = psb; if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; else @@ -2276,15 +2278,24 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * Check SLI validation that all the transfer was actually done * (fcpi_parm should be zero). Apply check only to reads. */ - } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && - (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { + } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, "9029 FCP Read Check Error Data: " - "x%x x%x x%x x%x\n", + "x%x x%x x%x x%x x%x\n", be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcprsp->rspResId), - fcpi_parm, cmnd->cmnd[0]); - host_status = DID_ERROR; + fcpi_parm, cmnd->cmnd[0], scsi_status); + switch (scsi_status) { + case SAM_STAT_GOOD: + case SAM_STAT_CHECK_CONDITION: + /* Fabric dropped a data frame. Fail any successful + * command in which we detected dropped frames. + * A status of good or some check conditions could + * be considered a successful command. + */ + host_status = DID_ERROR; + break; + } scsi_set_resid(cmnd, scsi_bufflen(cmnd)); } @@ -3072,7 +3083,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (ret) return ret; lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; - BUG_ON(!lpfc_cmd); + if (!lpfc_cmd) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "2873 SCSI Layer I/O Abort Request IO CMPL Status " + "x%x ID %d " + "LUN %d snum %#lx\n", ret, cmnd->device->id, + cmnd->device->lun, cmnd->serial_number); + return SUCCESS; + } /* * If pCmd field of the corresponding lpfc_scsi_buf structure -- cgit v1.2.3 From 3677a3a76e190f801af0778df3b8efa1fe161a6e Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 29 Sep 2010 11:19:14 -0400 Subject: [SCSI] lpfc 8.3.17: Replace function reset methodology Replace graceful teardown steps with the singular function reset command. 
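With the graceful teardown gone, the SLI4 unset path reduces to three calls: stop the port,
disable interrupts, and issue the PCI function reset. The condensed sketch below (function name
illustrative) shows the resulting shape; the actual edits to lpfc_sli4_unset_hba() and
lpfc_sli4_hba_unset() are in the lpfc_init.c hunks that follow.

/* Illustrative sketch of the simplified SLI4 teardown: queue unset,
 * default-FCF removal and FCFI unregister are no longer done here; a
 * single PCI function reset cleans up the port state instead.
 */
static void
example_sli4_teardown(struct lpfc_hba *phba)
{
        phba->pport->work_port_events = 0;

        /* Stop the SLI4 device port */
        lpfc_stop_port(phba);

        /* Disable PCI subsystem interrupt */
        lpfc_sli4_disable_intr(phba);

        /* Reset SLI4 HBA FCoE function */
        lpfc_pci_function_reset(phba);
}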
Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_init.c | 132 +++--------------------------------------- drivers/scsi/lpfc/lpfc_sli.c | 76 ------------------------ drivers/scsi/lpfc/lpfc_sli4.h | 2 - 3 files changed, 9 insertions(+), 201 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 053eaef09005..295c7ddb36c1 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2887,65 +2887,6 @@ lpfc_stop_port(struct lpfc_hba *phba) phba->lpfc_stop_port(phba); } -/** - * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to remove the driver default fcf record from - * the port. This routine currently acts on FCF Index 0. - * - **/ -void -lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) -{ - int rc = 0; - LPFC_MBOXQ_t *mboxq; - struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; - uint32_t mbox_tmo, req_len; - uint32_t shdr_status, shdr_add_status; - - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2020 Failed to allocate mbox for ADD_FCF cmd\n"); - return; - } - - req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - - sizeof(struct lpfc_sli4_cfg_mhdr); - rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, - req_len, LPFC_SLI4_MBX_EMBED); - /* - * In phase 1, there is a single FCF index, 0. In phase2, the driver - * supports multiple FCF indices. - */ - del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; - bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); - bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, - phba->fcf.current_rec.fcf_indx); - - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); - } - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr_status = bf_get(lpfc_mbox_hdr_status, - &del_fcf_record->header.cfg_shdr.response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &del_fcf_record->header.cfg_shdr.response); - if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2516 DEL FCF of default FCF Index failed " - "mbx status x%x, status x%x add_status x%x\n", - rc, shdr_status, shdr_add_status); - } - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); -} - /** * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer * @phba: Pointer to hba for which this call is being executed. @@ -4288,12 +4229,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; - /* unregister default FCFI from the HBA */ - lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); - - /* Free the default FCR table */ - lpfc_sli_remove_dflt_fcf(phba); - /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -4321,9 +4256,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) lpfc_sli4_cq_event_release_all(phba); lpfc_sli4_cq_event_pool_destroy(phba); - /* Reset SLI4 HBA FCoE function */ - lpfc_pci_function_reset(phba); - /* Free the bsmbx region. 
*/ lpfc_destroy_bootstrap_mbox(phba); @@ -4550,7 +4482,6 @@ lpfc_free_sgl_list(struct lpfc_hba *phba) { struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; LIST_HEAD(sglq_list); - int rc = 0; spin_lock_irq(&phba->hbalock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); @@ -4563,11 +4494,6 @@ lpfc_free_sgl_list(struct lpfc_hba *phba) kfree(sglq_entry); phba->sli4_hba.total_sglq_bufs--; } - rc = lpfc_sli4_remove_all_sgl_pages(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2005 Unable to deregister pages from HBA: %x\n", rc); - } kfree(phba->sli4_hba.lpfc_els_sgl_array); } @@ -6596,50 +6522,6 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) return cmdsent; } -/** - * lpfc_sli4_fcfi_unreg - Unregister fcfi to device - * @phba: pointer to lpfc hba data structure. - * @fcfi: fcf index. - * - * This routine is invoked to unregister a FCFI from device. - **/ -void -lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) -{ - LPFC_MBOXQ_t *mbox; - uint32_t mbox_tmo; - int rc; - unsigned long flags; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - - if (!mbox) - return; - - lpfc_unreg_fcfi(mbox, fcfi); - - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - if (rc != MBX_SUCCESS) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2517 Unregister FCFI command failed " - "status %d, mbxStatus x%x\n", rc, - bf_get(lpfc_mqe_status, &mbox->u.mqe)); - else { - spin_lock_irqsave(&phba->hbalock, flags); - /* Mark the FCFI is no longer registered */ - phba->fcf.fcf_flag &= - ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_SCAN_DONE); - spin_unlock_irqrestore(&phba->hbalock, flags); - } -} - /** * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. * @phba: pointer to lpfc hba data structure. @@ -7377,10 +7259,14 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba) phba->pport->work_port_events = 0; - lpfc_sli4_hba_down(phba); + /* Stop the SLI4 device port */ + lpfc_stop_port(phba); lpfc_sli4_disable_intr(phba); + /* Reset SLI4 HBA FCoE function */ + lpfc_pci_function_reset(phba); + return; } @@ -7429,15 +7315,15 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); } - /* Tear down the queues in the HBA */ - lpfc_sli4_queue_unset(phba); - /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); /* Stop kthread signal shall trigger work_done one more time */ kthread_stop(phba->worker_thread); + /* Reset SLI4 HBA FCoE function */ + lpfc_pci_function_reset(phba); + /* Stop the SLI4 device port */ phba->pport->work_port_events = 0; } @@ -8373,7 +8259,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); - /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi + /* Perform scsi free before driver resource_unset since scsi * buffers are released to their corresponding pools here. */ lpfc_scsi_free(phba); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 34dd87f542c2..0d1e187b005d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -6929,37 +6929,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) return 1; } -/** - * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA - * @phba: Pointer to HBA context object. 
- * - * This function cleans up all queues, iocb, buffers, mailbox commands while - * shutting down the SLI4 HBA FCoE function. This function is called with no - * lock held and always returns 1. - * - * This function does the following to cleanup driver FCoE function resources: - * - Free discovery resources for each virtual port - * - Cleanup any pending fabric iocbs - * - Iterate through the iocb txq and free each entry in the list. - * - Free up any buffer posted to the HBA. - * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. - * - Free mailbox commands in the mailbox queue. - **/ -int -lpfc_sli4_hba_down(struct lpfc_hba *phba) -{ - /* Stop the SLI4 device port */ - lpfc_stop_port(phba); - - /* Tear down the queues in the HBA */ - lpfc_sli4_queue_unset(phba); - - /* unregister default FCFI from the HBA */ - lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); - - return 1; -} - /** * lpfc_sli_pcimem_bcopy - SLI memory copy function * @srcp: Source memory pointer. @@ -10788,51 +10757,6 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, } return 0; } -/** - * lpfc_sli4_remove_all_sgl_pages - Post scatter gather list for an XRI to HBA - * @phba: The virtual port for which this call being executed. - * - * This routine will remove all of the sgl pages registered with the hba. - * - * Return codes: - * 0 - Success - * -ENXIO, -ENOMEM - Failure - **/ -int -lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba) -{ - LPFC_MBOXQ_t *mbox; - int rc; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0, - LPFC_SLI4_MBX_EMBED); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); - /* The IOCTL status is embedded in the mailbox subheader. 
*/ - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.sli4_config.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2512 REMOVE_ALL_SGL_PAGES mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; - } - return rc; -} /** * lpfc_sli4_next_xritag - Get an xritag for the io diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index a3b24d99a2a7..a0ca572ec28b 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -481,7 +481,6 @@ struct lpfc_rpi_hdr { */ int lpfc_pci_function_reset(struct lpfc_hba *); int lpfc_sli4_hba_setup(struct lpfc_hba *); -int lpfc_sli4_hba_down(struct lpfc_hba *); int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, uint8_t, uint32_t, bool); void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); @@ -514,7 +513,6 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *); void lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); -int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); -- cgit v1.2.3 From 969c9165581fbb55cab0f500f555a69279cecb57 Mon Sep 17 00:00:00 2001 From: James Smart Date: Wed, 29 Sep 2010 11:19:22 -0400 Subject: [SCSI] lpfc 8.3.17: Update lpfc driver version to 8.3.17 Update lpfc driver version to 8.3.17 Signed-off-by: Alex Iannicelli Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 61afb3420a96..f93120e4c796 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.16" +#define LPFC_DRIVER_VERSION "8.3.17" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" -- cgit v1.2.3 From 39a985547cbfcbb0b23667b69b8ae82a6cf312ac Mon Sep 17 00:00:00 2001 From: bo yang Date: Wed, 22 Sep 2010 22:36:29 -0400 Subject: [SCSI] megaraid_sas: Add Online Controller Reset to MegaRAID SAS drive To add the Online controller reset support, driver need to do: a). reset the controller chips -- Xscale and Gen2 which will change the function calls and add the reset function related to this two chips. b). during the reset, driver will store the pending cmds which not returned by FW to driver's pending queue. Driver will re-issue those pending cmds again to FW after the OCR finished. c). In driver's timeout routine, driver will report to OS as reset. Also driver's queue routine will block the cmds until the OCR finished. d). in Driver's ISR routine, if driver get the FW state as state change, FW in Failure status and FW support online controller reset (OCR), driver will start to do the controller reset. e). 
In driver's IOCTL routine, the application cmds will wait for the OCR to finish, then issue the cmds to FW. Signed-off-by Bo Yang Signed-off-by: James Bottomley --- drivers/scsi/megaraid/megaraid_sas.c | 756 ++++++++++++++++++++++++++++++++--- drivers/scsi/megaraid/megaraid_sas.h | 88 +++- 2 files changed, 787 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 99e4478c3f3e..55951f4d3260 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -62,6 +62,11 @@ MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); +static int megasas_transition_to_ready(struct megasas_instance *instance); +static int megasas_get_pd_list(struct megasas_instance *instance); +static int megasas_issue_init_mfi(struct megasas_instance *instance); +static int megasas_register_aen(struct megasas_instance *instance, + u32 seq_num, u32 class_locale_word); /* * PCI ID table for all supported controllers */ @@ -164,7 +169,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) static inline void megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) { - writel(1, &(regs)->outbound_intr_mask); + writel(0, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(®s->outbound_intr_mask); @@ -200,24 +205,27 @@ static int megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) { u32 status; + u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(®s->outbound_intr_status); - if (!(status & MFI_OB_INTR_STATUS_MASK)) { - return 1; - } + if (status & MFI_OB_INTR_STATUS_MASK) + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT) + mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ - writel(status, ®s->outbound_intr_status); + if (mfiStatus) + writel(status, ®s->outbound_intr_status); /* Dummy readl to force pci flush */ readl(®s->outbound_intr_status); - return 0; + return mfiStatus; } /** @@ -232,8 +240,69 @@ megasas_fire_cmd_xscale(struct megasas_instance *instance, u32 frame_count, struct megasas_register_set __iomem *regs) { + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_adp_reset_xscale - For controller reset + * @regs: MFI register set + */ +static int +megasas_adp_reset_xscale(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + u32 i; + u32 pcidata; + writel(MFI_ADP_RESET, ®s->inbound_doorbell); + + for (i = 0; i < 3; i++) + msleep(1000); /* sleep for 3 secs */ + pcidata = 0; + pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); + printk(KERN_NOTICE "pcidata = %x\n", pcidata); + if (pcidata & 0x2) { + printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata); + pcidata &= ~0x2; + pci_write_config_dword(instance->pdev, + MFI_1068_PCSR_OFFSET, pcidata); + + for (i = 0; i < 2; i++) + msleep(1000); /* need to wait 2 secs again */ + + pcidata = 0; + pci_read_config_dword(instance->pdev, + MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); + printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata); + if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { + printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata); + pcidata = 0; + 
pci_write_config_dword(instance->pdev, + MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); + } + } + return 0; +} + +/** + * megasas_check_reset_xscale - For controller reset check + * @regs: MFI register set + */ +static int +megasas_check_reset_xscale(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + u32 consumer; + consumer = *instance->consumer; + + if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && + (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { + return 1; + } + return 0; } static struct megasas_instance_template megasas_instance_template_xscale = { @@ -243,6 +312,8 @@ static struct megasas_instance_template megasas_instance_template_xscale = { .disable_intr = megasas_disable_intr_xscale, .clear_intr = megasas_clear_intr_xscale, .read_fw_status_reg = megasas_read_fw_status_reg_xscale, + .adp_reset = megasas_adp_reset_xscale, + .check_reset = megasas_check_reset_xscale, }; /** @@ -264,7 +335,7 @@ megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) { writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); - writel(~0x80000004, &(regs)->outbound_intr_mask); + writel(~0x80000000, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(®s->outbound_intr_mask); @@ -307,7 +378,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) status = readl(®s->outbound_intr_status); if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) { - return 1; + return 0; } /* @@ -318,7 +389,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) /* Dummy readl to force pci flush */ readl(®s->outbound_doorbell_clear); - return 0; + return 1; } /** * megasas_fire_cmd_ppc - Sends command to the FW @@ -332,10 +403,34 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance, u32 frame_count, struct megasas_register_set __iomem *regs) { + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_adp_reset_ppc - For controller reset + * @regs: MFI register set + */ +static int +megasas_adp_reset_ppc(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + return 0; } +/** + * megasas_check_reset_ppc - For controller reset check + * @regs: MFI register set + */ +static int +megasas_check_reset_ppc(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + return 0; +} static struct megasas_instance_template megasas_instance_template_ppc = { .fire_cmd = megasas_fire_cmd_ppc, @@ -343,6 +438,8 @@ static struct megasas_instance_template megasas_instance_template_ppc = { .disable_intr = megasas_disable_intr_ppc, .clear_intr = megasas_clear_intr_ppc, .read_fw_status_reg = megasas_read_fw_status_reg_ppc, + .adp_reset = megasas_adp_reset_ppc, + .check_reset = megasas_check_reset_ppc, }; /** @@ -397,7 +494,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) status = readl(®s->outbound_intr_status); if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { - return 1; + return 0; } /* @@ -410,7 +507,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) */ readl(®s->outbound_intr_status); - return 0; + return 1; } /** @@ -426,11 +523,33 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { unsigned long flags; - spin_lock_irqsave(&instance->fire_lock, flags); + spin_lock_irqsave(&instance->hba_lock, flags); writel(0, 
&(regs)->inbound_high_queue_port); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); - spin_unlock_irqrestore(&instance->fire_lock, flags); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_adp_reset_skinny - For controller reset + * @regs: MFI register set + */ +static int +megasas_adp_reset_skinny(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + return 0; +} + +/** + * megasas_check_reset_skinny - For controller reset check + * @regs: MFI register set + */ +static int +megasas_check_reset_skinny(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + return 0; } static struct megasas_instance_template megasas_instance_template_skinny = { @@ -440,6 +559,8 @@ static struct megasas_instance_template megasas_instance_template_skinny = { .disable_intr = megasas_disable_intr_skinny, .clear_intr = megasas_clear_intr_skinny, .read_fw_status_reg = megasas_read_fw_status_reg_skinny, + .adp_reset = megasas_adp_reset_skinny, + .check_reset = megasas_check_reset_skinny, }; @@ -495,23 +616,29 @@ static int megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) { u32 status; + u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(®s->outbound_intr_status); - if (!(status & MFI_GEN2_ENABLE_INTERRUPT_MASK)) - return 1; + if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) { + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + } + if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { + mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + } /* * Clear the interrupt by writing back the same value */ - writel(status, ®s->outbound_doorbell_clear); + if (mfiStatus) + writel(status, ®s->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(®s->outbound_intr_status); - return 0; + return mfiStatus; } /** * megasas_fire_cmd_gen2 - Sends command to the FW @@ -525,8 +652,74 @@ megasas_fire_cmd_gen2(struct megasas_instance *instance, u32 frame_count, struct megasas_register_set __iomem *regs) { + unsigned long flags; + spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); + spin_unlock_irqrestore(&instance->hba_lock, flags); +} + +/** + * megasas_adp_reset_gen2 - For controller reset + * @regs: MFI register set + */ +static int +megasas_adp_reset_gen2(struct megasas_instance *instance, + struct megasas_register_set __iomem *reg_set) +{ + u32 retry = 0 ; + u32 HostDiag; + + writel(0, ®_set->seq_offset); + writel(4, ®_set->seq_offset); + writel(0xb, ®_set->seq_offset); + writel(2, ®_set->seq_offset); + writel(7, ®_set->seq_offset); + writel(0xd, ®_set->seq_offset); + msleep(1000); + + HostDiag = (u32)readl(®_set->host_diag); + + while ( !( HostDiag & DIAG_WRITE_ENABLE) ) { + msleep(100); + HostDiag = (u32)readl(®_set->host_diag); + printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n", + retry, HostDiag); + + if (retry++ >= 100) + return 1; + + } + + printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); + + writel((HostDiag | DIAG_RESET_ADAPTER), ®_set->host_diag); + + ssleep(10); + + HostDiag = (u32)readl(®_set->host_diag); + while ( ( HostDiag & DIAG_RESET_ADAPTER) ) { + msleep(100); + HostDiag = (u32)readl(®_set->host_diag); + printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n", + retry, HostDiag); + + if (retry++ >= 1000) + return 1; + + } + return 0; +} + +/** + * megasas_check_reset_gen2 - For controller reset check + * @regs: MFI register set + */ +static int 
+megasas_check_reset_gen2(struct megasas_instance *instance, + struct megasas_register_set __iomem *regs) +{ + return 0; } static struct megasas_instance_template megasas_instance_template_gen2 = { @@ -536,11 +729,13 @@ static struct megasas_instance_template megasas_instance_template_gen2 = { .disable_intr = megasas_disable_intr_gen2, .clear_intr = megasas_clear_intr_gen2, .read_fw_status_reg = megasas_read_fw_status_reg_gen2, + .adp_reset = megasas_adp_reset_gen2, + .check_reset = megasas_check_reset_gen2, }; /** * This is the end of set of functions & definitions -* specific to ppc (deviceid : 0x60) controllers +* specific to gen2 (deviceid : 0x78, 0x79) controllers */ /** @@ -599,8 +794,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); - wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA), - MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); + wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); return 0; } @@ -648,8 +842,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, /* * Wait for this cmd to complete */ - wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF), - MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ); + wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF); + cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); return 0; @@ -1131,14 +1325,22 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) u32 frame_count; struct megasas_cmd *cmd; struct megasas_instance *instance; + unsigned long flags; instance = (struct megasas_instance *) scmd->device->host->hostdata; - /* Don't process if we have already declared adapter dead */ - if (instance->hw_crit_error) + if (instance->issuepend_done == 0) return SCSI_MLQUEUE_HOST_BUSY; + spin_lock_irqsave(&instance->hba_lock, flags); + if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + spin_unlock_irqrestore(&instance->hba_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + spin_unlock_irqrestore(&instance->hba_lock, flags); + scmd->scsi_done = done; scmd->result = 0; @@ -1274,6 +1476,18 @@ static int megasas_slave_alloc(struct scsi_device *sdev) return 0; } +static void megaraid_sas_kill_hba(struct megasas_instance *instance) +{ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + writel(MFI_STOP_ADP, + &instance->reg_set->reserved_0[0]); + } else { + writel(MFI_STOP_ADP, + &instance->reg_set->inbound_doorbell); + } +} + /** * megasas_complete_cmd_dpc - Returns FW's controller structure * @instance_addr: Address of adapter soft state @@ -1291,7 +1505,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) unsigned long flags; /* If we have already declared adapter dead, donot complete cmds */ - if (instance->hw_crit_error) + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR ) return; spin_lock_irqsave(&instance->completion_lock, flags); @@ -1301,6 +1515,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) while (consumer != producer) { context = instance->reply_queue[consumer]; + if (context >= instance->max_fw_cmds) { + printk(KERN_ERR "Unexpected context value %x\n", + context); + BUG(); + } cmd = instance->cmd_list[context]; @@ -1350,7 +1569,76 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) static int megasas_wait_for_outstanding(struct megasas_instance *instance) { int i; + u32 reset_index; u32 
wait_time = MEGASAS_RESET_WAIT_TIME; + u8 adprecovery; + unsigned long flags; + struct list_head clist_local; + struct megasas_cmd *reset_cmd; + + spin_lock_irqsave(&instance->hba_lock, flags); + adprecovery = instance->adprecovery; + spin_unlock_irqrestore(&instance->hba_lock, flags); + + if (adprecovery != MEGASAS_HBA_OPERATIONAL) { + + INIT_LIST_HEAD(&clist_local); + spin_lock_irqsave(&instance->hba_lock, flags); + list_splice_init(&instance->internal_reset_pending_q, + &clist_local); + spin_unlock_irqrestore(&instance->hba_lock, flags); + + printk(KERN_NOTICE "megasas: HBA reset wait ...\n"); + for (i = 0; i < wait_time; i++) { + msleep(1000); + spin_lock_irqsave(&instance->hba_lock, flags); + adprecovery = instance->adprecovery; + spin_unlock_irqrestore(&instance->hba_lock, flags); + if (adprecovery == MEGASAS_HBA_OPERATIONAL) + break; + } + + if (adprecovery != MEGASAS_HBA_OPERATIONAL) { + printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n"); + spin_lock_irqsave(&instance->hba_lock, flags); + instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; + spin_unlock_irqrestore(&instance->hba_lock, flags); + return FAILED; + } + + reset_index = 0; + while (!list_empty(&clist_local)) { + reset_cmd = list_entry((&clist_local)->next, + struct megasas_cmd, list); + list_del_init(&reset_cmd->list); + if (reset_cmd->scmd) { + reset_cmd->scmd->result = DID_RESET << 16; + printk(KERN_NOTICE "%d:%p reset [%02x], %#lx\n", + reset_index, reset_cmd, + reset_cmd->scmd->cmnd[0], + reset_cmd->scmd->serial_number); + + reset_cmd->scmd->scsi_done(reset_cmd->scmd); + megasas_return_cmd(instance, reset_cmd); + } else if (reset_cmd->sync_cmd) { + printk(KERN_NOTICE "megasas:%p synch cmds" + "reset queue\n", + reset_cmd); + + reset_cmd->cmd_status = ENODATA; + instance->instancet->fire_cmd(instance, + reset_cmd->frame_phys_addr, + 0, instance->reg_set); + } else { + printk(KERN_NOTICE "megasas: %p unexpected" + "cmds lst\n", + reset_cmd); + } + reset_index++; + } + + return SUCCESS; + } for (i = 0; i < wait_time; i++) { @@ -1373,6 +1661,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) } if (atomic_read(&instance->fw_outstanding)) { + printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n"); /* * Send signal to FW to stop processing any pending cmds. * The controller will be taken offline by the OS now. 
@@ -1388,10 +1677,14 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) &instance->reg_set->inbound_doorbell); } megasas_dump_pending_frames(instance); - instance->hw_crit_error = 1; + spin_lock_irqsave(&instance->hba_lock, flags); + instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; + spin_unlock_irqrestore(&instance->hba_lock, flags); return FAILED; } + printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n"); + return SUCCESS; } @@ -1413,7 +1706,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n", scmd->serial_number, scmd->cmnd[0], scmd->retries); - if (instance->hw_crit_error) { + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { printk(KERN_ERR "megasas: cannot recover from previous reset " "failures\n"); return FAILED; @@ -1568,7 +1861,8 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) instance->aen_cmd = NULL; megasas_return_cmd(instance, cmd); - if (instance->unload == 0) { + if ((instance->unload == 0) && + ((instance->issuepend_done == 1))) { struct megasas_aen_event *ev; ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (!ev) { @@ -1663,6 +1957,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, struct megasas_header *hdr = &cmd->frame->hdr; unsigned long flags; + /* flag for the retry reset */ + cmd->retry_for_fw_reset = 0; + if (cmd->scmd) cmd->scmd->SCp.ptr = NULL; @@ -1782,40 +2079,302 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, } } +/** + * megasas_issue_pending_cmds_again - issue all pending cmds + * in FW again because of the fw reset + * @instance: Adapter soft state + */ +static inline void +megasas_issue_pending_cmds_again(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct list_head clist_local; + union megasas_evt_class_locale class_locale; + unsigned long flags; + u32 seq_num; + + INIT_LIST_HEAD(&clist_local); + spin_lock_irqsave(&instance->hba_lock, flags); + list_splice_init(&instance->internal_reset_pending_q, &clist_local); + spin_unlock_irqrestore(&instance->hba_lock, flags); + + while (!list_empty(&clist_local)) { + cmd = list_entry((&clist_local)->next, + struct megasas_cmd, list); + list_del_init(&cmd->list); + + if (cmd->sync_cmd || cmd->scmd) { + printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d" + "detected to be pending while HBA reset.\n", + cmd, cmd->scmd, cmd->sync_cmd); + + cmd->retry_for_fw_reset++; + + if (cmd->retry_for_fw_reset == 3) { + printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d" + "was tried multiple times during reset." 
+ "Shutting down the HBA\n", + cmd, cmd->scmd, cmd->sync_cmd); + megaraid_sas_kill_hba(instance); + + instance->adprecovery = + MEGASAS_HW_CRITICAL_ERROR; + return; + } + } + + if (cmd->sync_cmd == 1) { + if (cmd->scmd) { + printk(KERN_NOTICE "megaraid_sas: unexpected" + "cmd attached to internal command!\n"); + } + printk(KERN_NOTICE "megasas: %p synchronous cmd" + "on the internal reset queue," + "issue it again.\n", cmd); + cmd->cmd_status = ENODATA; + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr , + 0, instance->reg_set); + } else if (cmd->scmd) { + printk(KERN_NOTICE "megasas: %p scsi cmd [%02x],%#lx" + "detected on the internal queue, issue again.\n", + cmd, cmd->scmd->cmnd[0], cmd->scmd->serial_number); + + atomic_inc(&instance->fw_outstanding); + instance->instancet->fire_cmd(instance, + cmd->frame_phys_addr, + cmd->frame_count-1, instance->reg_set); + } else { + printk(KERN_NOTICE "megasas: %p unexpected cmd on the" + "internal reset defer list while re-issue!!\n", + cmd); + } + } + + if (instance->aen_cmd) { + printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n"); + megasas_return_cmd(instance, instance->aen_cmd); + + instance->aen_cmd = NULL; + } + + /* + * Initiate AEN (Asynchronous Event Notification) + */ + seq_num = instance->last_seq_num; + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + + megasas_register_aen(instance, seq_num, class_locale.word); +} + +/** + * Move the internal reset pending commands to a deferred queue. + * + * We move the commands pending at internal reset time to a + * pending queue. This queue would be flushed after successful + * completion of the internal reset sequence. if the internal reset + * did not complete in time, the kernel reset handler would flush + * these commands. 
+ **/ +static void +megasas_internal_reset_defer_cmds(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + int i; + u32 max_cmd = instance->max_fw_cmds; + u32 defer_index; + unsigned long flags; + + defer_index = 0; + spin_lock_irqsave(&instance->cmd_pool_lock, flags); + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + if (cmd->sync_cmd == 1 || cmd->scmd) { + printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p" + "on the defer queue as internal\n", + defer_index, cmd, cmd->sync_cmd, cmd->scmd); + + if (!list_empty(&cmd->list)) { + printk(KERN_NOTICE "megaraid_sas: ERROR while" + " moving this cmd:%p, %d %p, it was" + "discovered on some list?\n", + cmd, cmd->sync_cmd, cmd->scmd); + + list_del_init(&cmd->list); + } + defer_index++; + list_add_tail(&cmd->list, + &instance->internal_reset_pending_q); + } + } + spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); +} + + +static void +process_fw_state_change_wq(struct work_struct *work) +{ + struct megasas_instance *instance = + container_of(work, struct megasas_instance, work_init); + u32 wait; + unsigned long flags; + + if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) { + printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n", + instance->adprecovery); + return ; + } + + if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) { + printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault" + "state, restarting it...\n"); + + instance->instancet->disable_intr(instance->reg_set); + atomic_set(&instance->fw_outstanding, 0); + + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset(instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0 ); + + printk(KERN_NOTICE "megaraid_sas: FW restarted successfully," + "initiating next stage...\n"); + + printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine," + "state 2 starting...\n"); + + /*waitting for about 20 second before start the second init*/ + for (wait = 0; wait < 30; wait++) { + msleep(1000); + } + + if (megasas_transition_to_ready(instance)) { + printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); + + megaraid_sas_kill_hba(instance); + instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; + return ; + } + + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || + (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR) + ) { + *instance->consumer = *instance->producer; + } else { + *instance->consumer = 0; + *instance->producer = 0; + } + + megasas_issue_init_mfi(instance); + + spin_lock_irqsave(&instance->hba_lock, flags); + instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + spin_unlock_irqrestore(&instance->hba_lock, flags); + instance->instancet->enable_intr(instance->reg_set); + + megasas_issue_pending_cmds_again(instance); + instance->issuepend_done = 1; + } + return ; +} + /** * megasas_deplete_reply_queue - Processes all completed commands * @instance: Adapter soft state * @alt_status: Alternate status to be returned to * SCSI mid-layer instead of the status * returned by the FW + * Note: this must be called with hba lock held */ static int -megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) +megasas_deplete_reply_queue(struct megasas_instance *instance, + u8 alt_status) { - /* - * Check if it is our interrupt - * Clear the interrupt - */ - if(instance->instancet->clear_intr(instance->reg_set)) + u32 mfiStatus; + u32 fw_state; + + if ((mfiStatus = instance->instancet->check_reset(instance, + 
instance->reg_set)) == 1) { + return IRQ_HANDLED; + } + + if ((mfiStatus = instance->instancet->clear_intr( + instance->reg_set) + ) == 0) { return IRQ_NONE; + } + + instance->mfiStatus = mfiStatus; + + if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) { + fw_state = instance->instancet->read_fw_status_reg( + instance->reg_set) & MFI_STATE_MASK; + + if (fw_state != MFI_STATE_FAULT) { + printk(KERN_NOTICE "megaraid_sas: fw state:%x\n", + fw_state); + } + + if ((fw_state == MFI_STATE_FAULT) && + (instance->disableOnlineCtrlReset == 0)) { + printk(KERN_NOTICE "megaraid_sas: wait adp restart\n"); + + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS1064R) || + (instance->pdev->device == + PCI_DEVICE_ID_DELL_PERC5) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_VERDE_ZCR)) { + + *instance->consumer = + MEGASAS_ADPRESET_INPROG_SIGN; + } + + + instance->instancet->disable_intr(instance->reg_set); + instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + instance->issuepend_done = 0; + + atomic_set(&instance->fw_outstanding, 0); + megasas_internal_reset_defer_cmds(instance); + + printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n", + fw_state, instance->adprecovery); + + schedule_work(&instance->work_init); + return IRQ_HANDLED; + + } else { + printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n", + fw_state, instance->disableOnlineCtrlReset); + } + } - if (instance->hw_crit_error) - goto out_done; - /* - * Schedule the tasklet for cmd completion - */ tasklet_schedule(&instance->isr_tasklet); -out_done: return IRQ_HANDLED; } - /** * megasas_isr - isr entry point */ static irqreturn_t megasas_isr(int irq, void *devp) { - return megasas_deplete_reply_queue((struct megasas_instance *)devp, - DID_OK); + struct megasas_instance *instance; + unsigned long flags; + irqreturn_t rc; + + if (atomic_read( + &(((struct megasas_instance *)devp)->fw_reset_no_pci_access))) + return IRQ_HANDLED; + + instance = (struct megasas_instance *)devp; + + spin_lock_irqsave(&instance->hba_lock, flags); + rc = megasas_deplete_reply_queue(instance, DID_OK); + spin_unlock_irqrestore(&instance->hba_lock, flags); + + return rc; } /** @@ -1972,7 +2531,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) "in %d secs\n", fw_state, max_wait); return -ENODEV; } - }; + } printk(KERN_INFO "megasas: FW now in Ready state\n"); return 0; @@ -2054,6 +2613,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) */ sgl_sz = sge_sz * instance->max_num_sge; frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; + frame_count = 15; /* * We need one extra frame for the MFI command @@ -2201,6 +2761,7 @@ static int megasas_alloc_cmds(struct megasas_instance *instance) cmd = instance->cmd_list[i]; memset(cmd, 0, sizeof(struct megasas_cmd)); cmd->index = i; + cmd->scmd = NULL; cmd->instance = instance; list_add_tail(&cmd->list, &instance->cmd_pool); @@ -2368,7 +2929,7 @@ megasas_get_ld_list(struct megasas_instance *instance) /* the following function will get the instance PD LIST */ - if ((ret == 0) && (ci->ldCount < MAX_LOGICAL_DRIVES)) { + if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { @@ -2682,6 +3243,21 @@ static int megasas_init_mfi(struct megasas_instance *instance) if (megasas_issue_init_mfi(instance)) goto fail_fw_init; + instance->fw_support_ieee = 0; + instance->fw_support_ieee = + (instance->instancet->read_fw_status_reg(reg_set) & + 0x04000000); + + 
printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d", + instance->fw_support_ieee); + + if (instance->fw_support_ieee) + instance->flag_ieee = 1; + + /** for passthrough + * the following function will get the PD LIST. + */ + memset(instance->pd_list, 0 , (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); megasas_get_pd_list(instance); @@ -2708,6 +3284,8 @@ static int megasas_init_mfi(struct megasas_instance *instance) max_sectors_2 = ctrl_info->max_request_size; tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); + instance->disableOnlineCtrlReset = + ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; } instance->max_sectors_per_req = instance->max_num_sge * @@ -2929,6 +3507,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; + instance->last_seq_num = seq_num; dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; dcmd->mbox.w[0] = seq_num; @@ -3097,6 +3676,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance = (struct megasas_instance *)host->hostdata; memset(instance, 0, sizeof(*instance)); + atomic_set( &instance->fw_reset_no_pci_access, 0 ); instance->producer = pci_alloc_consistent(pdev, sizeof(u32), &instance->producer_h); @@ -3114,6 +3694,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) megasas_poll_wait_aen = 0; instance->flag_ieee = 0; instance->ev = NULL; + instance->issuepend_done = 1; + instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + megasas_poll_wait_aen = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct @@ -3130,6 +3713,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) * Initialize locks and queues */ INIT_LIST_HEAD(&instance->cmd_pool); + INIT_LIST_HEAD(&instance->internal_reset_pending_q); atomic_set(&instance->fw_outstanding,0); @@ -3137,7 +3721,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) init_waitqueue_head(&instance->abort_cmd_wait_q); spin_lock_init(&instance->cmd_pool_lock); - spin_lock_init(&instance->fire_lock); + spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); spin_lock_init(&poll_aen_lock); @@ -3162,6 +3746,9 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->flag = 0; instance->unload = 1; instance->last_time = 0; + instance->disableOnlineCtrlReset = 1; + + INIT_WORK(&instance->work_init, process_fw_state_change_wq); /* * Initialize MFI Firmware @@ -3253,6 +3840,9 @@ static void megasas_flush_cache(struct megasas_instance *instance) struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) + return; + cmd = megasas_get_cmd(instance); if (!cmd) @@ -3290,6 +3880,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) + return; + cmd = megasas_get_cmd(instance); if (!cmd) @@ -3781,6 +4374,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) struct megasas_iocpacket *ioc; struct megasas_instance *instance; int error; + int i; + unsigned long flags; + u32 wait_time = MEGASAS_RESET_WAIT_TIME; ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) @@ -3797,8 +4393,8 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) goto out_kfree_ioc; } - if (instance->hw_crit_error == 1) { 
- printk(KERN_DEBUG "Controller in Crit ERROR\n"); + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + printk(KERN_ERR "Controller in crit error\n"); error = -ENODEV; goto out_kfree_ioc; } @@ -3815,6 +4411,35 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) error = -ERESTARTSYS; goto out_kfree_ioc; } + + for (i = 0; i < wait_time; i++) { + + spin_lock_irqsave(&instance->hba_lock, flags); + if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { + spin_unlock_irqrestore(&instance->hba_lock, flags); + break; + } + spin_unlock_irqrestore(&instance->hba_lock, flags); + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + printk(KERN_NOTICE "megasas: waiting" + "for controller reset to finish\n"); + } + + msleep(1000); + } + + spin_lock_irqsave(&instance->hba_lock, flags); + if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + spin_unlock_irqrestore(&instance->hba_lock, flags); + + printk(KERN_ERR "megaraid_sas: timed out while" + "waiting for HBA to recover\n"); + error = -ENODEV; + goto out_kfree_ioc; + } + spin_unlock_irqrestore(&instance->hba_lock, flags); + error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); up(&instance->ioctl_sem); @@ -3828,6 +4453,9 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) struct megasas_instance *instance; struct megasas_aen aen; int error; + int i; + unsigned long flags; + u32 wait_time = MEGASAS_RESET_WAIT_TIME; if (file->private_data != file) { printk(KERN_DEBUG "megasas: fasync_helper was not " @@ -3843,14 +4471,42 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) if (!instance) return -ENODEV; - if (instance->hw_crit_error == 1) { - error = -ENODEV; + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + return -ENODEV; } if (instance->unload == 1) { return -ENODEV; } + for (i = 0; i < wait_time; i++) { + + spin_lock_irqsave(&instance->hba_lock, flags); + if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { + spin_unlock_irqrestore(&instance->hba_lock, + flags); + break; + } + + spin_unlock_irqrestore(&instance->hba_lock, flags); + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + printk(KERN_NOTICE "megasas: waiting for" + "controller reset to finish\n"); + } + + msleep(1000); + } + + spin_lock_irqsave(&instance->hba_lock, flags); + if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + spin_unlock_irqrestore(&instance->hba_lock, flags); + printk(KERN_ERR "megaraid_sas: timed out while waiting" + "for HBA to recover.\n"); + return -ENODEV; + } + spin_unlock_irqrestore(&instance->hba_lock, flags); + mutex_lock(&instance->aen_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 9d8b6bf605aa..16a4f68a34b0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -60,6 +60,7 @@ #define MFI_STATE_READY 0xB0000000 #define MFI_STATE_OPERATIONAL 0xC0000000 #define MFI_STATE_FAULT 0xF0000000 +#define MFI_RESET_REQUIRED 0x00000001 #define MEGAMFI_FRAME_SIZE 64 @@ -73,6 +74,12 @@ * HOTPLUG : Resume from Hotplug * MFI_STOP_ADP : Send signal to FW to stop processing */ +#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */ +#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */ +#define DIAG_WRITE_ENABLE (0x00000080) +#define DIAG_RESET_ADAPTER (0x00000004) + +#define MFI_ADP_RESET 0x00000040 #define MFI_INIT_ABORT 0x00000001 #define MFI_INIT_READY 0x00000002 #define MFI_INIT_MFIMODE 0x00000004 @@ -402,8 
+409,40 @@ struct megasas_ctrl_prop { u16 ecc_bucket_leak_rate; u8 restore_hotspare_on_insertion; u8 expose_encl_devices; - u8 reserved[38]; + u8 maintainPdFailHistory; + u8 disallowHostRequestReordering; + u8 abortCCOnError; + u8 loadBalanceMode; + u8 disableAutoDetectBackplane; + + u8 snapVDSpace; + + /* + * Add properties that can be controlled by + * a bit in the following structure. + */ + struct { + u32 copyBackDisabled : 1; + u32 SMARTerEnabled : 1; + u32 prCorrectUnconfiguredAreas : 1; + u32 useFdeOnly : 1; + u32 disableNCQ : 1; + u32 SSDSMARTerEnabled : 1; + u32 SSDPatrolReadEnabled : 1; + u32 enableSpinDownUnconfigured : 1; + u32 autoEnhancedImport : 1; + u32 enableSecretKeyControl : 1; + u32 disableOnlineCtrlReset : 1; + u32 allowBootWithPinnedCache : 1; + u32 disableSpinDownHS : 1; + u32 enableJBOD : 1; + u32 reserved :18; + } OnOffProperties; + u8 autoSnapVDSpace; + u8 viewSpace; + u16 spinDownTime; + u8 reserved[24]; } __packed; /* @@ -704,6 +743,12 @@ struct megasas_ctrl_info { */ #define IS_DMA64 (sizeof(dma_addr_t) == 8) +#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001 + +#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001 +#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002 +#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004 + #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 60 #define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10) @@ -714,6 +759,9 @@ struct megasas_ctrl_info { #define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000 #define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001) +#define MFI_1068_PCSR_OFFSET 0x84 +#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 +#define MFI_1068_FW_READY 0xDDDD0000 /* * register set for both 1068 and 1078 controllers * structure extended for 1078 registers @@ -755,8 +803,10 @@ struct megasas_register_set { u32 inbound_high_queue_port ; /*00C4h*/ u32 reserved_5; /*00C8h*/ - u32 index_registers[820]; /*00CCh*/ - + u32 res_6[11]; /*CCh*/ + u32 host_diag; + u32 seq_offset; + u32 index_registers[807]; /*00CCh*/ } __attribute__ ((packed)); struct megasas_sge32 { @@ -1226,11 +1276,12 @@ struct megasas_instance { struct megasas_cmd **cmd_list; struct list_head cmd_pool; + /* used to sync fire the cmd to fw */ spinlock_t cmd_pool_lock; + /* used to sync fire the cmd to fw */ + spinlock_t hba_lock; /* used to synch producer, consumer ptrs in dpc */ spinlock_t completion_lock; - /* used to sync fire the cmd to fw */ - spinlock_t fire_lock; struct dma_pool *frame_dma_pool; struct dma_pool *sense_dma_pool; @@ -1247,19 +1298,36 @@ struct megasas_instance { struct pci_dev *pdev; u32 unique_id; + u32 fw_support_ieee; atomic_t fw_outstanding; - u32 hw_crit_error; + atomic_t fw_reset_no_pci_access; struct megasas_instance_template *instancet; struct tasklet_struct isr_tasklet; + struct work_struct work_init; u8 flag; u8 unload; u8 flag_ieee; + u8 issuepend_done; + u8 disableOnlineCtrlReset; + u8 adprecovery; unsigned long last_time; + u32 mfiStatus; + u32 last_seq_num; struct timer_list io_completion_timer; + struct list_head internal_reset_pending_q; +}; + +enum { + MEGASAS_HBA_OPERATIONAL = 0, + MEGASAS_ADPRESET_SM_INFAULT = 1, + MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2, + MEGASAS_ADPRESET_SM_OPERATIONAL = 3, + MEGASAS_HW_CRITICAL_ERROR = 4, + MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD, }; struct megasas_instance_template { @@ -1272,6 +1340,10 @@ struct megasas_instance_template { int (*clear_intr)(struct megasas_register_set __iomem *); u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *); + int 
(*adp_reset)(struct megasas_instance *, \ + struct megasas_register_set __iomem *); + int (*check_reset)(struct megasas_instance *, \ + struct megasas_register_set __iomem *); }; #define MEGASAS_IS_LOGICAL(scp) \ @@ -1291,7 +1363,9 @@ struct megasas_cmd { u32 index; u8 sync_cmd; u8 cmd_status; - u16 abort_aen; + u8 abort_aen; + u8 retry_for_fw_reset; + struct list_head list; struct scsi_cmnd *scmd; -- cgit v1.2.3 From 56dd2c0691a5a387b7b05835fe547dc6fade9407 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Fri, 1 Oct 2010 13:55:47 -0700 Subject: [SCSI] libsas: Don't issue commands to devices that have been hot-removed sd will get hung up issuing commands to flush write cache if a SAS device behind the expander is unplugged without warning. Change libsas to reject commands to domain devices that have already gone away. [maciej.trela@intel.com: removed setting ->gone in sas_deform_port() to permit sync cache commands at module removal] Signed-off-by: Darrick J. Wong Tested-by: Haipao Fan Signed-off-by: Maciej Trela Signed-off-by: Dan Williams Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_ata.c | 4 ++++ drivers/scsi/libsas/sas_expander.c | 3 +++ drivers/scsi/libsas/sas_scsi_host.c | 7 +++++++ include/scsi/libsas.h | 1 + 4 files changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index ddbade7beec9..e1a395b438ee 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -162,6 +162,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) unsigned int xfer = 0; unsigned int si; + /* If the device fell off, no sense in issuing commands */ + if (dev->gone) + return AC_ERR_SYSTEM; + task = sas_alloc_task(GFP_ATOMIC); if (!task) return AC_ERR_SYSTEM; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 83dd5070a15c..61d81f858a5a 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1724,6 +1724,7 @@ static void sas_unregister_ex_tree(struct domain_device *dev) struct domain_device *child, *n; list_for_each_entry_safe(child, n, &ex->children, siblings) { + child->gone = 1; if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) sas_unregister_ex_tree(child); @@ -1744,6 +1745,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, &ex_dev->children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { + child->gone = 1; if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) sas_unregister_ex_tree(child); @@ -1752,6 +1754,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, break; } } + parent->gone = 1; sas_disable_routing(parent, phy->attached_sas_addr); } memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index f0cfba9a1fc8..1787bd2eb4a6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -228,6 +228,13 @@ int sas_queuecommand(struct scsi_cmnd *cmd, goto out; } + /* If the device fell off, no sense in issuing commands */ + if (dev->gone) { + cmd->result = DID_BAD_TARGET << 16; + scsi_done(cmd); + goto out; + } + res = -ENOMEM; task = sas_create_task(cmd, dev, GFP_ATOMIC); if (!task) diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index d06e13be717b..3dec1949f69c 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -205,6 +205,7 @@ struct domain_device { }; 
void *lldd_dev; + int gone; }; struct sas_discovery_event { -- cgit v1.2.3 From 3e51d3c924aea8a1f1372e6c615b0a37b528121d Mon Sep 17 00:00:00 2001 From: Kai Makisara Date: Sat, 9 Oct 2010 00:17:56 +0300 Subject: [SCSI] st: add MTWEOFI to write filemarks without flushing drive buffer This patch adds a new MTIOCTOP operation MTWEOFI that writes filemarks with immediate bit set. This means that the drive does not flush its buffer and the next file can be started immediately. This speeds up writing in applications that have to write multiple small files. Signed-off-by: Kai Makisara Signed-off-by: James Bottomley --- Documentation/scsi/st.txt | 15 ++++++++++++++- drivers/scsi/st.c | 15 +++++++++------ include/linux/mtio.h | 1 + 3 files changed, 24 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt index 40752602c050..691ca292c24d 100644 --- a/Documentation/scsi/st.txt +++ b/Documentation/scsi/st.txt @@ -2,7 +2,7 @@ This file contains brief information about the SCSI tape driver. The driver is currently maintained by Kai Mäkisara (email Kai.Makisara@kolumbus.fi) -Last modified: Sun Feb 24 21:59:07 2008 by kai.makisara +Last modified: Sun Aug 29 18:25:47 2010 by kai.makisara BASICS @@ -85,6 +85,17 @@ writing and the last operation has been a write. Two filemarks can be optionally written. In both cases end of data is signified by returning zero bytes for two consecutive reads. +Writing filemarks without the immediate bit set in the SCSI command block acts +as a synchronization point, i.e., all remaining data form the drive buffers is +written to tape before the command returns. This makes sure that write errors +are caught at that point, but this takes time. In some applications, several +consecutive files must be written fast. The MTWEOFI operation can be used to +write the filemarks without flushing the drive buffer. Writing filemark at +close() is always flushing the drive buffers. However, if the previous +operation is MTWEOFI, close() does not write a filemark. This can be used if +the program wants to close/open the tape device between files and wants to +skip waiting. + If rewind, offline, bsf, or seek is done and previous tape operation was write, a filemark is written before moving tape. @@ -301,6 +312,8 @@ MTBSR Space backward over count records. MTFSS Space forward over count setmarks. MTBSS Space backward over count setmarks. MTWEOF Write count filemarks. +MTWEOFI Write count filemarks with immediate bit set (i.e., does not + wait until data is on tape) MTWSM Write count setmarks. MTREW Rewind tape. MTOFFL Set device off line (often rewind plus eject). diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 24211d0efa6d..9e2c3a72ff4d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -9,7 +9,7 @@ Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, Michael Schaefer, J"org Weule, and Eric Youngdale. 
- Copyright 1992 - 2008 Kai Makisara + Copyright 1992 - 2010 Kai Makisara email Kai.Makisara@kolumbus.fi Some small formal changes - aeb, 950809 @@ -17,7 +17,7 @@ Last modified: 18-JAN-1998 Richard Gooch Devfs support */ -static const char *verstr = "20081215"; +static const char *verstr = "20100829"; #include @@ -2696,18 +2696,21 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon } break; case MTWEOF: + case MTWEOFI: case MTWSM: if (STp->write_prot) return (-EACCES); cmd[0] = WRITE_FILEMARKS; if (cmd_in == MTWSM) cmd[1] = 2; + if (cmd_in == MTWEOFI) + cmd[1] |= 1; cmd[2] = (arg >> 16); cmd[3] = (arg >> 8); cmd[4] = arg; timeout = STp->device->request_queue->rq_timeout; DEBC( - if (cmd_in == MTWEOF) + if (cmd_in != MTWSM) printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name, cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); else @@ -2883,8 +2886,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon else if (chg_eof) STps->eof = ST_NOEOF; - if (cmd_in == MTWEOF) - STps->rw = ST_IDLE; + if (cmd_in == MTWEOF || cmd_in == MTWEOFI) + STps->rw = ST_IDLE; /* prevent automatic WEOF at close */ } else { /* SCSI command was not completely successful. Don't return from this block without releasing the SCSI command block! */ struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; @@ -2901,7 +2904,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon else undone = 0; - if (cmd_in == MTWEOF && + if ((cmd_in == MTWEOF || cmd_in == MTWEOFI) && cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { if (cmdstatp->sense_hdr.sense_key == NO_SENSE || diff --git a/include/linux/mtio.h b/include/linux/mtio.h index ef01d6aa5934..8f825756c459 100644 --- a/include/linux/mtio.h +++ b/include/linux/mtio.h @@ -63,6 +63,7 @@ struct mtop { #define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ #define MTSETPART 33 /* Change the active tape partition */ #define MTMKPART 34 /* Format the tape with one or two partitions */ +#define MTWEOFI 35 /* write an end-of-file record (mark) in immediate mode */ /* structure for MTIOCGET - mag tape get status command */ -- cgit v1.2.3 From 526f7c7950bbf1271e59177d70d74438c2ef96de Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 28 Sep 2010 14:48:47 -0400 Subject: [SCSI] sd: Fix overflow with big physical blocks The hw_sector_size variable could overflow if a device reported huge physical blocks. Switch to the more accurate physical_block_size terminology and make sure we use an unsigned int to match the range permitted by READ CAPACITY(16). Signed-off-by: Martin K. 
Petersen Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 13 +++++++------ drivers/scsi/sd.h | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 50f1fe605303..08b60dda8bcf 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1554,7 +1554,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, } /* Logical blocks per physical block exponent */ - sdkp->hw_sector_size = (1 << (buffer[13] & 0xf)) * sector_size; + sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size; /* Lowest aligned logical block */ alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size; @@ -1567,7 +1567,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, struct request_queue *q = sdp->request_queue; sdkp->thin_provisioning = 1; - q->limits.discard_granularity = sdkp->hw_sector_size; + q->limits.discard_granularity = sdkp->physical_block_size; q->limits.max_discard_sectors = 0xffffffff; if (buffer[14] & 0x40) /* TPRZ */ @@ -1635,7 +1635,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, } sdkp->capacity = lba + 1; - sdkp->hw_sector_size = sector_size; + sdkp->physical_block_size = sector_size; return sector_size; } @@ -1756,10 +1756,10 @@ got_data: (unsigned long long)sdkp->capacity, sector_size, cap_str_10, cap_str_2); - if (sdkp->hw_sector_size != sector_size) + if (sdkp->physical_block_size != sector_size) sd_printk(KERN_NOTICE, sdkp, "%u-byte physical blocks\n", - sdkp->hw_sector_size); + sdkp->physical_block_size); } } @@ -1773,7 +1773,8 @@ got_data: else if (sector_size == 256) sdkp->capacity >>= 1; - blk_queue_physical_block_size(sdp->request_queue, sdkp->hw_sector_size); + blk_queue_physical_block_size(sdp->request_queue, + sdkp->physical_block_size); sdkp->device->sector_size = sector_size; } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index a40730ee465c..55488faf0815 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -51,7 +51,7 @@ struct scsi_disk { atomic_t openers; sector_t capacity; /* size in 512-byte sectors */ u32 index; - unsigned short hw_sector_size; + unsigned int physical_block_size; u8 media_present; u8 write_prot; u8 protection_type;/* Data Integrity Field */ -- cgit v1.2.3 From 2bc72c91ea7e104b0e40151543d135b933a12e93 Mon Sep 17 00:00:00 2001 From: Jack Wang Date: Wed, 6 Oct 2010 16:05:35 +0800 Subject: [SCSI] libsas: fix bug for vacant phy This patch fix bug reported by Chuck. And this new version incorporate comments from Hannes. Please consider to include it into mainline. 
Signed-off-by: Jack Wang Signed-off-by: Lindar Liu Tested-by: Chuck Tuffli Signed-off-by: James Bottomley --- drivers/scsi/libsas/sas_expander.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 61d81f858a5a..505ffe358293 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -175,10 +175,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, switch (resp->result) { case SMP_RESP_PHY_VACANT: phy->phy_state = PHY_VACANT; - return; + break; default: phy->phy_state = PHY_NOT_PRESENT; - return; + break; case SMP_RESP_FUNC_ACC: phy->phy_state = PHY_EMPTY; /* do not know yet */ break; @@ -209,7 +209,10 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, phy->phy->negotiated_linkrate = phy->linkrate; if (!rediscover) - sas_phy_add(phy->phy); + if (sas_phy_add(phy->phy)) { + sas_phy_free(phy->phy); + return; + } SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n", SAS_ADDR(dev->sas_addr), phy->phy_id, -- cgit v1.2.3 From 91446f060b9ae27875d38b00dc3c6394b8321bff Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Wed, 6 Oct 2010 23:45:21 +0530 Subject: [SCSI] be2iscsi: More time for FW This patch provides more time for the FW to respond. This became necessary in boot situations Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_cmds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 7c7537335c88..ad246369d373 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -335,7 +335,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) if (ready) break; - if (cnt > 6000000) { + if (cnt > 12000000) { dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n"); return -EBUSY; } -- cgit v1.2.3 From af4c609c0d645f196b570c58dd4ee878ff3afd24 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Wed, 6 Oct 2010 23:46:03 +0530 Subject: [SCSI] be2iscsi: Remove premature free of cid Remove unnecessary beiscsi_put_cid that was freeing up the cid while in use Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_iscsi.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 7d4d2275573c..8b897c80ec7a 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -523,7 +523,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + phba->params.cxns_per_ctrl * 2)) { SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); goto free_ep; } @@ -560,7 +559,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed" " status = %d extd_status = %d\n", status, extd_status); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); free_mcc_tag(&phba->ctrl, tag); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); @@ -575,7 +573,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, beiscsi_ep->cid_vld = 1; SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); } - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); pci_free_consistent(phba->ctrl.pdev, 
nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return 0; -- cgit v1.2.3 From 58ff4bd042adf8013c8f70fd03c2c0f8d022e387 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Wed, 6 Oct 2010 23:46:47 +0530 Subject: [SCSI] be2iscsi: SGE Len == 64K Signed-off-by: Jayamohan Kallickal Reviewed-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 8220bde6c04c..75a85aa9e882 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -2040,7 +2040,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, unsigned int num_sg, struct beiscsi_io_task *io_task) { struct iscsi_sge *psgl; - unsigned short sg_len, index; + unsigned int sg_len, index; unsigned int sge_len = 0; unsigned long long addr; struct scatterlist *l_sg; -- cgit v1.2.3
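
The one-line type change above matters because an iSCSI scatter-gather element can be a full 64K: 65536 does not fit in a 16-bit variable, so the old unsigned short sg_len silently wrapped to 0. A minimal standalone sketch of that truncation (an illustration only, not driver code):

#include <stdio.h>

/*
 * Shows why a 16-bit length cannot carry a 64K SGE: converting 65536
 * to unsigned short reduces it modulo 65536 and yields 0, while an
 * unsigned int keeps the full value.
 */
int main(void)
{
	unsigned int sge_len = 64 * 1024;                  /* 65536 */
	unsigned short old_len = (unsigned short)sge_len;  /* wraps to 0 */
	unsigned int new_len = sge_len;                    /* stays 65536 */

	printf("as unsigned short: %u\n", (unsigned)old_len);
	printf("as unsigned int:   %u\n", new_len);
	return 0;
}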