author | Andy Grover <agrover@redhat.com> | 2011-07-19 08:55:10 +0000
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-07-22 09:37:43 +0000
commit | e3d6f909ed803d92a5ac9b4a2c087e0eae9b90d0 (patch)
tree | 2eb65e958a2cc35c896a0e184ec09edcb9076b3b /drivers/target/target_core_device.c
parent | a8c6da90b823fb94ca76ca0df6bb44e6e205dc87 (diff)
target: Core cleanups from AGrover (round 1)
This patch contains the squashed version of a number of cleanups and
minor fixes from Andy's initial series (round 1) for target core this
past spring. The condensed log looks like:
target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes
Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
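The most visible pattern in the hunks below is the error-return cleanup from the log above: bare `return -1` becomes a specific errno, and core_alloc_port() switches from returning NULL to an ERR_PTR-encoded errno that core_dev_export() propagates with IS_ERR()/PTR_ERR(). The following is a minimal, hedged sketch of that idiom in kernel style; the foo_* names are made up for illustration and are not target-core symbols.

```c
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

#define FOO_MAX_PORTS	0xffff

struct foo_port {
	u32 index;
};

static u32 foo_port_count;	/* illustrative counter, not a real symbol */

/*
 * Old style: return NULL (or -1) for every failure.  New style: encode the
 * reason so callers can propagate it unchanged.
 */
static struct foo_port *foo_alloc_port(void)
{
	struct foo_port *port;

	if (foo_port_count == FOO_MAX_PORTS)
		return ERR_PTR(-ENOSPC);	/* port table exhausted */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);	/* allocation failure */

	port->index = foo_port_count++;
	return port;
}

/* Callers hand the encoded errno back instead of inventing -1. */
static int foo_export_port(void)
{
	struct foo_port *port = foo_alloc_port();

	if (IS_ERR(port))
		return PTR_ERR(port);
	/* ... wire the port up ... */
	return 0;
}
```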
Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r-- | drivers/target/target_core_device.c | 440 |
1 files changed, 216 insertions, 224 deletions
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e76ffc5b2079..fd923854505c 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1,7 +1,7 @@ /******************************************************************************* * Filename: target_core_device.c (based on iscsi_target_device.c) * - * This file contains the iSCSI Virtual Device and Disk Transport + * This file contains the TCM Virtual Device and Disk Transport * agnostic related functions. * * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. @@ -54,25 +54,30 @@ static void se_dev_start(struct se_device *dev); static void se_dev_stop(struct se_device *dev); +static struct se_hba *lun0_hba; +static struct se_subsystem_dev *lun0_su_dev; +/* not static, needed by tpg.c */ +struct se_device *g_lun0_dev; + int transport_get_lun_for_cmd( struct se_cmd *se_cmd, u32 unpacked_lun) { struct se_dev_entry *deve; struct se_lun *se_lun = NULL; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; unsigned long flags; int read_only = 0; if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); deve = se_cmd->se_deve = - &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; + &se_sess->se_node_acl->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (se_cmd) { deve->total_cmds++; @@ -95,11 +100,11 @@ int transport_get_lun_for_cmd( se_lun = se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } out: - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); if (!se_lun) { if (read_only) { @@ -107,9 +112,9 @@ out: se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); - return -1; + return -EACCES; } else { /* * Use the se_portal_group->tpg_virt_lun0 to allow for @@ -121,9 +126,9 @@ out: se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); - return -1; + return -ENODEV; } /* * Force WRITE PROTECT for virtual LUN 0 @@ -132,15 +137,15 @@ out: (se_cmd->data_direction != DMA_NONE)) { se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -EACCES; } #if 0 printk("TARGET_CORE[%s]: Using virtual LUN0! 
:-)\n", - CMD_TFO(se_cmd)->get_fabric_name()); + se_cmd->se_tfo->get_fabric_name()); #endif se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } } @@ -151,7 +156,7 @@ out: if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } { @@ -171,10 +176,10 @@ out: */ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); - atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); + atomic_set(&se_cmd->t_task->transport_lun_active, 1); #if 0 printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", - CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); + se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun); #endif spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); @@ -189,35 +194,35 @@ int transport_get_lun_for_tmr( struct se_device *dev = NULL; struct se_dev_entry *deve; struct se_lun *se_lun = NULL; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); deve = se_cmd->se_deve = - &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; + &se_sess->se_node_acl->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; dev = se_lun->lun_se_dev; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ } - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); if (!se_lun) { printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } /* * Determine if the struct se_lun is online. 
@@ -225,7 +230,7 @@ int transport_get_lun_for_tmr( /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } se_tmr->tmr_dev = dev; @@ -263,14 +268,14 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( if (!(lun)) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } port = lun->lun_sep; if (!(port)) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } if (port->sep_rtpi != rtpi) @@ -308,7 +313,7 @@ int core_free_device_list_for_node( if (!deve->se_lun) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } lun = deve->se_lun; @@ -334,8 +339,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; deve->deve_cmds--; spin_unlock_irq(&se_nacl->device_list_lock); - - return; } void core_update_device_list_access( @@ -355,8 +358,6 @@ void core_update_device_list_access( deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; } spin_unlock_irq(&nacl->device_list_lock); - - return; } /* core_update_device_list_for_node(): @@ -408,14 +409,14 @@ int core_update_device_list_for_node( " already set for demo mode -> explict" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EINVAL; } if (deve->se_lun != lun) { printk(KERN_ERR "struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" " -> explict LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EINVAL; } deve->se_lun_acl = lun_acl; trans = 1; @@ -503,8 +504,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) spin_lock_bh(&tpg->acl_node_lock); } spin_unlock_bh(&tpg->acl_node_lock); - - return; } static struct se_port *core_alloc_port(struct se_device *dev) @@ -514,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) port = kzalloc(sizeof(struct se_port), GFP_KERNEL); if (!(port)) { printk(KERN_ERR "Unable to allocate struct se_port\n"); - return NULL; + return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&port->sep_alua_list); INIT_LIST_HEAD(&port->sep_list); @@ -527,7 +526,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) printk(KERN_WARNING "Reached dev->dev_port_count ==" " 0x0000ffff\n"); spin_unlock(&dev->se_port_lock); - return NULL; + return ERR_PTR(-ENOSPC); } again: /* @@ -565,7 +564,7 @@ static void core_export_port( struct se_port *port, struct se_lun *lun) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; spin_lock(&dev->se_port_lock); @@ -578,7 +577,7 @@ static void core_export_port( list_add_tail(&port->sep_list, &dev->dev_sep_list); spin_unlock(&dev->se_port_lock); - if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { + if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" @@ -587,11 +586,11 @@ static void core_export_port( } 
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, - T10_ALUA(su_dev)->default_tg_pt_gp); + su_dev->t10_alua.default_tg_pt_gp); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" " Group: alua/default_tg_pt_gp\n", - TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); + dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); } dev->dev_port_count++; @@ -618,8 +617,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port) list_del(&port->sep_list); dev->dev_port_count--; kfree(port); - - return; } int core_dev_export( @@ -630,8 +627,8 @@ int core_dev_export( struct se_port *port; port = core_alloc_port(dev); - if (!(port)) - return -1; + if (IS_ERR(port)) + return PTR_ERR(port); lun->lun_se_dev = dev; se_dev_start(dev); @@ -668,12 +665,12 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) { struct se_dev_entry *deve; struct se_lun *se_lun; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; + unsigned char *buf = se_cmd->t_task->t_task_buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) + list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list) break; if (!(se_task)) { @@ -692,9 +689,9 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) goto done; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { - deve = &SE_NODE_ACL(se_sess)->device_list[i]; + deve = &se_sess->se_node_acl->device_list[i]; if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) continue; se_lun = deve->se_lun; @@ -711,7 +708,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) offset += 8; cdb_offset += 8; } - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); /* * See SPC3 r07, page 159. 
@@ -755,26 +752,20 @@ void se_release_device_for_hba(struct se_device *dev) core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); - kfree(dev->dev_status_queue_obj); - kfree(dev->dev_queue_obj); kfree(dev); - - return; } void se_release_vpd_for_dev(struct se_device *dev) { struct t10_vpd *vpd, *vpd_tmp; - spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); + spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); list_for_each_entry_safe(vpd, vpd_tmp, - &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { + &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { list_del(&vpd->vpd_list); kfree(vpd); } - spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); - - return; + spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); } /* se_free_virtual_device(): @@ -860,48 +851,48 @@ void se_dev_set_default_attribs( { struct queue_limits *limits = &dev_limits->limits; - DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; - DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; - DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; - DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; - DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; - DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; - DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; - DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; - DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; - DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; - DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; + dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; + dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; + dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; + dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; + dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; + dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; + dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; + dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; + dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; + dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; /* * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK * iblock_create_virtdevice() from struct queue_limits values * if blk_queue_discard()==1 */ - DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; - DEV_ATTRIB(dev)->max_unmap_block_desc_count = - DA_MAX_UNMAP_BLOCK_DESC_COUNT; - DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; - DEV_ATTRIB(dev)->unmap_granularity_alignment = + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = + DA_MAX_UNMAP_BLOCK_DESC_COUNT; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; /* * block_size is based on subsystem plugin dependent requirements. */ - DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; - DEV_ATTRIB(dev)->block_size = limits->logical_block_size; + dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; + dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; /* * max_sectors is based on subsystem plugin dependent requirements. 
*/ - DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; - DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; + dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; /* * Set optimal_sectors from max_sectors, which can be lowered via * configfs. */ - DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; /* * queue_depth is based on subsystem plugin dependent requirements. */ - DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; - DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; + dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; } int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) @@ -909,9 +900,9 @@ int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) if (task_timeout > DA_TASK_TIMEOUT_MAX) { printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); - return -1; + return -EINVAL; } else { - DEV_ATTRIB(dev)->task_timeout = task_timeout; + dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", dev, task_timeout); } @@ -923,9 +914,9 @@ int se_dev_set_max_unmap_lba_count( struct se_device *dev, u32 max_unmap_lba_count) { - DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", - dev, DEV_ATTRIB(dev)->max_unmap_lba_count); + dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); return 0; } @@ -933,9 +924,10 @@ int se_dev_set_max_unmap_block_desc_count( struct se_device *dev, u32 max_unmap_block_desc_count) { - DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = + max_unmap_block_desc_count; printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", - dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); + dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); return 0; } @@ -943,9 +935,9 @@ int se_dev_set_unmap_granularity( struct se_device *dev, u32 unmap_granularity) { - DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", - dev, DEV_ATTRIB(dev)->unmap_granularity); + dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); return 0; } @@ -953,9 +945,9 @@ int se_dev_set_unmap_granularity_alignment( struct se_device *dev, u32 unmap_granularity_alignment) { - DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", - dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); + dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); return 0; } @@ -963,19 +955,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->dpo_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); - return -1; + if (dev->transport->dpo_emulated == NULL) { + printk(KERN_ERR 
"dev->transport->dpo_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); - return -1; + if (dev->transport->dpo_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_dpo = flag; + dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" - " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); + " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); return 0; } @@ -983,19 +975,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->fua_write_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); - return -1; + if (dev->transport->fua_write_emulated == NULL) { + printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); - return -1; + if (dev->transport->fua_write_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_fua_write = flag; + dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", - dev, DEV_ATTRIB(dev)->emulate_fua_write); + dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); return 0; } @@ -1003,19 +995,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->fua_read_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); - return -1; + if (dev->transport->fua_read_emulated == NULL) { + printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); - return -1; + if (dev->transport->fua_read_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_fua_read = flag; + dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", - dev, DEV_ATTRIB(dev)->emulate_fua_read); + dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); return 0; } @@ -1023,19 +1015,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->write_cache_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); - return -1; + if (dev->transport->write_cache_emulated == NULL) { + printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); - return -1; + if (dev->transport->write_cache_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_write_cache = flag; + 
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", - dev, DEV_ATTRIB(dev)->emulate_write_cache); + dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); return 0; } @@ -1043,7 +1035,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1) && (flag != 2)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { @@ -1051,11 +1043,11 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) " UA_INTRLCK_CTRL while dev_export_obj: %d count" " exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; + dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", - dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); + dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); return 0; } @@ -1064,18 +1056,18 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_tas = flag; + dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", - dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); + dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); return 0; } @@ -1084,18 +1076,18 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { + if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { printk(KERN_ERR "Generic Block Discard not supported\n"); return -ENOSYS; } - DEV_ATTRIB(dev)->emulate_tpu = flag; + dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", dev, flag); return 0; @@ -1105,18 +1097,18 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). 
*/ - if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { + if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { printk(KERN_ERR "Generic Block Discard not supported\n"); return -ENOSYS; } - DEV_ATTRIB(dev)->emulate_tpws = flag; + dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", dev, flag); return 0; @@ -1126,11 +1118,11 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->enforce_pr_isids = flag; + dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, - (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); + (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); return 0; } @@ -1145,35 +1137,35 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if (!(queue_depth)) { printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" "_depth\n", dev); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" " exceeds TCM/SE_Device TCQ: %u\n", dev, queue_depth, - DEV_ATTRIB(dev)->hw_queue_depth); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth); + return -EINVAL; } } else { - if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { - if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { printk(KERN_ERR "dev[%p]: Passed queue_depth:" " %u exceeds TCM/SE_Device MAX" " TCQ: %u\n", dev, queue_depth, - DEV_ATTRIB(dev)->hw_queue_depth); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth); + return -EINVAL; } } } - DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; + dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; if (queue_depth > orig_queue_depth) atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); else if (queue_depth < orig_queue_depth) @@ -1192,46 +1184,46 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) printk(KERN_ERR "dev[%p]: Unable to change SE Device" " max_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if (!(max_sectors)) { printk(KERN_ERR "dev[%p]: Illegal ZERO value for" " max_sectors\n", dev); - return -1; + return -EINVAL; } if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MIN); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " 
greater than TCM/SE_Device max_sectors:" " %u\n", dev, max_sectors, - DEV_ATTRIB(dev)->hw_max_sectors); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + return -EINVAL; } } else { if (!(force) && (max_sectors > - DEV_ATTRIB(dev)->hw_max_sectors)) { + dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors" ": %u, use force=1 to override.\n", dev, - max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); - return -1; + max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + return -EINVAL; } if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than DA_STATUS_MAX_SECTORS_MAX:" " %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MAX); - return -1; + return -EINVAL; } } - DEV_ATTRIB(dev)->max_sectors = max_sectors; + dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; printk("dev[%p]: SE Device max_sectors changed to %u\n", dev, max_sectors); return 0; @@ -1245,19 +1237,19 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" " changed for TCM/pSCSI\n", dev); return -EINVAL; } - if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { + if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" " greater than max_sectors: %u\n", dev, - optimal_sectors, DEV_ATTRIB(dev)->max_sectors); + optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); return -EINVAL; } - DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; + dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", dev, optimal_sectors); return 0; @@ -1269,7 +1261,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" " while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if ((block_size != 512) && @@ -1279,17 +1271,17 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" " for SE device, must be 512, 1024, 2048 or 4096\n", dev, block_size); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" " Physical Device, use for Linux/SCSI to change" " block_size for underlying hardware\n", dev); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->block_size = block_size; + dev->se_sub_dev->se_dev_attrib.block_size = block_size; printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", dev, block_size); return 0; @@ -1323,14 +1315,14 @@ struct se_lun *core_dev_add_lun( return NULL; printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" - " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, - TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); + " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, + 
tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); /* * Update LUN maps for dynamically added initiators when * generate_node_acl is enabled. */ - if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { + if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { struct se_node_acl *acl; spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { @@ -1364,9 +1356,9 @@ int core_dev_del_lun( core_tpg_post_dellun(tpg, lun); printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" - " device object\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, - TPG_TFO(tpg)->get_fabric_name()); + " device object\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name()); return 0; } @@ -1379,9 +1371,9 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" "_PER_TPG-1: %u for Target Portal Group: %hu\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1390,8 +1382,8 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { printk(KERN_ERR "%s Logical Unit Number: %u is not free on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1412,9 +1404,9 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" "_TPG-1: %u for Target Portal Group: %hu\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1423,8 +1415,8 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { printk(KERN_ERR "%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1444,7 +1436,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } @@ -1481,8 +1473,8 @@ int core_dev_add_initiator_node_lun_acl( if (!(lun)) { printk(KERN_ERR "%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return -EINVAL; } @@ -1507,8 +1499,8 @@ int core_dev_add_initiator_node_lun_acl( 
spin_unlock(&lun->lun_acl_lock); printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " - " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, + " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", lacl->initiatorname); /* @@ -1547,8 +1539,8 @@ int core_dev_del_initiator_node_lun_acl( printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" " InitiatorNode: %s Mapped LUN: %u\n", - TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->initiatorname, lacl->mapped_lun); return 0; @@ -1559,9 +1551,9 @@ void core_dev_free_initiator_node_lun_acl( struct se_lun_acl *lacl) { printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" - " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), - TPG_TFO(tpg)->get_fabric_name(), + " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), + tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun); kfree(lacl); @@ -1580,7 +1572,7 @@ int core_dev_setup_virtual_lun0(void) if (IS_ERR(hba)) return PTR_ERR(hba); - se_global->g_lun0_hba = hba; + lun0_hba = hba; t = hba->transport; se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); @@ -1590,17 +1582,17 @@ int core_dev_setup_virtual_lun0(void) ret = -ENOMEM; goto out; } - INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); - INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); - INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); - spin_lock_init(&se_dev->t10_reservation.registration_lock); - spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); + INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); + spin_lock_init(&se_dev->t10_pr.registration_lock); + spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); spin_lock_init(&se_dev->se_dev_lock); - se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; se_dev->t10_wwn.t10_sub_dev = se_dev; se_dev->t10_alua.t10_sub_dev = se_dev; se_dev->se_dev_attrib.da_sub_dev = se_dev; @@ -1613,27 +1605,27 @@ int core_dev_setup_virtual_lun0(void) ret = -ENOMEM; goto out; } - se_global->g_lun0_su_dev = se_dev; + lun0_su_dev = se_dev; memset(buf, 0, 16); sprintf(buf, "rd_pages=8"); t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); - if (!(dev) || IS_ERR(dev)) { - ret = -ENOMEM; + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); goto out; } se_dev->se_dev_ptr = dev; - se_global->g_lun0_dev = dev; + g_lun0_dev = dev; return 0; out: - se_global->g_lun0_su_dev = NULL; + lun0_su_dev = NULL; kfree(se_dev); - if (se_global->g_lun0_hba) { - core_delete_hba(se_global->g_lun0_hba); - se_global->g_lun0_hba = NULL; + if (lun0_hba) { + core_delete_hba(lun0_hba); + lun0_hba = NULL; } return ret; } @@ -1641,14 +1633,14 @@ out: void core_dev_release_virtual_lun0(void) { - struct se_hba *hba = se_global->g_lun0_hba; - struct se_subsystem_dev *su_dev = 
se_global->g_lun0_su_dev; + struct se_hba *hba = lun0_hba; + struct se_subsystem_dev *su_dev = lun0_su_dev; if (!(hba)) return; - if (se_global->g_lun0_dev) - se_free_virtual_device(se_global->g_lun0_dev, hba); + if (g_lun0_dev) + se_free_virtual_device(g_lun0_dev, hba); kfree(su_dev); core_delete_hba(hba); |
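The other recurring change in these hunks ("Remove #defines that just perform indirection", "Remove struct se_global") replaces accessor macros such as DEV_ATTRIB(dev), TRANSPORT(dev) and TPG_TFO(tpg) with the member chains they expanded to. A hedged before/after sketch of that style of cleanup, using made-up foo_* types rather than the real structures from target_core_base.h:

```c
/* Illustrative layout only; the real types live in target_core_base.h. */
struct foo_attrib {
	int block_size;
};

struct foo_subsystem_dev {
	struct foo_attrib attrib;
};

struct foo_device {
	struct foo_subsystem_dev *sub_dev;
};

/* Before: an indirection macro hides where the field actually lives. */
#define FOO_ATTRIB(d)	(&(d)->sub_dev->attrib)

static void foo_set_block_size_old(struct foo_device *dev, int size)
{
	FOO_ATTRIB(dev)->block_size = size;
}

/* After: spell out the member chain so readers see the real structure. */
static void foo_set_block_size_new(struct foo_device *dev, int size)
{
	dev->sub_dev->attrib.block_size = size;
}
```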