From: Andy Grover
Date: Tue, 19 Jul 2011 10:26:37 +0000 (+0000)
Subject: target: More core cleanups from AGrover (round 2)
X-Git-Tag: upstream/snapshot3+hdmi~9613^2~31
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5951146dea1ac8ff2f177477c907084d63913cad;p=platform%2Fadaptation%2Frenesas_rcar%2Frenesas_kernel.git

target: More core cleanups from AGrover (round 2)

This patch contains the squashed version of the second round of target core
cleanups and simplifications from Andy and Co. It also contains a handful of
fixes to address bugs in the original series, plus other minor cleanups.

Here is the condensed shortlog:

target: Remove unneeded casts to void*
target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun
target: Make t_task a member of se_cmd, not a pointer
target: Handle functions returning "-2"
target: Use cmd->se_dev over cmd->se_lun->lun_se_dev
target: Embed qr in struct se_cmd
target: Replace embedded struct se_queue_req with a list_head
target: Rename list_heads that are nodes in struct se_cmd to "*_node"
target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun()
target: Make t_mem_list and t_mem_list_bidi members of t_task
target: Add comment & cleanup transport_map_sg_to_mem()
target: Remove unneeded checks in transport_free_pages()

(Roland: Fix se_queue_req removal leftovers OOPs)
(nab: Fix transport_lookup_tmr_lun failure case)
(nab: Fix list_empty(&cmd->t_task.t_mem_bidi_list) inversion bugs)

Signed-off-by: Andy Grover
Signed-off-by: Roland Dreier
Signed-off-by: Nicholas Bellinger
---
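Two of the shortlog items above change calling conventions rather than just names: t_task becomes an embedded member of struct se_cmd instead of a separately allocated pointer, and functions that used to return bare -1/-2 now return -ENOMEM/-EINVAL. Below is a minimal user-space sketch of both patterns; it assumes nothing about the real target-core API, and se_cmd_like, t_task_like, and allocate_tasks_like are hypothetical stand-in names.

/* Minimal user-space sketch (not kernel code). */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct t_task_like {
	unsigned char t_task_cdb[16];
	int t_tasks_bidi;
};

struct se_cmd_like {
	/* before: struct t_task_like *t_task;  (separate allocation) */
	struct t_task_like t_task;	/* after: embedded member */
	unsigned int data_length;
};

/* Return 0 or a negative errno, mirroring the -1/-2 to
 * -ENOMEM/-EINVAL conversion seen in tcm_loop_new_cmd_map() below. */
static int allocate_tasks_like(struct se_cmd_like *cmd,
			       const unsigned char *cdb)
{
	if (!cdb)
		return -EINVAL;		/* was "return -2" */
	memcpy(cmd->t_task.t_task_cdb, cdb, sizeof(cmd->t_task.t_task_cdb));
	return 0;
}

int main(void)
{
	struct se_cmd_like cmd = { .data_length = 512 };
	unsigned char cdb[16] = { 0x12 };	/* INQUIRY */
	int ret = allocate_tasks_like(&cmd, cdb);

	if (ret == -ENOMEM)
		fprintf(stderr, "out of resources\n");
	else if (ret == -EINVAL)
		fprintf(stderr, "reservation conflict path\n");
	else
		printf("cdb[0]=0x%02x bidi=%d\n",
		       cmd.t_task.t_task_cdb[0], cmd.t_task.t_tasks_bidi);
	return 0;
}

Embedding the member removes an allocation, a free, and a NULL check per command, which is why the diff can also delete the cmd->t_task NULL guard in core_tmr_lun_reset().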
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 2f19e19..eeb7ee7 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -118,17 +118,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd( * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi */ if (scsi_bidi_cmnd(sc)) - se_cmd->t_task->t_tasks_bidi = 1; + se_cmd->t_task.t_tasks_bidi = 1; /* * Locate the struct se_lun pointer and attach it to struct se_cmd */ - if (transport_get_lun_for_cmd(se_cmd, tl_cmd->sc->device->lun) < 0) { + if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) { kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); set_host_byte(sc, DID_NO_CONNECT); return NULL; } - transport_device_setup_cmd(se_cmd); return se_cmd; } @@ -143,17 +142,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; - void *mem_ptr, *mem_bidi_ptr = NULL; - u32 sg_no_bidi = 0; + struct scatterlist *sgl_bidi = NULL; + u32 sgl_bidi_count = 0; int ret; /* * Allocate the necessary tasks to complete the received CDB+data */ - ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd); - if (ret == -1) { + ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); + if (ret == -ENOMEM) { /* Out of Resources */ return PYX_TRANSPORT_LU_COMM_FAILURE; - } else if (ret == -2) { + } else if (ret == -EINVAL) { /* * Handle case for SAM_STAT_RESERVATION_CONFLICT */ @@ -165,35 +164,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) */ return PYX_TRANSPORT_USE_SENSE_REASON; } + /* - * Setup the struct scatterlist memory from the received - * struct scsi_cmnd. + * For BIDI commands, pass in the extra READ buffer + * to transport_generic_map_mem_to_cmd() below..
*/ - if (scsi_sg_count(sc)) { - se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM; - mem_ptr = (void *)scsi_sglist(sc); - /* - * For BIDI commands, pass in the extra READ buffer - * to transport_generic_map_mem_to_cmd() below.. - */ - if (se_cmd->t_task->t_tasks_bidi) { - struct scsi_data_buffer *sdb = scsi_in(sc); + if (se_cmd->t_task.t_tasks_bidi) { + struct scsi_data_buffer *sdb = scsi_in(sc); - mem_bidi_ptr = (void *)sdb->table.sgl; - sg_no_bidi = sdb->table.nents; - } - } else { - /* - * Used for DMA_NONE - */ - mem_ptr = NULL; + sgl_bidi = sdb->table.sgl; + sgl_bidi_count = sdb->table.nents; } + /* * Map the SG memory into struct se_mem->page linked list using the same * physical memory at sg->page_link. */ - ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr, - scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi); + ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), + scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); if (ret < 0) return PYX_TRANSPORT_LU_COMM_FAILURE; @@ -384,14 +372,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) /* * Allocate the LUN_RESET TMR */ - se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, + se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET); if (IS_ERR(se_cmd->se_tmr_req)) goto release; /* * Locate the underlying TCM struct se_lun from sc->device->lun */ - if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0) + if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) goto release; /* * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() @@ -904,7 +892,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { - memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer, + memcpy(sc->sense_buffer, se_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); sc->result = SAM_STAT_CHECK_CONDITION; set_driver_byte(sc, DRIVER_SENSE); @@ -1054,7 +1042,7 @@ static int tcm_loop_make_nexus( * transport_register_session() */ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, - tl_nexus->se_sess, (void *)tl_nexus); + tl_nexus->se_sess, tl_nexus); tl_tpg->tl_hba->tl_nexus = tl_nexus; printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), @@ -1242,7 +1230,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( * Register the tl_tpg as a emulated SAS TCM Target Endpoint */ ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, - wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg, + wwn, &tl_tpg->tl_se_tpg, tl_tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) return ERR_PTR(-ENOMEM); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index bfc42ad..76abd86 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -61,11 +61,11 @@ struct t10_alua_lu_gp *default_lu_gp; */ int core_emulate_report_target_port_groups(struct se_cmd *cmd) { - struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev; + struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first Target port group descriptor */ @@ -151,13 +151,13 @@ int core_emulate_report_target_port_groups(struct
se_cmd *cmd) */ int core_emulate_set_target_port_groups(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct se_port *port, *l_port = cmd->se_lun->lun_sep; struct se_node_acl *nacl = cmd->se_sess->se_node_acl; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ u32 len = 4; /* Skip over RESERVED area in header */ int alua_access_state, primary = 0, rc; diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 7d9ccf3..95195d7 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -65,8 +65,8 @@ static int target_emulate_inquiry_std(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; /* * Make sure we at least have 6 bytes of INQUIRY response @@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) * Registered Extended LUN WWN has been set via ConfigFS * during device creation/restart. */ - if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags & + if (cmd->se_dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL) { buf[3] = 3; buf[5] = 0x80; @@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; u16 len = 0; buf[1] = 0x80; @@ -176,7 +176,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_lun *lun = cmd->se_lun; struct se_port *port = NULL; struct se_portal_group *tpg = NULL; @@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ - if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) + if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) buf[6] = 0x01; return 0; } @@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; int have_tp = 0; /* @@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: @@ -620,9 +620,9 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_inquiry(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; - unsigned char *cdb = cmd->t_task->t_task_cdb; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; + unsigned char *cdb = 
cmd->t_task.t_task_cdb; if (!(cdb[1] & 0x1)) return target_emulate_inquiry_std(cmd); @@ -665,8 +665,8 @@ target_emulate_inquiry(struct se_cmd *cmd) static int target_emulate_readcapacity(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; @@ -695,8 +695,8 @@ target_emulate_readcapacity(struct se_cmd *cmd) static int target_emulate_readcapacity_16(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; unsigned long long blocks = dev->transport->get_blocks(dev); buf[0] = (blocks >> 56) & 0xff; @@ -830,9 +830,9 @@ target_modesense_dpofua(unsigned char *buf, int type) static int target_emulate_modesense(struct se_cmd *cmd, int ten) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - char *cdb = cmd->t_task->t_task_cdb; - unsigned char *rbuf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + char *cdb = cmd->t_task.t_task_cdb; + unsigned char *rbuf = cmd->t_task.t_task_buf; int type = dev->transport->get_device_type(dev); int offset = (ten) ? 8 : 4; int length = 0; @@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) static int target_emulate_request_sense(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task->t_task_cdb; - unsigned char *buf = cmd->t_task->t_task_buf; + unsigned char *cdb = cmd->t_task.t_task_cdb; + unsigned char *buf = cmd->t_task.t_task_buf; u8 ua_asc = 0, ua_ascq = 0; if (cdb[1] & 0x01) { @@ -964,9 +964,9 @@ static int target_emulate_unmap(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL; + unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; sector_t lba; unsigned int size = cmd->data_length, range; int ret, offset; @@ -1011,8 +1011,8 @@ static int target_emulate_write_same(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; - sector_t lba = cmd->t_task->t_task_lba; + struct se_device *dev = cmd->se_dev; + sector_t lba = cmd->t_task.t_task_lba; unsigned int range; int ret; @@ -1036,11 +1036,11 @@ int transport_emulate_control_cdb(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; unsigned short service_action; int ret = 0; - switch (cmd->t_task->t_task_cdb[0]) { + switch (cmd->t_task.t_task_cdb[0]) { case INQUIRY: ret = target_emulate_inquiry(cmd); break; @@ -1054,13 +1054,13 @@ transport_emulate_control_cdb(struct se_task *task) ret = target_emulate_modesense(cmd, 1); break; case SERVICE_ACTION_IN: - switch (cmd->t_task->t_task_cdb[1] & 0x1f) { + switch (cmd->t_task.t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: ret = target_emulate_readcapacity_16(cmd); break; default: printk(KERN_ERR "Unsupported SA: 0x%02x\n", - cmd->t_task->t_task_cdb[1] & 0x1f); + cmd->t_task.t_task_cdb[1] & 0x1f); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } break; @@ -1085,7 +1085,7 @@ transport_emulate_control_cdb(struct se_task *task) break; 
case VARIABLE_LENGTH_CMD: service_action = - get_unaligned_be16(&cmd->t_task->t_task_cdb[8]); + get_unaligned_be16(&cmd->t_task.t_task_cdb[8]); switch (service_action) { case WRITE_SAME_32: if (!dev->transport->do_discard) { @@ -1124,7 +1124,7 @@ transport_emulate_control_cdb(struct se_task *task) break; default: printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", - cmd->t_task->t_task_cdb[0], dev->transport->name); + cmd->t_task.t_task_cdb[0], dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 64418ef..ac7f765 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2037,7 +2037,7 @@ static ssize_t target_core_dev_show(struct config_item *item, if (!(tc_attr->show)) return -EINVAL; - return tc_attr->show((void *)se_dev, page); + return tc_attr->show(se_dev, page); } static ssize_t target_core_dev_store(struct config_item *item, @@ -2053,7 +2053,7 @@ static ssize_t target_core_dev_store(struct config_item *item, if (!(tc_attr->store)) return -EINVAL; - return tc_attr->store((void *)se_dev, page, count); + return tc_attr->store(se_dev, page, count); } static struct configfs_item_operations target_core_dev_item_ops = { diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index fd92385..ea92f75 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -59,15 +59,12 @@ static struct se_subsystem_dev *lun0_su_dev; /* not static, needed by tpg.c */ struct se_device *g_lun0_dev; -int transport_get_lun_for_cmd( - struct se_cmd *se_cmd, - u32 unpacked_lun) +int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) { - struct se_dev_entry *deve; struct se_lun *se_lun = NULL; struct se_session *se_sess = se_cmd->se_sess; + struct se_device *dev; unsigned long flags; - int read_only = 0; if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; @@ -76,91 +73,87 @@ int transport_get_lun_for_cmd( } spin_lock_irq(&se_sess->se_node_acl->device_list_lock); - deve = se_cmd->se_deve = - &se_sess->se_node_acl->device_list[unpacked_lun]; - if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - if (se_cmd) { - deve->total_cmds++; - deve->total_bytes += se_cmd->data_length; - - if (se_cmd->data_direction == DMA_TO_DEVICE) { - if (deve->lun_flags & - TRANSPORT_LUNFLAGS_READ_ONLY) { - read_only = 1; - goto out; - } - deve->write_bytes += se_cmd->data_length; - } else if (se_cmd->data_direction == - DMA_FROM_DEVICE) { - deve->read_bytes += se_cmd->data_length; - } + se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; + if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { + struct se_dev_entry *deve = se_cmd->se_deve; + + deve->total_cmds++; + deve->total_bytes += se_cmd->data_length; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08x\n", + se_cmd->se_tfo->get_fabric_name(), + unpacked_lun); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); + return -EACCES; } + + if (se_cmd->data_direction == DMA_TO_DEVICE) + deve->write_bytes += se_cmd->data_length; + else if (se_cmd->data_direction == DMA_FROM_DEVICE) + deve->read_bytes += se_cmd->data_length; + 
deve->deve_cmds++; - se_lun = se_cmd->se_lun = deve->se_lun; + se_lun = deve->se_lun; + se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } -out: spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); if (!se_lun) { - if (read_only) { - se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + /* + * Use the se_portal_group->tpg_virt_lun0 to allow for + * REPORT_LUNS, et al to be returned when no active + * MappedLUN=0 exists for this Initiator Port. + */ + if (unpacked_lun != 0) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", se_cmd->se_tfo->get_fabric_name(), unpacked_lun); + return -ENODEV; + } + /* + * Force WRITE PROTECT for virtual LUN 0 + */ + if ((se_cmd->data_direction != DMA_FROM_DEVICE) && + (se_cmd->data_direction != DMA_NONE)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -EACCES; - } else { - /* - * Use the se_portal_group->tpg_virt_lun0 to allow for - * REPORT_LUNS, et al to be returned when no active - * MappedLUN=0 exists for this Initiator Port. - */ - if (unpacked_lun != 0) { - se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" - " Access for 0x%08x\n", - se_cmd->se_tfo->get_fabric_name(), - unpacked_lun); - return -ENODEV; - } - /* - * Force WRITE PROTECT for virtual LUN 0 - */ - if ((se_cmd->data_direction != DMA_FROM_DEVICE) && - (se_cmd->data_direction != DMA_NONE)) { - se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -EACCES; - } -#if 0 - printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", - se_cmd->se_tfo->get_fabric_name()); -#endif - se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; - se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; - se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } + + se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->orig_fe_lun = 0; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } /* * Determine if the struct se_lun is online. + * FIXME: Check for LUN_RESET + UNIT Attention */ -/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -ENODEV; } - { - struct se_device *dev = se_lun->lun_se_dev; + /* Directly associate cmd with se_dev */ + se_cmd->se_dev = se_lun->lun_se_dev; + + /* TODO: get rid of this and use atomics for stats */ + dev = se_lun->lun_se_dev; spin_lock_irq(&dev->stats_lock); dev->num_cmds++; if (se_cmd->data_direction == DMA_TO_DEVICE) @@ -168,30 +161,22 @@ out: else if (se_cmd->data_direction == DMA_FROM_DEVICE) dev->read_bytes += se_cmd->data_length; spin_unlock_irq(&dev->stats_lock); - } /* * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used * for tracking state of struct se_cmds during LUN shutdown events. 
*/ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); - list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); - atomic_set(&se_cmd->t_task->transport_lun_active, 1); -#if 0 - printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", - se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun); -#endif + list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); + atomic_set(&se_cmd->t_task.transport_lun_active, 1); spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); return 0; } -EXPORT_SYMBOL(transport_get_lun_for_cmd); +EXPORT_SYMBOL(transport_lookup_cmd_lun); -int transport_get_lun_for_tmr( - struct se_cmd *se_cmd, - u32 unpacked_lun) +int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) { - struct se_device *dev = NULL; struct se_dev_entry *deve; struct se_lun *se_lun = NULL; struct se_session *se_sess = se_cmd->se_sess; @@ -204,15 +189,16 @@ int transport_get_lun_for_tmr( } spin_lock_irq(&se_sess->se_node_acl->device_list_lock); - deve = se_cmd->se_deve = - &se_sess->se_node_acl->device_list[unpacked_lun]; + se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; + deve = se_cmd->se_deve; + if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; - dev = se_lun->lun_se_dev; + se_tmr->tmr_lun = deve->se_lun; + se_cmd->se_lun = deve->se_lun; + se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; -/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ + se_cmd->se_orig_obj_ptr = se_cmd->se_dev; } spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); @@ -226,21 +212,24 @@ int transport_get_lun_for_tmr( } /* * Determine if the struct se_lun is online. + * FIXME: Check for LUN_RESET + UNIT Attention */ -/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -ENODEV; } - se_tmr->tmr_dev = dev; - spin_lock(&dev->se_tmr_lock); - list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); - spin_unlock(&dev->se_tmr_lock); + /* Directly associate cmd with se_dev */ + se_cmd->se_dev = se_lun->lun_se_dev; + se_tmr->tmr_dev = se_lun->lun_se_dev; + + spin_lock(&se_tmr->tmr_dev->se_tmr_lock); + list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); + spin_unlock(&se_tmr->tmr_dev->se_tmr_lock); return 0; } -EXPORT_SYMBOL(transport_get_lun_for_tmr); +EXPORT_SYMBOL(transport_lookup_tmr_lun); /* * This function is called from core_scsi3_emulate_pro_register_and_move() @@ -667,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) struct se_lun *se_lun; struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = se_cmd->t_task->t_task_buf; + unsigned char *buf = se_cmd->t_task.t_task_buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list) + list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list) break; if (!(se_task)) { diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0c44bc0..2e7ea74 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -223,7 +223,7 @@ static struct se_device *fd_create_virtdevice( dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; dev = transport_add_device_to_core_hba(hba, &fileio_template, - se_dev, dev_flags, (void *)fd_dev, + se_dev, dev_flags, fd_dev, &dev_limits, "FILEIO", 
FD_VERSION); if (!(dev)) goto fail; @@ -279,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd) return NULL; } - fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr; + fd_req->fd_dev = cmd->se_dev->dev_ptr; return &fd_req->fd_task; } @@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task) struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - int immed = (cmd->t_task->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task.t_task_cdb[1] & 0x2); loff_t start, end; int ret; @@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task) /* * Determine if we will be flushing the entire device. */ - if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) { + if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) { start = 0; end = LLONG_MAX; } else { - start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; + start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; if (cmd->data_length) end = start + cmd->data_length; else @@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task) if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - cmd->t_task->t_tasks_fua) { + cmd->t_task.t_tasks_fua) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index fb15987..c73baef 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -74,7 +74,7 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) ib_host->iblock_host_id = host_id; - hba->hba_ptr = (void *) ib_host; + hba->hba_ptr = ib_host; printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, @@ -172,7 +172,7 @@ static struct se_device *iblock_create_virtdevice( ib_dev->ibd_bd = bd; dev = transport_add_device_to_core_hba(hba, - &iblock_template, se_dev, dev_flags, (void *)ib_dev, + &iblock_template, se_dev, dev_flags, ib_dev, &dev_limits, "IBLOCK", IBLOCK_VERSION); if (!(dev)) goto failed; @@ -240,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd) return NULL; } - ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr; + ib_req->ib_dev = cmd->se_dev->dev_ptr; atomic_set(&ib_req->ib_bio_cnt, 0); return &ib_req->ib_task; } @@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; - int immed = (cmd->t_task->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task.t_task_cdb[1] & 0x2); sector_t error_sector; int ret; @@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task) */ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_task->t_tasks_fua)) + task->task_se_cmd->t_task.t_tasks_fua)) rw = WRITE_FUA; else rw = WRITE; @@ -593,7 +593,7 @@ static struct bio *iblock_get_bio( DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); bio->bi_bdev = ib_dev->ibd_bd; - bio->bi_private = (void *) task; + bio->bi_private = task; bio->bi_destructor = iblock_bio_destructor; bio->bi_end_io = &iblock_bio_done; bio->bi_sector = lba; @@ -608,7 +608,7 @@ static struct bio *iblock_get_bio( static int iblock_map_task_SG(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = 
cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct iblock_dev *ib_dev = task->se_dev->dev_ptr; struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 27a7525..19406a3 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if ((cmd->t_task->t_task_cdb[1] & 0x01) && - (cmd->t_task->t_task_cdb[1] & 0x02)) { + if ((cmd->t_task.t_task_cdb[1] & 0x01) && + (cmd->t_task.t_task_cdb[1] & 0x02)) { printk(KERN_ERR "LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); return PYX_TRANSPORT_ILLEGAL_REQUEST; @@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; struct t10_reservation *pr_tmpl = &su_dev->t10_pr; - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; @@ -1471,7 +1471,7 @@ static int core_scsi3_decode_spec_i_port( int all_tg_pt, int aptpl) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_port *tmp_port; struct se_portal_group *dest_tpg = NULL, *tmp_tpg; struct se_session *se_sess = cmd->se_sess; @@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( struct list_head tid_dest_list; struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; struct target_core_fabric_ops *tmp_tf_ops; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tpdl, tid_len = 0; @@ -1509,7 +1509,7 @@ static int core_scsi3_decode_spec_i_port( tidh_new->dest_node_acl = se_sess->se_node_acl; tidh_new->dest_se_deve = local_se_deve; - local_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, local_se_deve, l_isid, sa_res_key, all_tg_pt, aptpl); if (!(local_pr_reg)) { @@ -1741,7 +1741,7 @@ static int core_scsi3_decode_spec_i_port( * and then call __core_scsi3_add_registration() in the * 2nd loop which will never fail. 
*/ - dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, all_tg_pt, aptpl); if (!(dest_pr_reg)) { @@ -1787,7 +1787,7 @@ static int core_scsi3_decode_spec_i_port( prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - __core_scsi3_add_registration(cmd->se_lun->lun_se_dev, dest_node_acl, + __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, dest_pr_reg, 0, 0); printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" @@ -2071,7 +2071,7 @@ static int core_scsi3_emulate_pro_register( int ignore_key) { struct se_session *se_sess = cmd->se_sess; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; @@ -2117,7 +2117,7 @@ static int core_scsi3_emulate_pro_register( * Port Endpoint that the PRO was received from on the * Logical Unit of the SCSI device server. */ - ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + ret = core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, se_deve, isid_ptr, sa_res_key, all_tg_pt, aptpl, ignore_key, 0); @@ -2145,7 +2145,7 @@ static int core_scsi3_emulate_pro_register( */ if (!(aptpl)) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); printk("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER\n"); return 0; @@ -2155,10 +2155,10 @@ static int core_scsi3_emulate_pro_register( * update the APTPL metadata information using its * preallocated *pr_reg->pr_aptpl_buf. */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) { @@ -2223,7 +2223,7 @@ static int core_scsi3_emulate_pro_register( */ if (!(sa_res_key)) { pr_holder = core_scsi3_check_implict_release( - cmd->se_lun->lun_se_dev, pr_reg); + cmd->se_dev, pr_reg); if (pr_holder < 0) { kfree(pr_aptpl_buf); core_scsi3_put_pr_reg(pr_reg); @@ -2260,7 +2260,7 @@ static int core_scsi3_emulate_pro_register( /* * Release the calling I_T Nexus registration now.. */ - __core_scsi3_free_registration(cmd->se_lun->lun_se_dev, pr_reg, + __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2315,7 +2315,7 @@ static int core_scsi3_emulate_pro_register( * READ_KEYS service action. 
*/ pr_reg->pr_res_generation = core_scsi3_pr_generation( - cmd->se_lun->lun_se_dev); + cmd->se_dev); pr_reg->pr_res_key = sa_res_key; printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" " Key for %s to: 0x%016Lx PRgeneration:" @@ -2398,7 +2398,7 @@ static int core_scsi3_pro_reserve( /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -2527,7 +2527,7 @@ static int core_scsi3_pro_reserve( spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -2758,7 +2758,7 @@ static int core_scsi3_emulate_pro_release( write_aptpl: if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -2783,7 +2783,7 @@ static int core_scsi3_emulate_pro_clear( /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg_n)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -2849,7 +2849,7 @@ static int core_scsi3_emulate_pro_clear( cmd->se_tfo->get_fabric_name()); if (pr_tmpl->pr_aptpl_active) { - core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" " for CLEAR\n"); } @@ -2954,7 +2954,7 @@ static int core_scsi3_pro_preempt( u64 sa_res_key, int abort) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; struct se_node_acl *pr_reg_nacl; struct se_session *se_sess = cmd->se_sess; @@ -2969,7 +2969,7 @@ static int core_scsi3_pro_preempt( return PYX_TRANSPORT_LU_COMM_FAILURE; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; - pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg_n)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -3111,7 +3111,7 @@ static int core_scsi3_pro_preempt( spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3121,7 +3121,7 @@ static int core_scsi3_pro_preempt( } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); + core_scsi3_pr_generation(cmd->se_dev); return 0; } /* @@ -3247,7 +3247,7 @@ static int core_scsi3_pro_preempt( } if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3256,7 +3256,7 @@ static int core_scsi3_pro_preempt( } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); + core_scsi3_pr_generation(cmd->se_dev); return 0; } @@ -3298,7 
+3298,7 @@ static int core_scsi3_emulate_pro_register_and_move( int unreg) { struct se_session *se_sess = cmd->se_sess; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve, *dest_se_deve = NULL; struct se_lun *se_lun = cmd->se_lun; struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; @@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move( struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; @@ -3330,7 +3330,7 @@ static int core_scsi3_emulate_pro_register_and_move( * * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg)) { printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" @@ -3612,7 +3612,7 @@ after_iport_check: dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, iport_ptr); if (!(dest_pr_reg)) { - ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + ret = core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, 0, aptpl, 2, 1); if (ret != 0) { @@ -3683,12 +3683,12 @@ after_iport_check: */ if (!(aptpl)) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); printk("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER_AND_MOVE\n"); } else { pr_tmpl->pr_aptpl_active = 1; - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &dest_pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) */ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) { - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; @@ -3827,10 +3827,10 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) */ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) { - struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u32 add_len = 0, off = 8; if (cmd->data_length < 8) { @@ -3882,10 +3882,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) */ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) { - struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u64 pr_res_key; u32 
add_len = 16; /* Hardcoded to 16 when a reservation is held. */ @@ -3963,9 +3963,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) */ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u16 add_len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { @@ -4014,13 +4014,13 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) */ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) { - struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_device *se_dev = cmd->se_dev; struct se_node_acl *se_nacl; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ int format_code = 0; @@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) int core_scsi3_emulate_pr(struct se_cmd *cmd) { - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; struct se_device *dev = cmd->se_dev; /* * Following spc2r20 5.5.1 Reservations overview: diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 44a79a5..ecfe889 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -72,7 +72,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) phv->phv_host_id = host_id; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - hba->hba_ptr = (void *)phv; + hba->hba_ptr = phv; printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, @@ -355,7 +355,7 @@ static struct se_device *pscsi_add_device_to_list( pdv->pdv_sd = sd; dev = transport_add_device_to_core_hba(hba, &pscsi_template, - se_dev, dev_flags, (void *)pdv, + se_dev, dev_flags, pdv, &dev_limits, NULL, NULL); if (!(dev)) { pdv->pdv_sd = NULL; @@ -394,7 +394,7 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) pdv->pdv_se_hba = hba; printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); - return (void *)pdv; + return pdv; } /* @@ -697,7 +697,7 @@ static int pscsi_transport_complete(struct se_task *task) if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = task->task_se_cmd->t_task->t_task_buf; + unsigned char *buf = task->task_se_cmd->t_task.t_task_buf; if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -763,7 +763,7 @@ static struct se_task * pscsi_alloc_task(struct se_cmd *cmd) { struct pscsi_plugin_task *pt; - unsigned char *cdb = cmd->t_task->t_task_cdb; + unsigned char *cdb = cmd->t_task.t_task_cdb; pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); if (!pt) { @@ -776,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd) * allocate the extended CDB buffer for per struct se_task context * pt->pscsi_cdb now. 
*/ - if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) { + if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) { pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); if (!(pt->pscsi_cdb)) { @@ -812,7 +812,7 @@ static inline void pscsi_blk_init_request( * also set the end_io_data pointer.to struct se_task. */ req->end_io = pscsi_req_done; - req->end_io_data = (void *)task; + req->end_io_data = task; /* * Load the referenced struct se_task's SCSI CDB into * include/linux/blkdev.h:struct request->cmd @@ -822,7 +822,7 @@ static inline void pscsi_blk_init_request( /* * Setup pointer for outgoing sense data. */ - req->sense = (void *)&pt->pscsi_sense[0]; + req->sense = &pt->pscsi_sense[0]; req->sense_len = 0; } @@ -889,7 +889,7 @@ static void pscsi_free_task(struct se_task *task) * Release the extended CDB allocation from pscsi_alloc_task() * if one exists. */ - if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) + if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) kfree(pt->pscsi_cdb); /* * We do not release the bio(s) here associated with this task, as @@ -1266,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task) return 0; ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, - pt->pscsi_req, cmd->t_task->t_task_buf, + pt->pscsi_req, cmd->t_task.t_task_buf, task->task_size, GFP_KERNEL); if (ret < 0) { printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index fbf06c3..384a8e2 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -66,7 +66,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) rd_host->rd_host_id = host_id; - hba->hba_ptr = (void *) rd_host; + hba->hba_ptr = rd_host; printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, @@ -271,7 +271,7 @@ static struct se_device *rd_create_virtdevice( dev = transport_add_device_to_core_hba(hba, (rd_dev->rd_direct) ? &rd_dr_template : - &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, + &rd_mcp_template, se_dev, dev_flags, rd_dev, &dev_limits, prod, rev); if (!(dev)) goto fail; @@ -336,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd) printk(KERN_ERR "Unable to allocate struct rd_request\n"); return NULL; } - rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr; + rd_req->rd_dev = cmd->se_dev->dev_ptr; return &rd_req->rd_task; } @@ -737,7 +737,7 @@ check_eot: } out: - task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset( } out: - task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map( * across multiple struct se_task->task_sg[]. 
*/ ret = transport_init_task_sg(task, - list_entry(cmd->t_task->t_mem_list->next, + list_first_entry(&cmd->t_task.t_mem_list, struct se_mem, se_list), task_offset); if (ret <= 0) return ret; return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - list_entry(cmd->t_task->t_mem_list->next, + list_first_entry(&cmd->t_task.t_mem_list, struct se_mem, se_list), out_se_mem, se_mem_cnt, task_offset_in); } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 2f73749..e1f99f7 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -113,15 +113,14 @@ int core_tmr_lun_reset( struct list_head *preempt_and_abort_list, struct se_cmd *prout_cmd) { - struct se_cmd *cmd; - struct se_queue_req *qr, *qr_tmp; + struct se_cmd *cmd, *tcmd; struct se_node_acl *tmr_nacl = NULL; struct se_portal_group *tmr_tpg = NULL; struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_tmr_req *tmr_p, *tmr_pp; struct se_task *task, *task_tmp; unsigned long flags; - int fe_count, state, tas; + int fe_count, tas; /* * TASK_ABORTED status bit, this is configurable via ConfigFS * struct se_device attributes. spc4r17 section 7.4.6 Control mode page @@ -179,14 +178,14 @@ int core_tmr_lun_reset( continue; spin_unlock(&dev->se_tmr_lock); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->t_transport_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.t_transport_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } @@ -194,7 +193,7 @@ int core_tmr_lun_reset( " Response: 0x%02x, t_state: %d\n", (preempt_and_abort_list) ? "Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_cmd_finish_abort_tmr(cmd); spin_lock(&dev->se_tmr_lock); @@ -230,12 +229,6 @@ int core_tmr_lun_reset( } cmd = task->task_se_cmd; - if (!cmd->t_task) { - printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" - " %p ITT: 0x%08x\n", task, cmd, - cmd->se_tfo->get_task_tag(cmd)); - continue; - } /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. @@ -254,38 +247,38 @@ int core_tmr_lun_reset( atomic_set(&task->task_state_active, 0); spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", (preempt_and_abort_list) ? 
"Preempt" : "", cmd, task, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, cmd->t_task->t_task_cdb[0]); + cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]); DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, - cmd->t_task->t_task_cdbs, - atomic_read(&cmd->t_task->t_task_cdbs_left), - atomic_read(&cmd->t_task->t_task_cdbs_sent), - atomic_read(&cmd->t_task->t_transport_active), - atomic_read(&cmd->t_task->t_transport_stop), - atomic_read(&cmd->t_task->t_transport_sent)); + cmd->t_task.t_task_cdbs, + atomic_read(&cmd->t_task.t_task_cdbs_left), + atomic_read(&cmd->t_task.t_task_cdbs_sent), + atomic_read(&cmd->t_task.t_transport_active), + atomic_read(&cmd->t_task.t_transport_stop), + atomic_read(&cmd->t_task.t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" " for dev: %p\n", task, dev); wait_for_completion(&task->task_stop_comp); DEBUG_LR("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - atomic_dec(&cmd->t_task->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + atomic_dec(&cmd->t_task.t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -295,24 +288,24 @@ int core_tmr_lun_reset( } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task.t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - fe_count = atomic_read(&cmd->t_task->t_fe_count); + fe_count = atomic_read(&cmd->t_task.t_fe_count); - if (atomic_read(&cmd->t_task->t_transport_active)) { + if (atomic_read(&cmd->t_task.t_transport_active)) { DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&cmd->t_task->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + atomic_set(&cmd->t_task.t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); @@ -321,8 +314,8 @@ int core_tmr_lun_reset( } DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&cmd->t_task->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_set(&cmd->t_task.t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -337,20 +330,7 @@ int core_tmr_lun_reset( * reference, otherwise the struct se_cmd is released. 
*/ spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { - cmd = (struct se_cmd *)qr->cmd; - if (!(cmd)) { - /* - * Skip these for non PREEMPT_AND_ABORT usage.. - */ - if (preempt_and_abort_list != NULL) - continue; - - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - continue; - } + list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. @@ -365,18 +345,15 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - atomic_dec(&cmd->t_task->t_transport_queue_active); + atomic_dec(&cmd->t_task.t_transport_queue_active); atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - state = qr->state; - kfree(qr); - DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? - "Preempt" : "", cmd, state, - atomic_read(&cmd->t_task->t_fe_count)); + "Preempt" : "", cmd, cmd->t_state, + atomic_read(&cmd->t_task.t_fe_count)); /* * Signal that the command has failed via cmd->se_cmd_flags, * and call TFO->new_cmd_failure() to wakeup any fabric @@ -388,7 +365,7 @@ int core_tmr_lun_reset( transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, - atomic_read(&cmd->t_task->t_fe_count)); + atomic_read(&cmd->t_task.t_fe_count)); spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6f5d4df..d0cd601 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -201,7 +201,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; typedef int (*map_func_t)(struct se_task *, u32); static int transport_generic_write_pending(struct se_cmd *); -static int transport_processing_thread(void *); +static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_direct_request_timeout(struct se_cmd *cmd); @@ -215,9 +215,8 @@ static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, static int transport_generic_remove(struct se_cmd *cmd, int release_to_pool, int session_reinstatement); static int transport_get_sectors(struct se_cmd *cmd); -static struct list_head *transport_init_se_mem_list(void); static int transport_map_sg_to_mem(struct se_cmd *cmd, - struct list_head *se_mem_list, void *in_mem, + struct list_head *se_mem_list, struct scatterlist *sgl, u32 *se_mem_cnt); static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, unsigned char *dst, struct list_head *se_mem_list); @@ -574,7 +573,7 @@ void transport_deregister_session(struct se_session *se_sess) EXPORT_SYMBOL(transport_deregister_session); /* - * Called with cmd->t_task->t_state_lock held. + * Called with cmd->t_task.t_state_lock held. 
*/ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { @@ -582,10 +581,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - if (!cmd->t_task) - return; - - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { dev = task->se_dev; if (!(dev)) continue; @@ -603,7 +599,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); - atomic_dec(&cmd->t_task->t_task_cdbs_ex_left); + atomic_dec(&cmd->t_task.t_task_cdbs_ex_left); } } @@ -622,32 +618,32 @@ static int transport_cmd_check_stop( { unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); /* * Determine if IOCTL context caller in requesting the stopping of this * command for LUN shutdown purposes. */ - if (atomic_read(&cmd->t_task->transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_task->transport_lun_stop)" + if (atomic_read(&cmd->t_task.transport_lun_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; - atomic_set(&cmd->t_task->t_transport_active, 0); + atomic_set(&cmd->t_task.t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - complete(&cmd->t_task->transport_lun_stop_comp); + complete(&cmd->t_task.transport_lun_stop_comp); return 1; } /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
*/ - if (atomic_read(&cmd->t_task->t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_task->t_transport_stop) ==" + if (atomic_read(&cmd->t_task.t_transport_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); @@ -662,13 +658,13 @@ static int transport_cmd_check_stop( */ if (transport_off == 2) cmd->se_lun = NULL; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - complete(&cmd->t_task->t_transport_stop_comp); + complete(&cmd->t_task.t_transport_stop_comp); return 1; } if (transport_off) { - atomic_set(&cmd->t_task->t_transport_active, 0); + atomic_set(&cmd->t_task.t_transport_active, 0); if (transport_off == 2) { transport_all_task_dev_remove_state(cmd); /* @@ -683,18 +679,18 @@ static int transport_cmd_check_stop( */ if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); cmd->se_tfo->check_stop_free(cmd); return 1; } } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } else if (t_state) cmd->t_state = t_state; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } @@ -712,21 +708,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) if (!lun) return; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); goto check_lun; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (atomic_read(&cmd->t_task->transport_lun_active)) { - list_del(&cmd->se_lun_list); - atomic_set(&cmd->t_task->transport_lun_active, 0); + if (atomic_read(&cmd->t_task.transport_lun_active)) { + list_del(&cmd->se_lun_node); + atomic_set(&cmd->t_task.transport_lun_active, 0); #if 0 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); @@ -737,7 +733,7 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) @@ -748,7 +744,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) { - transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); if (transport_cmd_check_stop_to_fabric(cmd)) return; @@ -756,50 +752,36 @@ void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) transport_generic_remove(cmd, 0, 0); } -static int transport_add_cmd_to_queue( +static void transport_add_cmd_to_queue( struct se_cmd *cmd, int t_state) { struct se_device *dev = cmd->se_dev; struct 
se_queue_obj *qobj = &dev->dev_queue_obj; - struct se_queue_req *qr; unsigned long flags; - qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); - if (!(qr)) { - printk(KERN_ERR "Unable to allocate memory for" - " struct se_queue_req\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&qr->qr_list); - - qr->cmd = cmd; - qr->state = t_state; + INIT_LIST_HEAD(&cmd->se_queue_node); if (t_state) { - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); cmd->t_state = t_state; - atomic_set(&cmd->t_task->t_transport_active, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_set(&cmd->t_task.t_transport_active, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_add_tail(&qr->qr_list, &qobj->qobj_list); - atomic_inc(&cmd->t_task->t_transport_queue_active); + list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); + atomic_inc(&cmd->t_task.t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); wake_up_interruptible(&qobj->thread_wq); - return 0; } -/* - * Called with struct se_queue_obj->cmd_queue_lock held. - */ -static struct se_queue_req * -transport_get_qr_from_queue(struct se_queue_obj *qobj) +static struct se_cmd * +transport_get_cmd_from_queue(struct se_queue_obj *qobj) { - struct se_queue_req *qr; + struct se_cmd *cmd; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -807,47 +789,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return NULL; } + cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; + atomic_dec(&cmd->t_task.t_transport_queue_active); - if (qr->cmd) - atomic_dec(&qr->cmd->t_task->t_transport_queue_active); - - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - return qr; + return cmd; } static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj) { - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *t; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&cmd->t_task->t_transport_queue_active))) { + if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } - list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { - if (qr->cmd != cmd) - continue; - - atomic_dec(&qr->cmd->t_task->t_transport_queue_active); - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - } + list_for_each_entry(t, &qobj->qobj_list, se_queue_node) + if (t == cmd) { + atomic_dec(&cmd->t_task.t_transport_queue_active); + atomic_dec(&qobj->queue_cnt); + list_del(&cmd->se_queue_node); + break; + } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - if (atomic_read(&cmd->t_task->t_transport_queue_active)) { + if (atomic_read(&cmd->t_task.t_transport_queue_active)) { printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", cmd->se_tfo->get_task_tag(cmd), - atomic_read(&cmd->t_task->t_transport_queue_active)); + atomic_read(&cmd->t_task.t_transport_queue_active)); } } @@ -857,7 +834,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, */ void transport_complete_sync_cache(struct se_cmd *cmd, int good) { - struct se_task *task = 
list_entry(cmd->t_task->t_task_list.next, + struct se_task *task = list_entry(cmd->t_task.t_task_list.next, struct se_task, t_list); if (good) { @@ -887,12 +864,12 @@ void transport_complete_task(struct se_task *task, int success) unsigned long flags; #if 0 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, - cmd->t_task->t_task_cdb[0], dev); + cmd->t_task.t_task_cdb[0], dev); #endif if (dev) atomic_inc(&dev->depth_left); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); atomic_set(&task->task_active, 0); /* @@ -914,14 +891,14 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_stop)) { /* - * Decrement cmd->t_task->t_se_count if this task had + * Decrement cmd->t_task.t_se_count if this task had * previously thrown its timeout exception handler. */ if (atomic_read(&task->task_timeout)) { - atomic_dec(&cmd->t_task->t_se_count); + atomic_dec(&cmd->t_task.t_se_count); atomic_set(&task->task_timeout, 0); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); complete(&task->task_stop_comp); return; @@ -933,33 +910,33 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_timeout)) { if (!(atomic_dec_and_test( - &cmd->t_task->t_task_cdbs_timeout_left))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + &cmd->t_task.t_task_cdbs_timeout_left))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); return; } - atomic_dec(&cmd->t_task->t_task_cdbs_timeout_left); + atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left); /* * Decrement the outstanding t_task_cdbs_left count. The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
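 *
 * The decrement just below is a plain last-one-out idiom: dispatch
 * sets t_task_cdbs_left to the number of tasks, each task completion
 * decrements it, and only the final decrement queues the command for
 * completion processing. A toy sketch (finish_cmd() is a hypothetical
 * stand-in for the transport_add_cmd_to_queue() call done here):
 *
 *	atomic_set(&cmd->t_task.t_task_cdbs_left, ntasks);  // dispatch
 *	...
 *	if (!atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))
 *		return;			// other tasks still outstanding
 *	finish_cmd(cmd);		// runs exactly once, in last task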
*/ - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { if (!success) - cmd->t_task->t_tasks_failed = 1; + cmd->t_task.t_tasks_failed = 1; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - if (!success || cmd->t_task->t_tasks_failed) { + if (!success || cmd->t_task.t_tasks_failed) { t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = @@ -968,10 +945,10 @@ void transport_complete_task(struct se_task *task, int success) PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } } else { - atomic_set(&cmd->t_task->t_transport_complete, 1); + atomic_set(&cmd->t_task.t_transport_complete, 1); t_state = TRANSPORT_COMPLETE_OK; } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); } @@ -1064,8 +1041,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { dev = task->se_dev; if (atomic_read(&task->task_state_active)) @@ -1081,17 +1058,17 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) spin_unlock(&dev->execute_task_lock); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static void transport_add_tasks_from_cmd(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_task *task, *task_prev = NULL; unsigned long flags; spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { if (atomic_read(&task->task_execute_queue)) continue; /* @@ -1184,19 +1161,15 @@ void transport_dump_dev_state( */ static void transport_release_all_cmds(struct se_device *dev) { - struct se_cmd *cmd = NULL; - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *cmd, *tcmd; int bug_out = 0, t_state; unsigned long flags; spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj.qobj_list, - qr_list) { - - cmd = qr->cmd; - t_state = qr->state; - list_del(&qr->qr_list); - kfree(qr); + list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, + se_queue_node) { + t_state = cmd->t_state; + list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); @@ -1548,7 +1521,7 @@ struct se_device *transport_add_device_to_core_hba( transport_init_queue_obj(&dev->dev_queue_obj); dev->dev_flags = device_flags; dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; - dev->dev_ptr = (void *) transport_dev; + dev->dev_ptr = transport_dev; dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; @@ -1684,7 +1657,7 @@ transport_generic_get_task(struct se_cmd *cmd, enum dma_data_direction data_direction) { struct se_task *task; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; unsigned long flags; task = dev->transport->alloc_task(cmd); @@ -1697,26 +1670,20 @@ transport_generic_get_task(struct se_cmd *cmd, 
INIT_LIST_HEAD(&task->t_execute_list); INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); - task->task_no = cmd->t_task->t_tasks_no++; + task->task_no = cmd->t_task.t_tasks_no++; task->task_se_cmd = cmd; task->se_dev = dev; task->task_data_direction = data_direction; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task->t_task_list); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task.t_task_list); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return task; } static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); -void transport_device_setup_cmd(struct se_cmd *cmd) -{ - cmd->se_dev = cmd->se_lun->lun_se_dev; -} -EXPORT_SYMBOL(transport_device_setup_cmd); - /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. @@ -1730,20 +1697,18 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_list); - INIT_LIST_HEAD(&cmd->se_delayed_list); - INIT_LIST_HEAD(&cmd->se_ordered_list); - /* - * Setup t_task pointer to t_task_backstore - */ - cmd->t_task = &cmd->t_task_backstore; + INIT_LIST_HEAD(&cmd->se_lun_node); + INIT_LIST_HEAD(&cmd->se_delayed_node); + INIT_LIST_HEAD(&cmd->se_ordered_node); - INIT_LIST_HEAD(&cmd->t_task->t_task_list); - init_completion(&cmd->t_task->transport_lun_fe_stop_comp); - init_completion(&cmd->t_task->transport_lun_stop_comp); - init_completion(&cmd->t_task->t_transport_stop_comp); - spin_lock_init(&cmd->t_task->t_state_lock); - atomic_set(&cmd->t_task->transport_dev_active, 1); + INIT_LIST_HEAD(&cmd->t_task.t_mem_list); + INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list); + INIT_LIST_HEAD(&cmd->t_task.t_task_list); + init_completion(&cmd->t_task.transport_lun_fe_stop_comp); + init_completion(&cmd->t_task.transport_lun_stop_comp); + init_completion(&cmd->t_task.t_transport_stop_comp); + spin_lock_init(&cmd->t_task.t_state_lock); + atomic_set(&cmd->t_task.transport_dev_active, 1); cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1760,7 +1725,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 0; if (cmd->sam_task_attr == MSG_ACA_TAG) { @@ -1772,7 +1737,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) * Used to determine when ORDERED commands should go from * Dormant to Active status. 
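 *
 * The se_lun_node/se_delayed_node/se_ordered_node renames above
 * follow the usual intrusive-list convention: a struct list_head that
 * *is* a list keeps a *_list name, while one that links this object
 * into another object's list is a node. Both sides of the pairing,
 * taken from elsewhere in this patch:
 *
 *	list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
 *	cmd = list_first_entry(&qobj->qobj_list,
 *				struct se_cmd, se_queue_node);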
*/ - cmd->se_ordered_id = atomic_inc_return(&cmd->se_lun->lun_se_dev->dev_ordered_id); + cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); smp_mb__after_atomic_inc(); DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, @@ -1788,8 +1753,8 @@ void transport_free_se_cmd( /* * Check and free any extended CDB buffer that was allocated */ - if (se_cmd->t_task->t_task_cdb != se_cmd->t_task->__t_task_cdb) - kfree(se_cmd->t_task->t_task_cdb); + if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb) + kfree(se_cmd->t_task.t_task_cdb); } EXPORT_SYMBOL(transport_free_se_cmd); @@ -1812,7 +1777,6 @@ int transport_generic_allocate_tasks( */ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD @@ -1828,26 +1792,26 @@ int transport_generic_allocate_tasks( * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb. */ - if (scsi_command_size(cdb) > sizeof(cmd->t_task->__t_task_cdb)) { - cmd->t_task->t_task_cdb = kzalloc(scsi_command_size(cdb), + if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) { + cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(cmd->t_task->t_task_cdb)) { - printk(KERN_ERR "Unable to allocate cmd->t_task->t_task_cdb" - " %u > sizeof(cmd->t_task->__t_task_cdb): %lu ops\n", + if (!(cmd->t_task.t_task_cdb)) { + printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb" + " %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n", scsi_command_size(cdb), - (unsigned long)sizeof(cmd->t_task->__t_task_cdb)); + (unsigned long)sizeof(cmd->t_task.__t_task_cdb)); return -ENOMEM; } } else - cmd->t_task->t_task_cdb = &cmd->t_task->__t_task_cdb[0]; + cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0]; /* * Copy the original CDB into cmd->t_task. */ - memcpy(cmd->t_task->t_task_cdb, cdb, scsi_command_size(cdb)); + memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb)); /* * Setup the received CDB based on SCSI defined opcodes and * perform unit attention, persistent reservations and ALUA - * checks for virtual device backends. The cmd->t_task->t_task_cdb + * checks for virtual device backends. The cmd->t_task.t_task_cdb * pointer is expected to be setup before we reach this point. */ ret = transport_generic_cmd_sequencer(cmd, cdb); @@ -1859,7 +1823,7 @@ int transport_generic_allocate_tasks( if (transport_check_alloc_task_attr(cmd) < 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; + return -EINVAL; } spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) @@ -1947,7 +1911,6 @@ int transport_generic_handle_tmr( * This is needed for early exceptions. 
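 *
 * The return -EINVAL conversions in these hunks replace the old magic
 * "-2" with a standard errno, so callers can key on the failure
 * reason rather than a private numbering. A hedged sketch of the
 * caller side (setup_cmd() and the two fail_* helpers are
 * hypothetical stand-ins):
 *
 *	ret = setup_cmd(se_cmd, cdb);
 *	if (ret == -ENOMEM)		// was -1: out of resources
 *		fail_resources(se_cmd);
 *	else if (ret == -EINVAL)	// was -2: sense data already set
 *		fail_with_sense(se_cmd);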
*/ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); return 0; @@ -1973,9 +1936,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) /* * No tasks remain in the execution queue */ - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) { + &cmd->t_task.t_task_list, t_list) { DEBUG_TS("task_no[%d] - Processing task %p\n", task->task_no, task); /* @@ -1984,14 +1947,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (!atomic_read(&task->task_sent) && !atomic_read(&task->task_active)) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_remove_task_from_execute_queue(task, task->se_dev); DEBUG_TS("task_no[%d] - Removed from execute queue\n", task->task_no); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); continue; } @@ -2001,7 +1964,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); DEBUG_TS("task_no[%d] - Waiting to complete\n", @@ -2010,8 +1973,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) DEBUG_TS("task_no[%d] - Stopped successfully\n", task->task_no); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - atomic_dec(&cmd->t_task->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + atomic_dec(&cmd->t_task.t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -2022,7 +1985,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) __transport_stop_task_timer(task, &flags); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return ret; } @@ -2038,7 +2001,7 @@ static void transport_generic_request_failure( { DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), - cmd->t_task->t_task_cdb[0]); + cmd->t_task.t_task_cdb[0]); DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" " %d/%d transport_error_status: %d\n", cmd->se_tfo->get_cmd_state(cmd), @@ -2047,13 +2010,13 @@ static void transport_generic_request_failure( DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", cmd->t_task->t_task_cdbs, - atomic_read(&cmd->t_task->t_task_cdbs_left), - atomic_read(&cmd->t_task->t_task_cdbs_sent), - atomic_read(&cmd->t_task->t_task_cdbs_ex_left), - atomic_read(&cmd->t_task->t_transport_active), - atomic_read(&cmd->t_task->t_transport_stop), - atomic_read(&cmd->t_task->t_transport_sent)); + " t_transport_sent: %d\n", cmd->t_task.t_task_cdbs, + atomic_read(&cmd->t_task.t_task_cdbs_left), + atomic_read(&cmd->t_task.t_task_cdbs_sent), + atomic_read(&cmd->t_task.t_task_cdbs_ex_left), + atomic_read(&cmd->t_task.t_transport_active), + atomic_read(&cmd->t_task.t_transport_stop), + atomic_read(&cmd->t_task.t_transport_sent)); transport_stop_all_task_timers(cmd); @@ -2135,7 +2098,7 @@ static void transport_generic_request_failure( break; default: printk(KERN_ERR 
"Unknown transport error for CDB 0x%02x: %d\n", - cmd->t_task->t_task_cdb[0], + cmd->t_task.t_task_cdb[0], cmd->transport_error_status); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; @@ -2156,19 +2119,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->t_transport_timeout))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.t_transport_timeout))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - if (atomic_read(&cmd->t_task->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - atomic_sub(atomic_read(&cmd->t_task->t_transport_timeout), - &cmd->t_task->t_se_count); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout), + &cmd->t_task.t_se_count); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static void transport_generic_request_timeout(struct se_cmd *cmd) @@ -2176,16 +2139,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) unsigned long flags; /* - * Reset cmd->t_task->t_se_count to allow transport_generic_remove() + * Reset cmd->t_task.t_se_count to allow transport_generic_remove() * to allow last call to free memory resources. */ - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (atomic_read(&cmd->t_task->t_transport_timeout) > 1) { - int tmp = (atomic_read(&cmd->t_task->t_transport_timeout) - 1); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) { + int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1); - atomic_sub(tmp, &cmd->t_task->t_se_count); + atomic_sub(tmp, &cmd->t_task.t_se_count); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_generic_remove(cmd, 0, 0); } @@ -2201,8 +2164,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) return -ENOMEM; } - cmd->t_task->t_tasks_se_num = 0; - cmd->t_task->t_task_buf = buf; + cmd->t_task.t_tasks_se_num = 0; + cmd->t_task.t_task_buf = buf; return 0; } @@ -2244,9 +2207,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) { unsigned long flags; - spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); } /* @@ -2260,9 +2223,9 @@ static void transport_task_timeout_handler(unsigned long data) DEBUG_TT("transport task timeout fired! 
task: %p cmd: %p\n", task, cmd); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); if (task->task_flags & TF_STOP) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } task->task_flags &= ~TF_RUNNING; @@ -2273,13 +2236,13 @@ static void transport_task_timeout_handler(unsigned long data) if (!(atomic_read(&task->task_active))) { DEBUG_TT("transport task: %p cmd: %p timeout task_active" " == 0\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - atomic_inc(&cmd->t_task->t_se_count); - atomic_inc(&cmd->t_task->t_transport_timeout); - cmd->t_task->t_tasks_failed = 1; + atomic_inc(&cmd->t_task.t_se_count); + atomic_inc(&cmd->t_task.t_transport_timeout); + cmd->t_task.t_tasks_failed = 1; atomic_set(&task->task_timeout, 1); task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; @@ -2288,28 +2251,28 @@ static void transport_task_timeout_handler(unsigned long data) if (atomic_read(&task->task_stop)) { DEBUG_TT("transport task: %p cmd: %p timeout task_stop" " == 1\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); complete(&task->task_stop_comp); return; } - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { DEBUG_TT("transport task: %p cmd: %p timeout non zero" " t_task_cdbs_left\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); cmd->t_state = TRANSPORT_COMPLETE_FAILURE; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); } /* - * Called with cmd->t_task->t_state_lock held. + * Called with cmd->t_task.t_state_lock held. */ static void transport_start_task_timer(struct se_task *task) { @@ -2339,7 +2302,7 @@ static void transport_start_task_timer(struct se_task *task) } /* - * Called with spin_lock_irq(&cmd->t_task->t_state_lock) held. + * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held. 
*/ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { @@ -2349,11 +2312,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) return; task->task_flags |= TF_STOP; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, *flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags); del_timer_sync(&task->task_timer); - spin_lock_irqsave(&cmd->t_task->t_state_lock, *flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags); task->task_flags &= ~TF_RUNNING; task->task_flags &= ~TF_STOP; } @@ -2363,11 +2326,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) struct se_task *task = NULL, *task_tmp; unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) + &cmd->t_task.t_task_list, t_list) __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static inline int transport_tcq_window_closed(struct se_device *dev) @@ -2391,14 +2354,14 @@ static inline int transport_tcq_window_closed(struct se_device *dev) */ static inline int transport_execute_task_attr(struct se_cmd *cmd) { - if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 1; /* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list. */ if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_inc(&cmd->se_lun->lun_se_dev->dev_hoq_count); + atomic_inc(&cmd->se_dev->dev_hoq_count); smp_mb__after_atomic_inc(); DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", @@ -2406,30 +2369,30 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); - list_add_tail(&cmd->se_ordered_list, - &cmd->se_lun->lun_se_dev->ordered_cmd_list); - spin_unlock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); + spin_lock(&cmd->se_dev->ordered_cmd_lock); + list_add_tail(&cmd->se_ordered_node, + &cmd->se_dev->ordered_cmd_list); + spin_unlock(&cmd->se_dev->ordered_cmd_lock); - atomic_inc(&cmd->se_lun->lun_se_dev->dev_ordered_sync); + atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" " list, se_ordered_id: %u\n", - cmd->t_task->t_task_cdb[0], + cmd->t_task.t_task_cdb[0], cmd->se_ordered_id); /* * Add ORDERED command to tail of execution queue if * no other older commands exist that need to be * completed first. */ - if (!(atomic_read(&cmd->se_lun->lun_se_dev->simple_cmds))) + if (!(atomic_read(&cmd->se_dev->simple_cmds))) return 1; } else { /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc(&cmd->se_lun->lun_se_dev->simple_cmds); + atomic_inc(&cmd->se_dev->simple_cmds); smp_mb__after_atomic_inc(); } /* @@ -2437,20 +2400,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * add the dormant task(s) built for the passed struct se_cmd to the * execution queue and become in Active state for this struct se_device. 
*/ - if (atomic_read(&cmd->se_lun->lun_se_dev->dev_ordered_sync) != 0) { + if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { /* * Otherwise, add cmd w/ tasks to delayed cmd queue that * will be drained upon completion of HEAD_OF_QUEUE task. */ - spin_lock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); + spin_lock(&cmd->se_dev->delayed_cmd_lock); cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; - list_add_tail(&cmd->se_delayed_list, - &cmd->se_lun->lun_se_dev->delayed_cmd_list); - spin_unlock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); + list_add_tail(&cmd->se_delayed_node, + &cmd->se_dev->delayed_cmd_list); + spin_unlock(&cmd->se_dev->delayed_cmd_lock); DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" " delayed CMD list, se_ordered_id: %u\n", - cmd->t_task->t_task_cdb[0], cmd->sam_task_attr, + cmd->t_task.t_task_cdb[0], cmd->sam_task_attr, cmd->se_ordered_id); /* * Return zero to let transport_execute_tasks() know @@ -2505,7 +2468,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) * storage object. */ execute_tasks: - __transport_execute_tasks(cmd->se_lun->lun_se_dev); + __transport_execute_tasks(cmd->se_dev); return 0; } @@ -2548,17 +2511,17 @@ check_depth: cmd = task->task_se_cmd; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); atomic_set(&task->task_active, 1); atomic_set(&task->task_sent, 1); - atomic_inc(&cmd->t_task->t_task_cdbs_sent); + atomic_inc(&cmd->t_task.t_task_cdbs_sent); - if (atomic_read(&cmd->t_task->t_task_cdbs_sent) == - cmd->t_task->t_task_cdbs) + if (atomic_read(&cmd->t_task.t_task_cdbs_sent) == + cmd->t_task.t_task_cdbs) atomic_set(&cmd->transport_sent, 1); transport_start_task_timer(task); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); /* * The struct se_cmd->transport_emulate_cdb() function pointer is used * to grab REPORT_LUNS and other CDBs we want to handle before they hit the @@ -2623,10 +2586,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) * Any unsolicited data will get dumped for failed command inside of * the fabric plugin */ - spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); se_cmd->se_tfo->new_cmd_failure(se_cmd); } @@ -2638,7 +2601,7 @@ static inline u32 transport_get_sectors_6( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2666,7 +2629,7 @@ static inline u32 transport_get_sectors_10( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2696,7 +2659,7 @@ static inline u32 transport_get_sectors_12( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2726,7 +2689,7 @@ static inline u32 transport_get_sectors_16( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. 
@@ -2768,7 +2731,7 @@ static inline u32 transport_get_size( unsigned char *cdb, struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; if (dev->transport->get_device_type(dev) == TYPE_TAPE) { if (cdb[1] & 1) { /* sectors */ @@ -2836,17 +2799,17 @@ static void transport_xor_callback(struct se_cmd *cmd) return; } /* - * Copy the scatterlist WRITE buffer located at cmd->t_task->t_mem_list + * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list * into the locally allocated *buf */ - transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task->t_mem_list); + transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list); /* * Now perform the XOR against the BIDI read memory located at - * cmd->t_task->t_mem_bidi_list + * cmd->t_task.t_mem_bidi_list */ offset = 0; - list_for_each_entry(se_mem, cmd->t_task->t_mem_bidi_list, se_list) { + list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) { addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); if (!(addr)) goto out; @@ -2874,14 +2837,14 @@ static int transport_get_sense_data(struct se_cmd *cmd) WARN_ON(!cmd->se_lun); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) { + &cmd->t_task.t_task_list, t_list) { if (!task->task_sense) continue; @@ -2903,12 +2866,12 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->se_tfo->get_task_tag(cmd), task->task_no); continue; } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); - memcpy((void *)&buffer[offset], (void *)sense_buffer, + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); cmd->scsi_status = task->task_scsi_status; /* Automatically padded */ @@ -2921,7 +2884,7 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->scsi_status); return 0; } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return -1; } @@ -2958,7 +2921,7 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - return -2; + return -EINVAL; } /* transport_generic_cmd_sequencer(): @@ -2975,7 +2938,7 @@ static int transport_generic_cmd_sequencer( struct se_cmd *cmd, unsigned char *cdb) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; int ret = 0, sector_ret = 0, passthrough; u32 sectors = 0, size = 0, pr_reg_type = 0; @@ -2989,7 +2952,7 @@ static int transport_generic_cmd_sequencer( &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; - return -2; + return -EINVAL; } /* * Check status of Asymmetric Logical Unit Assignment port @@ -3011,7 +2974,7 @@ static int transport_generic_cmd_sequencer( transport_set_sense_codes(cmd, 0x04, alua_ascq); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; - return -2; + return -EINVAL; } goto 
out_invalid_cdb_field; } @@ -3036,7 +2999,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - cmd->t_task->t_task_lba = transport_lba_21(cdb); + cmd->t_task.t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_10: @@ -3045,7 +3008,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_12: @@ -3054,7 +3017,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_16: @@ -3063,7 +3026,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - cmd->t_task->t_task_lba = transport_lba_64(cdb); + cmd->t_task.t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_6: @@ -3072,7 +3035,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - cmd->t_task->t_task_lba = transport_lba_21(cdb); + cmd->t_task.t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: @@ -3081,8 +3044,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task->t_task_lba = transport_lba_32(cdb); - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -3091,8 +3054,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - cmd->t_task->t_task_lba = transport_lba_32(cdb); - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -3101,20 +3064,20 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - cmd->t_task->t_task_lba = transport_lba_64(cdb); - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_task_lba = transport_lba_64(cdb); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(cmd->t_task->t_tasks_bidi)) + !(cmd->t_task.t_tasks_bidi)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags 
|= SCF_SCSI_DATA_SG_IO_CDB; passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); @@ -3127,7 +3090,7 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run during transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); @@ -3149,7 +3112,7 @@ static int transport_generic_cmd_sequencer( * XDWRITE_READ_32 logic. */ cmd->transport_split_cdb = &split_cdb_XX_32; - cmd->t_task->t_task_lba = transport_lba_64_ext(cdb); + cmd->t_task.t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; /* @@ -3163,14 +3126,14 @@ static int transport_generic_cmd_sequencer( * transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_task->t_tasks_fua = (cdb[10] & 0x8); + cmd->t_task.t_tasks_fua = (cdb[10] & 0x8); break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->t_task->t_task_lba = get_unaligned_be64(&cdb[12]); + cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; /* @@ -3299,7 +3262,7 @@ static int transport_generic_cmd_sequencer( * Do implict HEAD_OF_QUEUE processing for INQUIRY. * See spc4r17 section 5.3 */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; break; @@ -3405,10 +3368,10 @@ static int transport_generic_cmd_sequencer( */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); } else { sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); - cmd->t_task->t_task_lba = transport_lba_64(cdb); + cmd->t_task.t_task_lba = transport_lba_64(cdb); } if (sector_ret) goto out_unsupported_cdb; @@ -3454,7 +3417,7 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->t_task->t_task_lba = get_unaligned_be16(&cdb[2]); + cmd->t_task.t_task_lba = get_unaligned_be16(&cdb[2]); passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* @@ -3507,7 +3470,7 @@ static int transport_generic_cmd_sequencer( * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; break; @@ -3560,11 +3523,11 @@ static int transport_generic_cmd_sequencer( out_unsupported_cdb: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -2; + return -EINVAL; out_invalid_cdb_field: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; + return -EINVAL; } static inline void transport_release_tasks(struct se_cmd *); @@ -3662,7 +3625,7 @@ static void transport_memcpy_se_mem_read_contig( */ static void transport_complete_task_attr(struct se_cmd *cmd) { - struct se_device *dev = 
cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_cmd *cmd_p, *cmd_tmp; int new_active_tasks = 0; @@ -3682,7 +3645,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { spin_lock(&dev->ordered_cmd_lock); - list_del(&cmd->se_ordered_list); + list_del(&cmd->se_ordered_node); atomic_dec(&dev->dev_ordered_sync); smp_mb__after_atomic_dec(); spin_unlock(&dev->ordered_cmd_lock); @@ -3698,9 +3661,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd) */ spin_lock(&dev->delayed_cmd_lock); list_for_each_entry_safe(cmd_p, cmd_tmp, - &dev->delayed_cmd_list, se_delayed_list) { + &dev->delayed_cmd_list, se_delayed_node) { - list_del(&cmd_p->se_delayed_list); + list_del(&cmd_p->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); DEBUG_STA("Calling add_tasks() for" @@ -3733,7 +3696,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task * Attribute. */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); /* * Check if we need to retrieve a sense buffer from @@ -3777,8 +3740,8 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) transport_memcpy_write_contig(cmd, - cmd->t_task->t_task_pt_sgl, - cmd->t_task->t_task_buf); + cmd->t_task.t_task_pt_sgl, + cmd->t_task.t_task_buf); cmd->se_tfo->queue_data_in(cmd); break; @@ -3792,7 +3755,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (cmd->t_task->t_mem_bidi_list != NULL) { + if (!list_empty(&cmd->t_task.t_mem_bidi_list)) { spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) { cmd->se_lun->lun_sep->sep_stats.tx_data_octets += @@ -3819,9 +3782,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) struct se_task *task, *task_tmp; unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) { + &cmd->t_task.t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -3830,15 +3793,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); if (task->se_dev) task->se_dev->transport->free_task(task); else printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", task->task_no); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static inline void transport_free_pages(struct se_cmd *cmd) @@ -3851,9 +3814,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_dev->transport->do_se_mem_map) free_page = 0; - if (cmd->t_task->t_task_buf) { - kfree(cmd->t_task->t_task_buf); - cmd->t_task->t_task_buf = NULL; + if (cmd->t_task.t_task_buf) { + kfree(cmd->t_task.t_task_buf); + cmd->t_task.t_task_buf = NULL; return; } @@ -3863,11 +3826,8 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) return; - if 
(!(cmd->t_task->t_tasks_se_num)) - return; - list_for_each_entry_safe(se_mem, se_mem_tmp, - cmd->t_task->t_mem_list, se_list) { + &cmd->t_task.t_mem_list, se_list) { /* * We only release call __free_page(struct se_mem->se_page) when * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, @@ -3878,27 +3838,21 @@ static inline void transport_free_pages(struct se_cmd *cmd) list_del(&se_mem->se_list); kmem_cache_free(se_mem_cache, se_mem); } + cmd->t_task.t_tasks_se_num = 0; - if (cmd->t_task->t_mem_bidi_list && cmd->t_task->t_tasks_se_bidi_num) { - list_for_each_entry_safe(se_mem, se_mem_tmp, - cmd->t_task->t_mem_bidi_list, se_list) { - /* - * We only release call __free_page(struct se_mem->se_page) when - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(se_mem->se_page); + list_for_each_entry_safe(se_mem, se_mem_tmp, + &cmd->t_task.t_mem_bidi_list, se_list) { + /* + * We only release call __free_page(struct se_mem->se_page) when + * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, + */ + if (free_page) + __free_page(se_mem->se_page); - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); - } + list_del(&se_mem->se_list); + kmem_cache_free(se_mem_cache, se_mem); } - - kfree(cmd->t_task->t_mem_bidi_list); - cmd->t_task->t_mem_bidi_list = NULL; - kfree(cmd->t_task->t_mem_list); - cmd->t_task->t_mem_list = NULL; - cmd->t_task->t_tasks_se_num = 0; + cmd->t_task.t_tasks_se_bidi_num = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -3910,23 +3864,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (atomic_read(&cmd->t_task->t_fe_count)) { - if (!(atomic_dec_and_test(&cmd->t_task->t_fe_count))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_fe_count)) { + if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 1; } } - if (atomic_read(&cmd->t_task->t_se_count)) { - if (!(atomic_dec_and_test(&cmd->t_task->t_se_count))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + if (atomic_read(&cmd->t_task.t_se_count)) { + if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 1; } } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } @@ -3938,14 +3892,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) if (transport_dec_and_check(cmd)) return; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); goto free_pages; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_release_tasks(cmd); free_pages: @@ -3961,33 +3915,30 @@ static int transport_generic_remove( { unsigned long flags; - if (!(cmd->t_task)) - goto release_cmd; - if (transport_dec_and_check(cmd)) { if (session_reinstatement) { - 
spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } return 1; } - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); goto free_pages; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_release_tasks(cmd); + free_pages: transport_free_pages(cmd); -release_cmd: if (release_to_pool) { transport_release_cmd_to_pool(cmd); } else { @@ -4011,35 +3962,19 @@ release_cmd: */ int transport_generic_map_mem_to_cmd( struct se_cmd *cmd, - struct scatterlist *mem, - u32 sg_mem_num, - struct scatterlist *mem_bidi_in, - u32 sg_mem_bidi_num) + struct scatterlist *sgl, + u32 sgl_count, + struct scatterlist *sgl_bidi, + u32 sgl_bidi_count) { - u32 se_mem_cnt_out = 0; + u32 mapped_sg_count = 0; int ret; - if (!(mem) || !(sg_mem_num)) + if (!sgl || !sgl_count) return 0; - /* - * Passed *mem will contain a list_head containing preformatted - * struct se_mem elements... - */ - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { - if ((mem_bidi_in) || (sg_mem_bidi_num)) { - printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" - " with BIDI-COMMAND\n"); - return -ENOSYS; - } - cmd->t_task->t_mem_list = (struct list_head *)mem; - cmd->t_task->t_tasks_se_num = sg_mem_num; - cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; - return 0; - } /* - * Otherwise, assume the caller is passing a struct scatterlist - * array from include/linux/scatterlist.h + * Convert sgls (sgl, sgl_bidi) to list of se_mems */ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { @@ -4048,41 +3983,29 @@ int transport_generic_map_mem_to_cmd( * processed into a TCM struct se_subsystem_dev, we do the mapping * from the passed physical memory to struct se_mem->se_page here. 
*/ - cmd->t_task->t_mem_list = transport_init_se_mem_list(); - if (!(cmd->t_task->t_mem_list)) - return -ENOMEM; - ret = transport_map_sg_to_mem(cmd, - cmd->t_task->t_mem_list, mem, &se_mem_cnt_out); + &cmd->t_task.t_mem_list, sgl, &mapped_sg_count); if (ret < 0) return -ENOMEM; - cmd->t_task->t_tasks_se_num = se_mem_cnt_out; + cmd->t_task.t_tasks_se_num = mapped_sg_count; /* * Setup BIDI READ list of struct se_mem elements */ - if ((mem_bidi_in) && (sg_mem_bidi_num)) { - cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(cmd->t_task->t_mem_bidi_list)) { - kfree(cmd->t_task->t_mem_list); - return -ENOMEM; - } - se_mem_cnt_out = 0; - + if (sgl_bidi && sgl_bidi_count) { + mapped_sg_count = 0; ret = transport_map_sg_to_mem(cmd, - cmd->t_task->t_mem_bidi_list, mem_bidi_in, - &se_mem_cnt_out); - if (ret < 0) { - kfree(cmd->t_task->t_mem_list); + &cmd->t_task.t_mem_bidi_list, sgl_bidi, + &mapped_sg_count); + if (ret < 0) return -ENOMEM; - } - cmd->t_task->t_tasks_se_bidi_num = se_mem_cnt_out; + cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (mem_bidi_in || sg_mem_bidi_num) { + if (sgl_bidi || sgl_bidi_count) { printk(KERN_ERR "BIDI-Commands not supported using " "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); return -ENOSYS; @@ -4097,7 +4020,8 @@ int transport_generic_map_mem_to_cmd( * struct scatterlist format. */ cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - cmd->t_task->t_task_pt_sgl = mem; + cmd->t_task.t_task_pt_sgl = sgl; + /* don't need sgl count? We assume it contains cmd->data_length data */ } return 0; @@ -4112,21 +4036,21 @@ static inline long long transport_dev_end_lba(struct se_device *dev) static int transport_get_sectors(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; - cmd->t_task->t_tasks_sectors = + cmd->t_task.t_tasks_sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); - if (!(cmd->t_task->t_tasks_sectors)) - cmd->t_task->t_tasks_sectors = 1; + if (!(cmd->t_task.t_tasks_sectors)) + cmd->t_task.t_tasks_sectors = 1; if (dev->transport->get_device_type(dev) != TYPE_DISK) return 0; - if ((cmd->t_task->t_task_lba + cmd->t_task->t_tasks_sectors) > + if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) > transport_dev_end_lba(dev)) { printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" " transport_dev_end_lba(): %llu\n", - cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, + cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, transport_dev_end_lba(dev)); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; @@ -4138,26 +4062,26 @@ static int transport_get_sectors(struct se_cmd *cmd) static int transport_new_cmd_obj(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; u32 task_cdbs = 0, rc; if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { task_cdbs++; - cmd->t_task->t_task_cdbs++; + cmd->t_task.t_task_cdbs++; } else { int set_counts = 1; /* * Setup any BIDI READ tasks and memory from - * cmd->t_task->t_mem_bidi_list so the READ struct se_tasks + * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks * are queued first for the non pSCSI passthrough case. 
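 *
 * With t_mem_bidi_list now an embedded list_head (initialized in
 * transport_init_se_cmd) rather than a kzalloc'd pointer, "does a
 * BIDI READ payload exist" changes from a NULL test to an emptiness
 * test:
 *
 *	if (cmd->t_task->t_mem_bidi_list != NULL)	// old form
 *	if (!list_empty(&cmd->t_task.t_mem_bidi_list))	// new form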
 		 */
-		if ((cmd->t_task->t_mem_bidi_list != NULL) &&
+		if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
 		    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
 			rc = transport_generic_get_cdb_count(cmd,
-				cmd->t_task->t_task_lba,
-				cmd->t_task->t_tasks_sectors,
-				DMA_FROM_DEVICE, cmd->t_task->t_mem_bidi_list,
+				cmd->t_task.t_task_lba,
+				cmd->t_task.t_tasks_sectors,
+				DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list,
 				set_counts);
 			if (!(rc)) {
 				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -4168,13 +4092,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 			set_counts = 0;
 		}
 		/*
-		 * Setup the tasks and memory from cmd->t_task->t_mem_list
+		 * Setup the tasks and memory from cmd->t_task.t_mem_list
 		 * Note for BIDI transfers this will contain the WRITE payload
 		 */
 		task_cdbs = transport_generic_get_cdb_count(cmd,
-				cmd->t_task->t_task_lba,
-				cmd->t_task->t_tasks_sectors,
-				cmd->data_direction, cmd->t_task->t_mem_list,
+				cmd->t_task.t_task_lba,
+				cmd->t_task.t_tasks_sectors,
+				cmd->data_direction, &cmd->t_task.t_mem_list,
 				set_counts);
 		if (!(task_cdbs)) {
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 		}
-		cmd->t_task->t_task_cdbs += task_cdbs;
+		cmd->t_task.t_task_cdbs += task_cdbs;

 #if 0
 		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
 			" %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
-			cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors,
-			cmd->t_task->t_task_cdbs);
+			cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors,
+			cmd->t_task.t_task_cdbs);
 #endif
 	}

-	atomic_set(&cmd->t_task->t_task_cdbs_left, task_cdbs);
-	atomic_set(&cmd->t_task->t_task_cdbs_ex_left, task_cdbs);
-	atomic_set(&cmd->t_task->t_task_cdbs_timeout_left, task_cdbs);
+	atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs);
+	atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs);
+	atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs);
 	return 0;
 }

-static struct list_head *transport_init_se_mem_list(void)
-{
-	struct list_head *se_mem_list;
-
-	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
-	if (!(se_mem_list)) {
-		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
-		return NULL;
-	}
-	INIT_LIST_HEAD(se_mem_list);
-
-	return se_mem_list;
-}
-
 static int
 transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 {
 	unsigned char *buf;
 	struct se_mem *se_mem;

-	cmd->t_task->t_mem_list = transport_init_se_mem_list();
-	if (!(cmd->t_task->t_mem_list))
-		return -ENOMEM;
-
 	/*
 	 * If the device uses memory mapping this is enough.
 	 */
 	if (cmd->se_dev->transport->do_se_mem_map)
 		return 0;

-	/*
-	 * Setup BIDI-COMMAND READ list of struct se_mem elements
-	 */
-	if (cmd->t_task->t_tasks_bidi) {
-		cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list();
-		if (!(cmd->t_task->t_mem_bidi_list)) {
-			kfree(cmd->t_task->t_mem_list);
-			return -ENOMEM;
-		}
-	}
-
 	while (length) {
 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
 		if (!(se_mem)) {
@@ -4263,8 +4158,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 		memset(buf, 0, se_mem->se_len);
 		kunmap_atomic(buf, KM_IRQ0);

-		list_add_tail(&se_mem->se_list, cmd->t_task->t_mem_list);
-		cmd->t_task->t_tasks_se_num++;
+		list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
+		cmd->t_task.t_tasks_se_num++;

 		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
 			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
@@ -4274,7 +4169,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
 	}

 	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
-		cmd->t_task->t_tasks_se_num);
+		cmd->t_task.t_tasks_se_num);

 	return 0;
 out:
@@ -4290,7 +4185,7 @@ int transport_init_task_sg(
 	u32 task_offset)
 {
 	struct se_cmd *se_cmd = task->task_se_cmd;
-	struct se_device *se_dev = se_cmd->se_lun->lun_se_dev;
+	struct se_device *se_dev = se_cmd->se_dev;
 	struct se_mem *se_mem = in_se_mem;
 	struct target_core_fabric_ops *tfo = se_cmd->se_tfo;
 	u32 sg_length, task_size = task->task_size, task_sg_num_padded;
@@ -4306,7 +4201,7 @@ int transport_init_task_sg(
 			sg_length = se_mem->se_len;

 			if (!(list_is_last(&se_mem->se_list,
-					se_cmd->t_task->t_mem_list)))
+					&se_cmd->t_task.t_mem_list)))
 				se_mem = list_entry(se_mem->se_list.next,
 						struct se_mem, se_list);
 		} else {
@@ -4326,7 +4221,7 @@ int transport_init_task_sg(
 				sg_length = (se_mem->se_len - task_offset);

 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task->t_mem_list)))
+						&se_cmd->t_task.t_mem_list)))
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 			}
@@ -4367,7 +4262,7 @@ next:
 	 * Setup task->task_sg_bidi for SCSI READ payload for
 	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
 	 */
-	if ((se_cmd->t_task->t_mem_bidi_list != NULL) &&
+	if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) &&
 	    (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
 		task->task_sg_bidi = kzalloc(task_sg_num_padded *
 				sizeof(struct scatterlist), GFP_KERNEL);
@@ -4458,21 +4353,26 @@ static inline int transport_set_tasks_sectors(
 				max_sectors_set);
 }

+/*
+ * Convert a sgl into a linked list of se_mems.
+ */
 static int transport_map_sg_to_mem(
 	struct se_cmd *cmd,
 	struct list_head *se_mem_list,
-	void *in_mem,
-	u32 *se_mem_cnt)
+	struct scatterlist *sg,
+	u32 *sg_count)
 {
 	struct se_mem *se_mem;
-	struct scatterlist *sg;
-	u32 sg_count = 1, cmd_size = cmd->data_length;
+	u32 cmd_size = cmd->data_length;

-	WARN_ON(!in_mem);
-
-	sg = (struct scatterlist *)in_mem;
+	WARN_ON(!sg);

 	while (cmd_size) {
+		/*
+		 * NOTE: it is safe to return -ENOMEM at any time in creating this
+		 * list because transport_free_pages() will eventually be called, and is
+		 * smart enough to deallocate all list items for sg and sg_bidi lists.
+		 */
 		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
 		if (!(se_mem)) {
 			printk(KERN_ERR "Unable to allocate struct se_mem\n");
@@ -4489,26 +4389,21 @@ static int transport_map_sg_to_mem(
 		if (cmd_size > sg->length) {
 			se_mem->se_len = sg->length;
 			sg = sg_next(sg);
-			sg_count++;
 		} else
 			se_mem->se_len = cmd_size;

 		cmd_size -= se_mem->se_len;
+		(*sg_count)++;

-		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
-			*se_mem_cnt, cmd_size);
+		DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n",
+			sg_count, cmd_size);
 		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
 			se_mem->se_page, se_mem->se_off, se_mem->se_len);

 		list_add_tail(&se_mem->se_list, se_mem_list);
-		(*se_mem_cnt)++;
 	}

-	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
-		" struct se_mem\n", sg_count, *se_mem_cnt);
-
-	if (sg_count != *se_mem_cnt)
-		BUG();
+	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count);

 	return 0;
 }
@@ -4551,7 +4446,7 @@ int transport_map_mem_to_sg(
 			sg->length = se_mem->se_len;

 			if (!(list_is_last(&se_mem->se_list,
-					se_cmd->t_task->t_mem_list))) {
+					&se_cmd->t_task.t_mem_list))) {
 				se_mem = list_entry(se_mem->se_list.next,
 					struct se_mem, se_list);
 				(*se_mem_cnt)++;
@@ -4587,7 +4482,7 @@ int transport_map_mem_to_sg(
 				sg->length = (se_mem->se_len - *task_offset);

 				if (!(list_is_last(&se_mem->se_list,
-						se_cmd->t_task->t_mem_list))) {
+						&se_cmd->t_task.t_mem_list))) {
 					se_mem = list_entry(se_mem->se_list.next,
 							struct se_mem, se_list);
 					(*se_mem_cnt)++;
@@ -4645,7 +4540,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 * Walk the struct se_task list and setup scatterlist chains
 	 * for each contiguosly allocated struct se_task->task_sg[].
 	 */
-	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 		if (!(task->task_sg) || !(task->task_padded_sg))
 			continue;

@@ -4656,7 +4551,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 			 * Either add chain or mark end of scatterlist
 			 */
 			if (!(list_is_last(&task->t_list,
-					&cmd->t_task->t_task_list))) {
+					&cmd->t_task.t_task_list))) {
 				/*
 				 * Clear existing SGL termination bit set in
 				 * transport_init_task_sg(), see sg_mark_end()
@@ -4682,7 +4577,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 		/*
 		 * Check for single task..
 		 */
-		if (!(list_is_last(&task->t_list, &cmd->t_task->t_task_list))) {
+		if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) {
 			/*
 			 * Clear existing SGL termination bit set in
 			 * transport_init_task_sg(), see sg_mark_end()
@@ -4700,18 +4595,18 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
 	 * padding SGs for linking and to mark the end.
 	 */
-	cmd->t_task->t_tasks_sg_chained = sg_first;
-	cmd->t_task->t_tasks_sg_chained_no = sg_count;
+	cmd->t_task.t_tasks_sg_chained = sg_first;
+	cmd->t_task.t_tasks_sg_chained_no = sg_count;

-	DEBUG_CMD_M("Setup cmd: %p cmd->t_task->t_tasks_sg_chained: %p and"
-		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task->t_tasks_sg_chained,
-		cmd->t_task->t_tasks_sg_chained_no);
+	DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and"
+		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained,
+		cmd->t_task.t_tasks_sg_chained_no);

-	for_each_sg(cmd->t_task->t_tasks_sg_chained, sg,
-			cmd->t_task->t_tasks_sg_chained_no, i) {
+	for_each_sg(cmd->t_task.t_tasks_sg_chained, sg,
+			cmd->t_task.t_tasks_sg_chained_no, i) {

-		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
-			i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
+		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
+			i, sg, sg_page(sg), sg->length, sg->offset);
 		if (sg_is_chain(sg))
 			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
 		if (sg_is_last(sg))
@@ -4741,7 +4636,7 @@ static int transport_do_se_mem_map(
 			in_mem, in_se_mem, out_se_mem, se_mem_cnt,
 			task_offset_in);
 		if (ret == 0)
-			task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt;
+			task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt;

 		return ret;
 	}
@@ -4791,7 +4686,7 @@ static u32 transport_generic_get_cdb_count(
 	struct se_task *task;
 	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	int max_sectors_set = 0, ret;
 	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;

@@ -4805,15 +4700,14 @@ static u32 transport_generic_get_cdb_count(
 	 * mem_list will ever be empty at this point.
 	 */
 	if (!(list_empty(mem_list)))
-		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
+		se_mem = list_first_entry(mem_list, struct se_mem, se_list);
 	/*
 	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
 	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
 	 */
-	if ((cmd->t_task->t_mem_bidi_list != NULL) &&
-	    !(list_empty(cmd->t_task->t_mem_bidi_list)) &&
+	if (!list_empty(&cmd->t_task.t_mem_bidi_list) &&
 	    (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
-		se_mem_bidi = list_entry(cmd->t_task->t_mem_bidi_list->next,
+		se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list,
 				struct se_mem, se_list);

 	while (sectors) {
@@ -4836,15 +4730,15 @@ static u32 transport_generic_get_cdb_count(

 		cdb = dev->transport->get_cdb(task);
 		if ((cdb)) {
-			memcpy(cdb, cmd->t_task->t_task_cdb,
-				scsi_command_size(cmd->t_task->t_task_cdb));
+			memcpy(cdb, cmd->t_task.t_task_cdb,
+				scsi_command_size(cmd->t_task.t_task_cdb));
 			cmd->transport_split_cdb(task->task_lba,
 					&task->task_sectors, cdb);
 		}

 		/*
 		 * Perform the SE OBJ plugin and/or Transport plugin specific
-		 * mapping for cmd->t_task->t_mem_list. And setup the
+		 * mapping for cmd->t_task.t_mem_list. And setup the
 		 * task->task_sg and if necessary task->task_sg_bidi
 		 */
 		ret = transport_do_se_mem_map(dev, task, mem_list,
@@ -4855,7 +4749,7 @@ static u32 transport_generic_get_cdb_count(
 		se_mem = se_mem_lout;
 		/*
-		 * Setup the cmd->t_task->t_mem_bidi_list -> task->task_sg_bidi
+		 * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi
 		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
 		 *
 		 * Note that the first call to transport_do_se_mem_map() above will
 		 */
 		if (task->task_sg_bidi != NULL) {
 			ret = transport_do_se_mem_map(dev, task,
-				cmd->t_task->t_mem_bidi_list, NULL,
+				&cmd->t_task.t_mem_bidi_list, NULL,
 				se_mem_bidi, &se_mem_bidi_lout,
 				&se_mem_bidi_cnt, &task_offset_in);
 			if (ret < 0)
@@ -4888,8 +4782,8 @@ static u32 transport_generic_get_cdb_count(
 	}

 	if (set_counts) {
-		atomic_inc(&cmd->t_task->t_fe_count);
-		atomic_inc(&cmd->t_task->t_se_count);
+		atomic_inc(&cmd->t_task.t_fe_count);
+		atomic_inc(&cmd->t_task.t_se_count);
 	}

 	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
@@ -4904,7 +4798,7 @@ out:
 static int
 transport_map_control_cmd_to_task(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	unsigned char *cdb;
 	struct se_task *task;
 	int ret;
@@ -4915,26 +4809,26 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
 	cdb = dev->transport->get_cdb(task);
 	if (cdb)
-		memcpy(cdb, cmd->t_task->t_task_cdb,
-			scsi_command_size(cmd->t_task->t_task_cdb));
+		memcpy(cdb, cmd->t_task.t_task_cdb,
+			scsi_command_size(cmd->t_task.t_task_cdb));

 	task->task_size = cmd->data_length;
 	task->task_sg_num =
 		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;

-	atomic_inc(&cmd->t_task->t_fe_count);
-	atomic_inc(&cmd->t_task->t_se_count);
+	atomic_inc(&cmd->t_task.t_fe_count);
+	atomic_inc(&cmd->t_task.t_se_count);

 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 		u32 se_mem_cnt = 0, task_offset = 0;

-		if (!list_empty(cmd->t_task->t_mem_list))
-			se_mem = list_entry(cmd->t_task->t_mem_list->next,
+		if (!list_empty(&cmd->t_task.t_mem_list))
+			se_mem = list_first_entry(&cmd->t_task.t_mem_list,
 					struct se_mem, se_list);

 		ret = transport_do_se_mem_map(dev, task,
-			cmd->t_task->t_mem_list, NULL, se_mem,
+			&cmd->t_task.t_mem_list, NULL, se_mem,
 			&se_mem_lout, &se_mem_cnt, &task_offset);
 		if (ret < 0)
 			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
@@ -4969,14 +4863,14 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 {
 	struct se_portal_group *se_tpg;
 	struct se_task *task;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	int ret = 0;

 	/*
 	 * Determine is the TCM fabric module has already allocated physical
 	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
 	 * to setup beforehand the linked list of physical memory at
-	 * cmd->t_task->t_mem_list of struct se_mem->se_page
+	 * cmd->t_task.t_mem_list of struct se_mem->se_page
 	 */
 	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
 		ret = transport_allocate_resources(cmd);
@@ -5005,7 +4899,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
 	}

 	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
-		list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
+		list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) {
 			if (atomic_read(&task->task_sent))
 				continue;
 			if (!dev->transport->map_task_SG)
@@ -5052,9 +4946,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
 	 * original EDTL
 	 */
 	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
-		if (!cmd->t_task->t_tasks_se_num) {
+		if (!cmd->t_task.t_tasks_se_num) {
 			unsigned char *dst, *buf =
-				(unsigned char *)cmd->t_task->t_task_buf;
+				(unsigned char *)cmd->t_task.t_task_buf;

 			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
 			if (!(dst)) {
@@ -5066,15 +4960,15 @@ void transport_generic_process_write(struct se_cmd *cmd)
 			}
 			memcpy(dst, buf, cmd->cmd_spdtl);

-			kfree(cmd->t_task->t_task_buf);
-			cmd->t_task->t_task_buf = dst;
+			kfree(cmd->t_task.t_task_buf);
+			cmd->t_task.t_task_buf = dst;
 		} else {
 			struct scatterlist *sg =
-				(struct scatterlist *)cmd->t_task->t_task_buf;
+				(struct scatterlist *)cmd->t_task.t_task_buf;
 			struct scatterlist *orig_sg;

 			orig_sg = kzalloc(sizeof(struct scatterlist) *
-					cmd->t_task->t_tasks_se_num,
+					cmd->t_task.t_tasks_se_num,
 					GFP_KERNEL);
 			if (!(orig_sg)) {
 				printk(KERN_ERR "Unable to allocate memory"
@@ -5084,9 +4978,9 @@ void transport_generic_process_write(struct se_cmd *cmd)
 				return;
 			}

-			memcpy(orig_sg, cmd->t_task->t_task_buf,
+			memcpy(orig_sg, cmd->t_task.t_task_buf,
 				sizeof(struct scatterlist) *
-				cmd->t_task->t_tasks_se_num);
+				cmd->t_task.t_tasks_se_num);

 			cmd->data_length = cmd->cmd_spdtl;
 			/*
@@ -5117,22 +5011,22 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
 	unsigned long flags;
 	int ret;

-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	cmd->t_state = TRANSPORT_WRITE_PENDING;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 	/*
 	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
 	 * from the passed Linux/SCSI struct scatterlist located at
-	 * se_cmd->t_task->t_task_pt_buf to the contiguous buffer at
-	 * se_cmd->t_task->t_task_buf.
+	 * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at
+	 * se_cmd->t_task.t_task_buf.
 	 */
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
 		transport_memcpy_read_contig(cmd,
-				cmd->t_task->t_task_buf,
-				cmd->t_task->t_task_pt_sgl);
+				cmd->t_task.t_task_buf,
+				cmd->t_task.t_task_pt_sgl);
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
-	 * cmd->t_task->t_transport_active=0 so that transport_generic_handle_data
+	 * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data
 	 * can be called from HW target mode interrupt code. This is safe
 	 * to be called with transport_off=1 before the cmd->se_tfo->write_pending
 	 * because the se_cmd->se_lun pointer is not being cleared.
@@ -5156,7 +5050,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
  */
 void transport_release_cmd_to_pool(struct se_cmd *cmd)
 {
-	BUG_ON(!cmd->t_task);
 	BUG_ON(!cmd->se_tfo);

 	transport_free_se_cmd(cmd);
@@ -5174,7 +5067,7 @@ void transport_generic_free_cmd(
 	int release_to_pool,
 	int session_reinstatement)
 {
-	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !cmd->t_task)
+	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
 		transport_release_cmd_to_pool(cmd);
 	else {
 		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
@@ -5220,32 +5113,32 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
 	 * If the frontend has already requested this struct se_cmd to
 	 * be stopped, we can safely ignore this struct se_cmd.
 	 */
-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	if (atomic_read(&cmd->t_task->t_transport_stop)) {
-		atomic_set(&cmd->t_task->transport_lun_stop, 0);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	if (atomic_read(&cmd->t_task.t_transport_stop)) {
+		atomic_set(&cmd->t_task.transport_lun_stop, 0);
 		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
 			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		transport_cmd_check_stop(cmd, 1, 0);
 		return -EPERM;
 	}
-	atomic_set(&cmd->t_task->transport_lun_fe_stop, 1);
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	atomic_set(&cmd->t_task.transport_lun_fe_stop, 1);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);

-	wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq);
+	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

 	ret = transport_stop_tasks_for_cmd(cmd);

 	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
-			" %d\n", cmd, cmd->t_task->t_task_cdbs, ret);
+			" %d\n", cmd, cmd->t_task.t_task_cdbs, ret);
 	if (!ret) {
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
-		wait_for_completion(&cmd->t_task->transport_lun_stop_comp);
+		wait_for_completion(&cmd->t_task.transport_lun_stop_comp);
 		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
 	}
-	transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj);
+	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);

 	return 0;
 }
@@ -5266,31 +5159,24 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 	 * Initiator Port.
 	 */
 	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-	while (!list_empty_careful(&lun->lun_cmd_list)) {
-		cmd = list_entry(lun->lun_cmd_list.next,
-			struct se_cmd, se_lun_list);
-		list_del(&cmd->se_lun_list);
-
-		if (!(cmd->t_task)) {
-			printk(KERN_ERR "ITT: 0x%08x, cmd->t_task = NULL"
-				"[i,t]_state: %u/%u\n",
-				cmd->se_tfo->get_task_tag(cmd),
-				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-			BUG();
-		}
-		atomic_set(&cmd->t_task->transport_lun_active, 0);
+	while (!list_empty(&lun->lun_cmd_list)) {
+		cmd = list_first_entry(&lun->lun_cmd_list,
+		       struct se_cmd, se_lun_node);
+		list_del(&cmd->se_lun_node);
+
+		atomic_set(&cmd->t_task.transport_lun_active, 0);
 		/*
 		 * This will notify iscsi_target_transport.c:
 		 * transport_cmd_check_stop() that a LUN shutdown is in
 		 * progress for the iscsi_cmd_t.
 		 */
-		spin_lock(&cmd->t_task->t_state_lock);
-		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task->transport"
+		spin_lock(&cmd->t_task.t_state_lock);
+		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport"
 			"_lun_stop for ITT: 0x%08x\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
-		atomic_set(&cmd->t_task->transport_lun_stop, 1);
-		spin_unlock(&cmd->t_task->t_state_lock);
+		atomic_set(&cmd->t_task.transport_lun_stop, 1);
+		spin_unlock(&cmd->t_task.t_state_lock);

 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

@@ -5318,14 +5204,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));

-		spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags);
-		if (!(atomic_read(&cmd->t_task->transport_dev_active))) {
-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
+		if (!(atomic_read(&cmd->t_task.transport_dev_active))) {
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
 			goto check_cond;
 		}
-		atomic_set(&cmd->t_task->transport_dev_active, 0);
+		atomic_set(&cmd->t_task.transport_dev_active, 0);
 		transport_all_task_dev_remove_state(cmd);
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);

 		transport_free_dev_tasks(cmd);
 		/*
@@ -5342,24 +5228,24 @@ check_cond:
 		 * be released, notify the waiting thread now that LU has
 		 * finished accessing it.
 		 */
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags);
-		if (atomic_read(&cmd->t_task->transport_lun_fe_stop)) {
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags);
+		if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) {
 			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
 				" struct se_cmd: %p ITT: 0x%08x\n",
 				lun->unpacked_lun,
 				cmd, cmd->se_tfo->get_task_tag(cmd));

-			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task.t_state_lock,
 					cmd_flags);
 			transport_cmd_check_stop(cmd, 1, 0);
-			complete(&cmd->t_task->transport_lun_fe_stop_comp);
+			complete(&cmd->t_task.transport_lun_fe_stop_comp);
 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 			continue;
 		}
 		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags);
 		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 	}
 	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5379,7 +5265,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
 {
 	struct task_struct *kt;

-	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+	kt = kthread_run(transport_clear_lun_thread, lun,
 			"tcm_cl_%u", lun->unpacked_lun);
 	if (IS_ERR(kt)) {
 		printk(KERN_ERR "Unable to start clear_lun thread\n");
@@ -5405,15 +5291,15 @@ static void transport_generic_wait_for_tasks(
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
 		return;

-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	/*
 	 * If we are already stopped due to an external event (ie: LUN shutdown)
 	 * sleep until the connection can have the passed struct se_cmd back.
-	 * The cmd->t_task->transport_lun_stopped_sem will be upped by
+	 * The cmd->t_task.transport_lun_stopped_sem will be upped by
 	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
 	 * has completed its operation on the struct se_cmd.
 	 */
-	if (atomic_read(&cmd->t_task->transport_lun_stop)) {
+	if (atomic_read(&cmd->t_task.transport_lun_stop)) {

 		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
@@ -5426,10 +5312,10 @@ static void transport_generic_wait_for_tasks(
 		 * We go ahead and up transport_lun_stop_comp just to be sure
 		 * here.
 		 */
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
-		complete(&cmd->t_task->transport_lun_stop_comp);
-		wait_for_completion(&cmd->t_task->transport_lun_fe_stop_comp);
-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
+		complete(&cmd->t_task.transport_lun_stop_comp);
+		wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);

 		transport_all_task_dev_remove_state(cmd);
 		/*
@@ -5442,13 +5328,13 @@ static void transport_generic_wait_for_tasks(
 			"stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));

-		atomic_set(&cmd->t_task->transport_lun_stop, 0);
+		atomic_set(&cmd->t_task.transport_lun_stop, 0);
 	}
-	if (!atomic_read(&cmd->t_task->t_transport_active) ||
-	     atomic_read(&cmd->t_task->t_transport_aborted))
+	if (!atomic_read(&cmd->t_task.t_transport_active) ||
+	     atomic_read(&cmd->t_task.t_transport_aborted))
 		goto remove;

-	atomic_set(&cmd->t_task->t_transport_stop, 1);
+	atomic_set(&cmd->t_task.t_transport_stop, 1);

 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
 		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
@@ -5456,21 +5342,21 @@ static void transport_generic_wait_for_tasks(
 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
 		cmd->deferred_t_state);

-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);

-	wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq);
+	wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);

-	wait_for_completion(&cmd->t_task->t_transport_stop_comp);
+	wait_for_completion(&cmd->t_task.t_transport_stop_comp);

-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-	atomic_set(&cmd->t_task->t_transport_active, 0);
-	atomic_set(&cmd->t_task->t_transport_stop, 0);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+	atomic_set(&cmd->t_task.t_transport_active, 0);
+	atomic_set(&cmd->t_task.t_transport_stop, 0);

 	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
-		"&cmd->t_task->t_transport_stop_comp) for ITT: 0x%08x\n",
+		"&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
 remove:
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 	if (!remove_cmd)
 		return;

@@ -5509,13 +5395,13 @@ int transport_send_check_condition_and_sense(
 	int offset;
 	u8 asc = 0, ascq = 0;

-	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);
 		return 0;
 	}
 	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
-	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);

 	if (!reason && from_transport)
 		goto after_reason;
@@ -5674,14 +5560,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
 	int ret = 0;

-	if (atomic_read(&cmd->t_task->t_transport_aborted) != 0) {
+	if (atomic_read(&cmd->t_task.t_transport_aborted) != 0) {
 		if (!(send_status) ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
 #if 0
 		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
-			cmd->t_task->t_task_cdb[0],
+			cmd->t_task.t_task_cdb[0],
 			cmd->se_tfo->get_task_tag(cmd));
 #endif
 		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
@@ -5702,7 +5588,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	 */
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			atomic_inc(&cmd->t_task->t_transport_aborted);
+			atomic_inc(&cmd->t_task.t_transport_aborted);
 			smp_mb__after_atomic_inc();
 			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 			transport_new_cmd_failure(cmd);
@@ -5712,7 +5598,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
 	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
-		" ITT: 0x%08x\n", cmd->t_task->t_task_cdb[0],
+		" ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0],
 		cmd->se_tfo->get_task_tag(cmd));
 #endif
 	cmd->se_tfo->queue_status(cmd);
@@ -5725,7 +5611,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
 {
 	struct se_cmd *ref_cmd;
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_tmr_req *tmr = cmd->se_tmr_req;
 	int ret;

@@ -5788,9 +5674,7 @@ transport_get_task_from_state_list(struct se_device *dev)
 static void transport_processing_shutdown(struct se_device *dev)
 {
 	struct se_cmd *cmd;
-	struct se_queue_req *qr;
 	struct se_task *task;
-	u8 state;
 	unsigned long flags;
 	/*
 	 * Empty the struct se_device's struct se_task state list.
@@ -5803,15 +5687,9 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 		cmd = task->task_se_cmd;

-		if (!cmd->t_task) {
-			printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:"
-				" %p ITT: 0x%08x\n", task, cmd,
-				cmd->se_tfo->get_task_tag(cmd));
-			continue;
-		}
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

-		spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+		spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);

 		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
 			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
@@ -5819,22 +5697,22 @@ static void transport_processing_shutdown(struct se_device *dev)
 			cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
 			cmd->t_state, cmd->deferred_t_state,
-			cmd->t_task->t_task_cdb[0]);
+			cmd->t_task.t_task_cdb[0]);
 		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			cmd->t_task->t_task_cdbs,
-			atomic_read(&cmd->t_task->t_task_cdbs_left),
-			atomic_read(&cmd->t_task->t_task_cdbs_sent),
-			atomic_read(&cmd->t_task->t_transport_active),
-			atomic_read(&cmd->t_task->t_transport_stop),
-			atomic_read(&cmd->t_task->t_transport_sent));
+			cmd->t_task.t_task_cdbs,
+			atomic_read(&cmd->t_task.t_task_cdbs_left),
+			atomic_read(&cmd->t_task.t_task_cdbs_sent),
+			atomic_read(&cmd->t_task.t_transport_active),
+			atomic_read(&cmd->t_task.t_transport_stop),
+			atomic_read(&cmd->t_task.t_transport_sent));

 		if (atomic_read(&task->task_active)) {
 			atomic_set(&task->task_stop, 1);
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);

 			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
 				" %p\n", task, dev);
@@ -5842,8 +5720,8 @@ static void transport_processing_shutdown(struct se_device *dev)
 			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
 				task, dev);

-			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
-			atomic_dec(&cmd->t_task->t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
+			atomic_dec(&cmd->t_task.t_task_cdbs_left);

 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
@@ -5853,39 +5731,39 @@ static void transport_processing_shutdown(struct se_device *dev)
 		}
 		__transport_stop_task_timer(task, &flags);

-		if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) {
+		if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) {
 			spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);

 			DEBUG_DO("Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
-				atomic_read(&cmd->t_task->t_task_cdbs_ex_left));
+				atomic_read(&cmd->t_task.t_task_cdbs_ex_left));

 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}

-		if (atomic_read(&cmd->t_task->t_transport_active)) {
+		if (atomic_read(&cmd->t_task.t_transport_active)) {
 			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
 				" %p\n", task, dev);

-			if (atomic_read(&cmd->t_task->t_fe_count)) {
+			if (atomic_read(&cmd->t_task.t_fe_count)) {
 				spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);
 				transport_send_check_condition_and_sense(
 					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
 					0);
 				transport_remove_cmd_from_queue(cmd,
-					&cmd->se_lun->lun_se_dev->dev_queue_obj);
+					&cmd->se_dev->dev_queue_obj);

 				transport_lun_remove_cmd(cmd);
 				transport_cmd_check_stop(cmd, 1, 0);
 			} else {
 				spin_unlock_irqrestore(
-					&cmd->t_task->t_state_lock, flags);
+					&cmd->t_task.t_state_lock, flags);

 				transport_remove_cmd_from_queue(cmd,
-					&cmd->se_lun->lun_se_dev->dev_queue_obj);
+					&cmd->se_dev->dev_queue_obj);

 				transport_lun_remove_cmd(cmd);

@@ -5899,22 +5777,22 @@ static void transport_processing_shutdown(struct se_device *dev)
 		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
 			task, dev);

-		if (atomic_read(&cmd->t_task->t_fe_count)) {
+		if (atomic_read(&cmd->t_task.t_fe_count)) {
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
 			transport_remove_cmd_from_queue(cmd,
-				&cmd->se_lun->lun_se_dev->dev_queue_obj);
+				&cmd->se_dev->dev_queue_obj);

 			transport_lun_remove_cmd(cmd);
 			transport_cmd_check_stop(cmd, 1, 0);
 		} else {
 			spin_unlock_irqrestore(
-				&cmd->t_task->t_state_lock, flags);
+				&cmd->t_task.t_state_lock, flags);

 			transport_remove_cmd_from_queue(cmd,
-				&cmd->se_lun->lun_se_dev->dev_queue_obj);
+				&cmd->se_dev->dev_queue_obj);

 			transport_lun_remove_cmd(cmd);

 			if (transport_cmd_check_stop(cmd, 1, 0))
@@ -5927,15 +5805,12 @@ static void transport_processing_shutdown(struct se_device *dev)
 	/*
 	 * Empty the struct se_device's struct se_cmd list.
 	 */
-	while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) {
-		cmd = qr->cmd;
-		state = qr->state;
-		kfree(qr);
+	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {

 		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
-			cmd, state);
+			cmd, cmd->t_state);

-		if (atomic_read(&cmd->t_task->t_fe_count)) {
+		if (atomic_read(&cmd->t_task.t_fe_count)) {
 			transport_send_check_condition_and_sense(cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);

@@ -5955,10 +5830,9 @@ static void transport_processing_shutdown(struct se_device *dev)
  */
 static int transport_processing_thread(void *param)
 {
-	int ret, t_state;
+	int ret;
 	struct se_cmd *cmd;
 	struct se_device *dev = (struct se_device *) param;
-	struct se_queue_req *qr;

 	set_user_nice(current, -20);

@@ -5980,15 +5854,11 @@ static int transport_processing_thread(void *param)
 get_cmd:
 		__transport_execute_tasks(dev);

-		qr = transport_get_qr_from_queue(&dev->dev_queue_obj);
-		if (!(qr))
+		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+		if (!cmd)
 			continue;

-		cmd = qr->cmd;
-		t_state = qr->state;
-		kfree(qr);
-
-		switch (t_state) {
+		switch (cmd->t_state) {
 		case TRANSPORT_NEW_CMD_MAP:
 			if (!(cmd->se_tfo->new_cmd_map)) {
 				printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
@@ -6039,7 +5909,7 @@ get_cmd:
 		default:
 			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
 				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
-				" %u\n", t_state, cmd->deferred_t_state,
+				" %u\n", cmd->t_state, cmd->deferred_t_state,
 				cmd->se_tfo->get_task_tag(cmd),
 				cmd->se_tfo->get_cmd_state(cmd),
 				cmd->se_lun->unpacked_lun);
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 16f41d1..3b8b02c 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
 	u8 *asc,
 	u8 *ascq)
 {
-	struct se_device *dev = cmd->se_lun->lun_se_dev;
+	struct se_device *dev = cmd->se_dev;
 	struct se_dev_entry *deve;
 	struct se_session *sess = cmd->se_sess;
 	struct se_node_acl *nacl;
@@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition(
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
 		(dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ?
"Reporting" : "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, - cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq); + cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq); } int core_scsi3_ua_clear_for_request_sense( diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 19b2b99..6d9553b 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -72,16 +72,16 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) caller, cmd, cmd->cdb); printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - task = se_cmd->t_task; + task = &se_cmd->t_task; printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", caller, cmd, task, task->t_tasks_se_num, task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); - if (task->t_mem_list) - list_for_each_entry(mem, task->t_mem_list, se_list) - printk(KERN_INFO "%s: cmd %p mem %p page %p " - "len 0x%x off 0x%x\n", - caller, cmd, mem, - mem->se_page, mem->se_len, mem->se_off); + + list_for_each_entry(mem, &task->t_mem_list, se_list) + printk(KERN_INFO "%s: cmd %p mem %p page %p " + "len 0x%x off 0x%x\n", + caller, cmd, mem, + mem->se_page, mem->se_len, mem->se_off); sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); @@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd) * TCM/LIO target */ transport_do_task_sg_chain(se_cmd); - cmd->sg = se_cmd->t_task->t_tasks_sg_chained; + cmd->sg = se_cmd->t_task.t_tasks_sg_chained; cmd->sg_cnt = - se_cmd->t_task->t_tasks_sg_chained_no; + se_cmd->t_task.t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, cmd->sg, cmd->sg_cnt)) @@ -438,7 +438,7 @@ static void ft_send_tm(struct ft_cmd *cmd) switch (fcp->fc_tm_flags) { case FCP_TMF_LUN_RESET: cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) { + if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) { /* * Make sure to clean up newly allocated TMR request * since "unable to handle TMR request because failed @@ -637,7 +637,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - ret = transport_get_lun_for_cmd(&cmd->se_cmd, cmd->lun); + ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); if (ret < 0) { ft_dump_cmd(cmd, __func__); transport_send_check_condition_and_sense(&cmd->se_cmd, @@ -650,13 +650,13 @@ static void ft_send_cmd(struct ft_cmd *cmd) FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); ft_dump_cmd(cmd, __func__); - if (ret == -1) { + if (ret == -ENOMEM) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_generic_free_cmd(se_cmd, 0, 1, 0); return; } - if (ret == -2) { + if (ret == -EINVAL) { if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) ft_queue_status(se_cmd); else diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8c5067c..58e4745 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg( transport_init_queue_obj(&tpg->qobj); ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, - (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL); + tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 47efcfb..f18af6e 100644 
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -90,15 +90,14 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 	lport = ep->lp;
 	cmd->seq = lport->tt.seq_start_next(cmd->seq);

-	task = se_cmd->t_task;
-	BUG_ON(!task);
+	task = &se_cmd->t_task;
 	remaining = se_cmd->data_length;

 	/*
 	 * Setup to use first mem list entry if any.
 	 */
 	if (task->t_tasks_se_num) {
-		mem = list_first_entry(task->t_mem_list,
+		mem = list_first_entry(&task->t_mem_list,
 			 struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
@@ -236,8 +235,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	u32 f_ctl;
 	void *buf;

-	task = se_cmd->t_task;
-	BUG_ON(!task);
+	task = &se_cmd->t_task;

 	fh = fc_frame_header_get(fp);
 	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
@@ -315,7 +313,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	 * Setup to use first mem list entry if any.
 	 */
 	if (task->t_tasks_se_num) {
-		mem = list_first_entry(task->t_mem_list,
+		mem = list_first_entry(&task->t_mem_list,
 			 struct se_mem, se_list);
 		mem_len = mem->se_len;
 		mem_off = mem->se_off;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index b0b83ed..94c838d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -123,7 +123,7 @@ enum se_cmd_flags_table {
 	SCF_SENT_DELAYED_TAS = 0x00020000,
 	SCF_ALUA_NON_OPTIMIZED = 0x00040000,
 	SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
-	SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000,
+	SCF_UNUSED = 0x00100000,
 	SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
 	SCF_EMULATE_SYNC_CACHE = 0x00800000,
@@ -452,9 +452,9 @@ struct se_transport_task {
 	 * and other HW target mode fabric modules.
 	 */
 	struct scatterlist	*t_task_pt_sgl;
-	struct list_head	*t_mem_list;
+	struct list_head	t_mem_list;
 	/* Used for BIDI READ */
-	struct list_head	*t_mem_bidi_list;
+	struct list_head	t_mem_bidi_list;
 	struct list_head	t_task_list;
 } ____cacheline_aligned;

@@ -523,9 +523,9 @@ struct se_cmd {
 	atomic_t		transport_sent;
 	/* Used for sense data */
 	void			*sense_buffer;
-	struct list_head	se_delayed_list;
-	struct list_head	se_ordered_list;
-	struct list_head	se_lun_list;
+	struct list_head	se_delayed_node;
+	struct list_head	se_ordered_node;
+	struct list_head	se_lun_node;
 	struct se_device	*se_dev;
 	struct se_dev_entry	*se_deve;
 	struct se_device	*se_obj_ptr;
@@ -534,9 +534,8 @@ struct se_cmd {
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session	*se_sess;
 	struct se_tmr_req	*se_tmr_req;
-	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
-	struct se_transport_task *t_task;
-	struct se_transport_task t_task_backstore;
+	struct se_transport_task t_task;
+	struct list_head	se_queue_node;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index d9745bf..96586cc 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -1,8 +1,8 @@
 #ifndef TARGET_CORE_DEVICE_H
 #define TARGET_CORE_DEVICE_H

-extern int transport_get_lun_for_cmd(struct se_cmd *, u32);
-extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
+extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
 extern struct se_dev_entry *core_get_se_deve_from_rtpi(
 		struct se_node_acl *, u16);
 extern int core_free_device_list_for_node(struct se_node_acl *,
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 1dd4d18..acd5914 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -159,7 +159,6 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
 					struct se_subsystem_dev *, u32,
 					void *, struct se_dev_limits *,
 					const char *, const char *);
-extern void transport_device_setup_cmd(struct se_cmd *);
 extern void transport_init_se_cmd(struct se_cmd *,
 					struct target_core_fabric_ops *,
 					struct se_session *, u32, int, int,
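
For reviewers, a minimal self-contained sketch of the core conversion this series applies everywhere: a kmalloc'd "struct list_head *" becomes a list_head embedded in its owner, so initialization cannot fail and NULL checks become list_empty() tests. The demo_* names below are illustrative only, standing in for se_transport_task/se_mem; this is not code from the tree:

#include <linux/list.h>

struct demo_mem {
	struct list_head se_list;	/* entry linked into t_mem_list */
};

struct demo_transport_task {
	struct list_head t_mem_list;	/* embedded: always valid, never NULL */
};

static void demo_init(struct demo_transport_task *t)
{
	/* Replaces transport_init_se_mem_list(): no allocation, cannot fail. */
	INIT_LIST_HEAD(&t->t_mem_list);
}

static int demo_has_mem(struct demo_transport_task *t)
{
	/*
	 * The old "t_mem_list != NULL" test maps to "!list_empty(...)".
	 * Getting the polarity wrong here is exactly the
	 * list_empty() inversion bug class noted in the changelog.
	 */
	return !list_empty(&t->t_mem_list);
}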
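Likewise, the magic "-1/-2" returns replaced throughout become standard errnos, so fabric callers can branch on the failure reason instead of positional codes. A sketch of the caller-side idiom, using the transport_generic_allocate_tasks() prototype and PYX_* codes shown elsewhere in this patch; demo_new_cmd_map() itself is hypothetical:

#include <linux/errno.h>

static int demo_new_cmd_map(struct se_cmd *se_cmd, unsigned char *cdb)
{
	int ret = transport_generic_allocate_tasks(se_cmd, cdb);

	if (ret == -ENOMEM)	/* out of resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	if (ret == -EINVAL)	/* e.g. SAM_STAT_RESERVATION_CONFLICT */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	return 0;
}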
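Finally, the list_entry()-to-list_first_entry() substitutions are behavior-preserving: in include/linux/list.h, list_first_entry(head, type, member) is defined as list_entry((head)->next, type, member), so the two statements below are equivalent and the second merely states intent:

	se_mem = list_entry(mem_list->next, struct se_mem, se_list);
	se_mem = list_first_entry(mem_list, struct se_mem, se_list);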