2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #ifndef _ISCI_REQUEST_H_
57 #define _ISCI_REQUEST_H_
61 #include "scu_task_context.h"
/*
 * NOTE(review): this excerpt appears to be a non-contiguous sample of the
 * original header -- the leading integers on each line look like original
 * file line numbers fused into the text, and the enumerator lists of both
 * enums below are missing.  Members of isci_request_status (e.g.
 * "unallocated", "started", "aborting") are referenced by the inline
 * helpers further down this file; the kernel-doc line below says "struct"
 * but isci_request_status is an enum -- confirm against the full source.
 */
64 * struct isci_request_status - This enum defines the possible states of an I/O
69 enum isci_request_status {
85 enum sci_request_protocol {
90 }; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
/*
 * NOTE(review): interior lines of this struct are missing from the excerpt:
 * the closing braces of the nested pio/sgl structures, and the field
 * declarations that the "PIO Setup ending status" / "ending error" comments
 * below describe are not visible.  Do not assume the visible fields are the
 * complete layout -- verify against the full source.
 */
92 struct scic_sds_stp_request {
98 struct scic_sds_stp_pio_request {
100 * Total transfer for the entire PIO request recorded
101 * at request construction time.
103 * @todo Should we just decrement this value for each
104 * byte of data transmitted or received to eliminate
105 * the current_transfer_bytes field?
107 u32 total_transfer_bytes;
110 * Total number of bytes received/transmitted in data
111 * frames since the start of the IO request. At the
112 * end of the IO request this should equal the
113 * total_transfer_bytes.
115 u32 current_transfer_bytes;
118 * The number of bytes requested in the PIO
121 u32 pio_transfer_bytes;
124 * PIO Setup ending status value to tell us if we need
125 * to wait for another FIS or if the transfer is
126 * complete. On the receipt of a D2H FIS this will be
127 * the status field of that FIS.
132 * On receipt of a D2H FIS this will be the ending
133 * error field if the ending_status has the
134 * SATA_STATUS_ERR bit set.
138 struct scic_sds_request_pio_sgl {
147 * The number of bytes requested in the PIO setup
148 * before CDB data frame.
150 u32 device_preferred_cdb_length;
/*
 * NOTE(review): this struct is sampled with gaps.  The io_tag, scu_status,
 * sci_status and post_context field declarations that the comments below
 * describe are not visible, and the union/struct keywords that presumably
 * group the ssp {cmd,tmf}/{rsp,rsp_buf} and stp {req,cmd,rsp} members are
 * missing -- confirm the real member layout against the full source before
 * relying on it.
 */
155 struct scic_sds_request {
157 * This field contains the information for the base request state
160 struct sci_base_state_machine sm;
163 * This field simply points to the controller to which this IO request
166 struct scic_sds_controller *owning_controller;
169 * This field simply points to the remote device to which this IO
170 * request is associated.
172 struct scic_sds_remote_device *target_device;
175 * This field indicates the IO tag for this request. The IO tag is
176 * comprised of the task_index and a sequence count. The sequence count
177 * is utilized to help identify tasks from one life to another.
182 * This field specifies the protocol being utilized for this
185 enum sci_request_protocol protocol;
188 * This field indicates the completion status taken from the SCUs
189 * completion code. It indicates the completion result for the SCU
195 * This field indicates the completion status returned to the SCI user.
196 * It indicates the users view of the io request completion.
201 * This field contains the value to be utilized when posting
202 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
206 struct scu_task_context *tc;
208 /* could be larger with sg chaining */
209 #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
210 struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
213 * This field is a pointer to the stored rx frame data. It is used in
214 * STP internal requests and SMP response frames. If this field is
215 * non-NULL the saved frame must be released on IO request completion.
217 * @todo In the future do we want to keep a list of RX frame buffers?
219 u32 saved_rx_frame_index;
224 struct ssp_cmd_iu cmd;
225 struct ssp_task_iu tmf;
228 struct ssp_response_iu rsp;
229 u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
238 struct scic_sds_stp_request req;
239 struct host_to_dev_fis cmd;
240 struct dev_to_host_fis rsp;
245 static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
247 struct scic_sds_request *sci_req;
249 sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
/*
 * NOTE(review): sampled with gaps.  The flags word that the IREQ_* bit
 * numbers below index, the closing brace and member name of the
 * ttype_ptr_union (the accessor macros later in this file use a member
 * called "ttype_ptr"), and other interior lines are not visible --
 * verify against the full source.
 */
253 struct isci_request {
254 enum isci_request_status status;
255 enum task_type ttype;
256 unsigned short io_tag;
257 #define IREQ_COMPLETE_IN_TARGET 0
258 #define IREQ_TERMINATED 1
262 union ttype_ptr_union {
263 struct sas_task *io_task_ptr; /* When ttype==io_task */
264 struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
266 struct isci_host *isci_host;
267 /* For use in the requests_to_{complete|abort} lists: */
268 struct list_head completed_node;
269 /* For use in the reqs_in_process list: */
270 struct list_head dev_node;
271 spinlock_t state_lock;
272 dma_addr_t request_daddr;
273 dma_addr_t zero_scatter_daddr;
275 unsigned int num_sg_entries; /* returned by pci_alloc_sg */
277 /** Note: "io_request_completion" is completed in two different ways
278 * depending on whether this is a TMF or regular request.
279 * - TMF requests are completed in the thread that started them;
280 * - regular requests are completed in the request completion callback
282 * This difference in operation allows the aborter of a TMF request
283 * to be sure that once the TMF request completes, the I/O that the
284 * TMF was aborting is guaranteed to have completed.
286 struct completion *io_request_completion;
287 struct scic_sds_request sci;
290 static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
292 struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
/*
 * NOTE(review): the enumerators for the base states described by the
 * comments below (initial, constructed, started, completed, aborting,
 * final) are missing from this excerpt -- only the protocol-specific
 * SCI_REQ_* sub-states survived the sampling.  Verify the full list
 * against the original source.
 */
298 * enum sci_base_request_states - This enumeration depicts all the states for
299 * the common request state machine.
303 enum sci_base_request_states {
305 * Simply the initial state for the base request state machine.
310 * This state indicates that the request has been constructed.
311 * This state is entered from the INITIAL state.
316 * This state indicates that the request has been started. This state
317 * is entered from the CONSTRUCTED state.
321 SCI_REQ_STP_UDMA_WAIT_TC_COMP,
322 SCI_REQ_STP_UDMA_WAIT_D2H,
324 SCI_REQ_STP_NON_DATA_WAIT_H2D,
325 SCI_REQ_STP_NON_DATA_WAIT_D2H,
327 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
328 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
329 SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
332 * While in this state the IO request object is waiting for the TC
333 * completion notification for the H2D Register FIS
335 SCI_REQ_STP_PIO_WAIT_H2D,
338 * While in this state the IO request object is waiting for either a
339 * PIO Setup FIS or a D2H register FIS. The type of frame received is
340 * based on the result of the prior frame and line conditions.
342 SCI_REQ_STP_PIO_WAIT_FRAME,
345 * While in this state the IO request object is waiting for a DATA
346 * frame from the device.
348 SCI_REQ_STP_PIO_DATA_IN,
351 * While in this state the IO request object is waiting to transmit
352 * the next data frame to the device.
354 SCI_REQ_STP_PIO_DATA_OUT,
357 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
358 * task management request is waiting for the transmission of the
359 * initial frame (i.e. command, task, etc.).
361 SCI_REQ_TASK_WAIT_TC_COMP,
364 * This sub-state indicates that the started task management request
365 * is waiting for the reception of an unsolicited frame
366 * (i.e. response IU).
368 SCI_REQ_TASK_WAIT_TC_RESP,
371 * This sub-state indicates that the started SMP request is waiting
372 * for the reception of an unsolicited frame
373 * (i.e. response frame).
375 SCI_REQ_SMP_WAIT_RESP,
378 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
379 * request is waiting for the transmission of the initial frame
380 * (i.e. command, task, etc.).
382 SCI_REQ_SMP_WAIT_TC_COMP,
385 * This state indicates that the request has completed.
386 * This state is entered from the STARTED state. This state is entered
387 * from the ABORTING state.
392 * This state indicates that the request is in the process of being
393 * terminated/aborted.
394 * This state is entered from the CONSTRUCTED state.
395 * This state is entered from the STARTED state.
400 * Simply the final state for the base request state machine.
/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the os handle for this request object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)
/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.  Wrapped in do { } while (0) so the two
 * assignments expand as a single statement (safe in unbraced if/else).
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	do { \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	} while (0)
/*
 * Core (scic) request entry points, implemented in the corresponding .c.
 *
 * NOTE(review): this prototype list is sampled with gaps -- the return-type
 * lines for the event/frame handlers and the trailing parameter lines
 * (event code, frame index) are not visible in this excerpt.
 */
457 enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
458 enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
460 scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
463 scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
466 scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
467 extern enum sci_status
468 scic_sds_request_complete(struct scic_sds_request *sci_req);
469 extern enum sci_status
470 scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);
472 /* XXX open code in caller */
473 static inline dma_addr_t
474 scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
476 struct isci_request *ireq = sci_req_to_ireq(sci_req);
478 char *requested_addr = (char *)virt_addr;
479 char *base_addr = (char *)ireq;
481 BUG_ON(requested_addr < base_addr);
482 BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
484 return ireq->request_daddr + (requested_addr - base_addr);
488 * This function gets the status of the request object.
489 * @request: This parameter points to the isci_request object
491 * status of the object as a isci_request_status enum.
493 static inline enum isci_request_status
494 isci_request_get_state(struct isci_request *isci_request)
496 BUG_ON(isci_request == NULL);
498 /*probably a bad sign... */
499 if (isci_request->status == unallocated)
500 dev_warn(&isci_request->isci_host->pdev->dev,
501 "%s: isci_request->status == unallocated\n",
504 return isci_request->status;
509 * isci_request_change_state() - This function sets the status of the request
511 * @request: This parameter points to the isci_request object
512 * @status: This Parameter is the new status of the object
515 static inline enum isci_request_status
516 isci_request_change_state(struct isci_request *isci_request,
517 enum isci_request_status status)
519 enum isci_request_status old_state;
522 dev_dbg(&isci_request->isci_host->pdev->dev,
523 "%s: isci_request = %p, state = 0x%x\n",
528 BUG_ON(isci_request == NULL);
530 spin_lock_irqsave(&isci_request->state_lock, flags);
531 old_state = isci_request->status;
532 isci_request->status = status;
533 spin_unlock_irqrestore(&isci_request->state_lock, flags);
539 * isci_request_change_started_to_newstate() - This function sets the status of
540 * the request object.
541 * @request: This parameter points to the isci_request object
542 * @status: This Parameter is the new status of the object
544 * state previous to any change.
546 static inline enum isci_request_status
547 isci_request_change_started_to_newstate(struct isci_request *isci_request,
548 struct completion *completion_ptr,
549 enum isci_request_status newstate)
551 enum isci_request_status old_state;
554 spin_lock_irqsave(&isci_request->state_lock, flags);
556 old_state = isci_request->status;
558 if (old_state == started || old_state == aborting) {
559 BUG_ON(isci_request->io_request_completion != NULL);
561 isci_request->io_request_completion = completion_ptr;
562 isci_request->status = newstate;
565 spin_unlock_irqrestore(&isci_request->state_lock, flags);
567 dev_dbg(&isci_request->isci_host->pdev->dev,
568 "%s: isci_request = %p, old_state = 0x%x\n",
577 * isci_request_change_started_to_aborted() - This function sets the status of
578 * the request object.
579 * @request: This parameter points to the isci_request object
580 * @completion_ptr: This parameter is saved as the kernel completion structure
581 * signalled when the old request completes.
583 * state previous to any change.
585 static inline enum isci_request_status
586 isci_request_change_started_to_aborted(struct isci_request *isci_request,
587 struct completion *completion_ptr)
589 return isci_request_change_started_to_newstate(isci_request,
594 * isci_request_free() - This function frees the request object.
595 * @isci_host: This parameter specifies the ISCI host object
596 * @isci_request: This parameter points to the isci_request object
599 static inline void isci_request_free(struct isci_host *isci_host,
600 struct isci_request *isci_request)
605 /* release the dma memory if we fail. */
606 dma_pool_free(isci_host->dma_pool,
608 isci_request->request_daddr);
/* Accessors for the request's payload pointer union; which member is
 * valid depends on the request's ttype (io_task vs tmf_task). */
#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
/*
 * OS-level (isci) request entry points plus remaining core task-request
 * constructors, implemented in the corresponding .c files.
 *
 * NOTE(review): sampled with gaps -- the trailing gfp parameter line of
 * isci_request_alloc_tmf, the return-type lines of the
 * scic_task_request_construct* / set_ncq_tag prototypes, and an
 * intermediate parameter line of scic_task_request_construct are not
 * visible in this excerpt.
 */
615 struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
616 struct isci_tmf *isci_tmf,
618 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
619 struct sas_task *task, u16 tag, gfp_t gfp_flags);
620 void isci_terminate_pending_requests(struct isci_host *ihost,
621 struct isci_remote_device *idev);
623 scic_task_request_construct(struct scic_sds_controller *scic,
624 struct scic_sds_remote_device *sci_dev,
626 struct scic_sds_request *sci_req);
628 scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
630 scic_task_request_construct_sata(struct scic_sds_request *sci_req);
632 scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
633 void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
635 static inline int isci_task_is_ncq_recovery(struct sas_task *task)
637 return (sas_protocol_ata(task->task_proto) &&
638 task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
639 task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
643 #endif /* !defined(_ISCI_REQUEST_H_) */