handler(sm);
}
-static bool scic_sds_controller_completion_queue_has_entries(
- struct isci_host *ihost)
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
{
u32 get_value = ihost->completion_queue_get;
u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
return true;
return false;
}
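/* Each completion queue entry carries a cycle bit that the hardware
* toggles on every wrap of the ring; an entry is new only while its
* cycle bit agrees with the cycle state cached in
* ihost->completion_queue_get. */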
-static bool scic_sds_controller_isr(struct isci_host *ihost)
+static bool sci_controller_isr(struct isci_host *ihost)
{
- if (scic_sds_controller_completion_queue_has_entries(ihost)) {
+ if (sci_controller_completion_queue_has_entries(ihost)) {
return true;
} else {
/*
{
struct isci_host *ihost = data;
- if (scic_sds_controller_isr(ihost))
+ if (sci_controller_isr(ihost))
tasklet_schedule(&ihost->completion_tasklet);
return IRQ_HANDLED;
}
-static bool scic_sds_controller_error_isr(struct isci_host *ihost)
+static bool sci_controller_error_isr(struct isci_host *ihost)
{
u32 interrupt_status;
return false;
}
-static void scic_sds_controller_task_completion(struct isci_host *ihost,
- u32 completion_entry)
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
- u32 index = SCU_GET_COMPLETION_INDEX(completion_entry);
+ u32 index = SCU_GET_COMPLETION_INDEX(ent);
struct isci_request *ireq = ihost->reqs[index];
/* Make sure that we really want to process this IO request */
if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
- /* Yep this is a valid io request pass it along to the io request handler */
- scic_sds_io_request_tc_completion(ireq, completion_entry);
+ /* Yep, this is a valid io request; pass it along to the
+ * io request handler
+ */
+ sci_io_request_tc_completion(ireq, ent);
}
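/* A sequence mismatch above means the tag was freed and reused after
* this completion was posted, so the stale entry is ignored. */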
-static void scic_sds_controller_sdma_completion(struct isci_host *ihost,
- u32 completion_entry)
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
u32 index;
struct isci_request *ireq;
struct isci_remote_device *idev;
- index = SCU_GET_COMPLETION_INDEX(completion_entry);
+ index = SCU_GET_COMPLETION_INDEX(ent);
- switch (scu_get_command_request_type(completion_entry)) {
+ switch (scu_get_command_request_type(ent)) {
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
ireq = ihost->reqs[index];
dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
- __func__, completion_entry, ireq);
+ __func__, ent, ireq);
/* @todo For a post TC operation we need to fail the IO
* request
*/
break;
case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
idev = ihost->device_table[index];
dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
- __func__, completion_entry, idev);
+ __func__, ent, idev);
/* @todo For a port RNC operation we need to fail the
* device
*/
break;
default:
dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
- __func__, completion_entry);
+ __func__, ent);
break;
}
}
-static void scic_sds_controller_unsolicited_frame(struct isci_host *ihost,
- u32 completion_entry)
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
u32 index;
u32 frame_index;
enum sci_status result = SCI_FAILURE;
- frame_index = SCU_GET_FRAME_INDEX(completion_entry);
+ frame_index = SCU_GET_FRAME_INDEX(ent);
frame_header = ihost->uf_control.buffers.array[frame_index].header;
ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
- if (SCU_GET_FRAME_ERROR(completion_entry)) {
+ if (SCU_GET_FRAME_ERROR(ent)) {
/*
* @todo If the IAF frame or SIGNATURE FIS frame has an error, will
* this cause a problem? We expect the phy initialization will
* fail if there is an error in the frame.
*/
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return;
}
if (frame_header->is_address_frame) {
- index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
iphy = &ihost->phys[index];
- result = scic_sds_phy_frame_handler(iphy, frame_index);
+ result = sci_phy_frame_handler(iphy, frame_index);
} else {
- index = SCU_GET_COMPLETION_INDEX(completion_entry);
+ index = SCU_GET_COMPLETION_INDEX(ent);
if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
/*
* This is a signature fis or a frame from a direct attached SATA
* device that has not yet been created. In either case, forward
* the frame to the PE and let it take care of the frame data.
*/
- index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
iphy = &ihost->phys[index];
- result = scic_sds_phy_frame_handler(iphy, frame_index);
+ result = sci_phy_frame_handler(iphy, frame_index);
} else {
if (index < ihost->remote_node_entries)
idev = ihost->device_table[index];
else
idev = NULL;
if (idev != NULL)
- result = scic_sds_remote_device_frame_handler(idev, frame_index);
+ result = sci_remote_device_frame_handler(idev, frame_index);
else
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
}
}
}
}
-static void scic_sds_controller_event_completion(struct isci_host *ihost,
- u32 completion_entry)
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
struct isci_remote_device *idev;
struct isci_request *ireq;
struct isci_phy *iphy;
u32 index;
- index = SCU_GET_COMPLETION_INDEX(completion_entry);
+ index = SCU_GET_COMPLETION_INDEX(ent);
- switch (scu_get_event_type(completion_entry)) {
+ switch (scu_get_event_type(ent)) {
case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
/* @todo The driver did something wrong and we need to fix the condition. */
dev_err(&ihost->pdev->dev,
"0x%x\n",
__func__,
ihost,
- completion_entry);
+ ent);
break;
case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
"event 0x%x\n",
__func__,
ihost,
- completion_entry);
+ ent);
break;
case SCU_EVENT_TYPE_TRANSPORT_ERROR:
ireq = ihost->reqs[index];
- scic_sds_io_request_event_handler(ireq, completion_entry);
+ sci_io_request_event_handler(ireq, ent);
break;
case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
- switch (scu_get_event_specifier(completion_entry)) {
+ switch (scu_get_event_specifier(ent)) {
case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
ireq = ihost->reqs[index];
if (ireq != NULL)
- scic_sds_io_request_event_handler(ireq, completion_entry);
+ sci_io_request_event_handler(ireq, ent);
else
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"that doesnt exist.\n",
__func__,
ihost,
- completion_entry);
+ ent);
break;
case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
idev = ihost->device_table[index];
if (idev != NULL)
- scic_sds_remote_device_event_handler(idev, completion_entry);
+ sci_remote_device_event_handler(idev, ent);
else
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"that doesnt exist.\n",
__func__,
ihost,
- completion_entry);
+ ent);
break;
}
/*
* direct error counter event to the phy object since that is where
* we get the event notification. This is a type 4 event.
*/
case SCU_EVENT_TYPE_OSSP_EVENT:
- index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
iphy = &ihost->phys[index];
- scic_sds_phy_event_handler(iphy, completion_entry);
+ sci_phy_event_handler(iphy, ent);
break;
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
if (index < ihost->remote_node_entries) {
idev = ihost->device_table[index];
if (idev != NULL)
- scic_sds_remote_device_event_handler(idev, completion_entry);
+ sci_remote_device_event_handler(idev, ent);
} else
dev_err(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received event 0x%x "
"exist.\n",
__func__,
ihost,
- completion_entry,
+ ent,
index);
break;
default:
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller received unknown event code %x\n",
__func__,
- completion_entry);
+ ent);
break;
}
}
-static void scic_sds_controller_process_completions(struct isci_host *ihost)
+static void sci_controller_process_completions(struct isci_host *ihost)
{
u32 completion_count = 0;
- u32 completion_entry;
+ u32 ent;
u32 get_index;
u32 get_cycle;
u32 event_get;
) {
completion_count++;
- completion_entry = ihost->completion_queue[get_index];
+ ent = ihost->completion_queue[get_index];
/* increment the get pointer and check for rollover to toggle the cycle bit */
get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
dev_dbg(&ihost->pdev->dev,
"%s: completion queue entry:0x%08x\n",
__func__,
- completion_entry);
+ ent);
- switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
+ switch (SCU_GET_COMPLETION_TYPE(ent)) {
case SCU_COMPLETION_TYPE_TASK:
- scic_sds_controller_task_completion(ihost, completion_entry);
+ sci_controller_task_completion(ihost, ent);
break;
case SCU_COMPLETION_TYPE_SDMA:
- scic_sds_controller_sdma_completion(ihost, completion_entry);
+ sci_controller_sdma_completion(ihost, ent);
break;
case SCU_COMPLETION_TYPE_UFI:
- scic_sds_controller_unsolicited_frame(ihost, completion_entry);
+ sci_controller_unsolicited_frame(ihost, ent);
break;
case SCU_COMPLETION_TYPE_EVENT:
(SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
- scic_sds_controller_event_completion(ihost, completion_entry);
+ sci_controller_event_completion(ihost, ent);
break;
}
default:
"%s: SCIC Controller received unknown "
"completion type %x\n",
__func__,
- completion_entry);
+ ent);
break;
}
}
}
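/* Once the queue is drained, the updated cycle/index state is folded
* back into ihost->completion_queue_get and written to the SMU get
* register so the hardware can reuse the consumed entries. */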
-static void scic_sds_controller_error_handler(struct isci_host *ihost)
+static void sci_controller_error_handler(struct isci_host *ihost)
{
u32 interrupt_status;
interrupt_status =
readl(&ihost->smu_registers->interrupt_status);
if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
- scic_sds_controller_completion_queue_has_entries(ihost)) {
+ sci_controller_completion_queue_has_entries(ihost)) {
- scic_sds_controller_process_completions(ihost);
+ sci_controller_process_completions(ihost);
writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
} else {
dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
interrupt_status);
irqreturn_t ret = IRQ_NONE;
struct isci_host *ihost = data;
- if (scic_sds_controller_isr(ihost)) {
+ if (sci_controller_isr(ihost)) {
writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
tasklet_schedule(&ihost->completion_tasklet);
ret = IRQ_HANDLED;
- } else if (scic_sds_controller_error_isr(ihost)) {
+ } else if (sci_controller_error_isr(ihost)) {
spin_lock(&ihost->scic_lock);
- scic_sds_controller_error_handler(ihost);
+ sci_controller_error_handler(ihost);
spin_unlock(&ihost->scic_lock);
ret = IRQ_HANDLED;
}
{
struct isci_host *ihost = data;
- if (scic_sds_controller_error_isr(ihost))
- scic_sds_controller_error_handler(ihost);
+ if (sci_controller_error_isr(ihost))
+ sci_controller_error_handler(ihost);
return IRQ_HANDLED;
}
}
/**
- * scic_controller_get_suggested_start_timeout() - This method returns the
- * suggested scic_controller_start() timeout amount. The user is free to
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ * suggested sci_controller_start() timeout amount. The user is free to
* use any timeout value, but this method provides the suggested minimum
* start timeout value. The returned value is based upon empirical
* information determined as a result of interoperability testing.
* This method returns the number of milliseconds for the suggested start
* operation timeout.
*/
-static u32 scic_controller_get_suggested_start_timeout(struct isci_host *ihost)
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
/* Validate the user supplied parameters. */
if (!ihost)
+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}
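/* Illustrative arithmetic, assuming the driver's constants
* SCIC_SDS_SIGNATURE_FIS_TIMEOUT == 25000ms and
* SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT == 100ms form the rest of the
* sum, with SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL == 500ms and
* SCI_MAX_PHYS == 4:
*
* 25000 + 100 + (4 - 1) * 500 == 26600ms suggested start timeout
*/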
-static void scic_controller_enable_interrupts(struct isci_host *ihost)
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
BUG_ON(ihost->smu_registers == NULL);
writel(0, &ihost->smu_registers->interrupt_mask);
}
-void scic_controller_disable_interrupts(struct isci_host *ihost)
+void sci_controller_disable_interrupts(struct isci_host *ihost)
{
BUG_ON(ihost->smu_registers == NULL);
writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
}
-static void scic_sds_controller_enable_port_task_scheduler(struct isci_host *ihost)
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
{
u32 port_task_scheduler_value;
&ihost->scu_registers->peg0.ptsg.control);
}
-static void scic_sds_controller_assign_task_entries(struct isci_host *ihost)
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
{
u32 task_assignment;
}
-static void scic_sds_controller_initialize_completion_queue(struct isci_host *ihost)
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
{
u32 index;
u32 completion_queue_control_value;
}
}
-static void scic_sds_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
u32 frame_queue_control_value;
u32 frame_queue_get_value;
&ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}
-/**
- * This method will attempt to transition into the ready state for the
- * controller and indicate that the controller start operation has completed
- * if all criteria are met.
- * @scic: This parameter indicates the controller object for which
- * to transition to ready.
- * @status: This parameter indicates the status value to be pass into the call
- * to scic_cb_controller_start_complete().
- *
- * none.
- */
-static void scic_sds_controller_transition_to_ready(
- struct isci_host *ihost,
- enum sci_status status)
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
-
if (ihost->sm.current_state_id == SCIC_STARTING) {
/*
* We move into the ready state, because some of the phys/ports
static bool is_phy_starting(struct isci_phy *iphy)
{
- enum scic_sds_phy_states state;
+ enum sci_phy_states state;
state = iphy->sm.current_state_id;
switch (state) {
}
/**
- * scic_sds_controller_start_next_phy - start phy
+ * sci_controller_start_next_phy - start phy
* @scic: controller
*
* If all the phys have been started, then attempt to transition the
* controller to the READY state and inform the user
- * (scic_cb_controller_start_complete()).
+ * (sci_cb_controller_start_complete()).
*/
-static enum sci_status scic_sds_controller_start_next_phy(struct isci_host *ihost)
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
- struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1;
+ struct sci_oem_params *oem = &ihost->oem_parameters;
struct isci_phy *iphy;
enum sci_status status;
/*
* The controller has successfully finished the start process.
* Inform the SCI Core user and transition to the READY state. */
if (is_controller_start_complete) {
- scic_sds_controller_transition_to_ready(ihost, SCI_SUCCESS);
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
sci_del_timer(&ihost->phy_timer);
ihost->phy_startup_timer_pending = false;
}
* incorrectly for the PORT or it was never
* assigned to a PORT
*/
- return scic_sds_controller_start_next_phy(ihost);
+ return sci_controller_start_next_phy(ihost);
}
}
- status = scic_sds_phy_start(iphy);
+ status = sci_phy_start(iphy);
if (status == SCI_SUCCESS) {
sci_mod_timer(&ihost->phy_timer,
ihost->phy_startup_timer_pending = false;
do {
- status = scic_sds_controller_start_next_phy(ihost);
+ status = sci_controller_start_next_phy(ihost);
} while (status != SCI_SUCCESS);
done:
return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}
-static enum sci_status scic_controller_start(struct isci_host *ihost,
+static enum sci_status sci_controller_start(struct isci_host *ihost,
u32 timeout)
{
enum sci_status result;
isci_tci_free(ihost, index);
/* Build the RNi free pool */
- scic_sds_remote_node_table_initialize(
- &ihost->available_remote_nodes,
- ihost->remote_node_entries);
+ sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+ ihost->remote_node_entries);
/*
* Before anything else lets make sure we will not be
* interrupted by the hardware.
*/
- scic_controller_disable_interrupts(ihost);
+ sci_controller_disable_interrupts(ihost);
/* Enable the port task scheduler */
- scic_sds_controller_enable_port_task_scheduler(ihost);
+ sci_controller_enable_port_task_scheduler(ihost);
/* Assign all the task entries to ihost physical function */
- scic_sds_controller_assign_task_entries(ihost);
+ sci_controller_assign_task_entries(ihost);
/* Now initialize the completion queue */
- scic_sds_controller_initialize_completion_queue(ihost);
+ sci_controller_initialize_completion_queue(ihost);
/* Initialize the unsolicited frame queue for use */
- scic_sds_controller_initialize_unsolicited_frame_queue(ihost);
+ sci_controller_initialize_unsolicited_frame_queue(ihost);
/* Start all of the ports on this controller */
for (index = 0; index < ihost->logical_port_entries; index++) {
struct isci_port *iport = &ihost->ports[index];
- result = scic_sds_port_start(iport);
+ result = sci_port_start(iport);
if (result)
return result;
}
- scic_sds_controller_start_next_phy(ihost);
+ sci_controller_start_next_phy(ihost);
sci_mod_timer(&ihost->timer, timeout);
void isci_host_scan_start(struct Scsi_Host *shost)
{
struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
- unsigned long tmo = scic_controller_get_suggested_start_timeout(ihost);
+ unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
set_bit(IHOST_START_PENDING, &ihost->flags);
spin_lock_irq(&ihost->scic_lock);
- scic_controller_start(ihost, tmo);
- scic_controller_enable_interrupts(ihost);
+ sci_controller_start(ihost, tmo);
+ sci_controller_enable_interrupts(ihost);
spin_unlock_irq(&ihost->scic_lock);
}
static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
isci_host_change_state(ihost, isci_stopped);
- scic_controller_disable_interrupts(ihost);
+ sci_controller_disable_interrupts(ihost);
clear_bit(IHOST_STOP_PENDING, &ihost->flags);
wake_up(&ihost->eventq);
}
-static void scic_sds_controller_completion_handler(struct isci_host *ihost)
+static void sci_controller_completion_handler(struct isci_host *ihost)
{
/* Empty out the completion queue */
- if (scic_sds_controller_completion_queue_has_entries(ihost))
- scic_sds_controller_process_completions(ihost);
+ if (sci_controller_completion_queue_has_entries(ihost))
+ sci_controller_process_completions(ihost);
/* Clear the interrupt and enable all interrupts again */
writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
spin_lock_irq(&ihost->scic_lock);
- scic_sds_controller_completion_handler(ihost);
+ sci_controller_completion_handler(ihost);
/* Take the lists of completed I/Os from the host. */
}
/**
- * scic_controller_stop() - This method will stop an individual controller
+ * sci_controller_stop() - This method will stop an individual controller
* object. This method will invoke the associated user callback upon
* completion. The completion callback is called when the following
* conditions are met: -# the method return status is SCI_SUCCESS, and
* -# the controller has been quiesced. Returns SCI_WARNING_ALREADY_IN_STATE if the
* controller is already in the STOPPED state, or SCI_FAILURE_INVALID_STATE if the
* controller is not either in the STARTED or STOPPED states.
*/
-static enum sci_status scic_controller_stop(struct isci_host *ihost,
- u32 timeout)
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
if (ihost->sm.current_state_id != SCIC_READY) {
dev_warn(&ihost->pdev->dev,
}
/**
- * scic_controller_reset() - This method will reset the supplied core
+ * sci_controller_reset() - This method will reset the supplied core
* controller regardless of the state of said controller. This operation is
* considered destructive. In other words, all current operations are wiped
* out. No IO completions for outstanding devices occur. Outstanding IO
* requests are not aborted or completed at the actual remote device. Returns
* SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
* the controller reset operation is unable to complete.
*/
-static enum sci_status scic_controller_reset(struct isci_host *ihost)
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
switch (ihost->sm.current_state_id) {
case SCIC_RESET:
set_bit(IHOST_STOP_PENDING, &ihost->flags);
spin_lock_irq(&ihost->scic_lock);
- scic_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+ sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
spin_unlock_irq(&ihost->scic_lock);
wait_for_stop(ihost);
- scic_controller_reset(ihost);
+ sci_controller_reset(ihost);
/* Cancel any/all outstanding port timers */
for (i = 0; i < ihost->logical_port_entries; i++) {
return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
-static void isci_user_parameters_get(
- struct isci_host *isci_host,
- union scic_user_parameters *scic_user_params)
+static void isci_user_parameters_get(struct sci_user_parameters *u)
{
- struct scic_sds_user_parameters *u = &scic_user_params->sds1;
int i;
for (i = 0; i < SCI_MAX_PHYS; i++) {
u->max_number_concurrent_device_spin_up = max_concurr_spinup;
}
-static void scic_sds_controller_initial_state_enter(struct sci_base_state_machine *sm)
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
sci_change_state(&ihost->sm, SCIC_RESET);
}
-static inline void scic_sds_controller_starting_state_exit(struct sci_base_state_machine *sm)
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
/**
- * scic_controller_set_interrupt_coalescence() - This method allows the user to
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
* configure the interrupt coalescence.
* @controller: This parameter represents the handle to the controller object
* for which its interrupt coalesce register is overridden.
* SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
*/
static enum sci_status
-scic_controller_set_interrupt_coalescence(struct isci_host *ihost,
- u32 coalesce_number,
- u32 coalesce_timeout)
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+ u32 coalesce_number,
+ u32 coalesce_timeout)
{
u8 timeout_encode = 0;
u32 min = 0;
}
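/* Usage sketch: the ready-state default below coalesces up to 0x10
* completions or, assuming the timeout parameter is in microseconds,
* 250us, whichever limit is hit first; 0/0 (set on ready-state exit)
* disables coalescing entirely. */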
-static void scic_sds_controller_ready_state_enter(struct sci_base_state_machine *sm)
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
/* set the default interrupt coalescence number and timeout value. */
- scic_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+ sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
}
-static void scic_sds_controller_ready_state_exit(struct sci_base_state_machine *sm)
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
/* disable interrupt coalescence. */
- scic_controller_set_interrupt_coalescence(ihost, 0, 0);
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
-static enum sci_status scic_sds_controller_stop_phys(struct isci_host *ihost)
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
{
u32 index;
enum sci_status status;
status = SCI_SUCCESS;
for (index = 0; index < SCI_MAX_PHYS; index++) {
- phy_status = scic_sds_phy_stop(&ihost->phys[index]);
+ phy_status = sci_phy_stop(&ihost->phys[index]);
if (phy_status != SCI_SUCCESS &&
phy_status != SCI_FAILURE_INVALID_STATE) {
return status;
}
-static enum sci_status scic_sds_controller_stop_ports(struct isci_host *ihost)
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
u32 index;
enum sci_status port_status;
for (index = 0; index < ihost->logical_port_entries; index++) {
struct isci_port *iport = &ihost->ports[index];
- port_status = scic_sds_port_stop(iport);
+ port_status = sci_port_stop(iport);
if ((port_status != SCI_SUCCESS) &&
(port_status != SCI_FAILURE_INVALID_STATE)) {
return status;
}
-static enum sci_status scic_sds_controller_stop_devices(struct isci_host *ihost)
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
{
u32 index;
enum sci_status status;
for (index = 0; index < ihost->remote_node_entries; index++) {
if (ihost->device_table[index] != NULL) {
/* @todo What timeout value do we want to provide to this request? */
- device_status = scic_remote_device_stop(ihost->device_table[index], 0);
+ device_status = sci_remote_device_stop(ihost->device_table[index], 0);
if ((device_status != SCI_SUCCESS) &&
(device_status != SCI_FAILURE_INVALID_STATE)) {
return status;
}
-static void scic_sds_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
/* Stop all of the components for this controller */
- scic_sds_controller_stop_phys(ihost);
- scic_sds_controller_stop_ports(ihost);
- scic_sds_controller_stop_devices(ihost);
+ sci_controller_stop_phys(ihost);
+ sci_controller_stop_ports(ihost);
+ sci_controller_stop_devices(ihost);
}
-static void scic_sds_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
sci_del_timer(&ihost->timer);
}
-
-/**
- * scic_sds_controller_reset_hardware() -
- *
- * This method will reset the controller hardware.
- */
-static void scic_sds_controller_reset_hardware(struct isci_host *ihost)
+static void sci_controller_reset_hardware(struct isci_host *ihost)
{
/* Disable interrupts so we don't take any spurious interrupts */
- scic_controller_disable_interrupts(ihost);
+ sci_controller_disable_interrupts(ihost);
/* Reset the SCU */
writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}
-static void scic_sds_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
- scic_sds_controller_reset_hardware(ihost);
+ sci_controller_reset_hardware(ihost);
sci_change_state(&ihost->sm, SCIC_RESET);
}
-static const struct sci_base_state scic_sds_controller_state_table[] = {
+static const struct sci_base_state sci_controller_state_table[] = {
[SCIC_INITIAL] = {
- .enter_state = scic_sds_controller_initial_state_enter,
+ .enter_state = sci_controller_initial_state_enter,
},
[SCIC_RESET] = {},
[SCIC_INITIALIZING] = {},
[SCIC_INITIALIZED] = {},
[SCIC_STARTING] = {
- .exit_state = scic_sds_controller_starting_state_exit,
+ .exit_state = sci_controller_starting_state_exit,
},
[SCIC_READY] = {
- .enter_state = scic_sds_controller_ready_state_enter,
- .exit_state = scic_sds_controller_ready_state_exit,
+ .enter_state = sci_controller_ready_state_enter,
+ .exit_state = sci_controller_ready_state_exit,
},
[SCIC_RESETTING] = {
- .enter_state = scic_sds_controller_resetting_state_enter,
+ .enter_state = sci_controller_resetting_state_enter,
},
[SCIC_STOPPING] = {
- .enter_state = scic_sds_controller_stopping_state_enter,
- .exit_state = scic_sds_controller_stopping_state_exit,
+ .enter_state = sci_controller_stopping_state_enter,
+ .exit_state = sci_controller_stopping_state_exit,
},
[SCIC_STOPPED] = {},
[SCIC_FAILED] = {}
};
-static void scic_sds_controller_set_default_config_parameters(struct isci_host *ihost)
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
{
/* these defaults are overridden by the platform / firmware */
u16 index;
/* Default to APC mode. */
- ihost->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+ ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
/* Default to APC mode. */
- ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
/* Default to no SSC operation. */
- ihost->oem_parameters.sds1.controller.do_enable_ssc = false;
+ ihost->oem_parameters.controller.do_enable_ssc = false;
/* Initialize all of the port parameter information to narrow ports. */
for (index = 0; index < SCI_MAX_PORTS; index++) {
- ihost->oem_parameters.sds1.ports[index].phy_mask = 0;
+ ihost->oem_parameters.ports[index].phy_mask = 0;
}
/* Initialize all of the phy parameter information. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
/* Default to 6G (i.e. Gen 3) for now. */
- ihost->user_parameters.sds1.phys[index].max_speed_generation = 3;
+ ihost->user_parameters.phys[index].max_speed_generation = 3;
/* the frequencies cannot be 0 */
- ihost->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
- ihost->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
- ihost->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+ ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+ ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+ ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
/*
* Previous Vitesse-based expanders had an arbitration issue that
* is worked around by setting the upper 32 bits of the SAS address
* to a value greater than the Vitesse company identifier.
* Hence the usage of 0x5FCFFFFF.
*/
- ihost->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
- ihost->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
+ ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+ ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
}
- ihost->user_parameters.sds1.stp_inactivity_timeout = 5;
- ihost->user_parameters.sds1.ssp_inactivity_timeout = 5;
- ihost->user_parameters.sds1.stp_max_occupancy_timeout = 5;
- ihost->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.sds1.no_outbound_task_timeout = 20;
+ ihost->user_parameters.stp_inactivity_timeout = 5;
+ ihost->user_parameters.ssp_inactivity_timeout = 5;
+ ihost->user_parameters.stp_max_occupancy_timeout = 5;
+ ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 20;
}
static void controller_timeout(unsigned long data)
goto done;
if (sm->current_state_id == SCIC_STARTING)
- scic_sds_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+ sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
else if (sm->current_state_id == SCIC_STOPPING) {
sci_change_state(sm, SCIC_FAILED);
isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
-/**
- * scic_controller_construct() - This method will attempt to construct a
- * controller object utilizing the supplied parameter information.
- * @c: This parameter specifies the controller to be constructed.
- * @scu_base: mapped base address of the scu registers
- * @smu_base: mapped base address of the smu registers
- *
- * Indicate if the controller was successfully constructed or if it failed in
- * some way. SCI_SUCCESS This value is returned if the controller was
- * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
- * if the interrupt coalescence timer may cause SAS compliance issues for SMP
- * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
- * This value is returned if the controller does not support the supplied type.
- * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
- * controller does not support the supplied initialization data version.
- */
-static enum sci_status scic_controller_construct(struct isci_host *ihost,
- void __iomem *scu_base,
- void __iomem *smu_base)
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+ void __iomem *scu_base,
+ void __iomem *smu_base)
{
u8 i;
- sci_init_sm(&ihost->sm, scic_sds_controller_state_table, SCIC_INITIAL);
+ sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
ihost->scu_registers = scu_base;
ihost->smu_registers = smu_base;
- scic_sds_port_configuration_agent_construct(&ihost->port_agent);
+ sci_port_configuration_agent_construct(&ihost->port_agent);
/* Construct the ports for this controller */
for (i = 0; i < SCI_MAX_PORTS; i++)
- scic_sds_port_construct(&ihost->ports[i], i, ihost);
- scic_sds_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+ sci_port_construct(&ihost->ports[i], i, ihost);
+ sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
/* Construct the phys for this controller */
for (i = 0; i < SCI_MAX_PHYS; i++) {
/* Add all the PHYs to the dummy port */
- scic_sds_phy_construct(&ihost->phys[i],
- &ihost->ports[SCI_MAX_PORTS], i);
+ sci_phy_construct(&ihost->phys[i],
+ &ihost->ports[SCI_MAX_PORTS], i);
}
ihost->invalid_phy_mask = 0;
sci_init_timer(&ihost->timer, controller_timeout);
/* Initialize the User and OEM parameters to default values. */
- scic_sds_controller_set_default_config_parameters(ihost);
+ sci_controller_set_default_config_parameters(ihost);
- return scic_controller_reset(ihost);
+ return sci_controller_reset(ihost);
}
-int scic_oem_parameters_validate(struct scic_sds_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
{
int i;
return 0;
}
-static enum sci_status scic_oem_parameters_set(struct isci_host *ihost,
- union scic_oem_parameters *scic_parms)
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
- if (scic_oem_parameters_validate(&scic_parms->sds1))
+ if (sci_oem_parameters_validate(&ihost->oem_parameters))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
- ihost->oem_parameters.sds1 = scic_parms->sds1;
return SCI_SUCCESS;
}
return SCI_FAILURE_INVALID_STATE;
}
-void scic_oem_parameters_get(
- struct isci_host *ihost,
- union scic_oem_parameters *scic_parms)
-{
- memcpy(scic_parms, (&ihost->oem_parameters), sizeof(*scic_parms));
-}
-
static void power_control_timeout(unsigned long data)
{
struct sci_timer *tmr = (struct sci_timer *)data;
continue;
if (ihost->power_control.phys_granted_power >=
- ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up)
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
break;
ihost->power_control.requesters[i] = NULL;
ihost->power_control.phys_waiting--;
ihost->power_control.phys_granted_power++;
- scic_sds_phy_consume_power_handler(iphy);
+ sci_phy_consume_power_handler(iphy);
}
/*
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
-/**
- * This method inserts the phy in the stagger spinup control queue.
- * @scic:
- *
- *
- */
-void scic_sds_controller_power_control_queue_insert(
- struct isci_host *ihost,
- struct isci_phy *iphy)
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy)
{
BUG_ON(iphy == NULL);
if (ihost->power_control.phys_granted_power <
- ihost->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
ihost->power_control.phys_granted_power++;
- scic_sds_phy_consume_power_handler(iphy);
+ sci_phy_consume_power_handler(iphy);
/*
* stop and start the power_control timer. When the timer fires, the
}
}
-/**
- * This method removes the phy from the stagger spinup control queue.
- * @scic:
- *
- *
- */
-void scic_sds_controller_power_control_queue_remove(
- struct isci_host *ihost,
- struct isci_phy *iphy)
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy)
{
BUG_ON(iphy == NULL);
- if (ihost->power_control.requesters[iphy->phy_index] != NULL) {
+ if (ihost->power_control.requesters[iphy->phy_index])
ihost->power_control.phys_waiting--;
- }
ihost->power_control.requesters[iphy->phy_index] = NULL;
}
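/* With the default max_concurrent_dev_spin_up of 1 (see
* sci_controller_set_default_config_parameters), only one phy is
* granted spin-up power immediately; the rest park in
* power_control.requesters until power_control_timeout grants them,
* one per interval. */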
/* Initialize the AFE for this phy index. We need to read the AFE setup from
* the OEM parameters
*/
-static void scic_sds_controller_afe_initialization(struct isci_host *ihost)
+static void sci_controller_afe_initialization(struct isci_host *ihost)
{
- const struct scic_sds_oem_params *oem = &ihost->oem_parameters.sds1;
+ const struct sci_oem_params *oem = &ihost->oem_parameters;
u32 afe_status;
u32 phy_id;
udelay(AFE_REGISTER_WRITE_DELAY);
}
-static void scic_sds_controller_initialize_power_control(struct isci_host *ihost)
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
{
sci_init_timer(&ihost->power_control.timer, power_control_timeout);
ihost->power_control.phys_granted_power = 0;
}
-static enum sci_status scic_controller_initialize(struct isci_host *ihost)
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
{
struct sci_base_state_machine *sm = &ihost->sm;
enum sci_status result = SCI_FAILURE;
ihost->next_phy_to_start = 0;
ihost->phy_startup_timer_pending = false;
- scic_sds_controller_initialize_power_control(ihost);
+ sci_controller_initialize_power_control(ihost);
/*
* There is nothing to do here for B0 since we do not have to
* program the AFE registers.
* @todo The AFE settings are supposed to be correct for the B0 but
* presently they seem to be wrong.
*/
- scic_sds_controller_afe_initialization(ihost);
+ sci_controller_afe_initialization(ihost);
/* Take the hardware out of reset */
* are accessed during the port initialization.
*/
for (i = 0; i < SCI_MAX_PHYS; i++) {
- result = scic_sds_phy_initialize(&ihost->phys[i],
- &ihost->scu_registers->peg0.pe[i].tl,
- &ihost->scu_registers->peg0.pe[i].ll);
+ result = sci_phy_initialize(&ihost->phys[i],
+ &ihost->scu_registers->peg0.pe[i].tl,
+ &ihost->scu_registers->peg0.pe[i].ll);
if (result != SCI_SUCCESS)
goto out;
}
for (i = 0; i < ihost->logical_port_entries; i++) {
- result = scic_sds_port_initialize(&ihost->ports[i],
- &ihost->scu_registers->peg0.ptsg.port[i],
- &ihost->scu_registers->peg0.ptsg.protocol_engine,
- &ihost->scu_registers->peg0.viit[i]);
+ struct isci_port *iport = &ihost->ports[i];
- if (result != SCI_SUCCESS)
- goto out;
+ iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+ iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+ iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
}
- result = scic_sds_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+ result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
out:
/* Advance the controller state machine */
return result;
}
-static enum sci_status scic_user_parameters_set(
- struct isci_host *ihost,
- union scic_user_parameters *scic_parms)
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
{
u32 state = ihost->sm.current_state_id;
for (index = 0; index < SCI_MAX_PHYS; index++) {
struct sci_phy_user_params *user_phy;
- user_phy = &scic_parms->sds1.phys[index];
+ user_phy = &sci_parms->phys[index];
if (!((user_phy->max_speed_generation <=
SCIC_SDS_PARM_MAX_SPEED) &&
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
- if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
- (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
- (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
- (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
- (scic_parms->sds1.no_outbound_task_timeout == 0))
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
- memcpy(&ihost->user_parameters, scic_parms, sizeof(*scic_parms));
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
return SCI_SUCCESS;
}
return SCI_FAILURE_INVALID_STATE;
}
-static int scic_controller_mem_init(struct isci_host *ihost)
+static int sci_controller_mem_init(struct isci_host *ihost)
{
struct device *dev = &ihost->pdev->dev;
dma_addr_t dma;
size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!ihost->remote_node_context_table)
return -ENOMEM;
writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
- err = scic_sds_unsolicited_frame_control_construct(ihost);
+ err = sci_unsolicited_frame_control_construct(ihost);
if (err)
return err;
{
int err = 0, i;
enum sci_status status;
- union scic_oem_parameters oem;
- union scic_user_parameters scic_user_params;
+ struct sci_user_parameters sci_user_params;
struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
spin_lock_init(&ihost->state_lock);
isci_host_change_state(ihost, isci_starting);
- status = scic_controller_construct(ihost, scu_base(ihost),
- smu_base(ihost));
+ status = sci_controller_construct(ihost, scu_base(ihost),
+ smu_base(ihost));
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
- "%s: scic_controller_construct failed - status = %x\n",
+ "%s: sci_controller_construct failed - status = %x\n",
__func__,
status);
return -ENODEV;
* grab initial values stored in the controller object for OEM and USER
* parameters
*/
- isci_user_parameters_get(ihost, &scic_user_params);
- status = scic_user_parameters_set(ihost,
- &scic_user_params);
+ isci_user_parameters_get(&sci_user_params);
+ status = sci_user_parameters_set(ihost, &sci_user_params);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
- "%s: scic_user_parameters_set failed\n",
+ "%s: sci_user_parameters_set failed\n",
__func__);
return -ENODEV;
}
- scic_oem_parameters_get(ihost, &oem);
-
/* grab any OEM parameters specified in orom */
if (pci_info->orom) {
- status = isci_parse_oem_parameters(&oem,
+ status = isci_parse_oem_parameters(&ihost->oem_parameters,
pci_info->orom,
ihost->id);
if (status != SCI_SUCCESS) {
}
}
- status = scic_oem_parameters_set(ihost, &oem);
+ status = sci_oem_parameters_set(ihost);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
- "%s: scic_oem_parameters_set failed\n",
+ "%s: sci_oem_parameters_set failed\n",
__func__);
return -ENODEV;
}
INIT_LIST_HEAD(&ihost->requests_to_errorback);
spin_lock_irq(&ihost->scic_lock);
- status = scic_controller_initialize(ihost);
+ status = sci_controller_initialize(ihost);
spin_unlock_irq(&ihost->scic_lock);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
- "%s: scic_controller_initialize failed -"
+ "%s: sci_controller_initialize failed -"
" status = 0x%x\n",
__func__, status);
return -ENODEV;
}
- err = scic_controller_mem_init(ihost);
+ err = sci_controller_mem_init(ihost);
if (err)
return err;
return 0;
}
-void scic_sds_controller_link_up(struct isci_host *ihost,
- struct isci_port *iport, struct isci_phy *iphy)
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
{
switch (ihost->sm.current_state_id) {
case SCIC_STARTING:
sci_del_timer(&ihost->phy_timer);
ihost->phy_startup_timer_pending = false;
ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
- iport, iphy);
- scic_sds_controller_start_next_phy(ihost);
+ iport, iphy);
+ sci_controller_start_next_phy(ihost);
break;
case SCIC_READY:
ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
- iport, iphy);
+ iport, iphy);
break;
default:
dev_dbg(&ihost->pdev->dev,
}
}
-void scic_sds_controller_link_down(struct isci_host *ihost,
- struct isci_port *iport, struct isci_phy *iphy)
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
{
switch (ihost->sm.current_state_id) {
case SCIC_STARTING:
}
}
-/**
- * This is a helper method to determine if any remote devices on this
- * controller are still in the stopping state.
- *
- */
-static bool scic_sds_controller_has_remote_devices_stopping(struct isci_host *ihost)
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
u32 index;
return false;
}
-/**
- * This method is called by the remote device to inform the controller
- * object that the remote device has stopped.
- */
-void scic_sds_controller_remote_device_stopped(struct isci_host *ihost,
- struct isci_remote_device *idev)
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev)
{
if (ihost->sm.current_state_id != SCIC_STOPPING) {
dev_dbg(&ihost->pdev->dev,
return;
}
- if (!scic_sds_controller_has_remote_devices_stopping(ihost)) {
+ if (!sci_controller_has_remote_devices_stopping(ihost))
sci_change_state(&ihost->sm, SCIC_STOPPED);
- }
}
-/**
- * This method will write to the SCU PCP register the request value. The method
- * is used to suspend/resume ports, devices, and phys.
- * @scic:
- *
- *
- */
-void scic_sds_controller_post_request(
- struct isci_host *ihost,
- u32 request)
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
{
- dev_dbg(&ihost->pdev->dev,
- "%s: SCIC Controller 0x%p post request 0x%08x\n",
- __func__,
- ihost,
- request);
+ dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+ __func__, ihost->id, request);
writel(request, &ihost->smu_registers->post_context_port);
}
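/* Writes to the post context port both start task contexts and carry
* the suspend/resume requests for ports, devices, and phys. */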
-struct isci_request *scic_request_by_tag(struct isci_host *ihost, u16 io_tag)
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
{
u16 task_index;
u16 task_sequence;
* enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no remote
* node index available.
*/
-enum sci_status scic_sds_controller_allocate_remote_node_context(
- struct isci_host *ihost,
- struct isci_remote_device *idev,
- u16 *node_id)
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id)
{
u16 node_index;
- u32 remote_node_count = scic_sds_remote_device_node_count(idev);
+ u32 remote_node_count = sci_remote_device_node_count(idev);
- node_index = scic_sds_remote_node_table_allocate_remote_node(
+ node_index = sci_remote_node_table_allocate_remote_node(
&ihost->available_remote_nodes, remote_node_count
);
return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
-/**
- * This method frees the remote node index back to the available pool. Once
- * this is done the remote node context buffer is no longer valid and can
- * not be used.
- * @scic:
- * @sci_dev:
- * @node_id:
- *
- */
-void scic_sds_controller_free_remote_node_context(
- struct isci_host *ihost,
- struct isci_remote_device *idev,
- u16 node_id)
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id)
{
- u32 remote_node_count = scic_sds_remote_device_node_count(idev);
+ u32 remote_node_count = sci_remote_device_node_count(idev);
if (ihost->device_table[node_id] == idev) {
ihost->device_table[node_id] = NULL;
- scic_sds_remote_node_table_release_remote_node_index(
+ sci_remote_node_table_release_remote_node_index(
&ihost->available_remote_nodes, remote_node_count, node_id
);
}
}
-/**
- * This method returns the union scu_remote_node_context for the specified remote
- * node id.
- * @scic:
- * @node_id:
- *
- * union scu_remote_node_context*
- */
-union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
- struct isci_host *ihost,
- u16 node_id
- ) {
- if (
- (node_id < ihost->remote_node_entries)
- && (ihost->device_table[node_id] != NULL)
- ) {
- return &ihost->remote_node_context_table[node_id];
- }
-
- return NULL;
-}
-
-/**
- *
- * @resposne_buffer: This is the buffer into which the D2H register FIS will be
- * constructed.
- * @frame_header: This is the frame header returned by the hardware.
- * @frame_buffer: This is the frame buffer returned by the hardware.
- *
- * This method will combind the frame header and frame buffer to create a SATA
- * D2H register FIS none
- */
-void scic_sds_controller_copy_sata_response(
- void *response_buffer,
- void *frame_header,
- void *frame_buffer)
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer)
{
+ /* XXX type safety? */
memcpy(response_buffer, frame_header, sizeof(u32));
memcpy(response_buffer + sizeof(u32),
frame_buffer,
sizeof(struct dev_to_host_fis) - sizeof(u32));
}
-/**
- * This method releases the frame once this is done the frame is available for
- * re-use by the hardware. The data contained in the frame header and frame
- * buffer is no longer valid. The UF queue get pointer is only updated if UF
- * control indicates this is appropriate.
- * @scic:
- * @frame_index:
- *
- */
-void scic_sds_controller_release_frame(
- struct isci_host *ihost,
- u32 frame_index)
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
- if (scic_sds_unsolicited_frame_control_release_frame(
- &ihost->uf_control, frame_index) == true)
+ if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
writel(ihost->uf_control.get,
&ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}
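/* Once released, the frame header and buffer contents are no longer
* valid; the unsolicited frame get pointer is only written back when
* uf_control indicates the hardware copy should advance. */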
return SCI_FAILURE_INVALID_IO_TAG;
}
-/**
- * scic_controller_start_io() - This method is called by the SCI user to
- * send/start an IO request. If the method invocation is successful, then
- * the IO request has been queued to the hardware for processing.
- * @controller: the handle to the controller object for which to start an IO
- * request.
- * @remote_device: the handle to the remote device object for which to start an
- * IO request.
- * @io_request: the handle to the io request object to start.
- * @io_tag: This parameter specifies a previously allocated IO tag that the
- * user desires to be utilized for this request.
- */
-enum sci_status scic_controller_start_io(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
return SCI_FAILURE_INVALID_STATE;
}
- status = scic_sds_remote_device_start_io(ihost, idev, ireq);
+ status = sci_remote_device_start_io(ihost, idev, ireq);
if (status != SCI_SUCCESS)
return status;
set_bit(IREQ_ACTIVE, &ireq->flags);
- scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq));
+ sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
return SCI_SUCCESS;
}
-/**
- * scic_controller_terminate_request() - This method is called by the SCI Core
- * user to terminate an ongoing (i.e. started) core IO request. This does
- * not abort the IO request at the target, but rather removes the IO request
- * from the host controller.
- * @controller: the handle to the controller object for which to terminate a
- * request.
- * @remote_device: the handle to the remote device object for which to
- * terminate a request.
- * @request: the handle to the io or task management request object to
- * terminate.
- *
- * Indicate if the controller successfully began the terminate process for the
- * IO request. SCI_SUCCESS if the terminate process was successfully started
- * for the request. Determine the failure situations and return values.
- */
-enum sci_status scic_controller_terminate_request(
- struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
+ /* terminate an ongoing (i.e. started) core IO request. This does not
+ * abort the IO request at the target, but rather removes the IO
+ * request from the host controller.
+ */
enum sci_status status;
if (ihost->sm.current_state_id != SCIC_READY) {
return SCI_FAILURE_INVALID_STATE;
}
- status = scic_sds_io_request_terminate(ireq);
+ status = sci_io_request_terminate(ireq);
if (status != SCI_SUCCESS)
return status;
/* Utilize the original post context command and OR in the POST_TC_ABORT
* request sub-type.
*/
- scic_sds_controller_post_request(ihost,
- scic_sds_request_get_post_context(ireq) |
- SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ sci_controller_post_request(ihost,
+ ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
return SCI_SUCCESS;
}
/**
- * scic_controller_complete_io() - This method will perform core specific
+ * sci_controller_complete_io() - This method will perform core specific
* completion operations for an IO request. After this method is invoked,
* the user should consider the IO request as invalid until it is properly
* reused (i.e. re-constructed).
- * @controller: The handle to the controller object for which to complete the
+ * @ihost: The handle to the controller object for which to complete the
* IO request.
- * @remote_device: The handle to the remote device object for which to complete
+ * @idev: The handle to the remote device object for which to complete
* the IO request.
- * @io_request: the handle to the io request object to complete.
+ * @ireq: the handle to the io request object to complete.
*/
-enum sci_status scic_controller_complete_io(
- struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
u16 index;
/* XXX: Implement this function */
return SCI_FAILURE;
case SCIC_READY:
- status = scic_sds_remote_device_complete_io(ihost, idev, ireq);
+ status = sci_remote_device_complete_io(ihost, idev, ireq);
if (status != SCI_SUCCESS)
return status;
}
-enum sci_status scic_controller_continue_io(struct isci_request *ireq)
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
{
struct isci_host *ihost = ireq->owning_controller;
}
set_bit(IREQ_ACTIVE, &ireq->flags);
- scic_sds_controller_post_request(ihost, scic_sds_request_get_post_context(ireq));
+ sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
return SCI_SUCCESS;
}
/**
- * scic_controller_start_task() - This method is called by the SCIC user to
+ * sci_controller_start_task() - This method is called by the SCIC user to
* send/start a framework task management request.
* @controller: the handle to the controller object for which to start the task
* management request.
* the task management request.
* @task_request: the handle to the task request object to start.
*/
-enum sci_task_status scic_controller_start_task(
- struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
return SCI_TASK_FAILURE_INVALID_STATE;
}
- status = scic_sds_remote_device_start_task(ihost, idev, ireq);
+ status = sci_remote_device_start_task(ihost, idev, ireq);
switch (status) {
case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
set_bit(IREQ_ACTIVE, &ireq->flags);
case SCI_SUCCESS:
set_bit(IREQ_ACTIVE, &ireq->flags);
- scic_sds_controller_post_request(ihost,
- scic_sds_request_get_post_context(ireq));
+ sci_controller_post_request(ihost,
+ sci_request_get_post_context(ireq));
break;
default:
break;
/**
- * struct scic_power_control -
+ * struct sci_power_control -
*
* This structure defines the fields for managing power control for direct
* attached disk devices.
*/
-struct scic_power_control {
+struct sci_power_control {
/**
* This field is set when the power control timer is running and cleared when
* it is not.
/**
* This field is an array of phys that we are waiting on. The phys are direct
- * mapped into requesters via struct scic_sds_phy.phy_index
+ * mapped into requesters via struct sci_phy.phy_index
*/
struct isci_phy *requesters[SCI_MAX_PHYS];
};
-struct scic_sds_port_configuration_agent;
+struct sci_port_configuration_agent;
typedef void (*port_config_fn)(struct isci_host *,
- struct scic_sds_port_configuration_agent *,
+ struct sci_port_configuration_agent *,
struct isci_port *, struct isci_phy *);
-struct scic_sds_port_configuration_agent {
+struct sci_port_configuration_agent {
u16 phy_configured_mask;
u16 phy_ready_mask;
struct {
/* XXX can we time this externally */
struct sci_timer timer;
/* XXX drop reference module params directly */
- union scic_user_parameters user_parameters;
+ struct sci_user_parameters user_parameters;
- /* XXX no need to be a union */
- union scic_oem_parameters oem_parameters;
+ struct sci_oem_params oem_parameters;
+ struct sci_oem_params oem_parameters;
+ struct sci_port_configuration_agent port_agent;
struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
- struct scic_remote_node_table available_remote_nodes;
- struct scic_power_control power_control;
+ struct sci_remote_node_table available_remote_nodes;
+ struct sci_power_control power_control;
u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
struct scu_task_context *task_context_table;
dma_addr_t task_context_dma;
u32 logical_port_entries;
u32 remote_node_entries;
u32 task_context_entries;
- struct scic_sds_unsolicited_frame_control uf_control;
+ struct sci_unsolicited_frame_control uf_control;
/* phy startup */
struct sci_timer phy_timer;
};
/**
- * enum scic_sds_controller_states - This enumeration depicts all the states
+ * enum sci_controller_states - This enumeration depicts all the states
* for the common controller state machine.
*/
-enum scic_sds_controller_states {
+enum sci_controller_states {
/**
* Simply the initial state for the base controller state machine.
*/
}
/**
- * scic_sds_controller_get_protocol_engine_group() -
+ * sci_controller_get_protocol_engine_group() -
*
* This macro returns the protocol engine group for this controller object.
* Presently we only support protocol engine group 0 so just return that
*/
-#define scic_sds_controller_get_protocol_engine_group(controller) 0
+#define sci_controller_get_protocol_engine_group(controller) 0
-/* see scic_controller_io_tag_allocate|free for how seq and tci are built */
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
/* these are returned by the hardware, so sanitize them */
#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
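
To make the tag layout concrete, a small hypothetical round-trip using the two macros above (assumes SCI_MAX_IO_REQUESTS is a power of two, so the TCI mask is exact):

static void tag_layout_example(void)
{
	u16 tag = ISCI_TAG(3, 42);	/* 3 << 12 | 42 == 0x302a */
	u16 tci = ISCI_TAG_TCI(tag);	/* recovers 42, the task context index */

	(void)tci;
}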
/* expander attached sata devices require 3 rnc slots */
-static inline int scic_sds_remote_device_node_count(struct isci_remote_device *idev)
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
struct domain_device *dev = idev->domain_dev;
}
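
The comment above says expander-attached SATA devices need three RNC slots; here is a sketch of that policy, assuming the body tests for SATA behind an expander (not the driver's exact code):

static inline int rnc_slots_sketch(struct domain_device *dev)
{
	/* assumed policy: STP targets behind an expander need 3 slots */
	if (dev_is_sata(dev) && dev->parent)
		return 3;
	return 1;
}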
/**
- * scic_sds_controller_set_invalid_phy() -
+ * sci_controller_set_invalid_phy() -
*
* This macro will set the bit in the invalid phy mask for this controller
* object. This is used to control messages reported for invalid link up
* notifications.
*/
-#define scic_sds_controller_set_invalid_phy(controller, phy) \
+#define sci_controller_set_invalid_phy(controller, phy) \
((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))
/**
- * scic_sds_controller_clear_invalid_phy() -
+ * sci_controller_clear_invalid_phy() -
*
* This macro will clear the bit in the invalid phy mask for this controller
* object. This is used to control messages reported for invalid link up
* notifications.
*/
-#define scic_sds_controller_clear_invalid_phy(controller, phy) \
+#define sci_controller_clear_invalid_phy(controller, phy) \
((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
return isci_si_rev > ISCI_SI_REVB0;
}
-void scic_sds_controller_post_request(struct isci_host *ihost,
+void sci_controller_post_request(struct isci_host *ihost,
u32 request);
-void scic_sds_controller_release_frame(struct isci_host *ihost,
+void sci_controller_release_frame(struct isci_host *ihost,
u32 frame_index);
-void scic_sds_controller_copy_sata_response(void *response_buffer,
+void sci_controller_copy_sata_response(void *response_buffer,
void *frame_header,
void *frame_buffer);
-enum sci_status scic_sds_controller_allocate_remote_node_context(struct isci_host *ihost,
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 *node_id);
-void scic_sds_controller_free_remote_node_context(
+void sci_controller_free_remote_node_context(
struct isci_host *ihost,
struct isci_remote_device *idev,
u16 node_id);
-union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
- struct isci_host *ihost,
- u16 node_id);
-struct isci_request *scic_request_by_tag(struct isci_host *ihost,
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
u16 io_tag);
-void scic_sds_controller_power_control_queue_insert(
+void sci_controller_power_control_queue_insert(
struct isci_host *ihost,
struct isci_phy *iphy);
-void scic_sds_controller_power_control_queue_remove(
+void sci_controller_power_control_queue_remove(
struct isci_host *ihost,
struct isci_phy *iphy);
-void scic_sds_controller_link_up(
+void sci_controller_link_up(
struct isci_host *ihost,
struct isci_port *iport,
struct isci_phy *iphy);
-void scic_sds_controller_link_down(
+void sci_controller_link_down(
struct isci_host *ihost,
struct isci_port *iport,
struct isci_phy *iphy);
-void scic_sds_controller_remote_device_stopped(
+void sci_controller_remote_device_stopped(
struct isci_host *ihost,
struct isci_remote_device *idev);
-void scic_sds_controller_copy_task_context(
+void sci_controller_copy_task_context(
struct isci_host *ihost,
struct isci_request *ireq);
-void scic_sds_controller_register_setup(struct isci_host *ihost);
+void sci_controller_register_setup(struct isci_host *ihost);
-enum sci_status scic_controller_continue_io(struct isci_request *ireq);
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
struct isci_remote_device *,
enum sci_status);
-void scic_controller_disable_interrupts(
+void sci_controller_disable_interrupts(
struct isci_host *ihost);
-enum sci_status scic_controller_start_io(
+enum sci_status sci_controller_start_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_task_status scic_controller_start_task(
+enum sci_task_status sci_controller_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status scic_controller_terminate_request(
+enum sci_status sci_controller_terminate_request(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status scic_controller_complete_io(
+enum sci_status sci_controller_complete_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-void scic_sds_port_configuration_agent_construct(
- struct scic_sds_port_configuration_agent *port_agent);
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent);
-enum sci_status scic_sds_port_configuration_agent_initialize(
+enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent);
+ struct sci_port_configuration_agent *port_agent);
#endif
orom = isci_request_oprom(pdev);
for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
- if (scic_oem_parameters_validate(&orom->ctrl[i])) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
dev_warn(&pdev->dev,
"[%d]: invalid oem parameters detected, falling back to firmware\n", i);
devm_kfree(&pdev->dev, orom);
for_each_isci_host(i, ihost, pdev) {
isci_unregister(ihost);
isci_host_deinit(ihost);
- scic_controller_disable_interrupts(ihost);
+ sci_controller_disable_interrupts(ihost);
}
}
* This member indicates that the operation failed, the failure is
* controller implementation specific, and the response data associated
* with the request is not valid. You can query for the controller
- * specific error information via scic_controller_get_request_status()
+ * specific error information via sci_controller_get_request_status()
*/
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
/**
* This value indicates that an unsupported PCI device ID has been
* specified. This indicates that attempts to invoke
- * scic_library_allocate_controller() will fail.
+ * sci_library_allocate_controller() will fail.
*/
SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
/*
* Each timer is associated with a cancellation flag that is set when
* del_timer() is called and checked in the timer callback function. This
- * is needed since del_timer_sync() cannot be called with scic_lock held.
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
* For deinit however, del_timer_sync() is used without holding the lock.
*/
struct sci_timer {
return iphy->max_negotiated_speed;
}
-/*
- * *****************************************************************************
- * * SCIC SDS PHY Internal Methods
- * ***************************************************************************** */
-
-/**
- * This method will initialize the phy transport layer registers
- * @sci_phy:
- * @transport_layer_registers
- *
- * enum sci_status
- */
-static enum sci_status scic_sds_phy_transport_layer_initialization(
- struct isci_phy *iphy,
- struct scu_transport_layer_registers __iomem *transport_layer_registers)
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *reg)
{
u32 tl_control;
- iphy->transport_layer_registers = transport_layer_registers;
+ iphy->transport_layer_registers = reg;
writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
&iphy->transport_layer_registers->stp_rni);
return SCI_SUCCESS;
}
-/**
- * This method will initialize the phy link layer registers
- * @sci_phy:
- * @link_layer_registers:
- *
- * enum sci_status
- */
static enum sci_status
-scic_sds_phy_link_layer_initialization(struct isci_phy *iphy,
- struct scu_link_layer_registers __iomem *link_layer_registers)
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+ struct scu_link_layer_registers __iomem *reg)
{
- struct isci_host *ihost =
- iphy->owning_port->owning_controller;
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
int phy_idx = iphy->phy_index;
- struct sci_phy_user_params *phy_user =
- &ihost->user_parameters.sds1.phys[phy_idx];
+ struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
struct sci_phy_oem_params *phy_oem =
- &ihost->oem_parameters.sds1.phys[phy_idx];
+ &ihost->oem_parameters.phys[phy_idx];
u32 phy_configuration;
- struct scic_phy_cap phy_cap;
+ struct sci_phy_cap phy_cap;
u32 parity_check = 0;
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
- iphy->link_layer_registers = link_layer_registers;
+ iphy->link_layer_registers = reg;
/* Set our IDENTIFY frame data */
#define SCI_END_DEVICE 0x01
phy_cap.gen3_no_ssc = 1;
phy_cap.gen2_no_ssc = 1;
phy_cap.gen1_no_ssc = 1;
- if (ihost->oem_parameters.sds1.controller.do_enable_ssc == true) {
+ if (ihost->oem_parameters.controller.do_enable_ssc == true) {
phy_cap.gen3_ssc = 1;
phy_cap.gen2_ssc = 1;
phy_cap.gen1_ssc = 1;
&iphy->link_layer_registers->afe_lookup_table_control);
llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
- (u8)ihost->user_parameters.sds1.no_outbound_task_timeout);
+ (u8)ihost->user_parameters.no_outbound_task_timeout);
switch (phy_user->max_speed_generation) {
case SCIC_SDS_PARM_GEN3_SPEED:
struct isci_port *phy_get_non_dummy_port(
struct isci_phy *iphy)
{
- if (scic_sds_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
+ if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
return NULL;
return iphy->owning_port;
*
*
*/
-void scic_sds_phy_set_port(
+void sci_phy_set_port(
struct isci_phy *iphy,
struct isci_port *iport)
{
if (iphy->bcn_received_while_port_unassigned) {
iphy->bcn_received_while_port_unassigned = false;
- scic_sds_port_broadcast_change_received(iphy->owning_port, iphy);
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
}
}
-/**
- * This method will initialize the constructed phy
- * @sci_phy:
- * @link_layer_registers:
- *
- * enum sci_status
- */
-enum sci_status scic_sds_phy_initialize(
- struct isci_phy *iphy,
- struct scu_transport_layer_registers __iomem *transport_layer_registers,
- struct scu_link_layer_registers __iomem *link_layer_registers)
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *tl,
+ struct scu_link_layer_registers __iomem *ll)
{
/* Perform the initialization of the TL hardware */
- scic_sds_phy_transport_layer_initialization(
- iphy,
- transport_layer_registers);
+ sci_phy_transport_layer_initialization(iphy, tl);
/* Perform the initialization of the PE hardware */
- scic_sds_phy_link_layer_initialization(iphy, link_layer_registers);
+ sci_phy_link_layer_initialization(iphy, ll);
- /*
- * There is nothing that needs to be done in this state just
- * transition to the stopped state. */
+ /* There is nothing that needs to be done in this state; just
+ * transition to the stopped state.
+ */
sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
return SCI_SUCCESS;
* This will either be the RNi for the device or an invalid RNi if there
* is no current device assigned to the phy.
*/
-void scic_sds_phy_setup_transport(
- struct isci_phy *iphy,
- u32 device_id)
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
{
u32 tl_control;
writel(tl_control, &iphy->transport_layer_registers->control);
}
-/**
- *
- * @sci_phy: The phy object to be suspended.
- *
- * This function will perform the register reads/writes to suspend the SCU
- * hardware protocol engine. none
- */
-static void scic_sds_phy_suspend(
- struct isci_phy *iphy)
+static void sci_phy_suspend(struct isci_phy *iphy)
{
u32 scu_sas_pcfg_value;
writel(scu_sas_pcfg_value,
&iphy->link_layer_registers->phy_configuration);
- scic_sds_phy_setup_transport(
- iphy,
- SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+ sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}
-void scic_sds_phy_resume(struct isci_phy *iphy)
+void sci_phy_resume(struct isci_phy *iphy)
{
u32 scu_sas_pcfg_value;
&iphy->link_layer_registers->phy_configuration);
}
-void scic_sds_phy_get_sas_address(struct isci_phy *iphy,
- struct sci_sas_address *sas_address)
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
{
- sas_address->high = readl(&iphy->link_layer_registers->source_sas_address_high);
- sas_address->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+ sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+ sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
}
-void scic_sds_phy_get_attached_sas_address(struct isci_phy *iphy,
- struct sci_sas_address *sas_address)
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
{
struct sas_identify_frame *iaf;
iaf = &iphy->frame_rcvd.iaf;
- memcpy(sas_address, iaf->sas_addr, SAS_ADDR_SIZE);
+ memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
}
-void scic_sds_phy_get_protocols(struct isci_phy *iphy,
- struct scic_phy_proto *protocols)
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
{
- protocols->all =
- (u16)(readl(&iphy->
- link_layer_registers->transmit_identification) &
- 0x0000FFFF);
+ proto->all = readl(&iphy->link_layer_registers->transmit_identification);
}
-enum sci_status scic_sds_phy_start(struct isci_phy *iphy)
+enum sci_status sci_phy_start(struct isci_phy *iphy)
{
- enum scic_sds_phy_states state = iphy->sm.current_state_id;
+ enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_STOPPED) {
dev_dbg(sciphy_to_dev(iphy),
return SCI_SUCCESS;
}
-enum sci_status scic_sds_phy_stop(struct isci_phy *iphy)
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
{
- enum scic_sds_phy_states state = iphy->sm.current_state_id;
+ enum sci_phy_states state = iphy->sm.current_state_id;
switch (state) {
case SCI_PHY_SUB_INITIAL:
return SCI_SUCCESS;
}
-enum sci_status scic_sds_phy_reset(struct isci_phy *iphy)
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
{
- enum scic_sds_phy_states state = iphy->sm.current_state_id;
+ enum sci_phy_states state = iphy->sm.current_state_id;
if (state != SCI_PHY_READY) {
dev_dbg(sciphy_to_dev(iphy),
return SCI_SUCCESS;
}
-enum sci_status scic_sds_phy_consume_power_handler(struct isci_phy *iphy)
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
{
- enum scic_sds_phy_states state = iphy->sm.current_state_id;
+ enum sci_phy_states state = iphy->sm.current_state_id;
switch (state) {
case SCI_PHY_SUB_AWAIT_SAS_POWER: {
}
}
-/*
- * *****************************************************************************
- * * SCIC SDS PHY HELPER FUNCTIONS
- * ***************************************************************************** */
-
-
-/**
- *
- * @sci_phy: The phy object that received SAS PHY DETECTED.
- *
- * This method continues the link training for the phy as if it were a SAS PHY
- * instead of a SATA PHY. This is done because the completion queue had a SAS
- * PHY DETECTED event when the state machine was expecting a SATA PHY event.
- * none
- */
-static void scic_sds_phy_start_sas_link_training(
- struct isci_phy *iphy)
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
{
+ /* Continue the link training for the phy as if it were a SAS PHY
+ * instead of a SATA PHY. This is done because the completion queue had a SAS
+ * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+ */
u32 phy_control;
- phy_control =
- readl(&iphy->link_layer_registers->phy_configuration);
+ phy_control = readl(&iphy->link_layer_registers->phy_configuration);
phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
writel(phy_control,
- &iphy->link_layer_registers->phy_configuration);
+ &iphy->link_layer_registers->phy_configuration);
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
}
-/**
- *
- * @sci_phy: The phy object that received a SATA SPINUP HOLD event
- *
- * This method continues the link training for the phy as if it were a SATA PHY
- * instead of a SAS PHY. This is done because the completion queue had a SATA
- * SPINUP HOLD event when the state machine was expecting a SAS PHY event. none
- */
-static void scic_sds_phy_start_sata_link_training(
- struct isci_phy *iphy)
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
{
+ /* Continue the link training for the phy as if it were a SATA PHY
+ * instead of a SAS PHY. This is done because the completion queue had a SATA
+ * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+ */
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
}
/**
- * scic_sds_phy_complete_link_training - perform processing common to
+ * sci_phy_complete_link_training - perform processing common to
* all protocols upon completion of link training.
* @sci_phy: This parameter specifies the phy object for which link training
* has completed.
* sub-state machine.
*
*/
-static void scic_sds_phy_complete_link_training(
- struct isci_phy *iphy,
- enum sas_linkrate max_link_rate,
- u32 next_state)
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+ enum sas_linkrate max_link_rate,
+ u32 next_state)
{
iphy->max_negotiated_speed = max_link_rate;
sci_change_state(&iphy->sm, next_state);
}
-enum sci_status scic_sds_phy_event_handler(struct isci_phy *iphy,
- u32 event_code)
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
{
- enum scic_sds_phy_states state = iphy->sm.current_state_id;
+ enum sci_phy_states state = iphy->sm.current_state_id;
switch (state) {
case SCI_PHY_SUB_AWAIT_OSSP_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
- scic_sds_phy_start_sas_link_training(iphy);
+ sci_phy_start_sas_link_training(iphy);
iphy->is_in_link_training = true;
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
- scic_sds_phy_start_sata_link_training(iphy);
+ sci_phy_start_sata_link_training(iphy);
iphy->is_in_link_training = true;
break;
default:
break;
case SCU_EVENT_SAS_15:
case SCU_EVENT_SAS_15_SSC:
- scic_sds_phy_complete_link_training(
- iphy,
- SAS_LINK_RATE_1_5_GBPS,
- SCI_PHY_SUB_AWAIT_IAF_UF);
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SAS_30:
case SCU_EVENT_SAS_30_SSC:
- scic_sds_phy_complete_link_training(
- iphy,
- SAS_LINK_RATE_3_0_GBPS,
- SCI_PHY_SUB_AWAIT_IAF_UF);
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SAS_60:
case SCU_EVENT_SAS_60_SSC:
- scic_sds_phy_complete_link_training(
- iphy,
- SAS_LINK_RATE_6_0_GBPS,
- SCI_PHY_SUB_AWAIT_IAF_UF);
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/*
 * We were doing SAS PHY link training and received a SATA PHY event;
 * continue OOB/SN as if this were a SATA PHY */
- scic_sds_phy_start_sata_link_training(iphy);
+ sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_SAS_PHY_DETECTED:
/* Backup the state machine */
- scic_sds_phy_start_sas_link_training(iphy);
+ sci_phy_start_sas_link_training(iphy);
break;
case SCU_EVENT_SATA_SPINUP_HOLD:
/* We were doing SAS PHY link training and received a
 * SATA PHY event; continue OOB/SN as if this were a
* SATA PHY
*/
- scic_sds_phy_start_sata_link_training(iphy);
+ sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
case SCU_EVENT_LINK_FAILURE:
/* There has been a change in the phy type before OOB/SN for the
 * SATA finished; start down the SAS link training path.
*/
- scic_sds_phy_start_sas_link_training(iphy);
+ sci_phy_start_sas_link_training(iphy);
break;
default:
/* There has been a change in the phy type before OOB/SN for the
 * SATA finished; start down the SAS link training path.
*/
- scic_sds_phy_start_sas_link_training(iphy);
+ sci_phy_start_sas_link_training(iphy);
break;
default:
dev_warn(sciphy_to_dev(iphy),
break;
case SCU_EVENT_SATA_15:
case SCU_EVENT_SATA_15_SSC:
- scic_sds_phy_complete_link_training(
- iphy,
- SAS_LINK_RATE_1_5_GBPS,
- SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_SATA_30:
case SCU_EVENT_SATA_30_SSC:
- scic_sds_phy_complete_link_training(
- iphy,
- SAS_LINK_RATE_3_0_GBPS,
- SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_SATA_60:
case SCU_EVENT_SATA_60_SSC:
- scic_sds_phy_complete_link_training(
- iphy,
- SAS_LINK_RATE_6_0_GBPS,
- SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_LINK_FAILURE:
/* Link failure change state back to the starting state */
/*
* There has been a change in the phy type before OOB/SN for the
 * SATA finished; start down the SAS link training path. */
- scic_sds_phy_start_sas_link_training(iphy);
+ sci_phy_start_sas_link_training(iphy);
break;
default:
dev_warn(sciphy_to_dev(iphy),
case SCU_EVENT_BROADCAST_CHANGE:
/* Broadcast change received. Notify the port. */
if (phy_get_non_dummy_port(iphy) != NULL)
- scic_sds_port_broadcast_change_received(iphy->owning_port, iphy);
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
else
iphy->bcn_received_while_port_unassigned = true;
break;
}
}
-enum sci_status scic_sds_phy_frame_handler(struct isci_phy *iphy,
- u32 frame_index)
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
{
- enum scic_sds_phy_states state = iphy->sm.current_state_id;
+ enum sci_phy_states state = iphy->sm.current_state_id;
struct isci_host *ihost = iphy->owning_port->owning_controller;
enum sci_status result;
unsigned long flags;
u32 *frame_words;
struct sas_identify_frame iaf;
- result = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
- frame_index,
- (void **)&frame_words);
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_words);
if (result != SCI_SUCCESS)
return result;
"unexpected frame id %x\n",
__func__, frame_index);
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return result;
}
case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
struct dev_to_host_fis *frame_header;
u32 *fis_frame_data;
- result = scic_sds_unsolicited_frame_control_get_header(
- &(scic_sds_phy_get_controller(iphy)->uf_control),
+ result = sci_unsolicited_frame_control_get_header(
+ &(sci_phy_get_controller(iphy)->uf_control),
frame_index,
(void **)&frame_header);
if ((frame_header->fis_type == FIS_REGD2H) &&
!(frame_header->status & ATA_BUSY)) {
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
- frame_index,
- (void **)&fis_frame_data);
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&fis_frame_data);
spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
- scic_sds_controller_copy_sata_response(&iphy->frame_rcvd.fis,
- frame_header,
- fis_frame_data);
+ sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+ frame_header,
+ fis_frame_data);
spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
/* got IAF we can now go to the await spinup semaphore state */
__func__, frame_index);
/* Regardless of the result we are done with this frame */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return result;
}
}
-static void scic_sds_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
}
-static void scic_sds_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
- scic_sds_controller_power_control_queue_insert(ihost, iphy);
+ sci_controller_power_control_queue_insert(ihost, iphy);
}
-static void scic_sds_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
- scic_sds_controller_power_control_queue_remove(ihost, iphy);
+ sci_controller_power_control_queue_remove(ihost, iphy);
}
-static void scic_sds_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
- scic_sds_controller_power_control_queue_insert(ihost, iphy);
+ sci_controller_power_control_queue_insert(ihost, iphy);
}
-static void scic_sds_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
struct isci_host *ihost = iphy->owning_port->owning_controller;
- scic_sds_controller_power_control_queue_remove(ihost, iphy);
+ sci_controller_power_control_queue_remove(ihost, iphy);
}
-static void scic_sds_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}
-static void scic_sds_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_del_timer(&iphy->sata_timer);
}
-static void scic_sds_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
}
-static void scic_sds_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_del_timer(&iphy->sata_timer);
}
-static void scic_sds_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
- if (scic_sds_port_link_detected(iphy->owning_port, iphy)) {
+ if (sci_port_link_detected(iphy->owning_port, iphy)) {
/*
* Clear the PE suspend condition so we can actually
* The hardware will not respond to the XRDY until the PE
* suspend condition is cleared.
*/
- scic_sds_phy_resume(iphy);
+ sci_phy_resume(iphy);
sci_mod_timer(&iphy->sata_timer,
SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
iphy->is_in_link_training = false;
}
-static void scic_sds_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
sci_del_timer(&iphy->sata_timer);
}
-static void scic_sds_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
&iphy->link_layer_registers->phy_configuration);
}
-static void scic_sds_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
scu_link_layer_stop_protocol_engine(iphy);
if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
- scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy),
+ sci_controller_link_down(sci_phy_get_controller(iphy),
phy_get_non_dummy_port(iphy),
iphy);
}
-static void scic_sds_phy_starting_state_enter(struct sci_base_state_machine *sm)
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
iphy->bcn_received_while_port_unassigned = false;
if (iphy->sm.previous_state_id == SCI_PHY_READY)
- scic_sds_controller_link_down(scic_sds_phy_get_controller(iphy),
+ sci_controller_link_down(sci_phy_get_controller(iphy),
phy_get_non_dummy_port(iphy),
iphy);
sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
}
-static void scic_sds_phy_ready_state_enter(struct sci_base_state_machine *sm)
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
- scic_sds_controller_link_up(scic_sds_phy_get_controller(iphy),
+ sci_controller_link_up(sci_phy_get_controller(iphy),
phy_get_non_dummy_port(iphy),
iphy);
}
-static void scic_sds_phy_ready_state_exit(struct sci_base_state_machine *sm)
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
- scic_sds_phy_suspend(iphy);
+ sci_phy_suspend(iphy);
}
-static void scic_sds_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
* the resetting state we don't notify the user regarding link up and
* link down notifications
*/
- scic_sds_port_deactivate_phy(iphy->owning_port, iphy, false);
+ sci_port_deactivate_phy(iphy->owning_port, iphy, false);
if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
scu_link_layer_tx_hard_reset(iphy);
}
}
-static const struct sci_base_state scic_sds_phy_state_table[] = {
+static const struct sci_base_state sci_phy_state_table[] = {
[SCI_PHY_INITIAL] = { },
[SCI_PHY_STOPPED] = {
- .enter_state = scic_sds_phy_stopped_state_enter,
+ .enter_state = sci_phy_stopped_state_enter,
},
[SCI_PHY_STARTING] = {
- .enter_state = scic_sds_phy_starting_state_enter,
+ .enter_state = sci_phy_starting_state_enter,
},
[SCI_PHY_SUB_INITIAL] = {
- .enter_state = scic_sds_phy_starting_initial_substate_enter,
+ .enter_state = sci_phy_starting_initial_substate_enter,
},
[SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
[SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
[SCI_PHY_SUB_AWAIT_IAF_UF] = { },
[SCI_PHY_SUB_AWAIT_SAS_POWER] = {
- .enter_state = scic_sds_phy_starting_await_sas_power_substate_enter,
- .exit_state = scic_sds_phy_starting_await_sas_power_substate_exit,
+ .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sas_power_substate_exit,
},
[SCI_PHY_SUB_AWAIT_SATA_POWER] = {
- .enter_state = scic_sds_phy_starting_await_sata_power_substate_enter,
- .exit_state = scic_sds_phy_starting_await_sata_power_substate_exit
+ .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_power_substate_exit
},
[SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
- .enter_state = scic_sds_phy_starting_await_sata_phy_substate_enter,
- .exit_state = scic_sds_phy_starting_await_sata_phy_substate_exit
+ .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_phy_substate_exit
},
[SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
- .enter_state = scic_sds_phy_starting_await_sata_speed_substate_enter,
- .exit_state = scic_sds_phy_starting_await_sata_speed_substate_exit
+ .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_speed_substate_exit
},
[SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
- .enter_state = scic_sds_phy_starting_await_sig_fis_uf_substate_enter,
- .exit_state = scic_sds_phy_starting_await_sig_fis_uf_substate_exit
+ .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+ .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
},
[SCI_PHY_SUB_FINAL] = {
- .enter_state = scic_sds_phy_starting_final_substate_enter,
+ .enter_state = sci_phy_starting_final_substate_enter,
},
[SCI_PHY_READY] = {
- .enter_state = scic_sds_phy_ready_state_enter,
- .exit_state = scic_sds_phy_ready_state_exit,
+ .enter_state = sci_phy_ready_state_enter,
+ .exit_state = sci_phy_ready_state_exit,
},
[SCI_PHY_RESETTING] = {
- .enter_state = scic_sds_phy_resetting_state_enter,
+ .enter_state = sci_phy_resetting_state_enter,
},
[SCI_PHY_FINAL] = { },
};
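
For orientation, a table like sci_phy_state_table is typically consumed by running the old state's exit hook, updating the state id, then running the new state's enter hook. The sketch below assumes that behavior for the state machine helper; the real sci_change_state() internals are not shown in this patch.

static void change_state_sketch(struct sci_base_state_machine *sm,
				const struct sci_base_state *table,
				u32 next_state)
{
	/* leave the old state */
	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next_state;

	/* enter the new state */
	if (table[next_state].enter_state)
		table[next_state].enter_state(sm);
}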
-void scic_sds_phy_construct(struct isci_phy *iphy,
+void sci_phy_construct(struct isci_phy *iphy,
struct isci_port *iport, u8 phy_index)
{
- sci_init_sm(&iphy->sm, scic_sds_phy_state_table, SCI_PHY_INITIAL);
+ sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
/* Copy the rest of the input data to our locals */
iphy->owning_port = iport;
void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
{
- union scic_oem_parameters oem;
+ struct sci_oem_params *oem = &ihost->oem_parameters;
u64 sci_sas_addr;
__be64 sas_addr;
- scic_oem_parameters_get(ihost, &oem);
- sci_sas_addr = oem.sds1.phys[index].sas_address.high;
+ sci_sas_addr = oem->phys[index].sas_address.high;
sci_sas_addr <<= 32;
- sci_sas_addr |= oem.sds1.phys[index].sas_address.low;
+ sci_sas_addr |= oem->phys[index].sas_address.low;
sas_addr = cpu_to_be64(sci_sas_addr);
memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
switch (func) {
case PHY_FUNC_DISABLE:
spin_lock_irqsave(&ihost->scic_lock, flags);
- scic_sds_phy_stop(iphy);
+ sci_phy_stop(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
break;
case PHY_FUNC_LINK_RESET:
spin_lock_irqsave(&ihost->scic_lock, flags);
- scic_sds_phy_stop(iphy);
- scic_sds_phy_start(iphy);
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
break;
*/
#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
-enum scic_sds_phy_protocol {
+enum sci_phy_protocol {
SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
SCIC_SDS_PHY_PROTOCOL_SAS,
SCIC_SDS_PHY_PROTOCOL_SATA,
struct sci_base_state_machine sm;
struct isci_port *owning_port;
enum sas_linkrate max_negotiated_speed;
- enum scic_sds_phy_protocol protocol;
+ enum sci_phy_protocol protocol;
u8 phy_index;
bool bcn_received_while_port_unassigned;
bool is_in_link_training;
return iphy;
}
-struct scic_phy_cap {
+struct sci_phy_cap {
union {
struct {
/*
} __packed;
/* this data structure reflects the link layer transmit identification reg */
-struct scic_phy_proto {
+struct sci_phy_proto {
union {
struct {
u16 _r_a:1;
/**
- * struct scic_phy_properties - This structure defines the properties common to
+ * struct sci_phy_properties - This structure defines the properties common to
* all phys that can be retrieved.
*
*
*/
-struct scic_phy_properties {
+struct sci_phy_properties {
/**
* This field specifies the port that currently contains the
* supplied phy. This field may be set to NULL
};
/**
- * struct scic_sas_phy_properties - This structure defines the properties,
+ * struct sci_sas_phy_properties - This structure defines the properties,
* specific to a SAS phy, that can be retrieved.
*
*
*/
-struct scic_sas_phy_properties {
+struct sci_sas_phy_properties {
/**
* This field delineates the Identify Address Frame received
* from the remote end point.
* This field delineates the Phy capabilities structure received
* from the remote end point.
*/
- struct scic_phy_cap rcvd_cap;
+ struct sci_phy_cap rcvd_cap;
};
/**
- * struct scic_sata_phy_properties - This structure defines the properties,
+ * struct sci_sata_phy_properties - This structure defines the properties,
* specific to a SATA phy, that can be retrieved.
*
*
*/
-struct scic_sata_phy_properties {
+struct sci_sata_phy_properties {
/**
* This field delineates the signature FIS received from the
* attached target.
};
/**
- * enum scic_phy_counter_id - This enumeration depicts the various pieces of
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
* optional information that can be retrieved for a specific phy.
*
*
*/
-enum scic_phy_counter_id {
+enum sci_phy_counter_id {
/**
* This PHY information field tracks the number of frames received.
*/
SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
};
-enum scic_sds_phy_states {
+enum sci_phy_states {
/**
* Simply the initial state for the base domain state machine.
*/
};
/**
- * scic_sds_phy_get_index() -
+ * sci_phy_get_index() -
*
* This macro returns the phy index for the specified phy
*/
-#define scic_sds_phy_get_index(phy) \
+#define sci_phy_get_index(phy) \
((phy)->phy_index)
/**
- * scic_sds_phy_get_controller() - This macro returns the controller for this
+ * sci_phy_get_controller() - This macro returns the controller for this
* phy
*
*
*/
-#define scic_sds_phy_get_controller(phy) \
- (scic_sds_port_get_controller((phy)->owning_port))
+#define sci_phy_get_controller(phy) \
+ (sci_port_get_controller((phy)->owning_port))
-void scic_sds_phy_construct(
+void sci_phy_construct(
struct isci_phy *iphy,
struct isci_port *iport,
u8 phy_index);
struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
-void scic_sds_phy_set_port(
+void sci_phy_set_port(
struct isci_phy *iphy,
struct isci_port *iport);
-enum sci_status scic_sds_phy_initialize(
+enum sci_status sci_phy_initialize(
struct isci_phy *iphy,
struct scu_transport_layer_registers __iomem *transport_layer_registers,
struct scu_link_layer_registers __iomem *link_layer_registers);
-enum sci_status scic_sds_phy_start(
+enum sci_status sci_phy_start(
struct isci_phy *iphy);
-enum sci_status scic_sds_phy_stop(
+enum sci_status sci_phy_stop(
struct isci_phy *iphy);
-enum sci_status scic_sds_phy_reset(
+enum sci_status sci_phy_reset(
struct isci_phy *iphy);
-void scic_sds_phy_resume(
+void sci_phy_resume(
struct isci_phy *iphy);
-void scic_sds_phy_setup_transport(
+void sci_phy_setup_transport(
struct isci_phy *iphy,
u32 device_id);
-enum sci_status scic_sds_phy_event_handler(
+enum sci_status sci_phy_event_handler(
struct isci_phy *iphy,
u32 event_code);
-enum sci_status scic_sds_phy_frame_handler(
+enum sci_status sci_phy_frame_handler(
struct isci_phy *iphy,
u32 frame_index);
-enum sci_status scic_sds_phy_consume_power_handler(
+enum sci_status sci_phy_consume_power_handler(
struct isci_phy *iphy);
-void scic_sds_phy_get_sas_address(
+void sci_phy_get_sas_address(
struct isci_phy *iphy,
struct sci_sas_address *sas_address);
-void scic_sds_phy_get_attached_sas_address(
+void sci_phy_get_attached_sas_address(
struct isci_phy *iphy,
struct sci_sas_address *sas_address);
-struct scic_phy_proto;
-void scic_sds_phy_get_protocols(
+struct sci_phy_proto;
+void sci_phy_get_protocols(
struct isci_phy *iphy,
- struct scic_phy_proto *protocols);
+ struct sci_phy_proto *protocols);
enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
struct isci_host;
spin_unlock_irqrestore(&iport->state_lock, flags);
}
-/*
- * This function will indicate which protocols are supported by this port.
- * @sci_port: a handle corresponding to the SAS port for which to return the
- * supported protocols.
- * @protocols: This parameter specifies a pointer to a data structure
- * which the core will copy the protocol values for the port from the
- * transmit_identification register.
- */
-static void
-scic_sds_port_get_protocols(struct isci_port *iport,
- struct scic_phy_proto *protocols)
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
{
u8 index;
- protocols->all = 0;
-
+ proto->all = 0;
for (index = 0; index < SCI_MAX_PHYS; index++) {
- if (iport->phy_table[index] != NULL) {
- scic_sds_phy_get_protocols(iport->phy_table[index],
- protocols);
- }
+ struct isci_phy *iphy = iport->phy_table[index];
+
+ if (!iphy)
+ continue;
+ sci_phy_get_protocols(iphy, proto);
}
}
-/**
- * This method requests a list (mask) of the phys contained in the supplied SAS
- * port.
- * @sci_port: a handle corresponding to the SAS port for which to return the
- * phy mask.
- *
- * Return a bit mask indicating which phys are a part of this port. Each bit
- * corresponds to a phy identifier (e.g. bit 0 = phy id 0).
- */
-static u32 scic_sds_port_get_phys(struct isci_port *iport)
+static u32 sci_port_get_phys(struct isci_port *iport)
{
u32 index;
u32 mask;
mask = 0;
-
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- if (iport->phy_table[index] != NULL) {
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
mask |= (1 << index);
- }
- }
return mask;
}
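
A hypothetical consumer of the mask returned above, one bit per phy identifier (bit 0 = phy 0):

static void log_port_phys(struct isci_port *iport)
{
	u32 mask = sci_port_get_phys(iport);
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (mask & (1 << i))
			pr_debug("port has phy %d\n", i);
}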
/**
- * scic_port_get_properties() - This method simply returns the properties
+ * sci_port_get_properties() - This method simply returns the properties
* regarding the port, such as: physical index, protocols, sas address, etc.
* @port: this parameter specifies the port for which to retrieve the physical
* index.
* value is returned if the specified port is not valid. When this value is
* returned, no data is copied to the properties output parameter.
*/
-static enum sci_status scic_port_get_properties(struct isci_port *iport,
- struct scic_port_properties *prop)
+static enum sci_status sci_port_get_properties(struct isci_port *iport,
+ struct sci_port_properties *prop)
{
if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
return SCI_FAILURE_INVALID_PORT;
- prop->index = iport->logical_port_index;
- prop->phy_mask = scic_sds_port_get_phys(iport);
- scic_sds_port_get_sas_address(iport, &prop->local.sas_address);
- scic_sds_port_get_protocols(iport, &prop->local.protocols);
- scic_sds_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+ prop->index = iport->logical_port_index;
+ prop->phy_mask = sci_port_get_phys(iport);
+ sci_port_get_sas_address(iport, &prop->local.sas_address);
+ sci_port_get_protocols(iport, &prop->local.protocols);
+ sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
return SCI_SUCCESS;
}
-static void scic_port_bcn_enable(struct isci_port *iport)
+static void sci_port_bcn_enable(struct isci_port *iport)
{
struct isci_phy *iphy;
u32 val;
}
}
-/* called under scic_lock to stabilize phy:port associations */
+/* called under sci_lock to stabilize phy:port associations */
void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
{
int i;
ihost->sas_ha.notify_port_event(&iphy->sas_phy,
PORTE_BROADCAST_RCVD);
}
- scic_port_bcn_enable(iport);
+ sci_port_bcn_enable(iport);
}
static void isci_port_link_up(struct isci_host *isci_host,
struct isci_phy *iphy)
{
unsigned long flags;
- struct scic_port_properties properties;
+ struct sci_port_properties properties;
unsigned long success = true;
BUG_ON(iphy->isci_port != NULL);
isci_port_change_state(iphy->isci_port, isci_starting);
- scic_port_get_properties(iport, &properties);
+ sci_port_get_properties(iport, &properties);
if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
u64 attached_sas_address;
* automagically assign a SAS address to the end device
* for the purpose of creating a port. This SAS address
* will not be the same as assigned to the PHY and needs
- * to be obtained from struct scic_port_properties properties.
+ * to be obtained from struct sci_port_properties properties.
*/
attached_sas_address = properties.remote.sas_address.high;
attached_sas_address <<= 32;
* doesn't preclude all configurations. It merely ensures that a phy is part
* of the allowable set of phy identifiers for that port. For example, one
* could assign phy 3 to port 0 and no other phys. Please refer to
- * scic_sds_port_is_phy_mask_valid() for information regarding whether the
+ * sci_port_is_phy_mask_valid() for information regarding whether the
 * phy_mask for a port can be supported. Returns true if this is a valid phy
 * assignment for the port; false if this is not a valid phy assignment for the
 * port
*/
-bool scic_sds_port_is_valid_phy_assignment(struct isci_port *iport,
- u32 phy_index)
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
{
+ struct isci_host *ihost = iport->owning_controller;
+ struct sci_user_parameters *user = &ihost->user_parameters;
+
/* Initialize to invalid value. */
u32 existing_phy_index = SCI_MAX_PHYS;
u32 index;
- if ((iport->physical_port_index == 1) && (phy_index != 1)) {
+ if ((iport->physical_port_index == 1) && (phy_index != 1))
return false;
- }
- if (iport->physical_port_index == 3 && phy_index != 3) {
+ if (iport->physical_port_index == 3 && phy_index != 3)
return false;
- }
- if (
- (iport->physical_port_index == 2)
- && ((phy_index == 0) || (phy_index == 1))
- ) {
+ if (iport->physical_port_index == 2 &&
+ (phy_index == 0 || phy_index == 1))
return false;
- }
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- if ((iport->phy_table[index] != NULL)
- && (index != phy_index)) {
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index] && index != phy_index)
existing_phy_index = index;
- }
- }
- /*
- * Ensure that all of the phys in the port are capable of
- * operating at the same maximum link rate. */
- if (
- (existing_phy_index < SCI_MAX_PHYS)
- && (iport->owning_controller->user_parameters.sds1.phys[
- phy_index].max_speed_generation !=
- iport->owning_controller->user_parameters.sds1.phys[
- existing_phy_index].max_speed_generation)
- )
+ /* Ensure that all of the phys in the port are capable of
+ * operating at the same maximum link rate.
+ */
+ if (existing_phy_index < SCI_MAX_PHYS &&
+ user->phys[phy_index].max_speed_generation !=
+ user->phys[existing_phy_index].max_speed_generation)
return false;
return true;
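
Spelling out the index checks above for the standard four-phy layout: port 0 may take phys 0-3, port 1 only phy 1, port 2 only phys 2-3, and port 3 only phy 3. As a derived table (illustrative only, not driver code):

/* derived from the checks above; illustrative only */
static const u16 allowed_phys_by_port[] = {
	0xf,	/* port 0: phys 0-3 */
	0x2,	/* port 1: phy 1 only */
	0xc,	/* port 2: phys 2-3 */
	0x8,	/* port 3: phy 3 only */
};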
 * phy mask can be supported. Returns true if this is a valid phy assignment
 * for the port; false if this is not a valid phy assignment for the port
*/
-static bool scic_sds_port_is_phy_mask_valid(
+static bool sci_port_is_phy_mask_valid(
struct isci_port *iport,
u32 phy_mask)
{
* the port. Currently, the lowest order phy that is connected is returned.
 * This method returns a pointer to a phy object. NULL is
* returned if there are no currently active (i.e. connected to a remote end
- * point) phys contained in the port. All other values specify a struct scic_sds_phy
+ * point) phys contained in the port. All other values specify a struct sci_phy
* object that is active in the port.
*/
-static struct isci_phy *scic_sds_port_get_a_connected_phy(struct isci_port *iport)
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
{
u32 index;
struct isci_phy *iphy;
* connected to the remote end-point.
*/
iphy = iport->phy_table[index];
- if (iphy && scic_sds_port_active_phy(iport, iphy))
+ if (iphy && sci_port_active_phy(iport, iphy))
return iphy;
}
return NULL;
}
-static enum sci_status scic_sds_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
{
/* Check to see if we can add this phy to a port
* that means that the phy is not part of a port and that the port does
*/
if (!iport->phy_table[iphy->phy_index] &&
!phy_get_non_dummy_port(iphy) &&
- scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
/* Phy is being added in the stopped state so we are in MPC mode
* make logical port index = physical port index
*/
iport->logical_port_index = iport->physical_port_index;
iport->phy_table[iphy->phy_index] = iphy;
- scic_sds_phy_set_port(iphy, iport);
+ sci_phy_set_port(iphy, iport);
return SCI_SUCCESS;
}
return SCI_FAILURE;
}
-static enum sci_status scic_sds_port_clear_phy(struct isci_port *iport,
- struct isci_phy *iphy)
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
{
/* Make sure that this phy is part of this port */
if (iport->phy_table[iphy->phy_index] == iphy &&
struct isci_host *ihost = iport->owning_controller;
/* Yep it is assigned to this port so remove it */
- scic_sds_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
iport->phy_table[iphy->phy_index] = NULL;
return SCI_SUCCESS;
}
return SCI_FAILURE;
}
-
-/**
- * This method requests the SAS address for the supplied SAS port from the SCI
- * implementation.
- * @sci_port: a handle corresponding to the SAS port for which to return the
- * SAS address.
- * @sas_address: This parameter specifies a pointer to a SAS address structure
- * into which the core will copy the SAS address for the port.
- *
- */
-void scic_sds_port_get_sas_address(
- struct isci_port *iport,
- struct sci_sas_address *sas_address)
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
u32 index;
- sas_address->high = 0;
- sas_address->low = 0;
-
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- if (iport->phy_table[index] != NULL) {
- scic_sds_phy_get_sas_address(iport->phy_table[index], sas_address);
- }
- }
+ sas->high = 0;
+ sas->low = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ sci_phy_get_sas_address(iport->phy_table[index], sas);
}
-/*
- * This function requests the SAS address for the device directly attached to
- * this SAS port.
- * @sci_port: a handle corresponding to the SAS port for which to return the
- * SAS address.
- * @sas_address: This parameter specifies a pointer to a SAS address structure
- * into which the core will copy the SAS address for the device directly
- * attached to the port.
- *
- */
-void scic_sds_port_get_attached_sas_address(
- struct isci_port *iport,
- struct sci_sas_address *sas_address)
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
{
struct isci_phy *iphy;
* Ensure that the phy is both part of the port and currently
* connected to the remote end-point.
*/
- iphy = scic_sds_port_get_a_connected_phy(iport);
+ iphy = sci_port_get_a_connected_phy(iport);
if (iphy) {
if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
- scic_sds_phy_get_attached_sas_address(iphy,
- sas_address);
+ sci_phy_get_attached_sas_address(iphy, sas);
} else {
- scic_sds_phy_get_sas_address(iphy, sas_address);
- sas_address->low += iphy->phy_index;
+ sci_phy_get_sas_address(iphy, sas);
+ sas->low += iphy->phy_index;
}
} else {
- sas_address->high = 0;
- sas_address->low = 0;
+ sas->high = 0;
+ sas->low = 0;
}
}
/**
- * scic_sds_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
*
* @sci_port: logical port on which we need to create the remote node context
* @rni: remote node index for this remote node context.
* This structure will be posted to the hardware to work around a scheduler
* error in the hardware.
*/
-static void scic_sds_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
union scu_remote_node_context *rnc;
 * structure will be posted to the hardware to work around a scheduler error
* in the hardware.
*/
-static void scic_sds_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
struct isci_host *ihost = iport->owning_controller;
struct scu_task_context *task_context;
task_context->task_phase = 0x01;
}
-static void scic_sds_port_destroy_dummy_resources(struct isci_port *iport)
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
{
struct isci_host *ihost = iport->owning_controller;
isci_free_tag(ihost, iport->reserved_tag);
if (iport->reserved_rni != SCU_DUMMY_INDEX)
- scic_sds_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+ sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
1, iport->reserved_rni);
iport->reserved_rni = SCU_DUMMY_INDEX;
iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
}
-/**
- * This method performs initialization of the supplied port. Initialization
- * includes: - state machine initialization - member variable initialization
- * - configuring the phy_mask
- * @sci_port:
- * @transport_layer_registers:
- * @port_task_scheduler_registers:
- * @port_configuration_regsiter:
- *
- * enum sci_status SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION This value is returned
- * if the phy being added to the port
- */
-enum sci_status scic_sds_port_initialize(
- struct isci_port *iport,
- void __iomem *port_task_scheduler_registers,
- void __iomem *port_configuration_regsiter,
- void __iomem *viit_registers)
-{
- iport->port_task_scheduler_registers = port_task_scheduler_registers;
- iport->port_pe_configuration_register = port_configuration_regsiter;
- iport->viit_registers = viit_registers;
-
- return SCI_SUCCESS;
-}
-
-
-/**
- * This method assigns the direct attached device ID for this port.
- *
- * @param[in] iport The port for which the direct attached device id is to
- * be assigned.
- * @param[in] device_id The direct attached device ID to assign to the port.
- * This will be the RNi for the device
- */
-void scic_sds_port_setup_transports(
- struct isci_port *iport,
- u32 device_id)
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
{
u8 index;
for (index = 0; index < SCI_MAX_PHYS; index++) {
if (iport->active_phy_mask & (1 << index))
- scic_sds_phy_setup_transport(iport->phy_table[index], device_id);
+ sci_phy_setup_transport(iport->phy_table[index], device_id);
}
}
-/**
- *
- * @sci_port: This is the port on which the phy should be enabled.
- * @sci_phy: This is the specific phy which to enable.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- * scic_cb_port_link_up()) as to the fact that a new phy as become ready.
- *
- * This function will activate the phy in the port.
- * Activation includes: - adding
- * the phy to the port - enabling the Protocol Engine in the silicon. -
- * notifying the user that the link is up. none
- */
-static void scic_sds_port_activate_phy(struct isci_port *iport,
- struct isci_phy *iphy,
- bool do_notify_user)
+static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
{
struct isci_host *ihost = iport->owning_controller;
if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
- scic_sds_phy_resume(iphy);
+ sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
- scic_sds_controller_clear_invalid_phy(ihost, iphy);
+ sci_controller_clear_invalid_phy(ihost, iphy);
if (do_notify_user == true)
isci_port_link_up(ihost, iport, iphy);
}
-void scic_sds_port_deactivate_phy(struct isci_port *iport,
- struct isci_phy *iphy,
- bool do_notify_user)
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
{
- struct isci_host *ihost = scic_sds_port_get_controller(iport);
+ struct isci_host *ihost = sci_port_get_controller(iport);
iport->active_phy_mask &= ~(1 << iphy->phy_index);
isci_port_link_down(ihost, iphy, iport);
}
-/**
- *
- * @sci_port: This is the port on which the phy should be disabled.
- * @sci_phy: This is the specific phy which to disabled.
- *
- * This function will disable the phy and report that the phy is not valid for
- * this port object. None
- */
-static void scic_sds_port_invalid_link_up(struct isci_port *iport,
- struct isci_phy *iphy)
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
{
struct isci_host *ihost = iport->owning_controller;
* invalid link.
*/
if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
- scic_sds_controller_set_invalid_phy(ihost, iphy);
+ sci_controller_set_invalid_phy(ihost, iphy);
dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
}
}
-static bool is_port_ready_state(enum scic_sds_port_states state)
+static bool is_port_ready_state(enum sci_port_states state)
{
switch (state) {
case SCI_PORT_READY:
/* flag dummy rnc handling when exiting a ready state */
static void port_state_machine_change(struct isci_port *iport,
- enum scic_sds_port_states state)
+ enum sci_port_states state)
{
struct sci_base_state_machine *sm = &iport->sm;
- enum scic_sds_port_states old_state = sm->current_state_id;
+ enum sci_port_states old_state = sm->current_state_id;
if (is_port_ready_state(old_state) && !is_port_ready_state(state))
iport->ready_exit = true;
}
/**
- * scic_sds_port_general_link_up_handler - phy can be assigned to port?
- * @sci_port: scic_sds_port object for which has a phy that has gone link up.
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @sci_port: the port that has a phy which has gone link up.
* @sci_phy: This is the struct isci_phy object that has gone link up.
* @do_notify_user: This parameter specifies whether to inform the user (via
- * scic_cb_port_link_up()) as to the fact that a new phy as become ready.
+ * isci_port_link_up()) that a new phy has become ready.
*
* Determine if this phy can be assigned to this
 * port. If the phy is not a valid PHY for
 * part of a port if its attached SAS address is the same as all other PHYs in
* the same port. none
*/
-static void scic_sds_port_general_link_up_handler(struct isci_port *iport,
+static void sci_port_general_link_up_handler(struct isci_port *iport,
struct isci_phy *iphy,
bool do_notify_user)
{
struct sci_sas_address port_sas_address;
struct sci_sas_address phy_sas_address;
- scic_sds_port_get_attached_sas_address(iport, &port_sas_address);
- scic_sds_phy_get_attached_sas_address(iphy, &phy_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
/* If the SAS address of the new phy matches the SAS address of
* other phys in the port OR this is the first phy in the port,
iport->active_phy_mask == 0) {
struct sci_base_state_machine *sm = &iport->sm;
- scic_sds_port_activate_phy(iport, iphy, do_notify_user);
+ sci_port_activate_phy(iport, iphy, do_notify_user);
if (sm->current_state_id == SCI_PORT_RESETTING)
port_state_machine_change(iport, SCI_PORT_READY);
} else
- scic_sds_port_invalid_link_up(iport, iphy);
+ sci_port_invalid_link_up(iport, iphy);
}
 * bool. true is returned if this is a wide ported port. false is returned if
* this is a narrow port.
*/
-static bool scic_sds_port_is_wide(struct isci_port *iport)
+static bool sci_port_is_wide(struct isci_port *iport)
{
u32 index;
u32 phy_count = 0;
* wide ports and direct attached phys. Since there are no wide ported SATA
* devices this could become an invalid port configuration.
*/
-bool scic_sds_port_link_detected(
+bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
(iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
- scic_sds_port_is_wide(iport)) {
- scic_sds_port_invalid_link_up(iport, iphy);
+ sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
return false;
}
*
*
*/
-static void scic_sds_port_update_viit_entry(struct isci_port *iport)
+static void sci_port_update_viit_entry(struct isci_port *iport)
{
struct sci_sas_address sas_address;
- scic_sds_port_get_sas_address(iport, &sas_address);
+ sci_port_get_sas_address(iport, &sas_address);
writel(sas_address.high,
&iport->viit_registers->initiator_sas_address_hi);
&iport->viit_registers->status);
}
-enum sas_linkrate scic_sds_port_get_max_allowed_speed(struct isci_port *iport)
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
u16 index;
struct isci_phy *iphy;
* lowest maximum link rate. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
iphy = iport->phy_table[index];
- if (iphy && scic_sds_port_active_phy(iport, iphy) &&
+ if (iphy && sci_port_active_phy(iport, iphy) &&
iphy->max_negotiated_speed < max_allowed_speed)
max_allowed_speed = iphy->max_negotiated_speed;
}
return max_allowed_speed;
}
-static void scic_sds_port_suspend_port_task_scheduler(struct isci_port *iport)
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
u32 pts_control_value;
}
/**
- * scic_sds_port_post_dummy_request() - post dummy/workaround request
+ * sci_port_post_dummy_request() - post dummy/workaround request
* @sci_port: port to post task
*
* Prevent the hardware scheduler from posting new requests to the front
* ongoing requests.
*
*/
-static void scic_sds_port_post_dummy_request(struct isci_port *iport)
+static void sci_port_post_dummy_request(struct isci_port *iport)
{
struct isci_host *ihost = iport->owning_controller;
u16 tag = iport->reserved_tag;
iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
ISCI_TAG_TCI(tag);
- scic_sds_controller_post_request(ihost, command);
+ sci_controller_post_request(ihost, command);
}
/**
* @sci_port: The port on which the task must be aborted.
*
*/
-static void scic_sds_port_abort_dummy_request(struct isci_port *iport)
+static void sci_port_abort_dummy_request(struct isci_port *iport)
{
struct isci_host *ihost = iport->owning_controller;
u16 tag = iport->reserved_tag;
iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
ISCI_TAG_TCI(tag);
- scic_sds_controller_post_request(ihost, command);
+ sci_controller_post_request(ihost, command);
}
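The two dummy-task helpers above build the same SCU post-request descriptor:
the task context index of the port's reserved tag in the low bits, the logical
port in the SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT field, and a request-type
code in the high bits. The request-type constants are elided from the hunks
above, so the constant name in this sketch is an assumption:

	/* sketch: assembling a dummy-task post for a port; the request-type
	 * constant below is assumed, everything else appears in the hunks.
	 */
	u32 command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC | /* assumed name */
		      iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		      ISCI_TAG_TCI(iport->reserved_tag);

	sci_controller_post_request(iport->owning_controller, command);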
/**
* This method will resume the port task scheduler for this port object. none
*/
static void
-scic_sds_port_resume_port_task_scheduler(struct isci_port *iport)
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
u32 pts_control_value;
writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
-static void scic_sds_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
- scic_sds_port_suspend_port_task_scheduler(iport);
+ sci_port_suspend_port_task_scheduler(iport);
iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
}
}
-static void scic_sds_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
u32 index;
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
}
}
- scic_sds_port_update_viit_entry(iport);
+ sci_port_update_viit_entry(iport);
- scic_sds_port_resume_port_task_scheduler(iport);
+ sci_port_resume_port_task_scheduler(iport);
/*
* Post the dummy task for the port so the hardware can schedule
* io correctly
*/
- scic_sds_port_post_dummy_request(iport);
+ sci_port_post_dummy_request(iport);
}
-static void scic_sds_port_invalidate_dummy_remote_node(struct isci_port *iport)
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
struct isci_host *ihost = iport->owning_controller;
u8 phys_index = iport->physical_port_index;
command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
- scic_sds_controller_post_request(ihost, command);
+ sci_controller_post_request(ihost, command);
}
/**
* exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
* the port not ready and suspends the port task scheduler. none
*/
-static void scic_sds_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
struct isci_host *ihost = iport->owning_controller;
* the hardware will treat this as a NOP and just return abort
* complete.
*/
- scic_sds_port_abort_dummy_request(iport);
+ sci_port_abort_dummy_request(iport);
isci_port_not_ready(ihost, iport);
if (iport->ready_exit)
- scic_sds_port_invalidate_dummy_remote_node(iport);
+ sci_port_invalidate_dummy_remote_node(iport);
}
-static void scic_sds_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
struct isci_host *ihost = iport->owning_controller;
SCI_PORT_SUB_OPERATIONAL);
}
-static void scic_sds_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
+static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
- scic_sds_port_suspend_port_task_scheduler(iport);
+ sci_port_suspend_port_task_scheduler(iport);
if (iport->ready_exit)
- scic_sds_port_invalidate_dummy_remote_node(iport);
+ sci_port_invalidate_dummy_remote_node(iport);
}
-enum sci_status scic_sds_port_start(struct isci_port *iport)
+enum sci_status sci_port_start(struct isci_port *iport)
{
struct isci_host *ihost = iport->owning_controller;
enum sci_status status = SCI_SUCCESS;
- enum scic_sds_port_states state;
+ enum sci_port_states state;
u32 phy_mask;
state = iport->sm.current_state_id;
}
if (iport->reserved_rni == SCU_DUMMY_INDEX) {
- u16 rni = scic_sds_remote_node_table_allocate_remote_node(
+ u16 rni = sci_remote_node_table_allocate_remote_node(
&ihost->available_remote_nodes, 1);
if (rni != SCU_DUMMY_INDEX)
- scic_sds_port_construct_dummy_rnc(iport, rni);
+ sci_port_construct_dummy_rnc(iport, rni);
else
status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
iport->reserved_rni = rni;
if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
else
- scic_sds_port_construct_dummy_task(iport, tag);
+ sci_port_construct_dummy_task(iport, tag);
iport->reserved_tag = tag;
}
if (status == SCI_SUCCESS) {
- phy_mask = scic_sds_port_get_phys(iport);
+ phy_mask = sci_port_get_phys(iport);
/*
* There are one or more phys assigned to this port. Make sure
* the port's phy mask is in fact legal and supported by the
* silicon.
*/
- if (scic_sds_port_is_phy_mask_valid(iport, phy_mask) == true) {
+ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
port_state_machine_change(iport,
SCI_PORT_READY);
}
if (status != SCI_SUCCESS)
- scic_sds_port_destroy_dummy_resources(iport);
+ sci_port_destroy_dummy_resources(iport);
return status;
}
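sci_port_start() reserves two dummy resources per port: a remote node index
from the controller's remote node table and an I/O tag for the dummy task;
sci_port_destroy_dummy_resources() releases both. A condensed sketch of that
lifecycle, assuming isci_alloc_tag() as the allocator paired with the
isci_free_tag() call shown earlier:

	/* sketch: reserve the dummy RNC and dummy task for a port */
	u16 rni = sci_remote_node_table_allocate_remote_node(
			&ihost->available_remote_nodes, 1);
	u16 tag = isci_alloc_tag(ihost); /* assumed allocator */

	if (rni == SCU_DUMMY_INDEX || tag == SCI_CONTROLLER_INVALID_IO_TAG)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;

	sci_port_construct_dummy_rnc(iport, rni);
	sci_port_construct_dummy_task(iport, tag);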
-enum sci_status scic_sds_port_stop(struct isci_port *iport)
+enum sci_status sci_port_stop(struct isci_port *iport)
{
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
}
}
-static enum sci_status scic_port_hard_reset(struct isci_port *iport, u32 timeout)
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
enum sci_status status = SCI_FAILURE_INVALID_PHY;
struct isci_phy *iphy = NULL;
- enum scic_sds_port_states state;
+ enum sci_port_states state;
u32 phy_index;
state = iport->sm.current_state_id;
/* Select a phy on which we can send the hard reset request. */
for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
iphy = iport->phy_table[phy_index];
- if (iphy && !scic_sds_port_active_phy(iport, iphy)) {
+ if (iphy && !sci_port_active_phy(iport, iphy)) {
/*
 * We found a phy but it is not ready; select a
 * different phy
/* If we have a phy then go ahead and start the reset procedure */
if (!iphy)
return status;
- status = scic_sds_phy_reset(iphy);
+ status = sci_phy_reset(iphy);
if (status != SCI_SUCCESS)
return status;
}
/**
- * scic_sds_port_add_phy() -
+ * sci_port_add_phy() -
 * @sci_port: This parameter specifies the port to which the phy will be added.
* @sci_phy: This parameter is the phy which is to be added to the port.
*
 * enum sci_status. SCI_SUCCESS if the phy has been added to the port. Any other
 * status indicates a failure to add the phy to the port.
*/
-enum sci_status scic_sds_port_add_phy(struct isci_port *iport,
+enum sci_status sci_port_add_phy(struct isci_port *iport,
struct isci_phy *iphy)
{
enum sci_status status;
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
struct sci_sas_address port_sas_address;
/* Read the port assigned SAS Address if there is one */
- scic_sds_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_sas_address(iport, &port_sas_address);
if (port_sas_address.high != 0 && port_sas_address.low != 0) {
struct sci_sas_address phy_sas_address;
/* Make sure that the PHY SAS Address matches the SAS Address
* for this port
*/
- scic_sds_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
if (port_sas_address.high != phy_sas_address.high ||
port_sas_address.low != phy_sas_address.low)
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
}
- return scic_sds_port_set_phy(iport, iphy);
+ return sci_port_set_phy(iport, iphy);
}
case SCI_PORT_SUB_WAITING:
case SCI_PORT_SUB_OPERATIONAL:
- status = scic_sds_port_set_phy(iport, iphy);
+ status = sci_port_set_phy(iport, iphy);
if (status != SCI_SUCCESS)
return status;
- scic_sds_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, true);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
return status;
case SCI_PORT_SUB_CONFIGURING:
- status = scic_sds_port_set_phy(iport, iphy);
+ status = sci_port_set_phy(iport, iphy);
if (status != SCI_SUCCESS)
return status;
- scic_sds_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, true);
/* Re-enter the configuring state since this may be the last phy in
* the port.
}
/**
- * scic_sds_port_remove_phy() -
+ * sci_port_remove_phy() -
 * @sci_port: This parameter specifies the port from which the phy will be removed.
 * @sci_phy: This parameter is the phy which is to be removed from the port.
 *
 * an enum sci_status. SCI_SUCCESS if the phy has been removed from the port. Any
 * other status indicates a failure to remove the phy from the port.
*/
-enum sci_status scic_sds_port_remove_phy(struct isci_port *iport,
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
struct isci_phy *iphy)
{
enum sci_status status;
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED:
- return scic_sds_port_clear_phy(iport, iphy);
+ return sci_port_clear_phy(iport, iphy);
case SCI_PORT_SUB_OPERATIONAL:
- status = scic_sds_port_clear_phy(iport, iphy);
+ status = sci_port_clear_phy(iport, iphy);
if (status != SCI_SUCCESS)
return status;
- scic_sds_port_deactivate_phy(iport, iphy, true);
+ sci_port_deactivate_phy(iport, iphy, true);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport,
SCI_PORT_SUB_CONFIGURING);
return SCI_SUCCESS;
case SCI_PORT_SUB_CONFIGURING:
- status = scic_sds_port_clear_phy(iport, iphy);
+ status = sci_port_clear_phy(iport, iphy);
if (status != SCI_SUCCESS)
return status;
- scic_sds_port_deactivate_phy(iport, iphy, true);
+ sci_port_deactivate_phy(iport, iphy, true);
/* Re-enter the configuring state since this may be the last phy in
* the port
}
}
-enum sci_status scic_sds_port_link_up(struct isci_port *iport,
+enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy)
{
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
/* Since this is the first phy going link up for the port we
* can just enable it and continue
*/
- scic_sds_port_activate_phy(iport, iphy, true);
+ sci_port_activate_phy(iport, iphy, true);
port_state_machine_change(iport,
SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
case SCI_PORT_SUB_OPERATIONAL:
- scic_sds_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, true);
return SCI_SUCCESS;
case SCI_PORT_RESETTING:
/* TODO We should make sure that the phy that has gone
/* In the resetting state we don't notify the user regarding
* link up and link down notifications.
*/
- scic_sds_port_general_link_up_handler(iport, iphy, false);
+ sci_port_general_link_up_handler(iport, iphy, false);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
}
}
-enum sci_status scic_sds_port_link_down(struct isci_port *iport,
+enum sci_status sci_port_link_down(struct isci_port *iport,
struct isci_phy *iphy)
{
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_SUB_OPERATIONAL:
- scic_sds_port_deactivate_phy(iport, iphy, true);
+ sci_port_deactivate_phy(iport, iphy, true);
/* If there are no active phys left in the port, then
* transition the port to the WAITING state until such time
case SCI_PORT_RESETTING:
/* In the resetting state we don't notify the user regarding
* link up and link down notifications. */
- scic_sds_port_deactivate_phy(iport, iphy, false);
+ sci_port_deactivate_phy(iport, iphy, false);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
}
}
-enum sci_status scic_sds_port_start_io(struct isci_port *iport,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_port_start_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
}
}
-enum sci_status scic_sds_port_complete_io(struct isci_port *iport,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
- enum scic_sds_port_states state;
+ enum sci_port_states state;
state = iport->sm.current_state_id;
switch (state) {
"%s: in wrong state: %d\n", __func__, state);
return SCI_FAILURE_INVALID_STATE;
case SCI_PORT_STOPPING:
- scic_sds_port_decrement_request_count(iport);
+ sci_port_decrement_request_count(iport);
if (iport->started_request_count == 0)
port_state_machine_change(iport,
case SCI_PORT_FAILED:
case SCI_PORT_SUB_WAITING:
case SCI_PORT_SUB_OPERATIONAL:
- scic_sds_port_decrement_request_count(iport);
+ sci_port_decrement_request_count(iport);
break;
case SCI_PORT_SUB_CONFIGURING:
- scic_sds_port_decrement_request_count(iport);
+ sci_port_decrement_request_count(iport);
if (iport->started_request_count == 0) {
port_state_machine_change(iport,
SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
}
-/**
- *
- * @sci_port: This is the port object which to suspend.
- *
- * This method will enable the SCU Port Task Scheduler for this port object but
- * will leave the port task scheduler in a suspended state. none
- */
-static void
-scic_sds_port_enable_port_task_scheduler(struct isci_port *iport)
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
u32 pts_control_value;
+ /* enable the port task scheduler in a suspended state */
pts_control_value = readl(&iport->port_task_scheduler_registers->control);
pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
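Both port task scheduler helpers follow the same read-modify-write pattern on
the PTSxCR control register. The disable path's read and bit-clear steps are
elided from the hunk below; a sketch of what that path presumably does,
reusing the bit names from the enable path above:

	/* sketch: disable the port task scheduler (read-modify-write) */
	u32 pts_control_value =
		readl(&iport->port_task_scheduler_registers->control);

	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);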
-/**
- *
- * @sci_port: This is the port object which to resume.
- *
- * This method will disable the SCU port task scheduler for this port object.
- * none
- */
-static void
-scic_sds_port_disable_port_task_scheduler(struct isci_port *iport)
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
u32 pts_control_value;
writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
-static void scic_sds_port_post_dummy_remote_node(struct isci_port *iport)
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
struct isci_host *ihost = iport->owning_controller;
u8 phys_index = iport->physical_port_index;
command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
- scic_sds_controller_post_request(ihost, command);
+ sci_controller_post_request(ihost, command);
/* ensure hardware has seen the post rnc command and give it
* ample time to act before sending the suspend
command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
- scic_sds_controller_post_request(ihost, command);
+ sci_controller_post_request(ihost, command);
}
-static void scic_sds_port_stopped_state_enter(struct sci_base_state_machine *sm)
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
 * If we enter this state because of a request to stop
 * the port then we want to disable the hardware's port
 * task scheduler. */
- scic_sds_port_disable_port_task_scheduler(iport);
+ sci_port_disable_port_task_scheduler(iport);
}
}
-static void scic_sds_port_stopped_state_exit(struct sci_base_state_machine *sm)
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
/* Enable and suspend the port task scheduler */
- scic_sds_port_enable_port_task_scheduler(iport);
+ sci_port_enable_port_task_scheduler(iport);
}
-static void scic_sds_port_ready_state_enter(struct sci_base_state_machine *sm)
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
struct isci_host *ihost = iport->owning_controller;
isci_port_not_ready(ihost, iport);
/* Post and suspend the dummy remote node context for this port. */
- scic_sds_port_post_dummy_remote_node(iport);
+ sci_port_post_dummy_remote_node(iport);
/* Start the ready substate machine */
port_state_machine_change(iport,
SCI_PORT_SUB_WAITING);
}
-static void scic_sds_port_resetting_state_exit(struct sci_base_state_machine *sm)
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
sci_del_timer(&iport->timer);
}
-static void scic_sds_port_stopping_state_exit(struct sci_base_state_machine *sm)
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
sci_del_timer(&iport->timer);
- scic_sds_port_destroy_dummy_resources(iport);
+ sci_port_destroy_dummy_resources(iport);
}
-static void scic_sds_port_failed_state_enter(struct sci_base_state_machine *sm)
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
struct isci_port *iport = container_of(sm, typeof(*iport), sm);
/* --------------------------------------------------------------------------- */
-static const struct sci_base_state scic_sds_port_state_table[] = {
+static const struct sci_base_state sci_port_state_table[] = {
[SCI_PORT_STOPPED] = {
- .enter_state = scic_sds_port_stopped_state_enter,
- .exit_state = scic_sds_port_stopped_state_exit
+ .enter_state = sci_port_stopped_state_enter,
+ .exit_state = sci_port_stopped_state_exit
},
[SCI_PORT_STOPPING] = {
- .exit_state = scic_sds_port_stopping_state_exit
+ .exit_state = sci_port_stopping_state_exit
},
[SCI_PORT_READY] = {
- .enter_state = scic_sds_port_ready_state_enter,
+ .enter_state = sci_port_ready_state_enter,
},
[SCI_PORT_SUB_WAITING] = {
- .enter_state = scic_sds_port_ready_substate_waiting_enter,
+ .enter_state = sci_port_ready_substate_waiting_enter,
},
[SCI_PORT_SUB_OPERATIONAL] = {
- .enter_state = scic_sds_port_ready_substate_operational_enter,
- .exit_state = scic_sds_port_ready_substate_operational_exit
+ .enter_state = sci_port_ready_substate_operational_enter,
+ .exit_state = sci_port_ready_substate_operational_exit
},
[SCI_PORT_SUB_CONFIGURING] = {
- .enter_state = scic_sds_port_ready_substate_configuring_enter,
- .exit_state = scic_sds_port_ready_substate_configuring_exit
+ .enter_state = sci_port_ready_substate_configuring_enter,
+ .exit_state = sci_port_ready_substate_configuring_exit
},
[SCI_PORT_RESETTING] = {
- .exit_state = scic_sds_port_resetting_state_exit
+ .exit_state = sci_port_resetting_state_exit
},
[SCI_PORT_FAILED] = {
- .enter_state = scic_sds_port_failed_state_enter,
+ .enter_state = sci_port_failed_state_enter,
}
};
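The port state machine is table-driven: each state optionally supplies
enter_state/exit_state hooks, and sci_port_construct() below seeds the machine
in SCI_PORT_STOPPED. A sketch of how a table-driven transition runs those
hooks; the driver's own sci_init_sm()/sci_change_state() are assumed to behave
equivalently:

	/* sketch: exit the old state, then enter the new one */
	static void change_state_sketch(struct sci_base_state_machine *sm,
					const struct sci_base_state *table,
					u32 next_state)
	{
		if (table[sm->current_state_id].exit_state)
			table[sm->current_state_id].exit_state(sm);
		sm->current_state_id = next_state;
		if (table[next_state].enter_state)
			table[next_state].enter_state(sm);
	}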
-void scic_sds_port_construct(struct isci_port *iport, u8 index,
+void sci_port_construct(struct isci_port *iport, u8 index,
struct isci_host *ihost)
{
- sci_init_sm(&iport->sm, scic_sds_port_state_table, SCI_PORT_STOPPED);
+ sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
iport->physical_port_index = index;
return isci_port->status;
}
-void scic_sds_port_broadcast_change_received(
- struct isci_port *iport,
- struct isci_phy *iphy)
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
struct isci_host *ihost = iport->owning_controller;
spin_lock_irqsave(&ihost->scic_lock, flags);
#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
- status = scic_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+ status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
ret = TMF_RESP_FUNC_FAILED;
dev_err(&ihost->pdev->dev,
- "%s: iport = %p; scic_port_hard_reset call"
+ "%s: iport = %p; sci_port_hard_reset call"
" failed 0x%x\n",
__func__, iport, status);
if (!iphy)
continue;
- scic_sds_phy_stop(iphy);
- scic_sds_phy_start(iphy);
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
struct scu_viit_entry __iomem *viit_registers;
};
-enum scic_port_not_ready_reason_code {
+enum sci_port_not_ready_reason_code {
SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
SCIC_PORT_NOT_READY_REASON_CODE_MAX
};
-struct scic_port_end_point_properties {
+struct sci_port_end_point_properties {
struct sci_sas_address sas_address;
- struct scic_phy_proto protocols;
+ struct sci_phy_proto protocols;
};
-struct scic_port_properties {
+struct sci_port_properties {
u32 index;
- struct scic_port_end_point_properties local;
- struct scic_port_end_point_properties remote;
+ struct sci_port_end_point_properties local;
+ struct sci_port_end_point_properties remote;
u32 phy_mask;
};
/**
- * enum scic_sds_port_states - This enumeration depicts all the states for the
+ * enum sci_port_states - This enumeration depicts all the states for the
* common port state machine.
*
*
*/
-enum scic_sds_port_states {
+enum sci_port_states {
/**
* This state indicates that the port has successfully been stopped.
* In this state no new IO operations are permitted.
};
/**
- * scic_sds_port_get_controller() -
+ * sci_port_get_controller() -
*
* Helper macro to get the owning controller of this port
*/
-#define scic_sds_port_get_controller(this_port) \
+#define sci_port_get_controller(this_port) \
((this_port)->owning_controller)
/**
- * scic_sds_port_get_index() -
+ * sci_port_get_index() -
*
* This macro returns the physical port index for this port object
*/
-#define scic_sds_port_get_index(this_port) \
+#define sci_port_get_index(this_port) \
((this_port)->physical_port_index)
-static inline void scic_sds_port_decrement_request_count(struct isci_port *iport)
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
{
if (WARN_ONCE(iport->started_request_count == 0,
"%s: tried to decrement started_request_count past 0!?",
iport->started_request_count--;
}
-#define scic_sds_port_active_phy(port, phy) \
+#define sci_port_active_phy(port, phy) \
(((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
-void scic_sds_port_construct(
+void sci_port_construct(
struct isci_port *iport,
u8 port_index,
struct isci_host *ihost);
-enum sci_status scic_sds_port_initialize(
- struct isci_port *iport,
- void __iomem *port_task_scheduler_registers,
- void __iomem *port_configuration_regsiter,
- void __iomem *viit_registers);
-
-enum sci_status scic_sds_port_start(struct isci_port *iport);
-enum sci_status scic_sds_port_stop(struct isci_port *iport);
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
-enum sci_status scic_sds_port_add_phy(
+enum sci_status sci_port_add_phy(
struct isci_port *iport,
struct isci_phy *iphy);
-enum sci_status scic_sds_port_remove_phy(
+enum sci_status sci_port_remove_phy(
struct isci_port *iport,
struct isci_phy *iphy);
-void scic_sds_port_setup_transports(
+void sci_port_setup_transports(
struct isci_port *iport,
u32 device_id);
void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
-void scic_sds_port_deactivate_phy(
+void sci_port_deactivate_phy(
struct isci_port *iport,
struct isci_phy *iphy,
bool do_notify_user);
-bool scic_sds_port_link_detected(
+bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy);
-enum sci_status scic_sds_port_link_up(struct isci_port *iport,
+enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy);
-enum sci_status scic_sds_port_link_down(struct isci_port *iport,
+enum sci_status sci_port_link_down(struct isci_port *iport,
struct isci_phy *iphy);
struct isci_request;
struct isci_remote_device;
-enum sci_status scic_sds_port_start_io(
+enum sci_status sci_port_start_io(
struct isci_port *iport,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status scic_sds_port_complete_io(
+enum sci_status sci_port_complete_io(
struct isci_port *iport,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sas_linkrate scic_sds_port_get_max_allowed_speed(
+enum sas_linkrate sci_port_get_max_allowed_speed(
struct isci_port *iport);
-void scic_sds_port_broadcast_change_received(
+void sci_port_broadcast_change_received(
struct isci_port *iport,
struct isci_phy *iphy);
-bool scic_sds_port_is_valid_phy_assignment(
+bool sci_port_is_valid_phy_assignment(
struct isci_port *iport,
u32 phy_index);
-void scic_sds_port_get_sas_address(
+void sci_port_get_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
-void scic_sds_port_get_attached_sas_address(
+void sci_port_get_attached_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
 * port. Returns the port address if a port can be found to match the phy,
 * or NULL if there is no matching port for the phy.
*/
-static struct isci_port *scic_sds_port_configuration_agent_find_port(
+static struct isci_port *sci_port_configuration_agent_find_port(
struct isci_host *ihost,
struct isci_phy *iphy)
{
 * more phys match the sent and received SAS address of this phy, in which
 * case they should participate in the same port.
*/
- scic_sds_phy_get_sas_address(iphy, &phy_sas_address);
- scic_sds_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
for (i = 0; i < ihost->logical_port_entries; i++) {
struct isci_port *iport = &ihost->ports[i];
- scic_sds_port_get_sas_address(iport, &port_sas_address);
- scic_sds_port_get_attached_sas_address(iport, &port_attached_device_address);
+ sci_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
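A phy is matched to an existing port only when both address pairs compare
equal: the locally transmitted SAS address and the attached (received) SAS
address. The rule, pulled out as an illustrative helper (the helper name is
hypothetical):

	/* sketch: per-port matching rule used by the loop above */
	static bool phy_matches_port(struct sci_sas_address port_sas,
				     struct sci_sas_address port_attached,
				     struct sci_sas_address phy_sas,
				     struct sci_sas_address phy_attached)
	{
		return sci_sas_address_compare(port_sas, phy_sas) == 0 &&
		       sci_sas_address_compare(port_attached, phy_attached) == 0;
	}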
 * this port configuration agent. SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION is
 * returned if the port configuration is not valid for this port configuration agent.
*/
-static enum sci_status scic_sds_port_configuration_agent_validate_ports(
+static enum sci_status sci_port_configuration_agent_validate_ports(
struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent)
+ struct sci_port_configuration_agent *port_agent)
{
struct sci_sas_address first_address;
struct sci_sas_address second_address;
* PE0 and PE3 can never have the same SAS Address unless they
* are part of the same x4 wide port and we have already checked
* for this condition. */
- scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address);
- scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address);
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
if (sci_sas_address_compare(first_address, second_address) == 0) {
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
* part of the same port. */
if (port_agent->phy_valid_port_range[0].min_index == 0 &&
port_agent->phy_valid_port_range[1].min_index == 1) {
- scic_sds_phy_get_sas_address(&ihost->phys[0], &first_address);
- scic_sds_phy_get_sas_address(&ihost->phys[2], &second_address);
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
if (sci_sas_address_compare(first_address, second_address) == 0) {
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
* part of the same port. */
if (port_agent->phy_valid_port_range[2].min_index == 2 &&
port_agent->phy_valid_port_range[3].min_index == 3) {
- scic_sds_phy_get_sas_address(&ihost->phys[1], &first_address);
- scic_sds_phy_get_sas_address(&ihost->phys[3], &second_address);
+ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
if (sci_sas_address_compare(first_address, second_address) == 0) {
return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
/* verify all of the phys in the same port are using the same SAS address */
static enum sci_status
-scic_sds_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent)
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
{
u32 phy_mask;
u32 assigned_phy_mask;
sas_address.low = 0;
for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
- phy_mask = ihost->oem_parameters.sds1.ports[port_index].phy_mask;
+ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
if (!phy_mask)
continue;
for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
if ((phy_mask & (1 << phy_index)) == 0)
continue;
- scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
&sas_address);
/*
while (phy_index < SCI_MAX_PHYS) {
if ((phy_mask & (1 << phy_index)) == 0)
continue;
- scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
&phy_assigned_address);
if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
port_agent->phy_valid_port_range[phy_index].min_index = port_index;
port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
- scic_sds_port_add_phy(&ihost->ports[port_index],
+ sci_port_add_phy(&ihost->ports[port_index],
&ihost->phys[phy_index]);
assigned_phy_mask |= (1 << phy_index);
phy_index++;
}
- return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent);
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
static void mpc_agent_timeout(unsigned long data)
{
u8 index;
struct sci_timer *tmr = (struct sci_timer *)data;
- struct scic_sds_port_configuration_agent *port_agent;
+ struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
u16 configure_phy_mask;
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
-static void scic_sds_mpc_agent_link_up(struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent,
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
if (!iport)
return;
- port_agent->phy_ready_mask |= (1 << scic_sds_phy_get_index(iphy));
- scic_sds_port_link_up(iport, iphy);
- if ((iport->active_phy_mask & (1 << scic_sds_phy_get_index(iphy))))
- port_agent->phy_configured_mask |= (1 << scic_sds_phy_get_index(iphy));
+ port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy));
+ sci_port_link_up(iport, iphy);
+ if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy))))
+ port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy));
}
/**
 * not associated with a port, there is no action taken. Is it possible to get a
 * link down notification from a phy that has no associated port?
*/
-static void scic_sds_mpc_agent_link_down(
+static void sci_mpc_agent_link_down(
struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent,
+ struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
* state.
*/
port_agent->phy_ready_mask &=
- ~(1 << scic_sds_phy_get_index(iphy));
+ ~(1 << sci_phy_get_index(iphy));
port_agent->phy_configured_mask &=
- ~(1 << scic_sds_phy_get_index(iphy));
+ ~(1 << sci_phy_get_index(iphy));
/*
* Check to see if there are more phys waiting to be
SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
}
- scic_sds_port_link_down(iport, iphy);
+ sci_port_link_down(iport, iphy);
}
}
* configuration mode.
*/
static enum sci_status
-scic_sds_apc_agent_validate_phy_configuration(struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent)
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
{
u8 phy_index;
u8 port_index;
port_index = phy_index;
/* Get the assigned SAS Address for the first PHY on the controller. */
- scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
&sas_address);
while (++phy_index < SCI_MAX_PHYS) {
- scic_sds_phy_get_sas_address(&ihost->phys[phy_index],
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
&phy_assigned_address);
/* Verify each of the SAS address are all the same for every PHY */
}
}
- return scic_sds_port_configuration_agent_validate_ports(ihost, port_agent);
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
-static void scic_sds_apc_agent_configure_ports(struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent,
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
struct isci_phy *iphy,
bool start_timer)
{
struct isci_port *iport;
enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
- iport = scic_sds_port_configuration_agent_find_port(ihost, iphy);
+ iport = sci_port_configuration_agent_find_port(ihost, iphy);
if (iport) {
- if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index))
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
apc_activity = SCIC_SDS_APC_ADD_PHY;
else
apc_activity = SCIC_SDS_APC_SKIP_PHY;
iport = &ihost->ports[port_index];
/* First we must make sure that this PHY can be added to this Port. */
- if (scic_sds_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
/*
* Port contains a PHY with a greater PHY ID than the current
 * PHY that has gone link up. This phy cannot be part of any
switch (apc_activity) {
case SCIC_SDS_APC_ADD_PHY:
- status = scic_sds_port_add_phy(iport, iphy);
+ status = sci_port_add_phy(iport, iphy);
if (status == SCI_SUCCESS) {
port_agent->phy_configured_mask |= (1 << iphy->phy_index);
}
/**
- * scic_sds_apc_agent_link_up - handle apc link up events
+ * sci_apc_agent_link_up - handle apc link up events
* @scic: This is the controller object that receives the link up
* notification.
 * @sci_port: This is the port object associated with the phy. If there is no
* notifications. Is it possible to get a link down notification from a phy
 * that has no associated port?
*/
-static void scic_sds_apc_agent_link_up(struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent,
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
if (!iport) {
/* the phy is not the part of this port */
port_agent->phy_ready_mask |= 1 << phy_index;
- scic_sds_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
} else {
/* the phy is already the part of the port */
u32 port_state = iport->sm.current_state_id;
*/
BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
- scic_sds_port_link_up(iport, iphy);
+ sci_port_link_up(iport, iphy);
}
}
 * possible to get a link down notification from a phy that has no associated
* port?
*/
-static void scic_sds_apc_agent_link_down(
+static void sci_apc_agent_link_down(
struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent,
+ struct sci_port_configuration_agent *port_agent,
struct isci_port *iport,
struct isci_phy *iphy)
{
- port_agent->phy_ready_mask &= ~(1 << scic_sds_phy_get_index(iphy));
+ port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy));
if (!iport)
return;
if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
enum sci_status status;
- status = scic_sds_port_remove_phy(iport, iphy);
+ status = sci_port_remove_phy(iport, iphy);
if (status == SCI_SUCCESS)
port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
{
u32 index;
struct sci_timer *tmr = (struct sci_timer *)data;
- struct scic_sds_port_configuration_agent *port_agent;
+ struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
u16 configure_phy_mask;
if ((configure_phy_mask & (1 << index)) == 0)
continue;
- scic_sds_apc_agent_configure_ports(ihost, port_agent,
+ sci_apc_agent_configure_ports(ihost, port_agent,
&ihost->phys[index], false);
}
* call is universal for both manual port configuration and automatic port
* configuration modes.
*/
-void scic_sds_port_configuration_agent_construct(
- struct scic_sds_port_configuration_agent *port_agent)
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent)
{
u32 index;
}
}
-enum sci_status scic_sds_port_configuration_agent_initialize(
+enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
- struct scic_sds_port_configuration_agent *port_agent)
+ struct sci_port_configuration_agent *port_agent)
{
enum sci_status status;
- enum scic_port_configuration_mode mode;
+ enum sci_port_configuration_mode mode;
- mode = ihost->oem_parameters.sds1.controller.mode_type;
+ mode = ihost->oem_parameters.controller.mode_type;
if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
- status = scic_sds_mpc_agent_validate_phy_configuration(
+ status = sci_mpc_agent_validate_phy_configuration(
ihost, port_agent);
- port_agent->link_up_handler = scic_sds_mpc_agent_link_up;
- port_agent->link_down_handler = scic_sds_mpc_agent_link_down;
+ port_agent->link_up_handler = sci_mpc_agent_link_up;
+ port_agent->link_down_handler = sci_mpc_agent_link_down;
sci_init_timer(&port_agent->timer, mpc_agent_timeout);
} else {
- status = scic_sds_apc_agent_validate_phy_configuration(
+ status = sci_apc_agent_validate_phy_configuration(
ihost, port_agent);
- port_agent->link_up_handler = scic_sds_apc_agent_link_up;
- port_agent->link_down_handler = scic_sds_apc_agent_link_down;
+ port_agent->link_up_handler = sci_apc_agent_link_up;
+ port_agent->link_down_handler = sci_apc_agent_link_down;
sci_init_timer(&port_agent->timer, apc_agent_timeout);
}
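After initialization the agent is just a pair of function pointers plus a
timer: MPC mode installs the sci_mpc_agent_* handlers, APC mode the
sci_apc_agent_* ones. The dispatch site is not part of this patch, so the call
below is an assumption about how the controller invokes the installed handler
on a link-state event:

	/* sketch: controller-side dispatch of a link-up event */
	port_agent->link_up_handler(ihost, port_agent, iport, iphy);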
return rom;
}
-/**
- * isci_parse_oem_parameters() - This method will take OEM parameters
- * from the module init parameters and copy them to oem_params. This will
- * only copy values that are not set to the module parameter default values
- * @oem_parameters: This parameter specifies the controller default OEM
- * parameters. It is expected that this has been initialized to the default
- * parameters for the controller
- *
- *
- */
-enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem_params,
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
struct isci_orom *orom, int scu_index)
{
/* check for valid inputs */
if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
- scu_index > orom->hdr.num_elements || !oem_params)
+ scu_index > orom->hdr.num_elements || !oem)
return -EINVAL;
- oem_params->sds1 = orom->ctrl[scu_index];
+ *oem = orom->ctrl[scu_index];
return 0;
}
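isci_parse_oem_parameters() now copies the per-controller OEM block straight
out of the OROM image. A sketch of a typical call site, assuming the OROM was
already fetched with isci_request_oprom() and that scu_index identifies the
controller instance:

	/* sketch: fetch, parse, and validate a controller's OEM parameters */
	struct sci_oem_params oem;
	struct isci_orom *orom = isci_request_oprom(pdev);

	if (orom && isci_parse_oem_parameters(&oem, orom, scu_index) == 0 &&
	    sci_oem_parameters_validate(&oem) == 0) {
		/* oem is ready for controller construction */
	}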
#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
/* parameters that can be set by module parameters */
-struct scic_sds_user_parameters {
+struct sci_user_parameters {
struct sci_phy_user_params {
/**
* This field specifies the NOTIFY (ENABLE SPIN UP) primitive
};
-/* XXX kill this union */
-union scic_user_parameters {
- /**
- * This field specifies the user parameters specific to the
- * Storage Controller Unit (SCU) Driver Standard (SDS) version
- * 1.
- */
- struct scic_sds_user_parameters sds1;
-};
-
#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
-struct scic_sds_oem_params;
-int scic_oem_parameters_validate(struct scic_sds_oem_params *oem);
-
-union scic_oem_parameters;
-void scic_oem_parameters_get(struct isci_host *ihost,
- union scic_oem_parameters *oem);
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
-enum sci_status isci_parse_oem_parameters(union scic_oem_parameters *oem,
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
struct isci_orom *orom, int scu_index);
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
 * A PORT_PHY mask that assigns just a single PHY to a port, with no other PHYs
 * being assigned, is sufficient to declare manual PORT configuration.
*/
-enum scic_port_configuration_mode {
+enum sci_port_configuration_mode {
SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
};
uint8_t reserved[8];
} __attribute__ ((packed));
-struct scic_sds_oem_params {
+struct sci_oem_params {
struct {
uint8_t mode_type;
uint8_t max_concurrent_dev_spin_up;
} phys[SCI_MAX_PHYS];
} __attribute__ ((packed));
-/* XXX kill this union */
-union scic_oem_parameters {
- /**
- * This field specifies the OEM parameters specific to the
- * Storage Controller Unit (SCU) Driver Standard (SDS) version
- * 1.
- */
- struct scic_sds_oem_params sds1;
-};
-
struct isci_orom {
struct sci_bios_oem_param_block_hdr hdr;
- struct scic_sds_oem_params ctrl[SCI_MAX_CONTROLLERS];
+ struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
} __attribute__ ((packed));
#endif
* @isci_host: This parameter specifies the isci host object.
* @isci_device: This parameter specifies the remote device
*
 * scic_lock is held on entrance to this function.
*/
static void isci_remote_device_not_ready(struct isci_host *ihost,
struct isci_remote_device *idev, u32 reason)
"%s: isci_device = %p request = %p\n",
__func__, idev, ireq);
- scic_controller_terminate_request(ihost,
+ sci_controller_terminate_request(ihost,
idev,
ireq);
}
sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}
-static enum sci_status scic_sds_remote_device_terminate_requests(struct isci_remote_device *idev)
+static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
{
struct isci_host *ihost = idev->owning_port->owning_controller;
enum sci_status status = SCI_SUCCESS;
ireq->target_device != idev)
continue;
- s = scic_controller_terminate_request(ihost, idev, ireq);
+ s = sci_controller_terminate_request(ihost, idev, ireq);
if (s != SCI_SUCCESS)
status = s;
}
return status;
}
-enum sci_status scic_remote_device_stop(struct isci_remote_device *idev,
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
u32 timeout)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
switch (state) {
case SCI_DEV_INITIAL:
case SCI_DEV_STARTING:
/* device not started so there had better be no requests */
BUG_ON(idev->started_request_count != 0);
- scic_sds_remote_node_context_destruct(&idev->rnc,
+ sci_remote_node_context_destruct(&idev->rnc,
rnc_destruct_done, idev);
/* Transition to the stopping state and wait for the
* remote node to complete being posted and invalidated.
case SCI_SMP_DEV_CMD:
sci_change_state(sm, SCI_DEV_STOPPING);
if (idev->started_request_count == 0) {
- scic_sds_remote_node_context_destruct(&idev->rnc,
+ sci_remote_node_context_destruct(&idev->rnc,
rnc_destruct_done, idev);
return SCI_SUCCESS;
} else
- return scic_sds_remote_device_terminate_requests(idev);
+ return sci_remote_device_terminate_requests(idev);
break;
case SCI_DEV_STOPPING:
/* All requests should have been terminated, but if there is an
* attempt to stop a device already in the stopping state, then
* try again to terminate.
*/
- return scic_sds_remote_device_terminate_requests(idev);
+ return sci_remote_device_terminate_requests(idev);
case SCI_DEV_RESETTING:
sci_change_state(sm, SCI_DEV_STOPPING);
return SCI_SUCCESS;
}
}
-enum sci_status scic_remote_device_reset(struct isci_remote_device *idev)
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
switch (state) {
case SCI_DEV_INITIAL:
}
}
-enum sci_status scic_remote_device_reset_complete(struct isci_remote_device *idev)
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_DEV_RESETTING) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
return SCI_SUCCESS;
}
-enum sci_status scic_sds_remote_device_suspend(struct isci_remote_device *idev,
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
u32 suspend_type)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
if (state != SCI_STP_DEV_CMD) {
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
return SCI_FAILURE_INVALID_STATE;
}
- return scic_sds_remote_node_context_suspend(&idev->rnc,
+ return sci_remote_node_context_suspend(&idev->rnc,
suspend_type, NULL, NULL);
}
-enum sci_status scic_sds_remote_device_frame_handler(struct isci_remote_device *idev,
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
struct isci_host *ihost = idev->owning_port->owning_controller;
enum sci_status status;
dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
__func__, state);
/* Return the frame back to the controller */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return SCI_FAILURE_INVALID_STATE;
case SCI_DEV_READY:
case SCI_STP_DEV_NCQ_ERROR:
void *frame_header;
ssize_t word_cnt;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
if (status != SCI_SUCCESS)
word_cnt = sizeof(hdr) / sizeof(u32);
sci_swab32_cpy(&hdr, frame_header, word_cnt);
- ireq = scic_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+ ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
if (ireq && ireq->target_device == idev) {
/* The IO request is now in charge of releasing the frame */
- status = scic_sds_io_request_frame_handler(ireq, frame_index);
+ status = sci_io_request_frame_handler(ireq, frame_index);
} else {
/* We could not map this tag to a valid IO
* request Just toss the frame and continue
*/
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
}
break;
}
case SCI_STP_DEV_NCQ: {
struct dev_to_host_fis *hdr;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&hdr);
if (status != SCI_SUCCESS)
} else
status = SCI_FAILURE;
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
break;
}
case SCI_STP_DEV_CMD:
* in this state. All unsolicited frames are forwarded to the io request
* object.
*/
- status = scic_sds_io_request_frame_handler(idev->working_request, frame_index);
+ status = sci_io_request_frame_handler(idev->working_request, frame_index);
break;
}
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
switch (state) {
case SCI_DEV_READY:
}
}
-enum sci_status scic_sds_remote_device_event_handler(struct isci_remote_device *idev,
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
u32 event_code)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
enum sci_status status;
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_OPS_MISC:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
- status = scic_sds_remote_node_context_event_handler(&idev->rnc, event_code);
+ status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
break;
case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
status = SCI_SUCCESS;
/* Suspend the associated RNC */
- scic_sds_remote_node_context_suspend(&idev->rnc,
+ sci_remote_node_context_suspend(&idev->rnc,
SCI_SOFTWARE_SUSPENSION,
NULL, NULL);
*/
if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
- status = scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}
return status;
}
-static void scic_sds_remote_device_start_request(struct isci_remote_device *idev,
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
struct isci_request *ireq,
enum sci_status status)
{
/* cleanup requests that failed after starting on the port */
if (status != SCI_SUCCESS)
- scic_sds_port_complete_io(iport, idev, ireq);
+ sci_port_complete_io(iport, idev, ireq);
else {
kref_get(&idev->kref);
- scic_sds_remote_device_increment_request_count(idev);
+ sci_remote_device_increment_request_count(idev);
}
}
-enum sci_status scic_sds_remote_device_start_io(struct isci_host *ihost,
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
struct isci_port *iport = idev->owning_port;
enum sci_status status;
* successful it will start the request for the port object then
* increment its own request count.
*/
- status = scic_sds_port_start_io(iport, idev, ireq);
+ status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
- status = scic_sds_request_start(ireq);
+ status = sci_request_start(ireq);
break;
case SCI_STP_DEV_IDLE: {
/* handle the start io operation for a sata device that is in
* If this is a softreset we may want to have a different
* substate.
*/
- enum scic_sds_remote_device_states new_state;
+ enum sci_remote_device_states new_state;
struct sas_task *task = isci_request_access_task(ireq);
- status = scic_sds_port_start_io(iport, idev, ireq);
+ status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
- status = scic_sds_request_start(ireq);
+ status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
break;
struct sas_task *task = isci_request_access_task(ireq);
if (task->ata_task.use_ncq) {
- status = scic_sds_port_start_io(iport, idev, ireq);
+ status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
- status = scic_sds_request_start(ireq);
+ status = sci_request_start(ireq);
} else
return SCI_FAILURE_INVALID_STATE;
break;
case SCI_STP_DEV_AWAIT_RESET:
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
case SCI_SMP_DEV_IDLE:
- status = scic_sds_port_start_io(iport, idev, ireq);
+ status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_remote_node_context_start_io(&idev->rnc, ireq);
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
- status = scic_sds_request_start(ireq);
+ status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
break;
return SCI_FAILURE_INVALID_STATE;
}
- scic_sds_remote_device_start_request(idev, ireq, status);
+ sci_remote_device_start_request(idev, ireq, status);
return status;
}
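
The SSP, STP, and SMP branches above all repeat one three-step start sequence. The fragment below is a distillation for illustration only, not an addition to the patch; it uses just the functions renamed in these hunks and sits, as in the original, inside the device state switch:

	/* Step 1: reserve the port for this request. */
	status = sci_port_start_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	/* Step 2: reserve the remote node context. */
	status = sci_remote_node_context_start_io(&idev->rnc, ireq);
	if (status != SCI_SUCCESS)
		break;

	/* Step 3: start the request itself. */
	status = sci_request_start(ireq);
	break;

Every branch then falls through to sci_remote_device_start_request(), which releases the port reservation on failure, or takes a device reference and bumps the started-request count on success.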
{
enum sci_status status;
- status = scic_sds_request_complete(ireq);
+ status = sci_request_complete(ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_port_complete_io(iport, idev, ireq);
+ status = sci_port_complete_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- scic_sds_remote_device_decrement_request_count(idev);
+ sci_remote_device_decrement_request_count(idev);
return status;
}
-enum sci_status scic_sds_remote_device_complete_io(struct isci_host *ihost,
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
struct isci_port *iport = idev->owning_port;
enum sci_status status;
* status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
*/
sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
- } else if (scic_sds_remote_device_get_request_count(idev) == 0)
+ } else if (sci_remote_device_get_request_count(idev) == 0)
sci_change_state(sm, SCI_STP_DEV_IDLE);
break;
case SCI_SMP_DEV_CMD:
if (status != SCI_SUCCESS)
break;
- if (scic_sds_remote_device_get_request_count(idev) == 0)
- scic_sds_remote_node_context_destruct(&idev->rnc,
+ if (sci_remote_device_get_request_count(idev) == 0)
+ sci_remote_node_context_destruct(&idev->rnc,
rnc_destruct_done,
idev);
break;
return status;
}
-static void scic_sds_remote_device_continue_request(void *dev)
+static void sci_remote_device_continue_request(void *dev)
{
struct isci_remote_device *idev = dev;
/* we need to check if this request is still valid to continue. */
if (idev->working_request)
- scic_controller_continue_io(idev->working_request);
+ sci_controller_continue_io(idev->working_request);
}
-enum sci_status scic_sds_remote_device_start_task(struct isci_host *ihost,
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
struct isci_port *iport = idev->owning_port;
enum sci_status status;
case SCI_STP_DEV_NCQ:
case SCI_STP_DEV_NCQ_ERROR:
case SCI_STP_DEV_AWAIT_RESET:
- status = scic_sds_port_start_io(iport, idev, ireq);
+ status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq);
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
goto out;
- status = scic_sds_request_start(ireq);
+ status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
goto out;
* the correct action when the remote node context is suspended
* and later resumed.
*/
- scic_sds_remote_node_context_suspend(&idev->rnc,
+ sci_remote_node_context_suspend(&idev->rnc,
SCI_SOFTWARE_SUSPENSION, NULL, NULL);
- scic_sds_remote_node_context_resume(&idev->rnc,
- scic_sds_remote_device_continue_request,
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_remote_device_continue_request,
idev);
out:
- scic_sds_remote_device_start_request(idev, ireq, status);
+ sci_remote_device_start_request(idev, ireq, status);
/* We need to let the controller start request handler know that
* it can't post TC yet. We will provide a callback function to
* post TC when RNC gets resumed.
*/
return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
case SCI_DEV_READY:
- status = scic_sds_port_start_io(iport, idev, ireq);
+ status = sci_port_start_io(iport, idev, ireq);
if (status != SCI_SUCCESS)
return status;
- status = scic_sds_remote_node_context_start_task(&idev->rnc, ireq);
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
if (status != SCI_SUCCESS)
break;
- status = scic_sds_request_start(ireq);
+ status = sci_request_start(ireq);
break;
}
- scic_sds_remote_device_start_request(idev, ireq, status);
+ sci_remote_device_start_request(idev, ireq, status);
return status;
}
* This method takes the request and builds an appropriate SCU context for the
* request and then requests the controller to post the request.
*/
-void scic_sds_remote_device_post_request(
+void sci_remote_device_post_request(
struct isci_remote_device *idev,
u32 request)
{
u32 context;
- context = scic_sds_remote_device_build_command_context(idev, request);
+ context = sci_remote_device_build_command_context(idev, request);
- scic_sds_controller_post_request(
- scic_sds_remote_device_get_controller(idev),
+ sci_controller_post_request(
+ sci_remote_device_get_controller(idev),
context
);
}
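
As a usage note, the remote node context code later in this patch calls this helper with bare SCU post codes, for example in the resume path:

	/* Post an RNC resume for this device; the helper ORs in the
	 * protocol engine group, logical port, and remote node index.
	 */
	sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);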
sci_change_state(&idev->sm, SCI_DEV_READY);
}
-static void scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
{
struct isci_remote_device *idev = _dev;
struct isci_host *ihost = idev->owning_port->owning_controller;
isci_remote_device_ready(ihost, idev);
}
-static void scic_sds_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
}
/**
- * scic_remote_device_destruct() - free remote node context and destruct
+ * sci_remote_device_destruct() - free remote node context and destruct
* @remote_device: This parameter specifies the remote device to be destructed.
*
* Remote device objects are a limited resource. As such, they must be
* device isn't valid (e.g. it's already been destroyed, the handle isn't
* valid, etc.).
*/
-static enum sci_status scic_remote_device_destruct(struct isci_remote_device *idev)
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
struct isci_host *ihost;
if (state != SCI_DEV_STOPPED) {
}
ihost = idev->owning_port->owning_controller;
- scic_sds_controller_free_remote_node_context(ihost, idev,
+ sci_controller_free_remote_node_context(ihost, idev,
idev->rnc.remote_node_index);
idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
sci_change_state(sm, SCI_DEV_FINAL);
* io requests in process */
BUG_ON(!list_empty(&idev->reqs_in_process));
- scic_remote_device_destruct(idev);
+ sci_remote_device_destruct(idev);
list_del_init(&idev->node);
isci_put_device(idev);
}
-static void scic_sds_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = idev->owning_port->owning_controller;
if (prev_state == SCI_DEV_STOPPING)
isci_remote_device_deconstruct(ihost, idev);
- scic_sds_controller_remote_device_stopped(ihost, idev);
+ sci_controller_remote_device_stopped(ihost, idev);
}
-static void scic_sds_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
+ struct isci_host *ihost = sci_remote_device_get_controller(idev);
isci_remote_device_not_ready(ihost, idev,
SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
}
-static void scic_sds_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct isci_host *ihost = idev->owning_port->owning_controller;
isci_remote_device_ready(ihost, idev);
}
-static void scic_sds_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
struct domain_device *dev = idev->domain_dev;
}
}
-static void scic_sds_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- scic_sds_remote_node_context_suspend(
+ sci_remote_node_context_suspend(
&idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
}
-static void scic_sds_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- scic_sds_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}
-static void scic_sds_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
idev->working_request = NULL;
- if (scic_sds_remote_node_context_is_ready(&idev->rnc)) {
+ if (sci_remote_node_context_is_ready(&idev->rnc)) {
/*
* Since the RNC is ready, it's alright to finish completion
* processing (e.g. signal the remote device is ready). */
- scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
} else {
- scic_sds_remote_node_context_resume(&idev->rnc,
- scic_sds_stp_remote_device_ready_idle_substate_resume_complete_handler,
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
idev);
}
}
-static void scic_sds_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
+ struct isci_host *ihost = sci_remote_device_get_controller(idev);
BUG_ON(idev->working_request == NULL);
SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
}
-static void scic_sds_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
+ struct isci_host *ihost = sci_remote_device_get_controller(idev);
if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
isci_remote_device_not_ready(ihost, idev,
idev->not_ready_reason);
}
-static void scic_sds_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
+ struct isci_host *ihost = sci_remote_device_get_controller(idev);
isci_remote_device_ready(ihost, idev);
}
-static void scic_sds_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
- struct isci_host *ihost = scic_sds_remote_device_get_controller(idev);
+ struct isci_host *ihost = sci_remote_device_get_controller(idev);
BUG_ON(idev->working_request == NULL);
SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
}
-static void scic_sds_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
idev->working_request = NULL;
}
-static const struct sci_base_state scic_sds_remote_device_state_table[] = {
+static const struct sci_base_state sci_remote_device_state_table[] = {
[SCI_DEV_INITIAL] = {
- .enter_state = scic_sds_remote_device_initial_state_enter,
+ .enter_state = sci_remote_device_initial_state_enter,
},
[SCI_DEV_STOPPED] = {
- .enter_state = scic_sds_remote_device_stopped_state_enter,
+ .enter_state = sci_remote_device_stopped_state_enter,
},
[SCI_DEV_STARTING] = {
- .enter_state = scic_sds_remote_device_starting_state_enter,
+ .enter_state = sci_remote_device_starting_state_enter,
},
[SCI_DEV_READY] = {
- .enter_state = scic_sds_remote_device_ready_state_enter,
- .exit_state = scic_sds_remote_device_ready_state_exit
+ .enter_state = sci_remote_device_ready_state_enter,
+ .exit_state = sci_remote_device_ready_state_exit
},
[SCI_STP_DEV_IDLE] = {
- .enter_state = scic_sds_stp_remote_device_ready_idle_substate_enter,
+ .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
},
[SCI_STP_DEV_CMD] = {
- .enter_state = scic_sds_stp_remote_device_ready_cmd_substate_enter,
+ .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
},
[SCI_STP_DEV_NCQ] = { },
[SCI_STP_DEV_NCQ_ERROR] = {
- .enter_state = scic_sds_stp_remote_device_ready_ncq_error_substate_enter,
+ .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
},
[SCI_STP_DEV_AWAIT_RESET] = { },
[SCI_SMP_DEV_IDLE] = {
- .enter_state = scic_sds_smp_remote_device_ready_idle_substate_enter,
+ .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
},
[SCI_SMP_DEV_CMD] = {
- .enter_state = scic_sds_smp_remote_device_ready_cmd_substate_enter,
- .exit_state = scic_sds_smp_remote_device_ready_cmd_substate_exit,
+ .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+ .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
},
[SCI_DEV_STOPPING] = { },
[SCI_DEV_FAILED] = { },
[SCI_DEV_RESETTING] = {
- .enter_state = scic_sds_remote_device_resetting_state_enter,
- .exit_state = scic_sds_remote_device_resetting_state_exit
+ .enter_state = sci_remote_device_resetting_state_enter,
+ .exit_state = sci_remote_device_resetting_state_exit
},
[SCI_DEV_FINAL] = { },
};
/**
- * scic_remote_device_construct() - common construction
+ * sci_remote_device_construct() - common construction
* @sci_port: SAS/SATA port through which this device is accessed.
* @sci_dev: remote device to construct
*
* This routine just performs benign initialization and does not
* allocate the remote_node_context which is left to
- * scic_remote_device_[de]a_construct(). scic_remote_device_destruct()
+ * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
* frees the remote_node_context(s) for the device.
*/
-static void scic_remote_device_construct(struct isci_port *iport,
+static void sci_remote_device_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
idev->owning_port = iport;
idev->started_request_count = 0;
- sci_init_sm(&idev->sm, scic_sds_remote_device_state_table, SCI_DEV_INITIAL);
+ sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
- scic_sds_remote_node_context_construct(&idev->rnc,
+ sci_remote_node_context_construct(&idev->rnc,
SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
}
/**
- * scic_remote_device_da_construct() - construct direct attached device.
+ * sci_remote_device_da_construct() - construct direct attached device.
*
* The information (e.g. IAF, Signature FIS, etc.) necessary to build
* the device is known to the SCI Core since it is contained in the
- * scic_phy object. Remote node context(s) is/are a global resource
- * allocated by this routine, freed by scic_remote_device_destruct().
+ * sci_phy object. Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
*
* Returns:
* SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
* sata-only controller instance.
* SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
*/
-static enum sci_status scic_remote_device_da_construct(struct isci_port *iport,
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
enum sci_status status;
struct domain_device *dev = idev->domain_dev;
- scic_remote_device_construct(iport, idev);
+ sci_remote_device_construct(iport, idev);
/*
* This information is required to determine how many remote node context
* entries will be needed to store the remote node.
*/
idev->is_direct_attached = true;
- status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller,
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
else
return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
- idev->connection_rate = scic_sds_port_get_max_allowed_speed(iport);
+ idev->connection_rate = sci_port_get_max_allowed_speed(iport);
/* / @todo Should I assign the port width by reading all of the phys on the port? */
idev->device_port_width = 1;
}
/**
- * scic_remote_device_ea_construct() - construct expander attached device
+ * sci_remote_device_ea_construct() - construct expander attached device
*
* Remote node context(s) is/are a global resource allocated by this
- * routine, freed by scic_remote_device_destruct().
+ * routine, freed by sci_remote_device_destruct().
*
* Returns:
* SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
* sata-only controller instance.
* SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
*/
-static enum sci_status scic_remote_device_ea_construct(struct isci_port *iport,
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
struct domain_device *dev = idev->domain_dev;
enum sci_status status;
- scic_remote_device_construct(iport, idev);
+ sci_remote_device_construct(iport, idev);
- status = scic_sds_controller_allocate_remote_node_context(iport->owning_controller,
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
if (status != SCI_SUCCESS)
* connection the logical link rate is the same as the
* physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
* one another, so this code works for both situations. */
- idev->connection_rate = min_t(u16, scic_sds_port_get_max_allowed_speed(iport),
+ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
dev->linkrate);
/* / @todo Should I assign the port width by reading all of the phys on the port? */
}
/**
- * scic_remote_device_start() - This method will start the supplied remote
+ * sci_remote_device_start() - This method will start the supplied remote
* device. This method enables normal IO requests to flow through to the
* remote device.
* @remote_device: This parameter specifies the device to be started.
* SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
* the device when there have been no phys added to it.
*/
-static enum sci_status scic_remote_device_start(struct isci_remote_device *idev,
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
u32 timeout)
{
struct sci_base_state_machine *sm = &idev->sm;
- enum scic_sds_remote_device_states state = sm->current_state_id;
+ enum sci_remote_device_states state = sm->current_state_id;
enum sci_status status;
if (state != SCI_DEV_STOPPED) {
return SCI_FAILURE_INVALID_STATE;
}
- status = scic_sds_remote_node_context_resume(&idev->rnc,
+ status = sci_remote_node_context_resume(&idev->rnc,
remote_device_resume_done,
idev);
if (status != SCI_SUCCESS)
enum sci_status status;
if (dev->parent && dev_is_expander(dev->parent))
- status = scic_remote_device_ea_construct(iport, idev);
+ status = sci_remote_device_ea_construct(iport, idev);
else
- status = scic_remote_device_da_construct(iport, idev);
+ status = sci_remote_device_da_construct(iport, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
}
/* start the device. */
- status = scic_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
if (status != SCI_SUCCESS)
dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
set_bit(IDEV_STOP_PENDING, &idev->flags);
spin_lock_irqsave(&ihost->scic_lock, flags);
- status = scic_remote_device_stop(idev, 50);
+ status = sci_remote_device_stop(idev, 50);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* Wait for the stop complete callback. */
#include "remote_node_context.h"
#include "port.h"
-enum scic_remote_device_not_ready_reason_code {
+enum sci_remote_device_not_ready_reason_code {
SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
enum sas_linkrate connection_rate;
bool is_direct_attached;
struct isci_port *owning_port;
- struct scic_sds_remote_node_context rnc;
+ struct sci_remote_node_context rnc;
/* XXX unify with device reference counting and delete */
u32 started_request_count;
struct isci_request *working_request;
#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
-/* device reference routines must be called under scic_lock */
+/* device reference routines must be called under sci_lock */
static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
{
struct isci_remote_device *idev = dev->lldd_dev;
void isci_device_clear_reset_pending(struct isci_host *ihost,
struct isci_remote_device *idev);
/**
- * scic_remote_device_stop() - This method will stop both transmission and
+ * sci_remote_device_stop() - This method will stop both transmission and
* reception of link activity for the supplied remote device. This method
* disables normal IO requests from flowing through to the remote device.
* @remote_device: This parameter specifies the device to be stopped.
* This value is returned if the transmission and reception for the device was
* successfully stopped.
*/
-enum sci_status scic_remote_device_stop(
+enum sci_status sci_remote_device_stop(
struct isci_remote_device *idev,
u32 timeout);
/**
- * scic_remote_device_reset() - This method will reset the device making it
+ * sci_remote_device_reset() - This method will reset the device making it
* ready for operation. This method must be called anytime the device is
* reset either through a SMP phy control or a port hard reset request.
* @remote_device: This parameter specifies the device to be reset.
* was accepted. SCI_SUCCESS This value is returned if the device reset is
* started.
*/
-enum sci_status scic_remote_device_reset(
+enum sci_status sci_remote_device_reset(
struct isci_remote_device *idev);
/**
- * scic_remote_device_reset_complete() - This method informs the device object
+ * sci_remote_device_reset_complete() - This method informs the device object
* that the reset operation is complete and the device can resume operation
* again.
* @remote_device: This parameter specifies the device which is to be informed
* An indication that the device is resuming operation. SCI_SUCCESS the device
* is resuming operation.
*/
-enum sci_status scic_remote_device_reset_complete(
+enum sci_status sci_remote_device_reset_complete(
struct isci_remote_device *idev);
-#define scic_remote_device_is_atapi(device_handle) false
-
/**
- * enum scic_sds_remote_device_states - This enumeration depicts all the states
+ * enum sci_remote_device_states - This enumeration depicts all the states
* for the common remote device state machine.
*
*
*/
-enum scic_sds_remote_device_states {
+enum sci_remote_device_states {
/**
* Simply the initial state for the base remote device state machine.
*/
SCI_DEV_FINAL,
};
-static inline struct isci_remote_device *rnc_to_dev(struct scic_sds_remote_node_context *rnc)
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
{
struct isci_remote_device *idev;
}
/**
- * scic_sds_remote_device_increment_request_count() -
+ * sci_remote_device_increment_request_count() -
*
* This macro increments the request count for this device
*/
-#define scic_sds_remote_device_increment_request_count(idev) \
+#define sci_remote_device_increment_request_count(idev) \
((idev)->started_request_count++)
/**
- * scic_sds_remote_device_decrement_request_count() -
+ * sci_remote_device_decrement_request_count() -
*
* This macro decrements the request count for this device. This count will
* never decrement past 0.
*/
-#define scic_sds_remote_device_decrement_request_count(idev) \
+#define sci_remote_device_decrement_request_count(idev) \
((idev)->started_request_count > 0 ? \
(idev)->started_request_count-- : 0)
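
The clamp matters because started_request_count is unsigned; a plain decrement at zero would wrap around. A stand-alone illustration of the same behavior (demo_dev and demo_decrement_request_count are hypothetical names invented for this sketch, not driver code):

	#include <assert.h>

	/* Mirrors sci_remote_device_decrement_request_count(): never lets
	 * the unsigned count go below zero.
	 */
	struct demo_dev { unsigned int started_request_count; };

	#define demo_decrement_request_count(d) \
		((d)->started_request_count > 0 ? (d)->started_request_count-- : 0)

	int main(void)
	{
		struct demo_dev dev = { .started_request_count = 1 };

		demo_decrement_request_count(&dev);	/* 1 -> 0 */
		demo_decrement_request_count(&dev);	/* stays 0, no wrap-around */
		assert(dev.started_request_count == 0);
		return 0;
	}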
/**
- * scic_sds_remote_device_get_request_count() -
+ * sci_remote_device_get_request_count() -
*
* This is a helper macro to return the current device request count.
*/
-#define scic_sds_remote_device_get_request_count(idev) \
+#define sci_remote_device_get_request_count(idev) \
((idev)->started_request_count)
/**
- * scic_sds_remote_device_get_controller() -
+ * sci_remote_device_get_controller() -
*
* This macro returns the controller object that contains this device object
*/
-#define scic_sds_remote_device_get_controller(idev) \
- scic_sds_port_get_controller(scic_sds_remote_device_get_port(idev))
+#define sci_remote_device_get_controller(idev) \
+ sci_port_get_controller(sci_remote_device_get_port(idev))
/**
- * scic_sds_remote_device_get_port() -
+ * sci_remote_device_get_port() -
*
* This macro returns the owning port of this device
*/
-#define scic_sds_remote_device_get_port(idev) \
+#define sci_remote_device_get_port(idev) \
((idev)->owning_port)
/**
- * scic_sds_remote_device_get_controller_peg() -
+ * sci_remote_device_get_controller_peg() -
*
* This macro returns the controllers protocol engine group
*/
-#define scic_sds_remote_device_get_controller_peg(idev) \
+#define sci_remote_device_get_controller_peg(idev) \
(\
- scic_sds_controller_get_protocol_engine_group(\
- scic_sds_port_get_controller(\
- scic_sds_remote_device_get_port(idev) \
+ sci_controller_get_protocol_engine_group(\
+ sci_port_get_controller(\
+ sci_remote_device_get_port(idev) \
) \
) \
)
/**
- * scic_sds_remote_device_get_index() -
+ * sci_remote_device_get_index() -
*
* This macro returns the remote node index for this device object
*/
-#define scic_sds_remote_device_get_index(idev) \
+#define sci_remote_device_get_index(idev) \
((idev)->rnc.remote_node_index)
/**
- * scic_sds_remote_device_build_command_context() -
+ * sci_remote_device_build_command_context() -
*
* This macro builds a remote device context for the SCU post request operation
*/
-#define scic_sds_remote_device_build_command_context(device, command) \
+#define sci_remote_device_build_command_context(device, command) \
((command) \
- | (scic_sds_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
+ | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
| ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \
- | (scic_sds_remote_device_get_index((device))) \
+ | (sci_remote_device_get_index((device))) \
)
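
To make the bit packing concrete, here is a stand-alone sketch of the same OR composition. The shift values below are placeholders for illustration; the real SCU_CONTEXT_COMMAND_*_SHIFT constants are defined in the SCU headers, not in this patch:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder shifts, for illustration only. */
	#define DEMO_PEG_SHIFT		24
	#define DEMO_LOGICAL_PORT_SHIFT	20

	static uint32_t demo_build_command_context(uint32_t command, uint32_t peg,
						   uint32_t port, uint32_t rni)
	{
		/* Same composition as sci_remote_device_build_command_context():
		 * command bits | protocol engine group | logical port | node index.
		 */
		return command |
		       (peg << DEMO_PEG_SHIFT) |
		       (port << DEMO_LOGICAL_PORT_SHIFT) |
		       rni;
	}

	int main(void)
	{
		/* Hypothetical values: command 0x1000, PEG 0, port 2, RNI 5. */
		printf("context = 0x%08" PRIx32 "\n",
		       demo_build_command_context(0x1000, 0, 2, 5));
		return 0;
	}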
/**
- * scic_sds_remote_device_set_working_request() -
+ * sci_remote_device_set_working_request() -
*
* This macro makes the working request assignment for the remote device
* object. To clear the working request use this macro with a NULL request
* object.
*/
-#define scic_sds_remote_device_set_working_request(device, request) \
+#define sci_remote_device_set_working_request(device, request) \
((device)->working_request = (request))
-enum sci_status scic_sds_remote_device_frame_handler(
+enum sci_status sci_remote_device_frame_handler(
struct isci_remote_device *idev,
u32 frame_index);
-enum sci_status scic_sds_remote_device_event_handler(
+enum sci_status sci_remote_device_event_handler(
struct isci_remote_device *idev,
u32 event_code);
-enum sci_status scic_sds_remote_device_start_io(
+enum sci_status sci_remote_device_start_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status scic_sds_remote_device_start_task(
+enum sci_status sci_remote_device_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status scic_sds_remote_device_complete_io(
+enum sci_status sci_remote_device_complete_io(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status scic_sds_remote_device_suspend(
+enum sci_status sci_remote_device_suspend(
struct isci_remote_device *idev,
u32 suspend_type);
-void scic_sds_remote_device_post_request(
+void sci_remote_device_post_request(
struct isci_remote_device *idev,
u32 request);
-#define scic_sds_remote_device_is_atapi(idev) false
-
#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
* otherwise it will return false.
*/
-bool scic_sds_remote_node_context_is_ready(
- struct scic_sds_remote_node_context *sci_rnc)
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc)
{
u32 current_state = sci_rnc->sm.current_state_id;
return false;
}
-/**
- *
- * @sci_dev: The remote device to use to construct the RNC buffer.
- * @rnc: The buffer into which the remote device data will be copied.
- *
- * This method will construct the RNC buffer for this remote device object. none
- */
-static void scic_sds_remote_node_context_construct_buffer(
- struct scic_sds_remote_node_context *sci_rnc)
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+ if (id < ihost->remote_node_entries &&
+ ihost->device_table[id])
+ return &ihost->remote_node_context_table[id];
+
+ return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
{
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
struct isci_host *ihost;
__le64 sas_addr;
- ihost = scic_sds_remote_device_get_controller(idev);
- rnc = scic_sds_controller_get_remote_node_context_buffer(ihost, rni);
+ ihost = sci_remote_device_get_controller(idev);
+ rnc = sci_rnc_by_id(ihost, rni);
memset(rnc, 0, sizeof(union scu_remote_node_context)
- * scic_sds_remote_device_node_count(idev));
+ * sci_remote_device_node_count(idev));
rnc->ssp.remote_node_index = rni;
rnc->ssp.remote_node_port_width = idev->device_port_width;
if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
rnc->ssp.connection_occupancy_timeout =
- ihost->user_parameters.sds1.stp_max_occupancy_timeout;
+ ihost->user_parameters.stp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
- ihost->user_parameters.sds1.stp_inactivity_timeout;
+ ihost->user_parameters.stp_inactivity_timeout;
} else {
rnc->ssp.connection_occupancy_timeout =
- ihost->user_parameters.sds1.ssp_max_occupancy_timeout;
+ ihost->user_parameters.ssp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
- ihost->user_parameters.sds1.ssp_inactivity_timeout;
+ ihost->user_parameters.ssp_inactivity_timeout;
}
rnc->ssp.initial_arbitration_wait_time = 0;
* to its ready state. If the remote node context is already set up to
* transition to its final state then this function does nothing.
*/
-static void scic_sds_remote_node_context_setup_to_resume(
- struct scic_sds_remote_node_context *sci_rnc,
+static void sci_remote_node_context_setup_to_resume(
+ struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
}
}
-static void scic_sds_remote_node_context_setup_to_destory(
- struct scic_sds_remote_node_context *sci_rnc,
+static void sci_remote_node_context_setup_to_destroy(
+ struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
* This method just calls the user callback function and then resets the
* callback.
*/
-static void scic_sds_remote_node_context_notify_user(
- struct scic_sds_remote_node_context *rnc)
+static void sci_remote_node_context_notify_user(
+ struct sci_remote_node_context *rnc)
{
if (rnc->user_callback != NULL) {
(*rnc->user_callback)(rnc->user_cookie);
}
}
-static void scic_sds_remote_node_context_continue_state_transitions(struct scic_sds_remote_node_context *rnc)
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
- scic_sds_remote_node_context_resume(rnc, rnc->user_callback,
+ sci_remote_node_context_resume(rnc, rnc->user_callback,
rnc->user_cookie);
}
-/**
- *
- * @sci_rnc: The remote node context object that is to be validated.
- *
- * This method will mark the rnc buffer as being valid and post the request to
- * the hardware. none
- */
-static void scic_sds_remote_node_context_validate_context_buffer(
- struct scic_sds_remote_node_context *sci_rnc)
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
+ union scu_remote_node_context *rnc_buffer;
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
- union scu_remote_node_context *rnc_buffer;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
- rnc_buffer = scic_sds_controller_get_remote_node_context_buffer(
- scic_sds_remote_device_get_controller(idev),
- sci_rnc->remote_node_index
- );
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
rnc_buffer->ssp.is_valid = true;
if (!idev->is_direct_attached &&
(dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
- scic_sds_remote_device_post_request(idev,
- SCU_CONTEXT_COMMAND_POST_RNC_96);
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
} else {
- scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
- if (idev->is_direct_attached) {
- scic_sds_port_setup_transports(idev->owning_port,
- sci_rnc->remote_node_index);
- }
+ if (idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ sci_rnc->remote_node_index);
}
}
-/**
- *
- * @sci_rnc: The remote node context object that is to be invalidated.
- *
- * This method will update the RNC buffer and post the invalidate request. none
- */
-static void scic_sds_remote_node_context_invalidate_context_buffer(
- struct scic_sds_remote_node_context *sci_rnc)
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
{
union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
- rnc_buffer = scic_sds_controller_get_remote_node_context_buffer(
- scic_sds_remote_device_get_controller(rnc_to_dev(sci_rnc)),
- sci_rnc->remote_node_index);
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
rnc_buffer->ssp.is_valid = false;
- scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc),
- SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
}
-static void scic_sds_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
/* Check to see if we have gotten back to the initial state because
* someone requested to destroy the remote node context object.
*/
if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
- scic_sds_remote_node_context_notify_user(rnc);
+ sci_remote_node_context_notify_user(rnc);
}
}
-static void scic_sds_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+ struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
- scic_sds_remote_node_context_validate_context_buffer(sci_rnc);
+ sci_remote_node_context_validate_context_buffer(sci_rnc);
}
-static void scic_sds_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
- scic_sds_remote_node_context_invalidate_context_buffer(rnc);
+ sci_remote_node_context_invalidate_context_buffer(rnc);
}
-static void scic_sds_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
struct isci_remote_device *idev;
struct domain_device *dev;
*/
if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
idev->is_direct_attached)
- scic_sds_port_setup_transports(idev->owning_port,
+ sci_port_setup_transports(idev->owning_port,
rnc->remote_node_index);
- scic_sds_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
-static void scic_sds_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
if (rnc->user_callback)
- scic_sds_remote_node_context_notify_user(rnc);
+ sci_remote_node_context_notify_user(rnc);
}
-static void scic_sds_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
- scic_sds_remote_node_context_continue_state_transitions(rnc);
+ sci_remote_node_context_continue_state_transitions(rnc);
}
-static void scic_sds_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
- struct scic_sds_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
- scic_sds_remote_node_context_continue_state_transitions(rnc);
+ sci_remote_node_context_continue_state_transitions(rnc);
}
-static const struct sci_base_state scic_sds_remote_node_context_state_table[] = {
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_INITIAL] = {
- .enter_state = scic_sds_remote_node_context_initial_state_enter,
+ .enter_state = sci_remote_node_context_initial_state_enter,
},
[SCI_RNC_POSTING] = {
- .enter_state = scic_sds_remote_node_context_posting_state_enter,
+ .enter_state = sci_remote_node_context_posting_state_enter,
},
[SCI_RNC_INVALIDATING] = {
- .enter_state = scic_sds_remote_node_context_invalidating_state_enter,
+ .enter_state = sci_remote_node_context_invalidating_state_enter,
},
[SCI_RNC_RESUMING] = {
- .enter_state = scic_sds_remote_node_context_resuming_state_enter,
+ .enter_state = sci_remote_node_context_resuming_state_enter,
},
[SCI_RNC_READY] = {
- .enter_state = scic_sds_remote_node_context_ready_state_enter,
+ .enter_state = sci_remote_node_context_ready_state_enter,
},
[SCI_RNC_TX_SUSPENDED] = {
- .enter_state = scic_sds_remote_node_context_tx_suspended_state_enter,
+ .enter_state = sci_remote_node_context_tx_suspended_state_enter,
},
[SCI_RNC_TX_RX_SUSPENDED] = {
- .enter_state = scic_sds_remote_node_context_tx_rx_suspended_state_enter,
+ .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
},
[SCI_RNC_AWAIT_SUSPENSION] = { },
};
-void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc,
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
u16 remote_node_index)
{
- memset(rnc, 0, sizeof(struct scic_sds_remote_node_context));
+ memset(rnc, 0, sizeof(struct sci_remote_node_context));
rnc->remote_node_index = remote_node_index;
rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
- sci_init_sm(&rnc->sm, scic_sds_remote_node_context_state_table, SCI_RNC_INITIAL);
+ sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
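
For reference, the device-side constructor earlier in this patch builds the RNC with the invalid-index placeholder; a real index is only assigned later by the controller's allocator:

	sci_remote_node_context_construct(&idev->rnc,
					  SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);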
-enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code)
{
enum scis_sds_remote_node_context_states state;
}
-enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_INVALIDATING:
- scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
case SCI_RNC_AWAIT_SUSPENSION:
- scic_sds_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
return SCI_SUCCESS;
case SCI_RNC_INITIAL:
}
}
-enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
u32 suspend_type,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
sci_rnc->suspension_code = suspend_type;
if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
- scic_sds_remote_device_post_request(rnc_to_dev(sci_rnc),
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
}
return SCI_SUCCESS;
}
-enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p)
{
if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_STATE;
- scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
- scic_sds_remote_node_context_construct_buffer(sci_rnc);
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_construct_buffer(sci_rnc);
sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
struct domain_device *dev = idev->domain_dev;
- scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
/* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
return SCI_SUCCESS;
}
case SCI_RNC_TX_RX_SUSPENDED:
- scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
return SCI_FAILURE_INVALID_STATE;
case SCI_RNC_AWAIT_SUSPENSION:
- scic_sds_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
}
}
-enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;
return SCI_FAILURE_INVALID_STATE;
}
-enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq)
{
enum scis_sds_remote_node_context_states state;
return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
- scic_sds_remote_node_context_resume(sci_rnc, NULL, NULL);
+ sci_remote_node_context_resume(sci_rnc, NULL, NULL);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
struct isci_request;
struct isci_remote_device;
-struct scic_sds_remote_node_context;
+struct sci_remote_node_context;
typedef void (*scics_sds_remote_node_context_callback)(void *);
* This enumeration is used to define the end destination state for the remote
* node context.
*/
-enum scic_sds_remote_node_context_destination_state {
+enum sci_remote_node_context_destination_state {
SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
};
/**
- * struct scic_sds_remote_node_context - This structure contains the data
+ * struct sci_remote_node_context - This structure contains the data
* associated with the remote node context object. The remote node context
* (RNC) object models the remote device information necessary to manage
* the silicon RNC.
*/
-struct scic_sds_remote_node_context {
+struct sci_remote_node_context {
/**
* This field indicates the remote node index (RNI) associated with
* this RNC.
* state. This can cause an automatic resume on receiving a suspension
* notification.
*/
- enum scic_sds_remote_node_context_destination_state destination_state;
+ enum sci_remote_node_context_destination_state destination_state;
/**
* This field contains the callback function that the user requested to be
struct sci_base_state_machine sm;
};
-void scic_sds_remote_node_context_construct(struct scic_sds_remote_node_context *rnc,
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
u16 remote_node_index);
-bool scic_sds_remote_node_context_is_ready(
- struct scic_sds_remote_node_context *sci_rnc);
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc);
-#define scic_sds_remote_node_context_get_remote_node_index(rcn) \
+#define sci_remote_node_context_get_remote_node_index(rnc) \
((rnc)->remote_node_index)
-enum sci_status scic_sds_remote_node_context_event_handler(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code);
-enum sci_status scic_sds_remote_node_context_destruct(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter);
-enum sci_status scic_sds_remote_node_context_suspend(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
u32 suspend_type,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
-enum sci_status scic_sds_remote_node_context_resume(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
-enum sci_status scic_sds_remote_node_context_start_task(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq);
-enum sci_status scic_sds_remote_node_context_start_io(struct scic_sds_remote_node_context *sci_rnc,
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq);
#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
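
A usage sketch for the suspend/resume pair declared above, condensed from sci_remote_device_start_task() earlier in this patch:

	/* Software-suspend the RNC, then schedule a resume whose callback
	 * continues the held request once the RNC is ready again.
	 */
	sci_remote_node_context_suspend(&idev->rnc,
					SCI_SOFTWARE_SUSPENSION, NULL, NULL);
	sci_remote_node_context_resume(&idev->rnc,
				       sci_remote_device_continue_request,
				       idev);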
* just bit position. Returns the absolute bit position for an available
* group.
*/
-static u32 scic_sds_remote_node_table_get_group_index(
- struct scic_remote_node_table *remote_node_table,
+static u32 sci_remote_node_table_get_group_index(
+ struct sci_remote_node_table *remote_node_table,
u32 group_table_index)
{
u32 dword_index;
* This method will clear the group index entry in the specified group index
* table.
*/
-static void scic_sds_remote_node_table_clear_group_index(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_clear_group_index(
+ struct sci_remote_node_table *remote_node_table,
u32 group_table_index,
u32 group_index)
{
* This method will set the group index bit entry in the specified group index
* table.
*/
-static void scic_sds_remote_node_table_set_group_index(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_set_group_index(
+ struct sci_remote_node_table *remote_node_table,
u32 group_table_index,
u32 group_index)
{
* This method will set the remote node as available in the remote node
* allocation table.
*/
-static void scic_sds_remote_node_table_set_node_index(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_set_node_index(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_index)
{
u32 dword_location;
* This method clears the remote node index from the table of available remote
* nodes.
*/
-static void scic_sds_remote_node_table_clear_node_index(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_clear_node_index(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_index)
{
u32 dword_location;
*
* This method clears the entire table slot at the specified slot index.
*/
-static void scic_sds_remote_node_table_clear_group(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_clear_group(
+ struct sci_remote_node_table *remote_node_table,
u32 group_index)
{
u32 dword_location;
*
* This method sets an entire remote node group in the remote node table.
*/
-static void scic_sds_remote_node_table_set_group(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_set_group(
+ struct sci_remote_node_table *remote_node_table,
u32 group_index)
{
u32 dword_location;
* This method will return the group value for the specified group index, i.e.
* the bit values at the specified remote node group index.
*/
-static u8 scic_sds_remote_node_table_get_group_value(
- struct scic_remote_node_table *remote_node_table,
+static u8 sci_remote_node_table_get_group_value(
+ struct sci_remote_node_table *remote_node_table,
u32 group_index)
{
u32 dword_location;
*
* This method will initialize the remote node table for use.
*/
-void scic_sds_remote_node_table_initialize(
- struct scic_remote_node_table *remote_node_table,
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_entries)
{
u32 index;
/* Initialize each full DWORD to a FULL SET of remote nodes */
for (index = 0; index < remote_node_entries; index++) {
- scic_sds_remote_node_table_set_node_index(remote_node_table, index);
+ sci_remote_node_table_set_node_index(remote_node_table, index);
}
remote_node_table->group_array_size = (u16)
/*
* These are all guaranteed to be full slot values so fill them in the
* available sets of 3 remote nodes */
- scic_sds_remote_node_table_set_group_index(remote_node_table, 2, index);
+ sci_remote_node_table_set_group_index(remote_node_table, 2, index);
}
/* Now fill in any remainders that we may find */
if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
- scic_sds_remote_node_table_set_group_index(remote_node_table, 1, index);
+ sci_remote_node_table_set_group_index(remote_node_table, 1, index);
} else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
- scic_sds_remote_node_table_set_group_index(remote_node_table, 0, index);
+ sci_remote_node_table_set_group_index(remote_node_table, 0, index);
}
}
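
A stand-alone sketch of the remainder bookkeeping above, assuming SCU_STP_REMOTE_NODE_COUNT is 3 (as the triple-allocation code in this file implies); all names here are invented for the demo:

	#include <stdio.h>

	#define DEMO_STP_NODE_COUNT 3	/* assumed value of SCU_STP_REMOTE_NODE_COUNT */

	int main(void)
	{
		unsigned int entries = 8;	/* hypothetical remote_node_entries */
		unsigned int full = entries / DEMO_STP_NODE_COUNT;
		unsigned int rem = entries % DEMO_STP_NODE_COUNT;

		/* Full groups of three seed group table 2; a remainder of two
		 * seeds table 1 and a remainder of one seeds table 0.
		 */
		printf("%u full group(s) -> table 2\n", full);
		if (rem == 2)
			printf("last group -> table 1\n");
		else if (rem == 1)
			printf("last group -> table 0\n");
		return 0;
	}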
* updated. Returns the RNi value, or an invalid remote node context if an
* RNi cannot be found.
*/
-static u16 scic_sds_remote_node_table_allocate_single_remote_node(
- struct scic_remote_node_table *remote_node_table,
+static u16 sci_remote_node_table_allocate_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
u32 group_table_index)
{
u8 index;
u32 group_index;
u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
- group_index = scic_sds_remote_node_table_get_group_index(
+ group_index = sci_remote_node_table_get_group_index(
remote_node_table, group_table_index);
/* We could not find an available slot in the table selector 0 */
if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
- group_value = scic_sds_remote_node_table_get_group_value(
+ group_value = sci_remote_node_table_get_group_value(
remote_node_table, group_index);
for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+ index);
- scic_sds_remote_node_table_clear_group_index(
+ sci_remote_node_table_clear_group_index(
remote_node_table, group_table_index, group_index
);
- scic_sds_remote_node_table_clear_node_index(
+ sci_remote_node_table_clear_node_index(
remote_node_table, remote_node_index
);
if (group_table_index > 0) {
- scic_sds_remote_node_table_set_group_index(
+ sci_remote_node_table_set_group_index(
remote_node_table, group_table_index - 1, group_index
);
}
* Returns the remote node index that represents three consecutive remote node
* entries, or an invalid remote node context if none can be found.
*/
-static u16 scic_sds_remote_node_table_allocate_triple_remote_node(
- struct scic_remote_node_table *remote_node_table,
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
u32 group_table_index)
{
u32 group_index;
u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
- group_index = scic_sds_remote_node_table_get_group_index(
+ group_index = sci_remote_node_table_get_group_index(
remote_node_table, group_table_index);
if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
- scic_sds_remote_node_table_clear_group_index(
+ sci_remote_node_table_clear_group_index(
remote_node_table, group_table_index, group_index
);
- scic_sds_remote_node_table_clear_group(
+ sci_remote_node_table_clear_group(
remote_node_table, group_index
);
}
* SCU_SSP_REMOTE_NODE_COUNT (1) or SCU_STP_REMOTE_NODE_COUNT (3). Returns a
* u16: the allocated remote node index, or an invalid remote node context if
* none is available.
*/
-u16 scic_sds_remote_node_table_allocate_remote_node(
- struct scic_remote_node_table *remote_node_table,
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_count)
{
u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
remote_node_index =
- scic_sds_remote_node_table_allocate_single_remote_node(
+ sci_remote_node_table_allocate_single_remote_node(
remote_node_table, 0);
if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
remote_node_index =
- scic_sds_remote_node_table_allocate_single_remote_node(
+ sci_remote_node_table_allocate_single_remote_node(
remote_node_table, 1);
}
if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
remote_node_index =
- scic_sds_remote_node_table_allocate_single_remote_node(
+ sci_remote_node_table_allocate_single_remote_node(
remote_node_table, 2);
}
} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
remote_node_index =
- scic_sds_remote_node_table_allocate_triple_remote_node(
+ sci_remote_node_table_allocate_triple_remote_node(
remote_node_table, 2);
}
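/* Editor's sketch (hypothetical helper, not patch content): the
 * if-chain above restated as a cascade. Single-node requests prefer
 * the most-fragmented groups first, which preserves intact triples
 * for STP devices that need three consecutive RNis.
 */
static u16 example_allocate_single_cascade(struct sci_remote_node_table *rnt)
{
	u16 rni = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
	u32 sel;

	for (sel = 0; sel <= 2; sel++) {
		rni = sci_remote_node_table_allocate_single_remote_node(rnt, sel);
		if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
			break;
	}
	return rni;
}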
* This method will free a single remote node index back to the remote node
* table. This routine will update the remote node groups
*/
-static void scic_sds_remote_node_table_release_single_remote_node(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_release_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
u16 remote_node_index)
{
u32 group_index;
group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
- group_value = scic_sds_remote_node_table_get_group_value(remote_node_table, group_index);
+ group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
/*
* Assert that we are not trying to add an entry to a slot that is already
/*
* There are no entries in this slot so it must be added to the single
* slot table. */
- scic_sds_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
} else if ((group_value & (group_value - 1)) == 0) {
/*
* There is only one entry in this slot so it must be moved from the
* single slot table to the dual slot table */
- scic_sds_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
- scic_sds_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
} else {
/*
* There are two entries in the slot so it must be moved from the dual
* slot table to the triple slot table. */
- scic_sds_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
- scic_sds_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
}
- scic_sds_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+ sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
}
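/* Editor's sketch of the promotion rule above (assumption: a set bit
 * in group_value marks a free remote node, counted before the released
 * node's bit is added back). Hypothetical helper, not patch content.
 */
static u32 example_promoted_selector(u8 group_value)
{
	if (group_value == 0)
		return 0;	/* first free entry: join the single-slot table */
	else if ((group_value & (group_value - 1)) == 0)
		return 1;	/* second free entry: move to the dual-slot table */
	else
		return 2;	/* third free entry: move to the triple-slot table */
}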
/**
* This method will release a group of three consecutive remote nodes back to
* the free remote nodes.
*/
-static void scic_sds_remote_node_table_release_triple_remote_node(
- struct scic_remote_node_table *remote_node_table,
+static void sci_remote_node_table_release_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
u16 remote_node_index)
{
u32 group_index;
group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
- scic_sds_remote_node_table_set_group_index(
+ sci_remote_node_table_set_group_index(
remote_node_table, 2, group_index
);
- scic_sds_remote_node_table_set_group(remote_node_table, group_index);
+ sci_remote_node_table_set_group(remote_node_table, group_index);
}
/**
* This method will release the remote node index back into the remote node
* table free pool.
*/
-void scic_sds_remote_node_table_release_remote_node_index(
- struct scic_remote_node_table *remote_node_table,
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_count,
u16 remote_node_index)
{
if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
- scic_sds_remote_node_table_release_single_remote_node(
+ sci_remote_node_table_release_single_remote_node(
remote_node_table, remote_node_index);
} else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
- scic_sds_remote_node_table_release_triple_remote_node(
+ sci_remote_node_table_release_triple_remote_node(
remote_node_table, remote_node_index);
}
}
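/* Editor's usage sketch for the two entry points above (rnt is a
 * hypothetical table pointer): the count passed at release must match
 * the count used at allocation.
 */
u16 rni = sci_remote_node_table_allocate_remote_node(rnt,
						     SCU_STP_REMOTE_NODE_COUNT);
if (rni != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
	sci_remote_node_table_release_remote_node_index(rnt,
							SCU_STP_REMOTE_NODE_COUNT,
							rni);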
#define SCU_SATA_REMOTE_NODE_COUNT 1
/**
- * struct scic_remote_node_table -
+ * struct sci_remote_node_table -
*
*
*/
-struct scic_remote_node_table {
+struct sci_remote_node_table {
/**
* This field contains the array size in dwords
*/
/* --------------------------------------------------------------------------- */
-void scic_sds_remote_node_table_initialize(
- struct scic_remote_node_table *remote_node_table,
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_entries);
-u16 scic_sds_remote_node_table_allocate_remote_node(
- struct scic_remote_node_table *remote_node_table,
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_count);
-void scic_sds_remote_node_table_release_remote_node_index(
- struct scic_remote_node_table *remote_node_table,
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
u32 remote_node_count,
u16 remote_node_index);
return ihost->task_context_dma + offset;
}
- return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+ return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}
static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
e->address_modifier = 0;
}
-static void scic_sds_request_build_sgl(struct isci_request *ireq)
+static void sci_request_build_sgl(struct isci_request *ireq)
{
struct isci_host *ihost = ireq->isci_host;
struct sas_task *task = isci_request_access_task(ireq);
}
}
-static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq)
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
struct ssp_cmd_iu *cmd_iu;
struct sas_task *task = isci_request_access_task(ireq);
sizeof(task->ssp_task.cdb) / sizeof(u32));
}
-static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq)
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
struct ssp_task_iu *task_iu;
struct sas_task *task = isci_request_access_task(ireq);
struct isci_remote_device *idev;
struct isci_port *iport;
- idev = scic_sds_request_get_device(ireq);
- iport = scic_sds_request_get_port(ireq);
+ idev = sci_request_get_device(ireq);
+ iport = sci_request_get_port(ireq);
/* Fill in the TC with its required data */
task_context->abort = 0;
task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index =
- scic_sds_controller_get_protocol_engine_group(controller);
- task_context->logical_port_index = scic_sds_port_get_index(iport);
+ sci_controller_get_protocol_engine_group(controller);
+ task_context->logical_port_index = sci_port_get_index(iport);
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->context_type = SCU_TASK_CONTEXT_TYPE;
- task_context->remote_node_index = scic_sds_remote_device_get_index(idev);
+ task_context->remote_node_index = sci_remote_device_get_index(idev);
task_context->command_code = 0;
task_context->link_layer_control = 0;
task_context->task_phase = 0x01;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
- (scic_sds_controller_get_protocol_engine_group(controller) <<
+ (sci_controller_get_protocol_engine_group(controller) <<
SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
- (scic_sds_port_get_index(iport) <<
+ (sci_port_get_index(iport) <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag));
* Copy the physical address for the command buffer to the
* SCU Task Context
*/
- dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
task_context->command_iu_upper = upper_32_bits(dma_addr);
task_context->command_iu_lower = lower_32_bits(dma_addr);
* Copy the physical address for the response buffer to the
* SCU Task Context
*/
- dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
task_context->response_iu_upper = upper_32_bits(dma_addr);
task_context->response_iu_lower = lower_32_bits(dma_addr);
task_context->transfer_length_bytes = len;
if (task_context->transfer_length_bytes > 0)
- scic_sds_request_build_sgl(ireq);
+ sci_request_build_sgl(ireq);
}
/**
struct isci_remote_device *idev;
struct isci_port *iport;
- idev = scic_sds_request_get_device(ireq);
- iport = scic_sds_request_get_port(ireq);
+ idev = sci_request_get_device(ireq);
+ iport = sci_request_get_port(ireq);
/* Fill in the TC with its required data */
task_context->abort = 0;
task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index =
- scic_sds_controller_get_protocol_engine_group(controller);
+ sci_controller_get_protocol_engine_group(controller);
task_context->logical_port_index =
- scic_sds_port_get_index(iport);
+ sci_port_get_index(iport);
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->context_type = SCU_TASK_CONTEXT_TYPE;
- task_context->remote_node_index = scic_sds_remote_device_get_index(idev);
+ task_context->remote_node_index = sci_remote_device_get_index(idev);
task_context->command_code = 0;
task_context->link_layer_control = 0;
task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
- (scic_sds_controller_get_protocol_engine_group(controller) <<
+ (sci_controller_get_protocol_engine_group(controller) <<
SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
- (scic_sds_port_get_index(iport) <<
+ (sci_port_get_index(iport) <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag));
/*
* Context. We must offset the command buffer by 4 bytes because the
* first 4 bytes are transferred in the body of the TC.
*/
- dma_addr = scic_io_request_get_dma_addr(ireq,
+ dma_addr = sci_io_request_get_dma_addr(ireq,
((char *) &ireq->stp.cmd) +
sizeof(u32));
task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}
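/* Editor's sketch restating the 4-byte window above (hypothetical
 * helper): the first dword of the H2D FIS travels inside the task
 * context, so the DMA address and the transfer length both skip
 * sizeof(u32).
 */
static void example_fis_dma_window(struct isci_request *ireq,
				   dma_addr_t *addr, u32 *len)
{
	*addr = sci_io_request_get_dma_addr(ireq,
					    (char *)&ireq->stp.cmd + sizeof(u32));
	*len = sizeof(struct host_to_dev_fis) - sizeof(u32);
}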
-static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq,
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
bool copy_rx_frame)
{
struct isci_stp_request *stp_req = &ireq->stp.req;
stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
if (copy_rx_frame) {
- scic_sds_request_build_sgl(ireq);
+ sci_request_build_sgl(ireq);
stp_req->sgl.index = 0;
} else {
/* The user does not want the data copied to the SGL buffer location */
* requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
* returns an indication as to whether the construction was successful.
*/
-static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq,
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
u8 optimized_task_type,
u32 len,
enum dma_data_direction dir)
scu_sata_reqeust_construct_task_context(ireq, task_context);
/* Copy over the SGL elements */
- scic_sds_request_build_sgl(ireq);
+ sci_request_build_sgl(ireq);
/* Copy over the number of bytes to be transferred */
task_context->transfer_length_bytes = len;
static enum sci_status
-scic_io_request_construct_sata(struct isci_request *ireq,
+sci_io_request_construct_sata(struct isci_request *ireq,
u32 len,
enum dma_data_direction dir,
bool copy)
/* NCQ */
if (task->ata_task.use_ncq) {
- scic_sds_stp_optimized_request_construct(ireq,
+ sci_stp_optimized_request_construct(ireq,
SCU_TASK_TYPE_FPDMAQ_READ,
len, dir);
return SCI_SUCCESS;
/* DMA */
if (task->ata_task.dma_xfer) {
- scic_sds_stp_optimized_request_construct(ireq,
+ sci_stp_optimized_request_construct(ireq,
SCU_TASK_TYPE_DMA_IN,
len, dir);
return SCI_SUCCESS;
} else /* PIO */
- return scic_sds_stp_pio_request_construct(ireq, copy);
+ return sci_stp_pio_request_construct(ireq, copy);
return status;
}
-static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq)
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
struct sas_task *task = isci_request_access_task(ireq);
task->data_dir,
task->total_xfer_len);
- scic_sds_io_request_build_ssp_command_iu(ireq);
+ sci_io_request_build_ssp_command_iu(ireq);
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
-enum sci_status scic_task_request_construct_ssp(
+enum sci_status sci_task_request_construct_ssp(
struct isci_request *ireq)
{
/* Construct the SSP Task SCU Task Context */
scu_ssp_task_request_construct_task_context(ireq);
/* Fill in the SSP Task IU */
- scic_sds_task_request_build_ssp_task_iu(ireq);
+ sci_task_request_build_ssp_task_iu(ireq);
sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
return SCI_SUCCESS;
}
-static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq)
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
enum sci_status status;
bool copy = false;
copy = (task->data_dir == DMA_NONE) ? false : true;
- status = scic_io_request_construct_sata(ireq,
+ status = sci_io_request_construct_sata(ireq,
task->total_xfer_len,
task->data_dir,
copy);
return status;
}
-enum sci_status scic_task_request_construct_sata(struct isci_request *ireq)
+enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
enum sci_status status = SCI_SUCCESS;
* BAR1 is the scu_registers
* 0x20002C = 0x200000 + 0x2c
* = start of task context SRAM + offset of (type.ssp.data_offset)
- * TCi is the io_tag of struct scic_sds_request
+ * TCi is the io_tag of struct sci_request
*/
ret_val = readl(scu_reg_base +
(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
return ret_val;
}
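/* Editor's worked restatement of the address math in the comment
 * above; the final multiply-by-TC-size term is an assumption
 * reconstructed from the truncated expression.
 */
static u32 example_read_data_offset(void __iomem *scu_reg_base, u16 tci)
{
	return readl(scu_reg_base + SCU_TASK_CONTEXT_SRAM +
		     offsetof(struct scu_task_context, type.ssp.data_offset) +
		     tci * sizeof(struct scu_task_context));
}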
-enum sci_status scic_sds_request_start(struct isci_request *ireq)
+enum sci_status sci_request_start(struct isci_request *ireq)
{
enum sci_base_request_states state;
struct scu_task_context *tc = ireq->tc;
}
enum sci_status
-scic_sds_io_request_terminate(struct isci_request *ireq)
+sci_io_request_terminate(struct isci_request *ireq)
{
enum sci_base_request_states state;
switch (state) {
case SCI_REQ_CONSTRUCTED:
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_TASK_ABORT,
SCI_FAILURE_IO_TERMINATED);
return SCI_FAILURE_INVALID_STATE;
}
-enum sci_status scic_sds_request_complete(struct isci_request *ireq)
+enum sci_status sci_request_complete(struct isci_request *ireq)
{
enum sci_base_request_states state;
struct isci_host *ihost = ireq->owning_controller;
return SCI_FAILURE_INVALID_STATE;
if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
- scic_sds_controller_release_frame(ihost,
+ sci_controller_release_frame(ihost,
ireq->saved_rx_frame_index);
/* XXX can we just stop the machine and remove the 'final' state? */
return SCI_SUCCESS;
}
-enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq,
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
u32 event_code)
{
enum sci_base_request_states state;
* @sci_req: This parameter specifies the request object for which to copy
* the response data.
*/
-static void scic_sds_io_request_copy_response(struct isci_request *ireq)
+static void sci_io_request_copy_response(struct isci_request *ireq)
{
void *resp_buf;
u32 len;
*/
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
break;
word_cnt);
if (resp->status == 0) {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_GOOD,
SCI_SUCCESS_IO_DONE_EARLY);
} else {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
}
&ireq->ssp.rsp,
word_cnt);
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
break;
datapres = resp_iu->datapres;
if (datapres == 1 || datapres == 2) {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
} else
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
break;
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
if (ireq->protocol == SCIC_STP_PROTOCOL) {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT,
SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
} else {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT,
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT,
SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
default:
- scic_sds_request_set_status(
+ sci_request_set_status(
ireq,
SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT,
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
+ sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
SCI_FAILURE_IO_TERMINATED);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
* If a NAK was received, then it is up to the user to retry
* the request.
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
* unexpected, but if the TC has success status, we
* complete the IO anyway.
*/
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
* these SMP_XXX_XX_ERR statuses. For these types of errors,
* we ask the ihost user to retry the request.
*/
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
+ sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
SCI_FAILURE_RETRY_REQUIRED);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* All other completion status cause the IO to be complete. If a NAK
* was received, then it is up to the user to retry the request
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
* complete. If a NAK was received, then it is up to
* the user to retry the request.
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
return SCI_SUCCESS;
}
-void scic_stp_io_request_set_ncq_tag(struct isci_request *ireq,
+void sci_stp_io_request_set_ncq_tag(struct isci_request *ireq,
u16 ncq_tag)
{
/**
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
* complete. If a NAK was received, then it is up to
* the user to retry the request.
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
/* transmit DATA_FIS from (current sgl + offset) for input
* parameter length. The current sgl and offset are already stored in
* the IO request
*/
-static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
+static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
struct isci_request *ireq,
u32 length)
{
task_context->type.stp.fis_type = FIS_DATA;
/* send the new TC out. */
- return scic_controller_continue_io(ireq);
+ return sci_controller_continue_io(ireq);
}
-static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
struct isci_stp_request *stp_req = &ireq->stp.req;
struct scu_sgl_element_pair *sgl_pair;
return SCI_SUCCESS;
if (stp_req->pio_len >= len) {
- status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
+ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
if (status != SCI_SUCCESS)
return status;
stp_req->pio_len -= len;
sgl = pio_sgl_next(stp_req);
offset = 0;
} else if (stp_req->pio_len < len) {
- scic_sds_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
+ sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
/* Sgl offset will be adjusted and saved for future */
offset += stp_req->pio_len;
* specified data region. Returns an enum sci_status.
*/
static enum sci_status
-scic_sds_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
u8 *data_buf, u32 len)
{
struct isci_request *ireq;
*
* Copy the data buffer to the io request data region. Returns an enum sci_status.
*/
-static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
struct isci_stp_request *stp_req,
u8 *data_buffer)
{
* If there is less than 1K remaining in the transfer request,
* copy just the data for the transfer */
if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
- status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
stp_req, data_buffer, stp_req->pio_len);
if (status == SCI_SUCCESS)
stp_req->pio_len = 0;
} else {
/* We are transferring the whole frame so copy */
- status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
if (status == SCI_SUCCESS)
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
* complete. If a NAK was received, then it is up to
* the user to retry the request.
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
/* Transmit data */
if (stp_req->pio_len != 0) {
- status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
if (status == SCI_SUCCESS) {
if (stp_req->pio_len == 0)
all_frames_transferred = true;
* If a NAK was received, then it is up to the user to retry
* the request.
*/
- scic_sds_request_set_status(
+ sci_request_set_status(
ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
return status;
}
-static void scic_sds_stp_request_udma_complete_request(
+static void sci_stp_request_udma_complete_request(
struct isci_request *ireq,
u32 scu_status,
enum sci_status sci_status)
{
- scic_sds_request_set_status(ireq, scu_status, sci_status);
+ sci_request_set_status(ireq, scu_status, sci_status);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
-static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
u32 frame_index)
{
struct isci_host *ihost = ireq->owning_controller;
enum sci_status status;
u32 *frame_buffer;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if ((status == SCI_SUCCESS) &&
(frame_header->fis_type == FIS_REGD2H)) {
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
- scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
}
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return status;
}
enum sci_status
-scic_sds_io_request_frame_handler(struct isci_request *ireq,
+sci_io_request_frame_handler(struct isci_request *ireq,
u32 frame_index)
{
struct isci_host *ihost = ireq->owning_controller;
struct ssp_frame_hdr ssp_hdr;
void *frame_header;
- scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
struct ssp_response_iu *resp_iu;
ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&resp_iu);
if (resp_iu->datapres == 0x01 ||
resp_iu->datapres == 0x02) {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
} else
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
} else {
* In any case we are done with this frame buffer; return it to
* the controller
*/
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
}
case SCI_REQ_TASK_WAIT_TC_RESP:
- scic_sds_io_request_copy_response(ireq);
+ sci_io_request_copy_response(ireq);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- scic_sds_controller_release_frame(ihost,frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
case SCI_REQ_SMP_WAIT_RESP: {
struct smp_resp *rsp_hdr = &ireq->smp.rsp;
void *frame_header;
- scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
&frame_header);
if (rsp_hdr->frame_type == SMP_RESPONSE) {
void *smp_resp;
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
&smp_resp);
sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
smp_resp, word_cnt);
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
frame_index,
rsp_hdr->frame_type);
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
}
case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
- return scic_sds_stp_request_udma_general_frame_handler(ireq,
+ return sci_stp_request_udma_general_frame_handler(ireq,
frame_index);
case SCI_REQ_STP_UDMA_WAIT_D2H:
/* Use the general frame handler to copy the response data */
- status = scic_sds_stp_request_udma_general_frame_handler(ireq,
+ status = sci_stp_request_udma_general_frame_handler(ireq,
frame_index);
if (status != SCI_SUCCESS)
return status;
- scic_sds_stp_request_udma_complete_request(ireq,
+ sci_stp_request_udma_complete_request(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
switch (frame_header->fis_type) {
case FIS_REGD2H:
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
- scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
/* The command has completed with error */
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
+ sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
break;
"violation occurred\n", __func__, stp_req,
frame_index);
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
+ sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
SCI_FAILURE_PROTOCOL_VIOLATION);
break;
}
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* Frame has been decoded; return it to the controller */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return status;
}
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
switch (frame_header->fis_type) {
case FIS_PIO_SETUP:
/* Get from the frame buffer the PIO Setup Data */
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
/* status: 4th byte in the 3rd dword */
stp_req->status = (frame_buffer[2] >> 24) & 0xff;
- scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
} else if (task->data_dir == DMA_TO_DEVICE) {
/* Transmit data */
- status = scic_sds_stp_request_pio_data_out_transmit_data(ireq);
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
if (status != SCI_SUCCESS)
break;
sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
break;
}
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
- scic_sds_controller_copy_sata_response(&ireq->stp.req,
+ sci_controller_copy_sata_response(&ireq->stp.req,
frame_header,
frame_buffer);
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
}
/* Frame is decoded; return it to the controller */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return status;
}
struct dev_to_host_fis *frame_header;
struct sata_fis_data *frame_buffer;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
frame_index,
frame_header->fis_type);
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_GOOD,
SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* Frame is decoded; return it to the controller */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return status;
}
ireq->saved_rx_frame_index = frame_index;
stp_req->pio_len = 0;
} else {
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
- status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
+ status = sci_stp_request_pio_data_in_copy_data(stp_req,
(u8 *)frame_buffer);
/* Frame is decoded; return it to the controller */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
}
/* Check for the end of the transfer, are there more
return status;
if ((stp_req->status & ATA_BUSY) == 0) {
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
struct dev_to_host_fis *frame_header;
u32 *frame_buffer;
- status = scic_sds_unsolicited_frame_control_get_header(&ihost->uf_control,
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
frame_index,
(void **)&frame_header);
if (status != SCI_SUCCESS) {
switch (frame_header->fis_type) {
case FIS_REGD2H:
- scic_sds_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
frame_index,
(void **)&frame_buffer);
- scic_sds_controller_copy_sata_response(&ireq->stp.rsp,
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
frame_header,
frame_buffer);
/* The command has completed with error */
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
break;
stp_req,
frame_index);
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_TASK_DONE_UNEXP_FIS,
SCI_FAILURE_PROTOCOL_VIOLATION);
break;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
/* Frame has been decoded; return it to the controller */
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return status;
}
* TODO: Is it even possible to get an unsolicited frame in the
* aborting state?
*/
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return SCI_SUCCESS;
default:
frame_index,
state);
- scic_sds_controller_release_frame(ihost, frame_index);
+ sci_controller_release_frame(ihost, frame_index);
return SCI_FAILURE_INVALID_STATE;
}
}
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_stp_request_udma_complete_request(ireq,
+ sci_stp_request_udma_complete_request(ireq,
SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
break;
* completion.
*/
if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
- scic_sds_remote_device_suspend(ireq->target_device,
+ sci_remote_device_suspend(ireq->target_device,
SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
- scic_sds_stp_request_udma_complete_request(ireq,
+ sci_stp_request_udma_complete_request(ireq,
SCU_TASK_DONE_CHECK_RESPONSE,
SCI_FAILURE_IO_RESPONSE_VALID);
} else {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
- scic_sds_remote_device_suspend(ireq->target_device,
+ sci_remote_device_suspend(ireq->target_device,
SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
/* Fall through to the default case */
default:
/* All other completion status cause the IO to be complete. */
- scic_sds_stp_request_udma_complete_request(ireq,
+ sci_stp_request_udma_complete_request(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
break;
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
* If a NAK was received, then it is up to the user to retry
* the request.
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
{
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
- scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD,
+ sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
* a NAK was received, then it is up to the user to retry the
* request.
*/
- scic_sds_request_set_status(ireq,
+ sci_request_set_status(ireq,
SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
}
enum sci_status
-scic_sds_io_request_tc_completion(struct isci_request *ireq,
+sci_io_request_tc_completion(struct isci_request *ireq,
u32 completion_code)
{
enum sci_base_request_states state;
);
/* complete the io request to the core. */
- scic_controller_complete_io(ihost, request->target_device, request);
+ sci_controller_complete_io(ihost, request->target_device, request);
isci_put_device(idev);
/* set terminated handle so it cannot be completed or
set_bit(IREQ_TERMINATED, &request->flags);
}
-static void scic_sds_request_started_state_enter(struct sci_base_state_machine *sm)
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct domain_device *dev = ireq->target_device->domain_dev;
}
}
-static void scic_sds_request_completed_state_enter(struct sci_base_state_machine *sm)
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct isci_host *ihost = ireq->owning_controller;
isci_task_request_complete(ihost, ireq, ireq->sci_status);
}
-static void scic_sds_request_aborting_state_enter(struct sci_base_state_machine *sm)
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
ireq->tc->abort = 1;
}
-static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
- scic_sds_remote_device_set_working_request(ireq->target_device,
+ sci_remote_device_set_working_request(ireq->target_device,
ireq);
}
-static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
- scic_sds_remote_device_set_working_request(ireq->target_device,
+ sci_remote_device_set_working_request(ireq->target_device,
ireq);
}
-static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
- scic_sds_remote_device_set_working_request(ireq->target_device,
+ sci_remote_device_set_working_request(ireq->target_device,
ireq);
}
-static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
{
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
struct scu_task_context *tc = ireq->tc;
/* Clear the TC control bit */
tc->control_frame = 0;
- status = scic_controller_continue_io(ireq);
+ status = sci_controller_continue_io(ireq);
WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
}
-static const struct sci_base_state scic_sds_request_state_table[] = {
+static const struct sci_base_state sci_request_state_table[] = {
[SCI_REQ_INIT] = { },
[SCI_REQ_CONSTRUCTED] = { },
[SCI_REQ_STARTED] = {
- .enter_state = scic_sds_request_started_state_enter,
+ .enter_state = sci_request_started_state_enter,
},
[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
- .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
+ .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
},
[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
[SCI_REQ_STP_PIO_WAIT_H2D] = {
- .enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
+ .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
},
[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
[SCI_REQ_STP_PIO_DATA_IN] = { },
[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
- .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
},
[SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
- .enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
},
[SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
[SCI_REQ_TASK_WAIT_TC_COMP] = { },
[SCI_REQ_SMP_WAIT_RESP] = { },
[SCI_REQ_SMP_WAIT_TC_COMP] = { },
[SCI_REQ_COMPLETED] = {
- .enter_state = scic_sds_request_completed_state_enter,
+ .enter_state = sci_request_completed_state_enter,
},
[SCI_REQ_ABORTING] = {
- .enter_state = scic_sds_request_aborting_state_enter,
+ .enter_state = sci_request_aborting_state_enter,
},
[SCI_REQ_FINAL] = { },
};
static void
-scic_sds_general_request_construct(struct isci_host *ihost,
+sci_general_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
- sci_init_sm(&ireq->sm, scic_sds_request_state_table, SCI_REQ_INIT);
+ sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
ireq->target_device = idev;
ireq->protocol = SCIC_NO_PROTOCOL;
}
static enum sci_status
-scic_io_request_construct(struct isci_host *ihost,
+sci_io_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq)
{
enum sci_status status = SCI_SUCCESS;
/* Build the common part of the request */
- scic_sds_general_request_construct(ihost, idev, ireq);
+ sci_general_request_construct(ihost, idev, ireq);
if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_REMOTE_DEVICE;
return status;
}
-enum sci_status scic_task_request_construct(struct isci_host *ihost,
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag, struct isci_request *ireq)
{
enum sci_status status = SCI_SUCCESS;
/* Build the common part of the request */
- scic_sds_general_request_construct(ihost, idev, ireq);
+ sci_general_request_construct(ihost, idev, ireq);
if (dev->dev_type == SAS_END_DEV ||
dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
"%s: request = %p\n",
__func__,
request);
- status = scic_io_request_construct_basic_ssp(request);
+ status = sci_io_request_construct_basic_ssp(request);
return status;
}
*/
register_fis = isci_sata_task_to_fis_copy(task);
- status = scic_io_request_construct_basic_sata(request);
+ status = sci_io_request_construct_basic_sata(request);
/* Set the ncq tag in the fis, from the queue
* command in the task.
}
static enum sci_status
-scic_io_request_construct_smp(struct device *dev,
+sci_io_request_construct_smp(struct device *dev,
struct isci_request *ireq,
struct sas_task *task)
{
task_context = ireq->tc;
- idev = scic_sds_request_get_device(ireq);
- iport = scic_sds_request_get_port(ireq);
+ idev = sci_request_get_device(ireq);
+ iport = sci_request_get_port(ireq);
/*
* Fill in the TC with its required data
task_context->initiator_request = 1;
task_context->connection_rate = idev->connection_rate;
task_context->protocol_engine_index =
- scic_sds_controller_get_protocol_engine_group(ihost);
- task_context->logical_port_index = scic_sds_port_get_index(iport);
+ sci_controller_get_protocol_engine_group(ihost);
+ task_context->logical_port_index = sci_port_get_index(iport);
task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
task_context->abort = 0;
task_context->valid = SCU_TASK_CONTEXT_VALID;
task_context->task_phase = 0;
ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
- (scic_sds_controller_get_protocol_engine_group(ihost) <<
+ (sci_controller_get_protocol_engine_group(ihost) <<
SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
- (scic_sds_port_get_index(iport) <<
+ (sci_port_get_index(iport) <<
SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
ISCI_TAG_TCI(ireq->io_tag));
/*
struct device *dev = &ireq->isci_host->pdev->dev;
enum sci_status status = SCI_FAILURE;
- status = scic_io_request_construct_smp(dev, ireq, task);
+ status = sci_io_request_construct_smp(dev, ireq, task);
if (status != SCI_SUCCESS)
dev_warn(&ireq->isci_host->pdev->dev,
"%s: failed with status = %d\n",
return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}
- status = scic_io_request_construct(ihost, idev, request);
+ status = sci_io_request_construct(ihost, idev, request);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
* request was built that way (i.e.
* ireq->is_task_management_request is false).
*/
- status = scic_controller_start_task(ihost,
+ status = sci_controller_start_task(ihost,
idev,
ireq);
} else {
}
} else {
/* send the request, let the core assign the IO TAG. */
- status = scic_controller_start_io(ihost, idev,
+ status = sci_controller_start_io(ihost, idev,
ireq);
}
};
/**
- * scic_sds_request_get_controller() -
+ * sci_request_get_controller() -
*
* This macro will return the controller for this io request object
*/
-#define scic_sds_request_get_controller(ireq) \
+#define sci_request_get_controller(ireq) \
((ireq)->owning_controller)
/**
- * scic_sds_request_get_device() -
+ * sci_request_get_device() -
*
* This macro will return the device for this io request object
*/
-#define scic_sds_request_get_device(ireq) \
+#define sci_request_get_device(ireq) \
((ireq)->target_device)
/**
- * scic_sds_request_get_port() -
+ * sci_request_get_port() -
*
* This macro will return the port for this io request object
*/
-#define scic_sds_request_get_port(ireq) \
- scic_sds_remote_device_get_port(scic_sds_request_get_device(ireq))
+#define sci_request_get_port(ireq) \
+ sci_remote_device_get_port(sci_request_get_device(ireq))
/**
- * scic_sds_request_get_post_context() -
+ * sci_request_get_post_context() -
*
* This macro returns the constructed post context result for the io request.
*/
-#define scic_sds_request_get_post_context(ireq) \
+#define sci_request_get_post_context(ireq) \
((ireq)->post_context)
/**
- * scic_sds_request_get_task_context() -
+ * sci_request_get_task_context() -
*
* This is a helper macro to return the os handle for this request object.
*/
-#define scic_sds_request_get_task_context(request) \
+#define sci_request_get_task_context(request) \
((request)->task_context_buffer)
/**
- * scic_sds_request_set_status() -
+ * sci_request_set_status() -
*
* This macro will set the scu hardware status and sci request completion
* status for an io request.
*/
-#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
+#define sci_request_set_status(request, scu_status_code, sci_status_code) \
{ \
(request)->scu_status = (scu_status_code); \
(request)->sci_status = (sci_status_code); \
}
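/* Editor's sketch of the macro in use, mirroring the completion
 * handlers earlier in this patch (hypothetical call site):
 */
sci_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);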
-enum sci_status scic_sds_request_start(struct isci_request *ireq);
-enum sci_status scic_sds_io_request_terminate(struct isci_request *ireq);
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
enum sci_status
-scic_sds_io_request_event_handler(struct isci_request *ireq,
+sci_io_request_event_handler(struct isci_request *ireq,
u32 event_code);
enum sci_status
-scic_sds_io_request_frame_handler(struct isci_request *ireq,
+sci_io_request_frame_handler(struct isci_request *ireq,
u32 frame_index);
enum sci_status
-scic_sds_task_request_terminate(struct isci_request *ireq);
+sci_task_request_terminate(struct isci_request *ireq);
extern enum sci_status
-scic_sds_request_complete(struct isci_request *ireq);
+sci_request_complete(struct isci_request *ireq);
extern enum sci_status
-scic_sds_io_request_tc_completion(struct isci_request *ireq, u32 code);
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
/* XXX open code in caller */
static inline dma_addr_t
-scic_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
{
char *requested_addr = (char *)virt_addr;
void isci_terminate_pending_requests(struct isci_host *ihost,
struct isci_remote_device *idev);
enum sci_status
-scic_task_request_construct(struct isci_host *ihost,
+sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
u16 io_tag,
struct isci_request *ireq);
enum sci_status
-scic_task_request_construct_ssp(struct isci_request *ireq);
+sci_task_request_construct_ssp(struct isci_request *ireq);
enum sci_status
-scic_task_request_construct_sata(struct isci_request *ireq);
+sci_task_request_construct_sata(struct isci_request *ireq);
void
-scic_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
-void scic_sds_smp_request_copy_response(struct isci_request *ireq);
+sci_stp_io_request_set_ncq_tag(struct isci_request *ireq, u16 ncq_tag);
+void sci_smp_request_copy_response(struct isci_request *ireq);
static inline int isci_task_is_ncq_recovery(struct sas_task *task)
{
struct isci_request *request = task->lldd_task;
register_fis->sector_count = qc->tag << 3;
- scic_stp_io_request_set_ncq_tag(request, qc->tag);
+ sci_stp_io_request_set_ncq_tag(request, qc->tag);
}
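/* Editor's note (assumption from the SATA NCQ FIS layout, not patch
 * content): the 5-bit queue tag occupies bits 7:3 of the FIS
 * sector-count field, hence qc->tag << 3 above, while the raw tag is
 * recorded separately via sci_stp_io_request_set_ncq_tag().
 */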
/**
/* core builds the protocol specific request
* based on the h2d fis.
*/
- status = scic_task_request_construct_sata(ireq);
+ status = sci_task_request_construct_sata(ireq);
return status;
}
return NULL;
/* let the core do its construct. */
- status = scic_task_request_construct(ihost, idev, tag,
+ status = sci_task_request_construct(ihost, idev, tag,
ireq);
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
- "%s: scic_task_request_construct failed - "
+ "%s: sci_task_request_construct failed - "
"status = 0x%x\n",
__func__,
status);
/* XXX convert to get this from task->tproto like other drivers */
if (dev->dev_type == SAS_END_DEV) {
isci_tmf->proto = SAS_PROTOCOL_SSP;
- status = scic_task_request_construct_ssp(ireq);
+ status = sci_task_request_construct_ssp(ireq);
if (status != SCI_SUCCESS)
return NULL;
}
spin_lock_irqsave(&ihost->scic_lock, flags);
/* start the TMF io. */
- status = scic_controller_start_task(ihost, idev, ireq);
+ status = sci_controller_start_task(ihost, idev, ireq);
if (status != SCI_TASK_SUCCESS) {
dev_warn(&ihost->pdev->dev,
if (tmf->cb_state_func != NULL)
tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
- scic_controller_terminate_request(ihost,
+ sci_controller_terminate_request(ihost,
idev,
ireq);
if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
was_terminated = true;
needs_cleanup_handling = true;
- status = scic_controller_terminate_request(ihost,
+ status = sci_controller_terminate_request(ihost,
idev,
isci_request);
}
*/
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
- "%s: scic_controller_terminate_request"
+ "%s: sci_controller_terminate_request"
" returned = 0x%x\n",
__func__, status);
/* PRINT_TMF( ((struct isci_tmf *)request->task)); */
tmf_complete = tmf->complete;
- scic_controller_complete_io(ihost, ireq->target_device, ireq);
+ sci_controller_complete_io(ihost, ireq->target_device, ireq);
/* set the 'terminated' flag to make sure it cannot be terminated
* or completed again.
*/
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
spin_lock_irqsave(&ihost->scic_lock, flags);
- status = scic_remote_device_reset(idev);
+ status = sci_remote_device_reset(idev);
if (status != SCI_SUCCESS) {
spin_unlock_irqrestore(&ihost->scic_lock, flags);
dev_warn(&ihost->pdev->dev,
- "%s: scic_remote_device_reset(%p) returned %d!\n",
+ "%s: sci_remote_device_reset(%p) returned %d!\n",
__func__, idev, status);
return TMF_RESP_FUNC_FAILED;
/* Since all pending TCs have been cleaned, resume the RNC. */
spin_lock_irqsave(&ihost->scic_lock, flags);
- status = scic_remote_device_reset_complete(idev);
+ status = sci_remote_device_reset_complete(idev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
/* If this is a device on an expander, bring the phy back up. */
if (status != SCI_SUCCESS) {
dev_warn(&ihost->pdev->dev,
- "%s: scic_remote_device_reset_complete(%p) "
+ "%s: sci_remote_device_reset_complete(%p) "
"returned %d!\n", __func__, idev, status);
}
#include "unsolicited_frame_control.h"
#include "registers.h"
-int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost)
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
{
- struct scic_sds_unsolicited_frame_control *uf_control = &ihost->uf_control;
- struct scic_sds_unsolicited_frame *uf;
+ struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+ struct sci_unsolicited_frame *uf;
u32 buf_len, header_len, i;
dma_addr_t dma;
size_t size;
return 0;
}
-/**
- * This method returns the frame header for the specified frame index.
- * @uf_control:
- * @frame_index:
- * @frame_header:
- *
- * enum sci_status
- */
-enum sci_status scic_sds_unsolicited_frame_control_get_header(
- struct scic_sds_unsolicited_frame_control *uf_control,
- u32 frame_index,
- void **frame_header)
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header)
{
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
- /*
- * Skip the first word in the frame since this is a controll word used
- * by the hardware. */
+ /* Skip the first word in the frame since this is a control word used
+ * by the hardware.
+ */
*frame_header = &uf_control->buffers.array[frame_index].header->data;
return SCI_SUCCESS;
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
-/**
- * This method returns the frame buffer for the specified frame index.
- * @uf_control:
- * @frame_index:
- * @frame_buffer:
- *
- * enum sci_status
- */
-enum sci_status scic_sds_unsolicited_frame_control_get_buffer(
- struct scic_sds_unsolicited_frame_control *uf_control,
- u32 frame_index,
- void **frame_buffer)
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer)
{
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
*frame_buffer = uf_control->buffers.array[frame_index].buffer;
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
}
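Both getters follow the same bounds-checked pattern, so a hypothetical frame-processing caller (names here are illustrative) would pair them before touching a frame:

	void *frame_header;
	void *frame_buffer;

	if (sci_unsolicited_frame_control_get_header(uf_control, frame_index,
						     &frame_header) != SCI_SUCCESS ||
	    sci_unsolicited_frame_control_get_buffer(uf_control, frame_index,
						     &frame_buffer) != SCI_SUCCESS)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/* frame_header now points past the hardware control word and
	 * frame_buffer at the raw payload for this frame_index.
	 */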
-/**
- * This method releases the frame once this is done the frame is available for
- * re-use by the hardware. The data contained in the frame header and frame
- * buffer is no longer valid.
- * @uf_control: This parameter specifies the UF control object
- * @frame_index: This parameter specifies the frame index to attempt to release.
- *
- * This method returns an indication to the caller as to whether the
- * unsolicited frame get pointer should be updated.
- */
-bool scic_sds_unsolicited_frame_control_release_frame(
- struct scic_sds_unsolicited_frame_control *uf_control,
- u32 frame_index)
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index)
{
u32 frame_get;
u32 frame_cycle;
};
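The two locals above implement a classic ring-buffer get pointer with a cycle flag. A simplified model of the bookkeeping, assuming SCU_MAX_UNSOLICITED_FRAMES is a power of two (the real routine also has to skip NULLed address-table entries and cope with out-of-order releases, which this sketch omits):

	frame_get   = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
	frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;

	if (frame_index != frame_get)
		return false;	/* not the oldest frame; get pointer unchanged */

	/* Oldest outstanding frame released: advance the index and toggle
	 * the cycle flag on wrap so full and empty remain distinguishable.
	 */
	if (++frame_get >= SCU_MAX_UNSOLICITED_FRAMES) {
		frame_get = 0;
		frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
	}
	uf_control->get = frame_cycle | frame_get;

	return true;		/* caller should post the new get pointer */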
/**
- * struct scic_sds_unsolicited_frame -
+ * struct sci_unsolicited_frame -
*
* This is the unsolicited frame data structure; it acts as the container for
* the current frame state, frame header, and frame buffer.
*/
-struct scic_sds_unsolicited_frame {
+struct sci_unsolicited_frame {
/**
* This field contains the current frame state
*/
};
/**
- * struct scic_sds_uf_header_array -
+ * struct sci_uf_header_array -
*
* This structure contains all of the unsolicited frame header information.
*/
-struct scic_sds_uf_header_array {
+struct sci_uf_header_array {
/**
* This field represents a virtual pointer to the start
* address of the UF address table. The table contains
};
/**
- * struct scic_sds_uf_buffer_array -
+ * struct sci_uf_buffer_array -
*
* This structure contains all of the unsolicited frame buffer (actual payload)
* information.
*/
-struct scic_sds_uf_buffer_array {
+struct sci_uf_buffer_array {
/**
* This field is the unsolicited frame data; it is used to manage
* the data for the unsolicited frame requests. It also represents
* the virtual address location that corresponds to the
* physical_address field.
*/
- struct scic_sds_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+ struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
/**
* This field specifies the physical address location for the UF
};
/**
- * struct scic_sds_uf_address_table_array -
+ * struct sci_uf_address_table_array -
*
* This object maintains all of the unsolicited frame address table specific
* data. The address table is a collection of 64-bit pointers that point to
* 1KB buffers into which the silicon will DMA unsolicited frames.
*/
-struct scic_sds_uf_address_table_array {
+struct sci_uf_address_table_array {
/**
* This field represents a virtual pointer that refers to the
* starting address of the UF address table.
};
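Given that description, populating the table is a straightforward loop. A hedged illustration, assuming the buffers array records the bus address of its first payload buffer in a physical_address field and that each buffer is the documented 1KB:

	for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++)
		uf_control->address_table.array[i] =
			uf_control->buffers.physical_address + i * SZ_1K;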
/**
- * struct scic_sds_unsolicited_frame_control -
+ * struct sci_unsolicited_frame_control -
*
* This object contains all of the data necessary to handle unsolicited frames.
*/
-struct scic_sds_unsolicited_frame_control {
+struct sci_unsolicited_frame_control {
/**
* This field is the software copy of the unsolicited frame queue
* get pointer. The controller object writes this value to the
* This field contains all of the unsolicited frame header
* specific fields.
*/
- struct scic_sds_uf_header_array headers;
+ struct sci_uf_header_array headers;
/**
* This field contains all of the unsolicited frame buffer
* specific fields.
*/
- struct scic_sds_uf_buffer_array buffers;
+ struct sci_uf_buffer_array buffers;
/**
* This field contains all of the unsolicited frame address table
* specific fields.
*/
- struct scic_sds_uf_address_table_array address_table;
+ struct sci_uf_address_table_array address_table;
};
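Tying the pieces together: when release_frame() reports that the software get pointer advanced, the controller is expected to post the new value back to the silicon so the freed 1KB slots can be refilled. A sketch of that hand-off (the register path is illustrative, not taken from this patch):

	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control,
							frame_index))
		writel(ihost->uf_control.get,
		       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);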
struct isci_host;
-int scic_sds_unsolicited_frame_control_construct(struct isci_host *ihost);
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
-enum sci_status scic_sds_unsolicited_frame_control_get_header(
- struct scic_sds_unsolicited_frame_control *uf_control,
+enum sci_status sci_unsolicited_frame_control_get_header(
+ struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_header);
-enum sci_status scic_sds_unsolicited_frame_control_get_buffer(
- struct scic_sds_unsolicited_frame_control *uf_control,
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+ struct sci_unsolicited_frame_control *uf_control,
u32 frame_index,
void **frame_buffer);
-bool scic_sds_unsolicited_frame_control_release_frame(
- struct scic_sds_unsolicited_frame_control *uf_control,
+bool sci_unsolicited_frame_control_release_frame(
+ struct sci_unsolicited_frame_control *uf_control,
u32 frame_index);
#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */