memset(drv_map, 0, fusion->drv_map_sz);
memset(pDrvRaidMap->ldTgtIdToLd,
- 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
+ 0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
if (instance->max_raid_mapsize) {
fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
fw_map_dyn->dev_hndl_info =
(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
memcpy(pDrvRaidMap->devHndlInfo,
- fw_map_dyn->dev_hndl_info,
- sizeof(struct MR_DEV_HANDLE_INFO) *
- le32_to_cpu(desc_table->raid_map_desc_elements));
+ fw_map_dyn->dev_hndl_info,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ le32_to_cpu(desc_table->raid_map_desc_elements));
break;
case RAID_MAP_DESC_TYPE_TGTID_INFO:
fw_map_dyn->ld_tgt_id_to_ld =
- (u16 *) (raid_map_data +
- le32_to_cpu(desc_table->raid_map_desc_offset));
- for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
- pDrvRaidMap->ldTgtIdToLd[j] =
- le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
- }
+ (u16 *)(raid_map_data +
+ le32_to_cpu(desc_table->raid_map_desc_offset));
+ for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
+ pDrvRaidMap->ldTgtIdToLd[j] =
+ le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
+ }
break;
case RAID_MAP_DESC_TYPE_ARRAY_INFO:
fw_map_dyn->ar_map_info =
- (struct MR_ARRAY_INFO *)
- (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+ (struct MR_ARRAY_INFO *)
+ (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
memcpy(pDrvRaidMap->arMapInfo,
- fw_map_dyn->ar_map_info,
- sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements));
+ fw_map_dyn->ar_map_info,
+ sizeof(struct MR_ARRAY_INFO) *
+ le32_to_cpu(desc_table->raid_map_desc_elements));
break;
case RAID_MAP_DESC_TYPE_SPAN_INFO:
fw_map_dyn->ld_span_map =
- (struct MR_LD_SPAN_MAP *)
- (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+ (struct MR_LD_SPAN_MAP *)
+ (raid_map_data +
+ le32_to_cpu(desc_table->raid_map_desc_offset));
memcpy(pDrvRaidMap->ldSpanMap,
- fw_map_dyn->ld_span_map,
- sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements));
+ fw_map_dyn->ld_span_map,
+ sizeof(struct MR_LD_SPAN_MAP) *
+ le32_to_cpu(desc_table->raid_map_desc_elements));
break;
default:
dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
} else if (instance->supportmax256vd) {
fw_map_ext =
- (struct MR_FW_RAID_MAP_EXT *) fusion->ld_map[(instance->map_id & 1)];
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
pDrvRaidMap->ldTgtIdToLd[i] =
(u16)fw_map_ext->ldTgtIdToLd[i];
memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
- sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+ sizeof(struct MR_LD_SPAN_MAP) * ld_count);
memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
- sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+ sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
- sizeof(struct MR_DEV_HANDLE_INFO) *
- MAX_RAIDMAP_PHYSICAL_DEVICES);
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
/* New Raid map will not set totalSize, so keep expected value
* for legacy code in ValidateMapInfo
dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
le32_to_cpu(pDrvRaidMap->totalSize));
dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
- (unsigned int) expected_size);
+ (unsigned int)expected_size);
dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
le32_to_cpu(pDrvRaidMap->totalSize));
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
if (instance->is_ventura) {
- ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
+ ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
if (instance->is_ventura) {
- ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
+ ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
* keep driver in sync with Firmware
*/
if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
- (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+ (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
bestArm ^= 1;
/* Update the last accessed block on the correct pd */
return -ENOMEM;
}
-
-
for (i = 0; i < max_mpt_cmd; i++) {
fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
GFP_KERNEL);
void
map_cmd_status(struct fusion_context *fusion,
- struct scsi_cmnd *scmd, u8 status, u8 ext_status,
- u32 data_length, u8 *sense)
+ struct scsi_cmnd *scmd, u8 status, u8 ext_status,
+ u32 data_length, u8 *sense)
{
u8 cmd_type;
int resid;
/** stream detection on read and and write IOs */
static void megasas_stream_detect(struct megasas_instance *instance,
- struct megasas_cmd_fusion *cmd,
- struct IO_REQUEST_INFO *io_info)
+ struct megasas_cmd_fusion *cmd,
+ struct IO_REQUEST_INFO *io_info)
{
struct fusion_context *fusion = instance->ctrl_context;
u32 device_id = io_info->ldTgtId;
struct STREAM_DETECT *current_sd;
/* find possible stream */
for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
- stream_num =
- (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
+ stream_num = (*track_stream >>
+ (i * BITS_PER_INDEX_STREAM)) &
STREAM_MASK;
current_sd = &current_ld_sd->stream_track[stream_num];
- /* if we found a stream, update the raid
- * context and also update the mruBitMap
- */
- /* boundary condition */
- if ((current_sd->next_seq_lba) &&
- (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
- (io_info->ldStartBlock <= (current_sd->next_seq_lba+32)) &&
- (current_sd->is_read == io_info->isRead)) {
-
- if ((io_info->ldStartBlock != current_sd->next_seq_lba)
- && ((!io_info->isRead) || (!is_read_ahead)))
- /*
- * Once the API availible we need to change this.
- * At this point we are not allowing any gap
- */
- continue;
-
- SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
- current_sd->next_seq_lba =
- io_info->ldStartBlock + io_info->numBlocks;
- /*
- * update the mruBitMap LRU
+ /* if we found a stream, update the raid
+ * context and also update the mruBitMap
*/
- shifted_values_mask =
- (1 << i * BITS_PER_INDEX_STREAM) - 1;
- shifted_values = ((*track_stream & shifted_values_mask)
- << BITS_PER_INDEX_STREAM);
- index_value_mask =
- STREAM_MASK << i * BITS_PER_INDEX_STREAM;
- unshifted_values =
- *track_stream & ~(shifted_values_mask |
- index_value_mask);
- *track_stream =
- unshifted_values | shifted_values | stream_num;
- return;
+ /* boundary condition */
+ if ((current_sd->next_seq_lba) &&
+ (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
+ (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
+ (current_sd->is_read == io_info->isRead)) {
+
+ if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
+ ((!io_info->isRead) || (!is_read_ahead)))
+ /*
+ * Once the API availible we need to change this.
+ * At this point we are not allowing any gap
+ */
+ continue;
+ SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
+ current_sd->next_seq_lba =
+ io_info->ldStartBlock + io_info->numBlocks;
+ /*
+ * update the mruBitMap LRU
+ */
+ shifted_values_mask =
+ (1 << i * BITS_PER_INDEX_STREAM) - 1;
+ shifted_values = ((*track_stream & shifted_values_mask)
+ << BITS_PER_INDEX_STREAM);
+ index_value_mask =
+ STREAM_MASK << i * BITS_PER_INDEX_STREAM;
+ unshifted_values =
+ *track_stream & ~(shifted_values_mask |
+ index_value_mask);
+ *track_stream =
+ unshifted_values | shifted_values | stream_num;
+ return;
}
-
}
/*
* if we did not find any stream, create a new one
* from the least recently used
*/
- stream_num =
- (*track_stream >> ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
- STREAM_MASK;
+ stream_num = (*track_stream >>
+ ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
+ STREAM_MASK;
current_sd = &current_ld_sd->stream_track[stream_num];
current_sd->is_read = io_info->isRead;
current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
- *track_stream =
- (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
+ *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
return;
-
}
/**