static u32
megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&(regs)->outbound_scratch_pad_0);
}
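The rename across all of these read helpers follows the register map in megaraid_sas.h, which now numbers the four scratch pad registers 0 through 3 instead of the old unsuffixed/_2/_3/_4 names. A minimal sketch of the renamed fields; the MMIO offsets are assumptions about the MFI register layout, with the old field name noted on each line:

#include <linux/types.h>

/* Sketch only: renamed fields in struct megasas_register_set. */
struct megasas_register_set_sketch {
	u32 outbound_scratch_pad_0;	/* 0x00B0, was outbound_scratch_pad   */
	u32 outbound_scratch_pad_1;	/* 0x00B4, was outbound_scratch_pad_2 */
	u32 outbound_scratch_pad_2;	/* 0x00B8, was outbound_scratch_pad_3 */
	u32 outbound_scratch_pad_3;	/* 0x00BC, was outbound_scratch_pad_4 */
};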
static u32
megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&(regs)->outbound_scratch_pad_0);
}
static u32
megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&(regs)->outbound_scratch_pad_0);
}
{
u32 max_sectors_1;
u32 max_sectors_2, tmp_sectors, msix_enable;
- u32 scratch_pad_2, scratch_pad_3, scratch_pad_4, status_reg;
+ u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
struct megasas_ctrl_info *ctrl_info = NULL;
fusion = instance->ctrl_context;
if (instance->adapter_type == VENTURA_SERIES) {
- scratch_pad_3 =
- readl(&instance->reg_set->outbound_scratch_pad_3);
- instance->max_raid_mapsize = ((scratch_pad_3 >>
+ scratch_pad_2 =
+ readl(&instance->reg_set->outbound_scratch_pad_2);
+ instance->max_raid_mapsize = ((scratch_pad_2 >>
MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
MR_MAX_RAID_MAP_SIZE_MASK);
}
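The maximum RAID map size is carried as a bit-field of scratch pad 2 (new numbering). A standalone sketch of the decode; the mask and shift values are assumed to match MR_MAX_RAID_MAP_SIZE_* in megaraid_sas.h, and the register value is made up:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match megaraid_sas.h at the time of this patch. */
#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16
#define MR_MAX_RAID_MAP_SIZE_MASK         0x1FF

int main(void)
{
	uint32_t scratch_pad_2 = 0x00400000;	/* example register value */
	uint32_t max_raid_mapsize;

	/* Same decode as the driver: shift the field down, then mask. */
	max_raid_mapsize = (scratch_pad_2 >> MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT)
			   & MR_MAX_RAID_MAP_SIZE_MASK;
	printf("max_raid_mapsize = %u\n", max_raid_mapsize);
	return 0;
}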
if (msix_enable && !msix_disable) {
int irq_flags = PCI_IRQ_MSIX;
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = readl
+ (&instance->reg_set->outbound_scratch_pad_1);
/* Check max MSI-X vectors */
if (fusion) {
if (instance->adapter_type == THUNDERBOLT_SERIES) {
/* Thunderbolt Series*/
- instance->msix_vectors = (scratch_pad_2
+ instance->msix_vectors = (scratch_pad_1
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
} else {
- instance->msix_vectors = ((scratch_pad_2
+ instance->msix_vectors = ((scratch_pad_1
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
}
if (rdpq_enable)
- instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+ instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
fw_msix_count = instance->msix_vectors;
/* Save 1-15 reply post index address to local memory
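The vector-count decode above differs by controller generation, which is why the driver branches on adapter_type: Thunderbolt keeps the count in the low 5 bits, later fusion controllers in an 8-bit field at bit 14, both stored as (vectors - 1). A standalone sketch of both decodes, with the mask and shift values assumed from megaraid_sas_fusion.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match megaraid_sas_fusion.h. */
#define MR_MAX_REPLY_QUEUES_OFFSET           0x0000001F
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET       0x003FC000
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14

int main(void)
{
	uint32_t scratch_pad_1 = 0x00024007;	/* example register value */

	/* Thunderbolt: low 5 bits hold (vectors - 1). */
	uint32_t tb_vectors = (scratch_pad_1 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;

	/* Invader and later: 8-bit field at bit 14 holds (vectors - 1). */
	uint32_t ext_vectors = ((scratch_pad_1 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
				>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;

	printf("thunderbolt: %u, extended: %u\n", tb_vectors, ext_vectors);
	return 0;
}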
goto fail_init_adapter;
if (instance->adapter_type == VENTURA_SERIES) {
- scratch_pad_4 =
- readl(&instance->reg_set->outbound_scratch_pad_4);
- if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+ scratch_pad_3 =
+ readl(&instance->reg_set->outbound_scratch_pad_3);
+ if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
MR_DEFAULT_NVME_PAGE_SHIFT)
instance->nvme_page_size =
- (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+ (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
dev_info(&instance->pdev->dev,
"NVME page size\t: (%d)\n", instance->nvme_page_size);
{
u64 consistent_mask;
struct pci_dev *pdev;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
pdev = instance->pdev;
consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
* If 32 bit DMA mask fails, then try for 64 bit mask
* for FW capable of handling 64 bit DMA.
*/
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = readl
+ (&instance->reg_set->outbound_scratch_pad_1);
- if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
goto fail_set_dma_mask;
else if (dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(64)))
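The surrounding logic: if the 32-bit coherent mask cannot be set, the driver upgrades to a 64-bit mask only when the firmware advertises MR_CAN_HANDLE_64_BIT_DMA_OFFSET in scratch pad 1. A hedged kernel-style sketch of that fallback; it assumes the megaraid_sas.h definitions (struct megasas_instance, the capability bit) and simplifies the error paths into return codes:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Sketch only: assumes a megasas-like instance with ->reg_set and ->pdev. */
static int sketch_set_dma_mask(struct megasas_instance *instance)
{
	struct pci_dev *pdev = instance->pdev;
	u32 scratch_pad_1;

	/* Prefer a 32-bit mask; fall back to 64-bit only if the FW can do it. */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		scratch_pad_1 = readl(&instance->reg_set->outbound_scratch_pad_1);
		if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
			return -EINVAL;	/* FW cannot handle 64-bit DMA either */
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return -EINVAL;
	}
	return 0;
}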
reg_set = instance->reg_set;
- /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
+ /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
- readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+ readl(&instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
if (dual_qdepth_disable || !cur_max_fw_cmds)
cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
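Queue depth selection thus has two sources: pre-Ventura firmware publishes an extended queue depth in scratch pad 2, while Ventura only reports it in the FW status register (scratch pad 0). A standalone sketch of the selection; the 0x00FFFF width comes from the hunk above, and the register values are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int is_ventura = 0;
	int dual_qdepth_disable = 0;		/* module-parameter stand-in */
	uint32_t scratch_pad_2 = 0x00001000;	/* extended queue depth field */
	uint32_t scratch_pad_0 = 0x00000400;	/* fw status register */
	uint32_t cur_max_fw_cmds = 0;

	/* Pre-Ventura FW publishes an extended queue depth in scratch pad 2. */
	if (!is_ventura)
		cur_max_fw_cmds = scratch_pad_2 & 0x00FFFF;

	/* Otherwise (or when disabled) fall back to the FW status register. */
	if (dual_qdepth_disable || !cur_max_fw_cmds)
		cur_max_fw_cmds = scratch_pad_0 & 0x00FFFF;

	printf("cur_max_fw_cmds = %u\n", cur_max_fw_cmds);
	return 0;
}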
struct megasas_header *frame_hdr;
const char *sys_info;
MFI_CAPABILITIES *drv_ops;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
ktime_t time;
bool cur_fw_64bit_dma_capable;
cmd = fusion->ioc_init_cmd;
- scratch_pad_2 = readl
- (&instance->reg_set->outbound_scratch_pad_2);
+ scratch_pad_1 = readl
+ (&instance->reg_set->outbound_scratch_pad_1);
- cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
if (instance->adapter_type == INVADER_SERIES) {
cur_fw_64bit_dma_capable =
- (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+ (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
goto fail_fw_init;
}
- instance->fw_sync_cache_support = (scratch_pad_2 &
+ instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
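In this hunk scratch pad 1 doubles as a feature word: single bits advertise RDPQ mode, 64-bit DMA capability, and SYNC_CACHE support. A standalone sketch of the bit tests; the bit positions are assumed from megaraid_sas_fusion.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match megaraid_sas_fusion.h. */
#define MR_RDPQ_MODE_OFFSET             0x00800000
#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0x01000000

int main(void)
{
	uint32_t scratch_pad_1 = 0x03800000;	/* example register value */

	int cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
	int fw_64bit_dma  = (scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? 1 : 0;
	int fw_sync_cache = (scratch_pad_1 & MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;

	printf("rdpq=%d 64bit_dma=%d sync_cache=%d\n",
	       cur_rdpq_mode, fw_64bit_dma, fw_sync_cache);
	return 0;
}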
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u32 scratch_pad_2;
+ u32 scratch_pad_1;
int i = 0, count;
fusion = instance->ctrl_context;
megasas_configure_queue_sizes(instance);
- scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
- /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+ scratch_pad_1 = readl(&instance->reg_set->outbound_scratch_pad_1);
+ /* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
 * Firmware supports an extended IO chain frame which is 4 times larger
 * than the legacy Firmware frame.
* Legacy Firmware - Frame size is (8 * 128) = 1K
* 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
*/
- if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+ if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
instance->max_chain_frame_sz =
- ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+ ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
else
instance->max_chain_frame_sz =
- ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
+ ((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
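max_chain_frame_sz is decoded from scratch pad 1 as a 5-bit unit count at bit 5, multiplied by 128 bytes (legacy) or 512 bytes (1M-IO firmware) depending on the UNITS flag; the truncated line above then begins the clamp against MEGASAS_CHAIN_FRAME_SZ_MIN. A standalone sketch, with the macro values assumed from megaraid_sas.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match megaraid_sas.h. */
#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
#define MEGASAS_MAX_CHAIN_SIZE_MASK       0x3E0
#define MEGASAS_MAX_CHAIN_SHIFT           5
#define MEGASAS_256K_IO                   128
#define MEGASAS_1MB_IO                    (MEGASAS_256K_IO * 4)

int main(void)
{
	uint32_t scratch_pad_1 = 0x00400100;	/* example: UNITS flag + count 8 */
	uint32_t units = (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK)
			 >> MEGASAS_MAX_CHAIN_SHIFT;
	uint32_t max_chain_frame_sz;

	/* UNITS flag set: 1M-IO firmware, each unit is 4x larger. */
	if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		max_chain_frame_sz = units * MEGASAS_1MB_IO;
	else
		max_chain_frame_sz = units * MEGASAS_256K_IO;

	printf("max_chain_frame_sz = %u bytes\n", max_chain_frame_sz);
	return 0;
}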
static u32
megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
{
- return readl(&(regs)->outbound_scratch_pad);
+ return readl(&(regs)->outbound_scratch_pad_0);
}
"crash dump and initiating OCR\n");
status_reg |= MFI_STATE_CRASH_DUMP_DONE;
writel(status_reg,
- &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
return;
}
megasas_alloc_host_crash_buffer(instance);
status_reg &= ~MFI_STATE_DMADONE;
}
- writel(status_reg, &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
status_reg = instance->instancet->read_fw_status_reg(
instance->fw_crash_buffer_size = instance->drv_buf_index;
instance->fw_crash_state = AVAILABLE;
instance->drv_buf_index = 0;
- writel(status_reg, &instance->reg_set->outbound_scratch_pad);
- readl(&instance->reg_set->outbound_scratch_pad);
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
+ readl(&instance->reg_set->outbound_scratch_pad_0);
if (!partial_copy)
megasas_reset_fusion(instance->host, 0);
}
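Each status update in the crash-dump handshake above is a writel to scratch pad 0 followed immediately by a readl of the same register; the read forces the posted MMIO write out to the controller before the driver sleeps or proceeds. A hedged kernel-style sketch of the idiom, assuming the driver's instance layout:

#include <linux/io.h>

/* Sketch only: the write/readback idiom used above to flush a posted
 * MMIO write to outbound_scratch_pad_0 before continuing.
 */
static void sketch_post_status(struct megasas_instance *instance, u32 status_reg)
{
	writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
	/* Read back the same register so the write reaches the HBA now. */
	readl(&instance->reg_set->outbound_scratch_pad_0);
}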