提交 bd756dde 编写于 作者: S Shun Fu 提交者: James Bottomley

[SCSI] mvumi: Add support for Marvell SAS/SATA RAID-on-Chip(ROC) 88RC9580

[jejb: fix up for spelling correction patch]
Signed-off-by: Shun Fu <fushun@marvell.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
上级 08bc166d
...@@ -35,10 +35,12 @@ ...@@ -35,10 +35,12 @@
#include <linux/io.h> #include <linux/io.h>
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> #include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h> #include <scsi/scsi_eh.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/kthread.h>
#include "mvumi.h" #include "mvumi.h"
...@@ -48,6 +50,7 @@ MODULE_DESCRIPTION("Marvell UMI Driver"); ...@@ -48,6 +50,7 @@ MODULE_DESCRIPTION("Marvell UMI Driver");
static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) },
{ 0 } { 0 }
}; };
...@@ -118,7 +121,7 @@ static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array) ...@@ -118,7 +121,7 @@ static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
enum resource_type type, unsigned int size) enum resource_type type, unsigned int size)
{ {
struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL); struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
if (!res) { if (!res) {
dev_err(&mhba->pdev->dev, dev_err(&mhba->pdev->dev,
...@@ -128,7 +131,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, ...@@ -128,7 +131,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
switch (type) { switch (type) {
case RESOURCE_CACHED_MEMORY: case RESOURCE_CACHED_MEMORY:
res->virt_addr = kzalloc(size, GFP_KERNEL); res->virt_addr = kzalloc(size, GFP_ATOMIC);
if (!res->virt_addr) { if (!res->virt_addr) {
dev_err(&mhba->pdev->dev, dev_err(&mhba->pdev->dev,
"unable to allocate memory,size = %d.\n", size); "unable to allocate memory,size = %d.\n", size);
...@@ -222,11 +225,11 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, ...@@ -222,11 +225,11 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
m_sg->flags = 0; m_sg->flags = 0;
m_sg->size = cpu_to_le32(sg_dma_len(&sg[i])); sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
if ((i + 1) == *sg_count) if ((i + 1) == *sg_count)
m_sg->flags |= SGD_EOT; m_sg->flags |= 1U << mhba->eot_flag;
m_sg++; sgd_inc(mhba, m_sg);
} }
} else { } else {
scmd->SCp.dma_handle = scsi_bufflen(scmd) ? scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
...@@ -237,8 +240,8 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, ...@@ -237,8 +240,8 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
busaddr = scmd->SCp.dma_handle; busaddr = scmd->SCp.dma_handle;
m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
m_sg->flags = SGD_EOT; m_sg->flags = 1U << mhba->eot_flag;
m_sg->size = cpu_to_le32(scsi_bufflen(scmd)); sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
*sg_count = 1; *sg_count = 1;
} }
...@@ -267,8 +270,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, ...@@ -267,8 +270,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
m_sg->flags = SGD_EOT; m_sg->flags = 1U << mhba->eot_flag;
m_sg->size = cpu_to_le32(size); sgd_setsz(mhba, m_sg, cpu_to_le32(size));
return 0; return 0;
} }
...@@ -285,7 +288,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, ...@@ -285,7 +288,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
} }
INIT_LIST_HEAD(&cmd->queue_pointer); INIT_LIST_HEAD(&cmd->queue_pointer);
cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); cmd->frame = pci_alloc_consistent(mhba->pdev,
mhba->ib_max_size, &cmd->frame_phys);
if (!cmd->frame) { if (!cmd->frame) {
dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
" frame,size = %d.\n", mhba->ib_max_size); " frame,size = %d.\n", mhba->ib_max_size);
...@@ -297,7 +301,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, ...@@ -297,7 +301,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
dev_err(&mhba->pdev->dev, "failed to allocate memory" dev_err(&mhba->pdev->dev, "failed to allocate memory"
" for internal frame\n"); " for internal frame\n");
kfree(cmd->frame); pci_free_consistent(mhba->pdev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd); kfree(cmd);
return NULL; return NULL;
} }
...@@ -317,7 +322,7 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, ...@@ -317,7 +322,7 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
if (cmd && cmd->frame) { if (cmd && cmd->frame) {
if (cmd->frame->sg_counts) { if (cmd->frame->sg_counts) {
m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
size = m_sg->size; sgd_getsz(mhba, m_sg, size);
phy_addr = (dma_addr_t) m_sg->baseaddr_l | phy_addr = (dma_addr_t) m_sg->baseaddr_l |
(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
...@@ -325,7 +330,8 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, ...@@ -325,7 +330,8 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
pci_free_consistent(mhba->pdev, size, cmd->data_buf, pci_free_consistent(mhba->pdev, size, cmd->data_buf,
phy_addr); phy_addr);
} }
kfree(cmd->frame); pci_free_consistent(mhba->pdev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd); kfree(cmd);
} }
} }
...@@ -374,7 +380,8 @@ static void mvumi_free_cmds(struct mvumi_hba *mhba) ...@@ -374,7 +380,8 @@ static void mvumi_free_cmds(struct mvumi_hba *mhba)
cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
queue_pointer); queue_pointer);
list_del(&cmd->queue_pointer); list_del(&cmd->queue_pointer);
kfree(cmd->frame); if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
kfree(cmd->frame);
kfree(cmd); kfree(cmd);
} }
} }
...@@ -396,7 +403,12 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba) ...@@ -396,7 +403,12 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
INIT_LIST_HEAD(&cmd->queue_pointer); INIT_LIST_HEAD(&cmd->queue_pointer);
list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
cmd->frame_phys = mhba->ib_frame_phys
+ i * mhba->ib_max_size;
} else
cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
if (!cmd->frame) if (!cmd->frame)
goto err_exit; goto err_exit;
} }
...@@ -409,48 +421,71 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba) ...@@ -409,48 +421,71 @@ static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
queue_pointer); queue_pointer);
list_del(&cmd->queue_pointer); list_del(&cmd->queue_pointer);
kfree(cmd->frame); if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
kfree(cmd->frame);
kfree(cmd); kfree(cmd);
} }
return -ENOMEM; return -ENOMEM;
} }
static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{ {
unsigned int ib_rp_reg, cur_ib_entry; unsigned int ib_rp_reg;
struct mvumi_hw_regs *regs = mhba->regs;
ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
((ib_rp_reg & regs->cl_pointer_toggle)
!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
return 0;
}
if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
return -1; return 0;
} else {
return mhba->max_io - atomic_read(&mhba->fw_outstanding);
} }
ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER); }
if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) == static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
(mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) && {
((ib_rp_reg & CL_POINTER_TOGGLE) != unsigned int count;
(mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) { if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); return 0;
return -1; count = ioread32(mhba->ib_shadow);
} if (count == 0xffff)
return 0;
return count;
}
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
unsigned int cur_ib_entry;
cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK; cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
cur_ib_entry++; cur_ib_entry++;
if (cur_ib_entry >= mhba->list_num_io) { if (cur_ib_entry >= mhba->list_num_io) {
cur_ib_entry -= mhba->list_num_io; cur_ib_entry -= mhba->list_num_io;
mhba->ib_cur_slot ^= CL_POINTER_TOGGLE; mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
}
mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
*ib_entry = mhba->ib_list + cur_ib_entry *
sizeof(struct mvumi_dyn_list_entry);
} else {
*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
} }
mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
atomic_inc(&mhba->fw_outstanding); atomic_inc(&mhba->fw_outstanding);
return 0;
} }
static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{ {
iowrite32(0xfff, mhba->ib_shadow); iowrite32(0xffff, mhba->ib_shadow);
iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER); iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
} }
static char mvumi_check_ob_frame(struct mvumi_hba *mhba, static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
...@@ -480,31 +515,59 @@ static char mvumi_check_ob_frame(struct mvumi_hba *mhba, ...@@ -480,31 +515,59 @@ static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
return 0; return 0;
} }
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
unsigned int *cur_obf, unsigned int *assign_obf_end)
{ {
unsigned int ob_write_reg, ob_write_shadow_reg; unsigned int ob_write, ob_write_shadow;
unsigned int cur_obf, assign_obf_end, i; struct mvumi_hw_regs *regs = mhba->regs;
struct mvumi_ob_data *ob_data;
struct mvumi_rsp_frame *p_outb_frame;
do { do {
ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER); ob_write = ioread32(regs->outb_copy_pointer);
ob_write_shadow_reg = ioread32(mhba->ob_shadow); ob_write_shadow = ioread32(mhba->ob_shadow);
} while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg); } while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK; *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK; *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
if ((ob_write_reg & CL_POINTER_TOGGLE) != if ((ob_write & regs->cl_pointer_toggle) !=
(mhba->ob_cur_slot & CL_POINTER_TOGGLE)) { (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
assign_obf_end += mhba->list_num_io; *assign_obf_end += mhba->list_num_io;
} }
return 0;
}
/*
 * Compute the range of newly-completed outbound (response) list entries
 * for the 88RC9580.  *cur_obf is the driver's current slot index and
 * *assign_obf_end the firmware's copy-pointer slot; the end index is
 * unwrapped past list_num_io when the hardware pointer has wrapped
 * around the circular list.  Returns 0 when entries are pending,
 * -1 when the outbound list is empty.
 */
static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* NOTE(review): outb_read_pointer is read and the value immediately
	 * overwritten by outb_copy_pointer -- presumably a required dummy
	 * read to latch the copy pointer; confirm against 9580 hardware
	 * documentation. */
	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
unsigned int cur_obf, assign_obf_end, i;
struct mvumi_ob_data *ob_data;
struct mvumi_rsp_frame *p_outb_frame;
struct mvumi_hw_regs *regs = mhba->regs;
if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
return;
for (i = (assign_obf_end - cur_obf); i != 0; i--) { for (i = (assign_obf_end - cur_obf); i != 0; i--) {
cur_obf++; cur_obf++;
if (cur_obf >= mhba->list_num_io) { if (cur_obf >= mhba->list_num_io) {
cur_obf -= mhba->list_num_io; cur_obf -= mhba->list_num_io;
mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
} }
p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
...@@ -528,7 +591,7 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) ...@@ -528,7 +591,7 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
ob_data = NULL; ob_data = NULL;
if (cur_obf == 0) { if (cur_obf == 0) {
cur_obf = mhba->list_num_io - 1; cur_obf = mhba->list_num_io - 1;
mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
} else } else
cur_obf -= 1; cur_obf -= 1;
break; break;
...@@ -539,18 +602,20 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) ...@@ -539,18 +602,20 @@ static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
list_add_tail(&ob_data->list, &mhba->free_ob_list); list_add_tail(&ob_data->list, &mhba->free_ob_list);
} }
mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK; mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK); mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER); iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
} }
static void mvumi_reset(void *regs) static void mvumi_reset(struct mvumi_hba *mhba)
{ {
iowrite32(0, regs + CPU_ENPOINTA_MASK_REG); struct mvumi_hw_regs *regs = mhba->regs;
if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
iowrite32(0, regs->enpointa_mask_reg);
if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
return; return;
iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG); iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
} }
static unsigned char mvumi_start(struct mvumi_hba *mhba); static unsigned char mvumi_start(struct mvumi_hba *mhba);
...@@ -558,7 +623,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba); ...@@ -558,7 +623,7 @@ static unsigned char mvumi_start(struct mvumi_hba *mhba);
static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{ {
mhba->fw_state = FW_STATE_ABORT; mhba->fw_state = FW_STATE_ABORT;
mvumi_reset(mhba->mmio); mvumi_reset(mhba);
if (mvumi_start(mhba)) if (mvumi_start(mhba))
return FAILED; return FAILED;
...@@ -566,6 +631,98 @@ static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) ...@@ -566,6 +631,98 @@ static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
return SUCCESS; return SUCCESS;
} }
/*
 * Wait for the firmware to report HANDSHAKE_READYSTATE in the
 * arm_to_pciea_msg1 register after unmasking endpoint-A interrupts.
 * A DRBL_MU_RESET doorbell is re-issued on every 500 ms poll
 * iteration; gives up after FW_MAX_DELAY seconds.
 * Returns SUCCESS when the firmware becomes ready, FAILED on timeout.
 */
static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;

	before = jiffies;
	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}
		msleep(500);
		rmb();	/* ensure a fresh read of the status register */
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}
/*
 * Save all PCI base address registers (config-space offsets 0x10,
 * 0x14, ...) into mhba->pci_base so they can be reprogrammed after a
 * chip-level reset wipes the device's PCI configuration.
 */
static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}
/*
 * Write back the BAR values previously captured by
 * mvumi_backup_bar_addr().  Entries that were read back as zero are
 * skipped (BAR unset/unused).
 */
static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
					mhba->pci_base[i]);
	}
}
/*
 * Enable PCI bus mastering on @pdev and configure its DMA mask:
 * when the build supports 64-bit DMA (IS_DMA64) try a 64-bit mask
 * first and fall back to 32-bit on failure, otherwise go straight
 * to 32-bit.  Returns 0 on success, non-zero if no usable DMA mask
 * could be set.
 */
static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
{
	unsigned int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	return ret;
}
/*
 * Host-reset handler for the 88RC9580: trigger a controller reset via
 * the reset_enable/reset_request register pair, re-enable the PCI
 * device, restore the saved BARs, then wait for firmware readiness and
 * for outstanding commands to be flushed.
 * Returns SUCCESS or FAILED (SCSI EH return codes).
 */
static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	/* NOTE(review): the 0x0/0xf then 0x10/0x10 write pairs follow the
	 * vendor reset sequence; the individual bit meanings are not
	 * visible here -- confirm against the 9580 datasheet. */
	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);

	/* The reset drops PCI state: bounce the device and reprogram it. */
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}
/*
 * Host-reset handler for the 9143: no chip-level reset sequence is
 * required, only draining/aborting outstanding firmware commands.
 */
static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}
static int mvumi_host_reset(struct scsi_cmnd *scmd) static int mvumi_host_reset(struct scsi_cmnd *scmd)
{ {
struct mvumi_hba *mhba; struct mvumi_hba *mhba;
...@@ -575,7 +732,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd) ...@@ -575,7 +732,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd)
scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
scmd->serial_number, scmd->cmnd[0], scmd->retries); scmd->serial_number, scmd->cmnd[0], scmd->retries);
return mvumi_wait_for_outstanding(mhba); return mhba->instancet->reset_host(mhba);
} }
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
...@@ -628,7 +785,9 @@ static void mvumi_release_fw(struct mvumi_hba *mhba) ...@@ -628,7 +785,9 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
mvumi_free_cmds(mhba); mvumi_free_cmds(mhba);
mvumi_release_mem_resource(mhba); mvumi_release_mem_resource(mhba);
mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
kfree(mhba->handshake_page); pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
kfree(mhba->regs);
pci_release_regions(mhba->pdev); pci_release_regions(mhba->pdev);
} }
...@@ -665,6 +824,7 @@ get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); ...@@ -665,6 +824,7 @@ get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
frame->cdb_length = MAX_COMMAND_SIZE; frame->cdb_length = MAX_COMMAND_SIZE;
memset(frame->cdb, 0, MAX_COMMAND_SIZE); memset(frame->cdb, 0, MAX_COMMAND_SIZE);
frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
frame->cdb[1] = CDB_CORE_MODULE;
frame->cdb[2] = CDB_CORE_SHUTDOWN; frame->cdb[2] = CDB_CORE_SHUTDOWN;
mvumi_issue_blocked_cmd(mhba, cmd); mvumi_issue_blocked_cmd(mhba, cmd);
...@@ -695,7 +855,7 @@ mvumi_calculate_checksum(struct mvumi_hs_header *p_header, ...@@ -695,7 +855,7 @@ mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
return ret; return ret;
} }
void mvumi_hs_build_page(struct mvumi_hba *mhba, static void mvumi_hs_build_page(struct mvumi_hba *mhba,
struct mvumi_hs_header *hs_header) struct mvumi_hs_header *hs_header)
{ {
struct mvumi_hs_page2 *hs_page2; struct mvumi_hs_page2 *hs_page2;
...@@ -710,6 +870,8 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba, ...@@ -710,6 +870,8 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
hs_header->frame_length = sizeof(*hs_page2) - 4; hs_header->frame_length = sizeof(*hs_page2) - 4;
memset(hs_header->frame_content, 0, hs_header->frame_length); memset(hs_header->frame_content, 0, hs_header->frame_length);
hs_page2->host_type = 3; /* 3 mean linux*/ hs_page2->host_type = 3; /* 3 mean linux*/
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
hs_page2->host_cap = 0x08;/* host dynamic source mode */
hs_page2->host_ver.ver_major = VER_MAJOR; hs_page2->host_ver.ver_major = VER_MAJOR;
hs_page2->host_ver.ver_minor = VER_MINOR; hs_page2->host_ver.ver_minor = VER_MINOR;
hs_page2->host_ver.ver_oem = VER_OEM; hs_page2->host_ver.ver_oem = VER_OEM;
...@@ -745,8 +907,18 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba, ...@@ -745,8 +907,18 @@ void mvumi_hs_build_page(struct mvumi_hba *mhba,
hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
hs_page4->ib_entry_size = mhba->ib_max_size_setting; hs_page4->ib_entry_size = mhba->ib_max_size_setting;
hs_page4->ob_entry_size = mhba->ob_max_size_setting; hs_page4->ob_entry_size = mhba->ob_max_size_setting;
hs_page4->ob_depth = mhba->list_num_io; if (mhba->hba_capability
hs_page4->ib_depth = mhba->list_num_io; & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
hs_page4->ob_depth = find_first_bit((unsigned long *)
&mhba->list_num_io,
BITS_PER_LONG);
hs_page4->ib_depth = find_first_bit((unsigned long *)
&mhba->list_num_io,
BITS_PER_LONG);
} else {
hs_page4->ob_depth = (u8) mhba->list_num_io;
hs_page4->ib_depth = (u8) mhba->list_num_io;
}
hs_header->checksum = mvumi_calculate_checksum(hs_header, hs_header->checksum = mvumi_calculate_checksum(hs_header,
hs_header->frame_length); hs_header->frame_length);
break; break;
...@@ -774,8 +946,11 @@ static int mvumi_init_data(struct mvumi_hba *mhba) ...@@ -774,8 +946,11 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
return 0; return 0;
tmp_size = mhba->ib_max_size * mhba->max_io; tmp_size = mhba->ib_max_size * mhba->max_io;
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
tmp_size += 128 + mhba->ob_max_size * mhba->max_io; tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
tmp_size += 8 + sizeof(u32) + 16; tmp_size += 8 + sizeof(u32)*2 + 16;
res_mgnt = mvumi_alloc_mem_resource(mhba, res_mgnt = mvumi_alloc_mem_resource(mhba,
RESOURCE_UNCACHED_MEMORY, tmp_size); RESOURCE_UNCACHED_MEMORY, tmp_size);
...@@ -793,24 +968,41 @@ static int mvumi_init_data(struct mvumi_hba *mhba) ...@@ -793,24 +968,41 @@ static int mvumi_init_data(struct mvumi_hba *mhba)
v += offset; v += offset;
mhba->ib_list = v; mhba->ib_list = v;
mhba->ib_list_phys = p; mhba->ib_list_phys = p;
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
mhba->ib_frame = v;
mhba->ib_frame_phys = p;
}
v += mhba->ib_max_size * mhba->max_io; v += mhba->ib_max_size * mhba->max_io;
p += mhba->ib_max_size * mhba->max_io; p += mhba->ib_max_size * mhba->max_io;
/* ib shadow */ /* ib shadow */
offset = round_up(p, 8) - p; offset = round_up(p, 8) - p;
p += offset; p += offset;
v += offset; v += offset;
mhba->ib_shadow = v; mhba->ib_shadow = v;
mhba->ib_shadow_phys = p; mhba->ib_shadow_phys = p;
p += sizeof(u32); p += sizeof(u32)*2;
v += sizeof(u32); v += sizeof(u32)*2;
/* ob shadow */ /* ob shadow */
offset = round_up(p, 8) - p; if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
p += offset; offset = round_up(p, 8) - p;
v += offset; p += offset;
mhba->ob_shadow = v; v += offset;
mhba->ob_shadow_phys = p; mhba->ob_shadow = v;
p += 8; mhba->ob_shadow_phys = p;
v += 8; p += 8;
v += 8;
} else {
offset = round_up(p, 4) - p;
p += offset;
v += offset;
mhba->ob_shadow = v;
mhba->ob_shadow_phys = p;
p += 4;
v += 4;
}
/* ob list */ /* ob list */
offset = round_up(p, 128) - p; offset = round_up(p, 128) - p;
...@@ -902,6 +1094,12 @@ static int mvumi_hs_process_page(struct mvumi_hba *mhba, ...@@ -902,6 +1094,12 @@ static int mvumi_hs_process_page(struct mvumi_hba *mhba,
dev_dbg(&mhba->pdev->dev, "FW version:%d\n", dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
hs_page1->fw_ver.ver_build); hs_page1->fw_ver.ver_build);
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
mhba->eot_flag = 22;
else
mhba->eot_flag = 27;
if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
break; break;
default: default:
dev_err(&mhba->pdev->dev, "handshake: page code error\n"); dev_err(&mhba->pdev->dev, "handshake: page code error\n");
...@@ -923,12 +1121,12 @@ static int mvumi_handshake(struct mvumi_hba *mhba) ...@@ -923,12 +1121,12 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
{ {
unsigned int hs_state, tmp, hs_fun; unsigned int hs_state, tmp, hs_fun;
struct mvumi_hs_header *hs_header; struct mvumi_hs_header *hs_header;
void *regs = mhba->mmio; struct mvumi_hw_regs *regs = mhba->regs;
if (mhba->fw_state == FW_STATE_STARTING) if (mhba->fw_state == FW_STATE_STARTING)
hs_state = HS_S_START; hs_state = HS_S_START;
else { else {
tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0); tmp = ioread32(regs->arm_to_pciea_msg0);
hs_state = HS_GET_STATE(tmp); hs_state = HS_GET_STATE(tmp);
dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
...@@ -943,21 +1141,20 @@ static int mvumi_handshake(struct mvumi_hba *mhba) ...@@ -943,21 +1141,20 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
mhba->fw_state = FW_STATE_HANDSHAKING; mhba->fw_state = FW_STATE_HANDSHAKING;
HS_SET_STATUS(hs_fun, HS_STATUS_OK); HS_SET_STATUS(hs_fun, HS_STATUS_OK);
HS_SET_STATE(hs_fun, HS_S_RESET); HS_SET_STATE(hs_fun, HS_S_RESET);
iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1); iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); iowrite32(hs_fun, regs->pciea_to_arm_msg0);
iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
break; break;
case HS_S_RESET: case HS_S_RESET:
iowrite32(lower_32_bits(mhba->handshake_page_phys), iowrite32(lower_32_bits(mhba->handshake_page_phys),
regs + CPU_PCIEA_TO_ARM_MSG1); regs->pciea_to_arm_msg1);
iowrite32(upper_32_bits(mhba->handshake_page_phys), iowrite32(upper_32_bits(mhba->handshake_page_phys),
regs + CPU_ARM_TO_PCIEA_MSG1); regs->arm_to_pciea_msg1);
HS_SET_STATUS(hs_fun, HS_STATUS_OK); HS_SET_STATUS(hs_fun, HS_STATUS_OK);
HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); iowrite32(hs_fun, regs->pciea_to_arm_msg0);
iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
break; break;
case HS_S_PAGE_ADDR: case HS_S_PAGE_ADDR:
...@@ -997,30 +1194,37 @@ static int mvumi_handshake(struct mvumi_hba *mhba) ...@@ -997,30 +1194,37 @@ static int mvumi_handshake(struct mvumi_hba *mhba)
HS_SET_STATE(hs_fun, HS_S_END); HS_SET_STATE(hs_fun, HS_S_END);
HS_SET_STATUS(hs_fun, HS_STATUS_OK); HS_SET_STATUS(hs_fun, HS_STATUS_OK);
iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); iowrite32(hs_fun, regs->pciea_to_arm_msg0);
iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
break; break;
case HS_S_END: case HS_S_END:
/* Set communication list ISR */ /* Set communication list ISR */
tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG); tmp = ioread32(regs->enpointa_mask_reg);
tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR; tmp |= regs->int_comaout | regs->int_comaerr;
iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); iowrite32(tmp, regs->enpointa_mask_reg);
iowrite32(mhba->list_num_io, mhba->ib_shadow); iowrite32(mhba->list_num_io, mhba->ib_shadow);
/* Set InBound List Available count shadow */ /* Set InBound List Available count shadow */
iowrite32(lower_32_bits(mhba->ib_shadow_phys), iowrite32(lower_32_bits(mhba->ib_shadow_phys),
regs + CLA_INB_AVAL_COUNT_BASEL); regs->inb_aval_count_basel);
iowrite32(upper_32_bits(mhba->ib_shadow_phys), iowrite32(upper_32_bits(mhba->ib_shadow_phys),
regs + CLA_INB_AVAL_COUNT_BASEH); regs->inb_aval_count_baseh);
/* Set OutBound List Available count shadow */ if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE, /* Set OutBound List Available count shadow */
mhba->ob_shadow); iowrite32((mhba->list_num_io-1) |
iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0); regs->cl_pointer_toggle,
iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4); mhba->ob_shadow);
iowrite32(lower_32_bits(mhba->ob_shadow_phys),
regs->outb_copy_basel);
iowrite32(upper_32_bits(mhba->ob_shadow_phys),
regs->outb_copy_baseh);
}
mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; mhba->ib_cur_slot = (mhba->list_num_io - 1) |
mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; regs->cl_pointer_toggle;
mhba->ob_cur_slot = (mhba->list_num_io - 1) |
regs->cl_pointer_toggle;
mhba->fw_state = FW_STATE_STARTED; mhba->fw_state = FW_STATE_STARTED;
break; break;
...@@ -1040,7 +1244,7 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) ...@@ -1040,7 +1244,7 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
before = jiffies; before = jiffies;
mvumi_handshake(mhba); mvumi_handshake(mhba);
do { do {
isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio); isr_status = mhba->instancet->read_fw_status_reg(mhba);
if (mhba->fw_state == FW_STATE_STARTED) if (mhba->fw_state == FW_STATE_STARTED)
return 0; return 0;
...@@ -1062,16 +1266,15 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) ...@@ -1062,16 +1266,15 @@ static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{ {
void *regs = mhba->mmio;
unsigned int tmp; unsigned int tmp;
unsigned long before; unsigned long before;
before = jiffies; before = jiffies;
tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
if (tmp != HANDSHAKE_READYSTATE) if (tmp != HANDSHAKE_READYSTATE)
iowrite32(DRBL_MU_RESET, iowrite32(DRBL_MU_RESET,
regs + CPU_PCIEA_TO_ARM_DRBL_REG); mhba->regs->pciea_to_arm_drbl_reg);
if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
dev_err(&mhba->pdev->dev, dev_err(&mhba->pdev->dev,
"invalid signature [0x%x].\n", tmp); "invalid signature [0x%x].\n", tmp);
...@@ -1079,7 +1282,7 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) ...@@ -1079,7 +1282,7 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
} }
usleep_range(1000, 2000); usleep_range(1000, 2000);
rmb(); rmb();
tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
} }
mhba->fw_state = FW_STATE_STARTING; mhba->fw_state = FW_STATE_STARTING;
...@@ -1100,15 +1303,17 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) ...@@ -1100,15 +1303,17 @@ static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
static unsigned char mvumi_start(struct mvumi_hba *mhba) static unsigned char mvumi_start(struct mvumi_hba *mhba)
{ {
void *regs = mhba->mmio;
unsigned int tmp; unsigned int tmp;
struct mvumi_hw_regs *regs = mhba->regs;
/* clear Door bell */ /* clear Door bell */
tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); tmp = ioread32(regs->arm_to_pciea_drbl_reg);
iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG); iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA; tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); iowrite32(tmp, regs->enpointa_mask_reg);
msleep(100);
if (mvumi_check_handshake(mhba)) if (mvumi_check_handshake(mhba))
return -1; return -1;
...@@ -1166,6 +1371,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, ...@@ -1166,6 +1371,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
cmd->scmd->scsi_done(scmd); cmd->scmd->scsi_done(scmd);
mvumi_return_cmd(mhba, cmd); mvumi_return_cmd(mhba, cmd);
} }
static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
struct mvumi_cmd *cmd, struct mvumi_cmd *cmd,
struct mvumi_rsp_frame *ob_frame) struct mvumi_rsp_frame *ob_frame)
...@@ -1210,6 +1416,304 @@ static void mvumi_show_event(struct mvumi_hba *mhba, ...@@ -1210,6 +1416,304 @@ static void mvumi_show_event(struct mvumi_hba *mhba,
} }
} }
/*
 * mvumi_handle_hotplug - attach or detach one SCSI device on channel 0.
 * @mhba:	adapter soft state
 * @devid:	target id of the device (lun is always 0)
 * @status:	DEVICE_ONLINE to add the device, DEVICE_OFFLINE to remove it
 *
 * Returns 0 when the requested add/remove was performed, -1 when the
 * device was already in (or already absent from) the mid-layer.
 */
static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
{
	struct scsi_device *sdev;
	int ret = -1;

	switch (status) {
	case DEVICE_OFFLINE:
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (!sdev) {
			/* nothing to detach: the mid-layer never saw it */
			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
								devid);
			break;
		}
		dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
								sdev->id, 0);
		scsi_remove_device(sdev);
		/* drop the reference taken by scsi_device_lookup() */
		scsi_device_put(sdev);
		ret = 0;
		break;
	case DEVICE_ONLINE:
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (sdev) {
			/* already attached: just release the lookup ref */
			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
								0, devid, 0);
			scsi_device_put(sdev);
			break;
		}
		scsi_add_device(mhba->shost, 0, devid, 0);
		dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
								devid, 0);
		ret = 0;
		break;
	}
	return ret;
}
/*
 * mvumi_inquiry - send a standard INQUIRY to target @id and derive its WWID.
 * @mhba:	adapter soft state
 * @id:		target id to probe (channel 0, lun 0)
 * @cmd:	optional pre-allocated internal command with a >= 64 byte data
 *		buffer; when NULL a temporary one is created and freed here
 *
 * Returns the device's WWID on success, 0 when the target did not answer
 * (or answered with a bad status).  On 9143 hardware the WWID is synthesized
 * as id + 1; otherwise it is read from the INQUIRY response payload.
 */
static u64 mvumi_inquiry(struct mvumi_hba *mhba,
		unsigned int id, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *frame;
	u64 wwid = 0;
	int cmd_alloc = 0;
	int data_buf_len = 64;

	if (!cmd) {
		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
		if (cmd)
			cmd_alloc = 1;	/* remember to free it on exit */
		else
			return 0;
	} else {
		/* caller-supplied buffer may hold a stale response */
		memset(cmd->data_buf, 0, data_buf_len);
	}
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);

	/* build a 6-byte INQUIRY CDB requesting MVUMI_INQUIRY_LENGTH bytes */
	frame = cmd->frame;
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;	/* allocation length */

	/* synchronous: blocks until the firmware completes the command */
	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			/* 9143 firmware carries no UUID; fabricate a stable one */
			wwid = id + 1;
		else
			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
	} else {
		wwid = 0;
	}
	if (cmd_alloc)
		mvumi_delete_internal_cmd(mhba, cmd);

	return wwid;
}
/*
 * mvumi_detach_devices - tear down every device known to the driver.
 *
 * Called on adapter removal: removes all attached disks from the SCSI
 * mid-layer, frees both internal device lists, and finally removes the
 * 9580's trailing virtual device (target id max_target_id - 1).
 * All list manipulation happens under device_lock.
 */
static void mvumi_detach_devices(struct mvumi_hba *mhba)
{
	struct mvumi_device *mv_dev = NULL , *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach Hard Disk */
	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}
	/* devices probed but never attached to the host: just free them */
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}

	/* detach virtual device */
	/* NOTE: the following if-body is deliberately only the lookup; the
	 * sdev check below relies on sdev staying NULL for non-9580 chips. */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_unlock(&mhba->device_lock);
}
/*
 * mvumi_rescan_devices - ask the mid-layer to re-scan one attached target.
 * @mhba:	adapter soft state
 * @id:		target id on channel 0, lun 0
 *
 * Silently does nothing when the device is not currently attached.
 */
static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
{
	struct scsi_device *sdev = scsi_device_lookup(mhba->shost, 0, id, 0);

	if (!sdev)
		return;
	scsi_rescan_device(&sdev->sdev_gendev);
	scsi_device_put(sdev);	/* balance the lookup reference */
}
/*
 * mvumi_match_devices - look up @wwid among the devices attached to the host.
 * @mhba:	adapter soft state
 * @id:		target id the firmware currently reports for this wwid
 * @wwid:	world-wide id to search for
 *
 * Returns 1 when the wwid is already attached under the same id (and, on
 * 9143 hardware, triggers a mid-layer rescan of it), -1 when the wwid is
 * attached under a *different* id (an inconsistency the caller must treat
 * as fatal), and 0 when the wwid is not attached at all.
 */
static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
{
	struct mvumi_device *mv_dev;

	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
		if (mv_dev->wwid != wwid)
			continue;
		if (mv_dev->id != id) {
			/* same device, conflicting ids: refuse to continue */
			dev_err(&mhba->pdev->dev,
				"%s has same wwid[%llx] ,"
				" but different id[%d %d]\n",
				__func__, mv_dev->wwid, mv_dev->id, id);
			return -1;
		}
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			mvumi_rescan_devices(mhba, id);
		return 1;
	}
	return 0;
}
/*
 * mvumi_remove_devices - detach and free every tracked device with target @id.
 * @mhba:	adapter soft state
 * @id:		target id to remove from the host device list
 *
 * Removes matching entries from the mid-layer, unlinks them from
 * shost_dev_list, and frees them.  No-op when @id is not in the list.
 */
static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
{
	struct mvumi_device *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &mhba->shost_dev_list, list) {
		if (entry->id != id)
			continue;
		dev_dbg(&mhba->pdev->dev,
			"detach device(0:%d:0) wwid(%llx) from HOST\n",
			entry->id, entry->wwid);
		mvumi_handle_hotplug(mhba, entry->id, DEVICE_OFFLINE);
		list_del_init(&entry->list);
		kfree(entry);
	}
}
/*
 * mvumi_probe_devices - walk every possible target id and reconcile the
 * driver's device lists with what the firmware reports.
 *
 * For each id an INQUIRY is issued: a silent target is removed from the
 * host list; a responding target is either confirmed (already attached
 * with the same wwid), flagged as an inconsistency (same wwid, different
 * id -> abort with -1), or queued on mhba_dev_list for later attachment.
 *
 * Returns 0 on success, -1 on allocation failure or wwid/id mismatch.
 *
 * Fix vs. original: the early "return -1" on a wwid/id mismatch leaked the
 * internal command (and its DMA buffer); all exits now funnel through the
 * single cleanup path that frees it.
 */
static int mvumi_probe_devices(struct mvumi_hba *mhba)
{
	int id, maxid;
	u64 wwid = 0;
	struct mvumi_device *mv_dev = NULL;
	struct mvumi_cmd *cmd = NULL;
	int found = 0;
	int ret = 0;

	cmd = mvumi_create_internal_cmd(mhba, 64);
	if (!cmd)
		return -1;

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
		maxid = mhba->max_target_id;
	else
		/* last id on non-9143 chips is the virtual device: skip it */
		maxid = mhba->max_target_id - 1;

	for (id = 0; id < maxid; id++) {
		wwid = mvumi_inquiry(mhba, id, cmd);
		if (!wwid) {
			/* device no response, remove it */
			mvumi_remove_devices(mhba, id);
			continue;
		}
		/* device response, add it */
		found = mvumi_match_devices(mhba, id, wwid);
		if (found == -1) {
			/* wwid seen under a different id: fatal */
			ret = -1;
			break;
		}
		if (found)
			continue;	/* already attached under this id */

		/* new wwid at this id: drop any stale entry, then queue it */
		mvumi_remove_devices(mhba, id);
		mv_dev = kzalloc(sizeof(struct mvumi_device),
							GFP_KERNEL);
		if (!mv_dev) {
			dev_err(&mhba->pdev->dev,
				"%s alloc mv_dev failed\n",
				__func__);
			continue;
		}
		mv_dev->id = id;
		mv_dev->wwid = wwid;
		mv_dev->sdev = NULL;
		INIT_LIST_HEAD(&mv_dev->list);
		list_add_tail(&mv_dev->list, &mhba->mhba_dev_list);
		dev_dbg(&mhba->pdev->dev,
			"probe a new device(0:%d:0)"
			" wwid(%llx)\n", id, mv_dev->wwid);
	}

	/* cmd is always non-NULL here; free it on every exit path */
	mvumi_delete_internal_cmd(mhba, cmd);

	return ret;
}
/*
 * mvumi_rescan_bus - hot-plug worker thread (created in mvumi_io_attach).
 * @data:	the struct mvumi_hba this thread serves
 *
 * Sleeps until mvumi_launch_events() bumps pnp_count on a DRBL_BUS_CHANGE
 * doorbell and wakes it.  It then settles for a second, re-probes all
 * targets, and attaches every newly discovered device to the SCSI
 * mid-layer, moving it from mhba_dev_list to shost_dev_list.
 * Runs until kthread_stop() is called from mvumi_detach_one().
 */
static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL , *dev_next;

	while (!kthread_should_stop()) {

		/* sleep unless a bus-change event arrived since last pass;
		 * state must be set before the pnp_count test to avoid
		 * missing a wake-up from the interrupt path */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		/* debounce: let the firmware finish its own discovery */
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			/* attach everything mvumi_probe_devices() queued */
			list_for_each_entry_safe(mv_dev, dev_next,
						 &mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							 DEVICE_ONLINE)) {
					/* already attached: drop the entry */
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed"
						"wwid(%llx) has exist\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						       &mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}
/*
 * mvumi_proc_msg - process a firmware hot-plug message (APICDB1_HOST_GETEVENT).
 * @mhba:	adapter soft state
 * @param:	firmware-supplied event containing two device bitmaps of
 *		@size bits each: first the "attach" map, then (at byte
 *		offset size/8) the "remove" map
 *
 * Attaches/detaches the devices flagged in each map.  Ignored until the
 * host is fully attached (MVUMI_FW_ATTACH set).
 *
 * NOTE(review): both scans use find_next_zero_bit(), i.e. a *cleared* bit
 * marks an affected device id — presumably the firmware sends the maps
 * inverted; confirm against the firmware interface spec.
 */
static void mvumi_proc_msg(struct mvumi_hba *mhba,
			struct mvumi_hotplug_event *param)
{
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;
	int index;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		index = -1;
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];

		mutex_lock(&mhba->sas_discovery_mutex);
		/* first map: device ids to bring online */
		do {
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
		} while (1);

		/* second map: device ids to take offline */
		index = -1;
		do {
			index = find_next_zero_bit(re_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		} while (1);
		mutex_unlock(&mhba->sas_discovery_mutex);
	}
}
static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
{ {
if (msg == APICDB1_EVENT_GETEVENT) { if (msg == APICDB1_EVENT_GETEVENT) {
...@@ -1227,6 +1731,8 @@ static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) ...@@ -1227,6 +1731,8 @@ static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
param = &er->events[i]; param = &er->events[i];
mvumi_show_event(mhba, param); mvumi_show_event(mhba, param);
} }
} else if (msg == APICDB1_HOST_GETEVENT) {
mvumi_proc_msg(mhba, buffer);
} }
} }
...@@ -1271,17 +1777,27 @@ static void mvumi_scan_events(struct work_struct *work) ...@@ -1271,17 +1777,27 @@ static void mvumi_scan_events(struct work_struct *work)
kfree(mu_ev); kfree(mu_ev);
} }
static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg) static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
{ {
struct mvumi_events_wq *mu_ev; struct mvumi_events_wq *mu_ev;
mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
if (mu_ev) { if (isr_status & DRBL_BUS_CHANGE) {
INIT_WORK(&mu_ev->work_q, mvumi_scan_events); atomic_inc(&mhba->pnp_count);
mu_ev->mhba = mhba; wake_up_process(mhba->dm_thread);
mu_ev->event = msg; isr_status &= ~(DRBL_BUS_CHANGE);
mu_ev->param = NULL; continue;
schedule_work(&mu_ev->work_q); }
mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
if (mu_ev) {
INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
mu_ev->mhba = mhba;
mu_ev->event = APICDB1_EVENT_GETEVENT;
isr_status &= ~(DRBL_EVENT_NOTIFY);
mu_ev->param = NULL;
schedule_work(&mu_ev->work_q);
}
} }
} }
...@@ -1322,16 +1838,17 @@ static irqreturn_t mvumi_isr_handler(int irq, void *devp) ...@@ -1322,16 +1838,17 @@ static irqreturn_t mvumi_isr_handler(int irq, void *devp)
return IRQ_NONE; return IRQ_NONE;
} }
if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) { if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
mvumi_launch_events(mhba, mhba->isr_status);
if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
mvumi_handshake(mhba); mvumi_handshake(mhba);
} }
if (mhba->isr_status & DRBL_EVENT_NOTIFY)
mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
} }
if (mhba->global_isr & INT_MAP_COMAOUT) if (mhba->global_isr & mhba->regs->int_comaout)
mvumi_receive_ob_list_entry(mhba); mvumi_receive_ob_list_entry(mhba);
mhba->global_isr = 0; mhba->global_isr = 0;
...@@ -1358,8 +1875,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, ...@@ -1358,8 +1875,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
dev_dbg(&mhba->pdev->dev, "no free tag.\n"); dev_dbg(&mhba->pdev->dev, "no free tag.\n");
return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
} }
if (mvumi_get_ib_list_entry(mhba, &ib_entry)) mvumi_get_ib_list_entry(mhba, &ib_entry);
return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
cmd->frame->request_id = mhba->io_seq++; cmd->frame->request_id = mhba->io_seq++;
...@@ -1367,21 +1883,35 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, ...@@ -1367,21 +1883,35 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
mhba->tag_cmd[cmd->frame->tag] = cmd; mhba->tag_cmd[cmd->frame->tag] = cmd;
frame_len = sizeof(*ib_frame) - 4 + frame_len = sizeof(*ib_frame) - 4 +
ib_frame->sg_counts * sizeof(struct mvumi_sgl); ib_frame->sg_counts * sizeof(struct mvumi_sgl);
memcpy(ib_entry, ib_frame, frame_len); if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
struct mvumi_dyn_list_entry *dle;
dle = ib_entry;
dle->src_low_addr =
cpu_to_le32(lower_32_bits(cmd->frame_phys));
dle->src_high_addr =
cpu_to_le32(upper_32_bits(cmd->frame_phys));
dle->if_length = (frame_len >> 2) & 0xFFF;
} else {
memcpy(ib_entry, ib_frame, frame_len);
}
return MV_QUEUE_COMMAND_RESULT_SENT; return MV_QUEUE_COMMAND_RESULT_SENT;
} }
static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
{ {
unsigned short num_of_cl_sent = 0; unsigned short num_of_cl_sent = 0;
unsigned int count;
enum mvumi_qc_result result; enum mvumi_qc_result result;
if (cmd) if (cmd)
list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
count = mhba->instancet->check_ib_list(mhba);
if (list_empty(&mhba->waiting_req_list) || !count)
return;
while (!list_empty(&mhba->waiting_req_list)) { do {
cmd = list_first_entry(&mhba->waiting_req_list, cmd = list_first_entry(&mhba->waiting_req_list,
struct mvumi_cmd, queue_pointer); struct mvumi_cmd, queue_pointer);
list_del_init(&cmd->queue_pointer); list_del_init(&cmd->queue_pointer);
result = mvumi_send_command(mhba, cmd); result = mvumi_send_command(mhba, cmd);
switch (result) { switch (result) {
...@@ -1395,65 +1925,77 @@ static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) ...@@ -1395,65 +1925,77 @@ static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
return; return;
} }
} } while (!list_empty(&mhba->waiting_req_list) && count--);
if (num_of_cl_sent > 0) if (num_of_cl_sent > 0)
mvumi_send_ib_list_entry(mhba); mvumi_send_ib_list_entry(mhba);
} }
/** /**
* mvumi_enable_intr - Enables interrupts * mvumi_enable_intr - Enables interrupts
* @regs: FW register set * @mhba: Adapter soft state
*/ */
static void mvumi_enable_intr(void *regs) static void mvumi_enable_intr(struct mvumi_hba *mhba)
{ {
unsigned int mask; unsigned int mask;
struct mvumi_hw_regs *regs = mhba->regs;
iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); mask = ioread32(regs->enpointa_mask_reg);
mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR; mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); iowrite32(mask, regs->enpointa_mask_reg);
} }
/** /**
* mvumi_disable_intr -Disables interrupt * mvumi_disable_intr -Disables interrupt
* @regs: FW register set * @mhba: Adapter soft state
*/ */
static void mvumi_disable_intr(void *regs) static void mvumi_disable_intr(struct mvumi_hba *mhba)
{ {
unsigned int mask; unsigned int mask;
struct mvumi_hw_regs *regs = mhba->regs;
iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG); iowrite32(0, regs->arm_to_pciea_mask_reg);
mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); mask = ioread32(regs->enpointa_mask_reg);
mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR); mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); regs->int_comaerr);
iowrite32(mask, regs->enpointa_mask_reg);
} }
static int mvumi_clear_intr(void *extend) static int mvumi_clear_intr(void *extend)
{ {
struct mvumi_hba *mhba = (struct mvumi_hba *) extend; struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
unsigned int status, isr_status = 0, tmp = 0; unsigned int status, isr_status = 0, tmp = 0;
void *regs = mhba->mmio; struct mvumi_hw_regs *regs = mhba->regs;
status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG); status = ioread32(regs->main_int_cause_reg);
if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF) if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
return 1; return 1;
if (unlikely(status & INT_MAP_COMAERR)) { if (unlikely(status & regs->int_comaerr)) {
tmp = ioread32(regs + CLA_ISR_CAUSE); tmp = ioread32(regs->outb_isr_cause);
if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ)) if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ), if (tmp & regs->clic_out_err) {
regs + CLA_ISR_CAUSE); iowrite32(tmp & regs->clic_out_err,
status ^= INT_MAP_COMAERR; regs->outb_isr_cause);
}
} else {
if (tmp & (regs->clic_in_err | regs->clic_out_err))
iowrite32(tmp & (regs->clic_in_err |
regs->clic_out_err),
regs->outb_isr_cause);
}
status ^= mhba->regs->int_comaerr;
/* inbound or outbound parity error, command will timeout */ /* inbound or outbound parity error, command will timeout */
} }
if (status & INT_MAP_COMAOUT) { if (status & regs->int_comaout) {
tmp = ioread32(regs + CLA_ISR_CAUSE); tmp = ioread32(regs->outb_isr_cause);
if (tmp & CLIC_OUT_IRQ) if (tmp & regs->clic_irq)
iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE); iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
} }
if (status & INT_MAP_DL_CPU2PCIEA) { if (status & regs->int_dl_cpu2pciea) {
isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
if (isr_status) if (isr_status)
iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
} }
mhba->global_isr = status; mhba->global_isr = status;
...@@ -1464,24 +2006,38 @@ static int mvumi_clear_intr(void *extend) ...@@ -1464,24 +2006,38 @@ static int mvumi_clear_intr(void *extend)
/** /**
* mvumi_read_fw_status_reg - returns the current FW status value * mvumi_read_fw_status_reg - returns the current FW status value
* @regs: FW register set * @mhba: Adapter soft state
*/ */
static unsigned int mvumi_read_fw_status_reg(void *regs) static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
{ {
unsigned int status; unsigned int status;
status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
if (status) if (status)
iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
return status; return status;
} }
static struct mvumi_instance_template mvumi_instance_template = { static struct mvumi_instance_template mvumi_instance_9143 = {
.fire_cmd = mvumi_fire_cmd, .fire_cmd = mvumi_fire_cmd,
.enable_intr = mvumi_enable_intr, .enable_intr = mvumi_enable_intr,
.disable_intr = mvumi_disable_intr, .disable_intr = mvumi_disable_intr,
.clear_intr = mvumi_clear_intr, .clear_intr = mvumi_clear_intr,
.read_fw_status_reg = mvumi_read_fw_status_reg, .read_fw_status_reg = mvumi_read_fw_status_reg,
.check_ib_list = mvumi_check_ib_list_9143,
.check_ob_list = mvumi_check_ob_list_9143,
.reset_host = mvumi_reset_host_9143,
};
static struct mvumi_instance_template mvumi_instance_9580 = {
.fire_cmd = mvumi_fire_cmd,
.enable_intr = mvumi_enable_intr,
.disable_intr = mvumi_disable_intr,
.clear_intr = mvumi_clear_intr,
.read_fw_status_reg = mvumi_read_fw_status_reg,
.check_ib_list = mvumi_check_ib_list_9580,
.check_ob_list = mvumi_check_ob_list_9580,
.reset_host = mvumi_reset_host_9580,
}; };
static int mvumi_slave_configure(struct scsi_device *sdev) static int mvumi_slave_configure(struct scsi_device *sdev)
...@@ -1681,6 +2237,124 @@ static struct scsi_transport_template mvumi_transport_template = { ...@@ -1681,6 +2237,124 @@ static struct scsi_transport_template mvumi_transport_template = {
.eh_timed_out = mvumi_timed_out, .eh_timed_out = mvumi_timed_out,
}; };
/*
 * mvumi_cfg_hw_reg - fill mhba->regs with the chip-specific register map.
 * @mhba:	adapter soft state
 *
 * Selects the MMIO BAR (BAR0 on 9143, BAR2 on 9580) and records the
 * per-chip register addresses and interrupt bit layouts.  The register
 * block is allocated on first use and reused on later calls (resume path).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 for an
 * unsupported device id.
 *
 * Fix vs. original: dropped a duplicate outb_coal_cfg assignment in the
 * 9143 branch (it was written twice with the same value) and an
 * unreachable break after the default-case return.
 */
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
{
	void *base = NULL;
	struct mvumi_hw_regs *regs;

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->mmio = mhba->base_addr[0];
		base = mhba->mmio;
		if (!mhba->regs) {
			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
			if (mhba->regs == NULL)
				return -ENOMEM;
		}
		regs = mhba->regs;

		/* For Arm */
		regs->ctrl_sts_reg = base + 0x20104;
		regs->rstoutn_mask_reg = base + 0x20108;
		regs->sys_soft_rst_reg = base + 0x2010C;
		regs->main_int_cause_reg = base + 0x20200;
		regs->enpointa_mask_reg = base + 0x2020C;
		regs->rstoutn_en_reg = base + 0xF1400;

		/* For Doorbell */
		regs->pciea_to_arm_drbl_reg = base + 0x20400;
		regs->arm_to_pciea_drbl_reg = base + 0x20408;
		regs->arm_to_pciea_mask_reg = base + 0x2040C;
		regs->pciea_to_arm_msg0 = base + 0x20430;
		regs->pciea_to_arm_msg1 = base + 0x20434;
		regs->arm_to_pciea_msg0 = base + 0x20438;
		regs->arm_to_pciea_msg1 = base + 0x2043C;

		/* For Message Unit */
		regs->inb_aval_count_basel = base + 0x508;
		regs->inb_aval_count_baseh = base + 0x50C;
		regs->inb_write_pointer = base + 0x518;
		regs->inb_read_pointer = base + 0x51C;
		regs->outb_copy_basel = base + 0x5B0;
		regs->outb_copy_baseh = base + 0x5B4;
		regs->outb_copy_pointer = base + 0x544;
		regs->outb_read_pointer = base + 0x548;
		regs->outb_isr_cause = base + 0x560;
		regs->outb_coal_cfg = base + 0x568;

		/* Bit setting for HW */
		regs->int_comaout = 1 << 8;
		regs->int_comaerr = 1 << 6;
		regs->int_dl_cpu2pciea = 1 << 1;
		regs->cl_pointer_toggle = 1 << 12;
		regs->clic_irq = 1 << 1;
		regs->clic_in_err = 1 << 8;
		regs->clic_out_err = 1 << 12;
		regs->cl_slot_num_mask = 0xFFF;
		regs->int_drbl_int_mask = 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
							regs->int_comaerr;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->mmio = mhba->base_addr[2];
		base = mhba->mmio;
		if (!mhba->regs) {
			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
			if (mhba->regs == NULL)
				return -ENOMEM;
		}
		regs = mhba->regs;

		/* For Arm */
		regs->ctrl_sts_reg = base + 0x20104;
		regs->rstoutn_mask_reg = base + 0x1010C;
		regs->sys_soft_rst_reg = base + 0x10108;
		regs->main_int_cause_reg = base + 0x10200;
		regs->enpointa_mask_reg = base + 0x1020C;
		regs->rstoutn_en_reg = base + 0xF1400;

		/* For Doorbell */
		regs->pciea_to_arm_drbl_reg = base + 0x10460;
		regs->arm_to_pciea_drbl_reg = base + 0x10480;
		regs->arm_to_pciea_mask_reg = base + 0x10484;
		regs->pciea_to_arm_msg0 = base + 0x10400;
		regs->pciea_to_arm_msg1 = base + 0x10404;
		regs->arm_to_pciea_msg0 = base + 0x10420;
		regs->arm_to_pciea_msg1 = base + 0x10424;

		/* For reset*/
		regs->reset_request = base + 0x10108;
		regs->reset_enable = base + 0x1010c;

		/* For Message Unit */
		regs->inb_aval_count_basel = base + 0x4008;
		regs->inb_aval_count_baseh = base + 0x400C;
		regs->inb_write_pointer = base + 0x4018;
		regs->inb_read_pointer = base + 0x401C;
		regs->outb_copy_basel = base + 0x4058;
		regs->outb_copy_baseh = base + 0x405C;
		regs->outb_copy_pointer = base + 0x406C;
		regs->outb_read_pointer = base + 0x4070;
		regs->outb_coal_cfg = base + 0x4080;
		regs->outb_isr_cause = base + 0x4088;

		/* Bit setting for HW */
		regs->int_comaout = 1 << 4;
		regs->int_dl_cpu2pciea = 1 << 12;
		regs->int_comaerr = 1 << 29;
		regs->cl_pointer_toggle = 1 << 14;
		regs->cl_slot_num_mask = 0x3FFF;
		regs->clic_irq = 1 << 0;
		regs->clic_out_err = 1 << 1;
		regs->int_drbl_int_mask = 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
		break;
	default:
		return -1;
	}

	return 0;
}
/** /**
* mvumi_init_fw - Initializes the FW * mvumi_init_fw - Initializes the FW
* @mhba: Adapter soft state * @mhba: Adapter soft state
...@@ -1699,15 +2373,18 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) ...@@ -1699,15 +2373,18 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
if (ret) if (ret)
goto fail_ioremap; goto fail_ioremap;
mhba->mmio = mhba->base_addr[0];
switch (mhba->pdev->device) { switch (mhba->pdev->device) {
case PCI_DEVICE_ID_MARVELL_MV9143: case PCI_DEVICE_ID_MARVELL_MV9143:
mhba->instancet = &mvumi_instance_template; mhba->instancet = &mvumi_instance_9143;
mhba->io_seq = 0; mhba->io_seq = 0;
mhba->max_sge = MVUMI_MAX_SG_ENTRY; mhba->max_sge = MVUMI_MAX_SG_ENTRY;
mhba->request_id_enabled = 1; mhba->request_id_enabled = 1;
break; break;
case PCI_DEVICE_ID_MARVELL_MV9580:
mhba->instancet = &mvumi_instance_9580;
mhba->io_seq = 0;
mhba->max_sge = MVUMI_MAX_SG_ENTRY;
break;
default: default:
dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
mhba->pdev->device); mhba->pdev->device);
...@@ -1717,15 +2394,21 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) ...@@ -1717,15 +2394,21 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
} }
dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
mhba->pdev->device); mhba->pdev->device);
ret = mvumi_cfg_hw_reg(mhba);
mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL); if (ret) {
dev_err(&mhba->pdev->dev,
"failed to allocate memory for reg\n");
ret = -ENOMEM;
goto fail_alloc_mem;
}
mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
&mhba->handshake_page_phys);
if (!mhba->handshake_page) { if (!mhba->handshake_page) {
dev_err(&mhba->pdev->dev, dev_err(&mhba->pdev->dev,
"failed to allocate memory for handshake\n"); "failed to allocate memory for handshake\n");
ret = -ENOMEM; ret = -ENOMEM;
goto fail_alloc_mem; goto fail_alloc_page;
} }
mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
if (mvumi_start(mhba)) { if (mvumi_start(mhba)) {
ret = -EINVAL; ret = -EINVAL;
...@@ -1739,7 +2422,10 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) ...@@ -1739,7 +2422,10 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
fail_ready_state: fail_ready_state:
mvumi_release_mem_resource(mhba); mvumi_release_mem_resource(mhba);
kfree(mhba->handshake_page); pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
kfree(mhba->regs);
fail_alloc_mem: fail_alloc_mem:
mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap: fail_ioremap:
...@@ -1755,6 +2441,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba) ...@@ -1755,6 +2441,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
static int mvumi_io_attach(struct mvumi_hba *mhba) static int mvumi_io_attach(struct mvumi_hba *mhba)
{ {
struct Scsi_Host *host = mhba->shost; struct Scsi_Host *host = mhba->shost;
struct scsi_device *sdev = NULL;
int ret; int ret;
unsigned int max_sg = (mhba->ib_max_size + 4 - unsigned int max_sg = (mhba->ib_max_size + 4 -
sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
...@@ -1764,7 +2451,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) ...@@ -1764,7 +2451,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
host->max_sectors = mhba->max_transfer_size / 512; host->max_sectors = mhba->max_transfer_size / 512;
host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
host->max_id = mhba->max_target_id; host->max_id = mhba->max_target_id;
host->max_cmd_len = MAX_COMMAND_SIZE; host->max_cmd_len = MAX_COMMAND_SIZE;
host->transportt = &mvumi_transport_template; host->transportt = &mvumi_transport_template;
...@@ -1775,9 +2462,43 @@ static int mvumi_io_attach(struct mvumi_hba *mhba) ...@@ -1775,9 +2462,43 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
return ret; return ret;
} }
mhba->fw_flag |= MVUMI_FW_ATTACH; mhba->fw_flag |= MVUMI_FW_ATTACH;
scsi_scan_host(host);
mutex_lock(&mhba->sas_discovery_mutex);
if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
else
ret = 0;
if (ret) {
dev_err(&mhba->pdev->dev, "add virtual device failed\n");
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_add_device;
}
mhba->dm_thread = kthread_create(mvumi_rescan_bus,
mhba, "mvumi_scanthread");
if (IS_ERR(mhba->dm_thread)) {
dev_err(&mhba->pdev->dev,
"failed to create device scan thread\n");
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_create_thread;
}
atomic_set(&mhba->pnp_count, 1);
wake_up_process(mhba->dm_thread);
mutex_unlock(&mhba->sas_discovery_mutex);
return 0; return 0;
fail_create_thread:
if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
sdev = scsi_device_lookup(mhba->shost, 0,
mhba->max_target_id - 1, 0);
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
fail_add_device:
scsi_remove_host(mhba->shost);
return ret;
} }
/** /**
...@@ -1828,8 +2549,12 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev, ...@@ -1828,8 +2549,12 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&mhba->free_ob_list); INIT_LIST_HEAD(&mhba->free_ob_list);
INIT_LIST_HEAD(&mhba->res_list); INIT_LIST_HEAD(&mhba->res_list);
INIT_LIST_HEAD(&mhba->waiting_req_list); INIT_LIST_HEAD(&mhba->waiting_req_list);
mutex_init(&mhba->device_lock);
INIT_LIST_HEAD(&mhba->mhba_dev_list);
INIT_LIST_HEAD(&mhba->shost_dev_list);
atomic_set(&mhba->fw_outstanding, 0); atomic_set(&mhba->fw_outstanding, 0);
init_waitqueue_head(&mhba->int_cmd_wait_q); init_waitqueue_head(&mhba->int_cmd_wait_q);
mutex_init(&mhba->sas_discovery_mutex);
mhba->pdev = pdev; mhba->pdev = pdev;
mhba->shost = host; mhba->shost = host;
...@@ -1845,19 +2570,22 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev, ...@@ -1845,19 +2570,22 @@ static int __devinit mvumi_probe_one(struct pci_dev *pdev,
dev_err(&pdev->dev, "failed to register IRQ\n"); dev_err(&pdev->dev, "failed to register IRQ\n");
goto fail_init_irq; goto fail_init_irq;
} }
mhba->instancet->enable_intr(mhba->mmio);
mhba->instancet->enable_intr(mhba);
pci_set_drvdata(pdev, mhba); pci_set_drvdata(pdev, mhba);
ret = mvumi_io_attach(mhba); ret = mvumi_io_attach(mhba);
if (ret) if (ret)
goto fail_io_attach; goto fail_io_attach;
mvumi_backup_bar_addr(mhba);
dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
return 0; return 0;
fail_io_attach: fail_io_attach:
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
mhba->instancet->disable_intr(mhba->mmio); mhba->instancet->disable_intr(mhba);
free_irq(mhba->pdev->irq, mhba); free_irq(mhba->pdev->irq, mhba);
fail_init_irq: fail_init_irq:
mvumi_release_fw(mhba); mvumi_release_fw(mhba);
...@@ -1877,11 +2605,17 @@ static void mvumi_detach_one(struct pci_dev *pdev) ...@@ -1877,11 +2605,17 @@ static void mvumi_detach_one(struct pci_dev *pdev)
struct mvumi_hba *mhba; struct mvumi_hba *mhba;
mhba = pci_get_drvdata(pdev); mhba = pci_get_drvdata(pdev);
if (mhba->dm_thread) {
kthread_stop(mhba->dm_thread);
mhba->dm_thread = NULL;
}
mvumi_detach_devices(mhba);
host = mhba->shost; host = mhba->shost;
scsi_remove_host(mhba->shost); scsi_remove_host(mhba->shost);
mvumi_flush_cache(mhba); mvumi_flush_cache(mhba);
mhba->instancet->disable_intr(mhba->mmio); mhba->instancet->disable_intr(mhba);
free_irq(mhba->pdev->irq, mhba); free_irq(mhba->pdev->irq, mhba);
mvumi_release_fw(mhba); mvumi_release_fw(mhba);
scsi_host_put(host); scsi_host_put(host);
...@@ -1909,7 +2643,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) ...@@ -1909,7 +2643,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
mvumi_flush_cache(mhba); mvumi_flush_cache(mhba);
pci_set_drvdata(pdev, mhba); pci_set_drvdata(pdev, mhba);
mhba->instancet->disable_intr(mhba->mmio); mhba->instancet->disable_intr(mhba);
free_irq(mhba->pdev->irq, mhba); free_irq(mhba->pdev->irq, mhba);
mvumi_unmap_pci_addr(pdev, mhba->base_addr); mvumi_unmap_pci_addr(pdev, mhba->base_addr);
pci_release_regions(pdev); pci_release_regions(pdev);
...@@ -1956,8 +2690,13 @@ static int mvumi_resume(struct pci_dev *pdev) ...@@ -1956,8 +2690,13 @@ static int mvumi_resume(struct pci_dev *pdev)
if (ret) if (ret)
goto release_regions; goto release_regions;
if (mvumi_cfg_hw_reg(mhba)) {
ret = -EINVAL;
goto unmap_pci_addr;
}
mhba->mmio = mhba->base_addr[0]; mhba->mmio = mhba->base_addr[0];
mvumi_reset(mhba->mmio); mvumi_reset(mhba);
if (mvumi_start(mhba)) { if (mvumi_start(mhba)) {
ret = -EINVAL; ret = -EINVAL;
...@@ -1970,7 +2709,7 @@ static int mvumi_resume(struct pci_dev *pdev) ...@@ -1970,7 +2709,7 @@ static int mvumi_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "failed to register IRQ\n"); dev_err(&pdev->dev, "failed to register IRQ\n");
goto unmap_pci_addr; goto unmap_pci_addr;
} }
mhba->instancet->enable_intr(mhba->mmio); mhba->instancet->enable_intr(mhba);
return 0; return 0;
......
...@@ -34,51 +34,87 @@ ...@@ -34,51 +34,87 @@
#define MV_DRIVER_NAME "mvumi" #define MV_DRIVER_NAME "mvumi"
#define PCI_VENDOR_ID_MARVELL_2 0x1b4b #define PCI_VENDOR_ID_MARVELL_2 0x1b4b
#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 #define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
#define PCI_DEVICE_ID_MARVELL_MV9580 0x9580
#define MVUMI_INTERNAL_CMD_WAIT_TIME 45 #define MVUMI_INTERNAL_CMD_WAIT_TIME 45
#define MVUMI_INQUIRY_LENGTH 44
#define MVUMI_INQUIRY_UUID_OFF 36
#define MVUMI_INQUIRY_UUID_LEN 8
#define IS_DMA64 (sizeof(dma_addr_t) == 8) #define IS_DMA64 (sizeof(dma_addr_t) == 8)
enum mvumi_qc_result { enum mvumi_qc_result {
MV_QUEUE_COMMAND_RESULT_SENT = 0, MV_QUEUE_COMMAND_RESULT_SENT = 0,
MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
}; };
enum { struct mvumi_hw_regs {
/*******************************************/ /* For CPU */
void *main_int_cause_reg;
/* ARM Mbus Registers Map */ void *enpointa_mask_reg;
void *enpointb_mask_reg;
/*******************************************/ void *rstoutn_en_reg;
CPU_MAIN_INT_CAUSE_REG = 0x20200, void *ctrl_sts_reg;
CPU_MAIN_IRQ_MASK_REG = 0x20204, void *rstoutn_mask_reg;
CPU_MAIN_FIQ_MASK_REG = 0x20208, void *sys_soft_rst_reg;
CPU_ENPOINTA_MASK_REG = 0x2020C,
CPU_ENPOINTB_MASK_REG = 0x20210, /* For Doorbell */
void *pciea_to_arm_drbl_reg;
INT_MAP_COMAERR = 1 << 6, void *arm_to_pciea_drbl_reg;
INT_MAP_COMAIN = 1 << 7, void *arm_to_pciea_mask_reg;
INT_MAP_COMAOUT = 1 << 8, void *pciea_to_arm_msg0;
INT_MAP_COMBERR = 1 << 9, void *pciea_to_arm_msg1;
INT_MAP_COMBIN = 1 << 10, void *arm_to_pciea_msg0;
INT_MAP_COMBOUT = 1 << 11, void *arm_to_pciea_msg1;
INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR), /* reset register */
INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR), void *reset_request;
void *reset_enable;
INT_MAP_DL_PCIEA2CPU = 1 << 0,
INT_MAP_DL_CPU2PCIEA = 1 << 1, /* For Message Unit */
void *inb_list_basel;
/***************************************/ void *inb_list_baseh;
void *inb_aval_count_basel;
void *inb_aval_count_baseh;
void *inb_write_pointer;
void *inb_read_pointer;
void *outb_list_basel;
void *outb_list_baseh;
void *outb_copy_basel;
void *outb_copy_baseh;
void *outb_copy_pointer;
void *outb_read_pointer;
void *inb_isr_cause;
void *outb_isr_cause;
void *outb_coal_cfg;
void *outb_coal_timeout;
/* Bit setting for HW */
u32 int_comaout;
u32 int_comaerr;
u32 int_dl_cpu2pciea;
u32 int_mu;
u32 int_drbl_int_mask;
u32 int_main_int_mask;
u32 cl_pointer_toggle;
u32 cl_slot_num_mask;
u32 clic_irq;
u32 clic_in_err;
u32 clic_out_err;
};
/* ARM Doorbell Registers Map */ struct mvumi_dyn_list_entry {
u32 src_low_addr;
u32 src_high_addr;
u32 if_length;
u32 reserve;
};
/***************************************/ #define SCSI_CMD_MARVELL_SPECIFIC 0xE1
CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400, #define CDB_CORE_MODULE 0x1
CPU_PCIEA_TO_ARM_MASK_REG = 0x20404, #define CDB_CORE_SHUTDOWN 0xB
CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408,
CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C,
enum {
DRBL_HANDSHAKE = 1 << 0, DRBL_HANDSHAKE = 1 << 0,
DRBL_SOFT_RESET = 1 << 1, DRBL_SOFT_RESET = 1 << 1,
DRBL_BUS_CHANGE = 1 << 2, DRBL_BUS_CHANGE = 1 << 2,
...@@ -86,46 +122,6 @@ enum { ...@@ -86,46 +122,6 @@ enum {
DRBL_MU_RESET = 1 << 4, DRBL_MU_RESET = 1 << 4,
DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
CPU_PCIEA_TO_ARM_MSG0 = 0x20430,
CPU_PCIEA_TO_ARM_MSG1 = 0x20434,
CPU_ARM_TO_PCIEA_MSG0 = 0x20438,
CPU_ARM_TO_PCIEA_MSG1 = 0x2043C,
/*******************************************/
/* ARM Communication List Registers Map */
/*******************************************/
CLA_INB_LIST_BASEL = 0x500,
CLA_INB_LIST_BASEH = 0x504,
CLA_INB_AVAL_COUNT_BASEL = 0x508,
CLA_INB_AVAL_COUNT_BASEH = 0x50C,
CLA_INB_DESTI_LIST_BASEL = 0x510,
CLA_INB_DESTI_LIST_BASEH = 0x514,
CLA_INB_WRITE_POINTER = 0x518,
CLA_INB_READ_POINTER = 0x51C,
CLA_OUTB_LIST_BASEL = 0x530,
CLA_OUTB_LIST_BASEH = 0x534,
CLA_OUTB_SOURCE_LIST_BASEL = 0x538,
CLA_OUTB_SOURCE_LIST_BASEH = 0x53C,
CLA_OUTB_COPY_POINTER = 0x544,
CLA_OUTB_READ_POINTER = 0x548,
CLA_ISR_CAUSE = 0x560,
CLA_ISR_MASK = 0x564,
INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
CL_POINTER_TOGGLE = 1 << 12,
CLIC_IN_IRQ = 1 << 0,
CLIC_OUT_IRQ = 1 << 1,
CLIC_IN_ERR_IRQ = 1 << 8,
CLIC_OUT_ERR_IRQ = 1 << 12,
CL_SLOT_NUM_MASK = 0xFFF,
/* /*
* Command flag is the flag for the CDB command itself * Command flag is the flag for the CDB command itself
*/ */
...@@ -137,15 +133,23 @@ enum { ...@@ -137,15 +133,23 @@ enum {
CMD_FLAG_DATA_IN = 1 << 3, CMD_FLAG_DATA_IN = 1 << 3,
/* 1-host write data */ /* 1-host write data */
CMD_FLAG_DATA_OUT = 1 << 4, CMD_FLAG_DATA_OUT = 1 << 4,
CMD_FLAG_PRDT_IN_HOST = 1 << 5,
SCSI_CMD_MARVELL_SPECIFIC = 0xE1,
CDB_CORE_SHUTDOWN = 0xB,
}; };
#define APICDB0_EVENT 0xF4 #define APICDB0_EVENT 0xF4
#define APICDB1_EVENT_GETEVENT 0 #define APICDB1_EVENT_GETEVENT 0
#define APICDB1_HOST_GETEVENT 1
#define MAX_EVENTS_RETURNED 6 #define MAX_EVENTS_RETURNED 6
#define DEVICE_OFFLINE 0
#define DEVICE_ONLINE 1
struct mvumi_hotplug_event {
u16 size;
u8 dummy[2];
u8 bitmap[0];
};
struct mvumi_driver_event { struct mvumi_driver_event {
u32 time_stamp; u32 time_stamp;
u32 sequence_no; u32 sequence_no;
...@@ -172,8 +176,14 @@ struct mvumi_events_wq { ...@@ -172,8 +176,14 @@ struct mvumi_events_wq {
void *param; void *param;
}; };
#define HS_CAPABILITY_SUPPORT_COMPACT_SG (1U << 4)
#define HS_CAPABILITY_SUPPORT_PRD_HOST (1U << 5)
#define HS_CAPABILITY_SUPPORT_DYN_SRC (1U << 6)
#define HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF (1U << 14)
#define MVUMI_MAX_SG_ENTRY 32 #define MVUMI_MAX_SG_ENTRY 32
#define SGD_EOT (1L << 27) #define SGD_EOT (1L << 27)
#define SGD_EOT_CP (1L << 22)
struct mvumi_sgl { struct mvumi_sgl {
u32 baseaddr_l; u32 baseaddr_l;
...@@ -181,6 +191,39 @@ struct mvumi_sgl { ...@@ -181,6 +191,39 @@ struct mvumi_sgl {
u32 flags; u32 flags;
u32 size; u32 size;
}; };
struct mvumi_compact_sgl {
u32 baseaddr_l;
u32 baseaddr_h;
u32 flags;
};
#define GET_COMPACT_SGD_SIZE(sgd) \
((((struct mvumi_compact_sgl *)(sgd))->flags) & 0x3FFFFFL)
#define SET_COMPACT_SGD_SIZE(sgd, sz) do { \
(((struct mvumi_compact_sgl *)(sgd))->flags) &= ~0x3FFFFFL; \
(((struct mvumi_compact_sgl *)(sgd))->flags) |= (sz); \
} while (0)
#define sgd_getsz(_mhba, sgd, sz) do { \
if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
(sz) = GET_COMPACT_SGD_SIZE(sgd); \
else \
(sz) = (sgd)->size; \
} while (0)
#define sgd_setsz(_mhba, sgd, sz) do { \
if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
SET_COMPACT_SGD_SIZE(sgd, sz); \
else \
(sgd)->size = (sz); \
} while (0)
#define sgd_inc(_mhba, sgd) do { \
if (_mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) \
sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 12); \
else \
sgd = (struct mvumi_sgl *)(((unsigned char *) (sgd)) + 16); \
} while (0)
struct mvumi_res { struct mvumi_res {
struct list_head entry; struct list_head entry;
...@@ -197,7 +240,7 @@ enum resource_type { ...@@ -197,7 +240,7 @@ enum resource_type {
}; };
struct mvumi_sense_data { struct mvumi_sense_data {
u8 error_eode:7; u8 error_code:7;
u8 valid:1; u8 valid:1;
u8 segment_number; u8 segment_number;
u8 sense_key:4; u8 sense_key:4;
...@@ -220,6 +263,7 @@ struct mvumi_sense_data { ...@@ -220,6 +263,7 @@ struct mvumi_sense_data {
struct mvumi_cmd { struct mvumi_cmd {
struct list_head queue_pointer; struct list_head queue_pointer;
struct mvumi_msg_frame *frame; struct mvumi_msg_frame *frame;
dma_addr_t frame_phys;
struct scsi_cmnd *scmd; struct scsi_cmnd *scmd;
atomic_t sync_cmd; atomic_t sync_cmd;
void *data_buf; void *data_buf;
...@@ -393,7 +437,8 @@ struct mvumi_hs_page2 { ...@@ -393,7 +437,8 @@ struct mvumi_hs_page2 {
u16 frame_length; u16 frame_length;
u8 host_type; u8 host_type;
u8 reserved[3]; u8 host_cap;
u8 reserved[2];
struct version_info host_ver; struct version_info host_ver;
u32 system_io_bus; u32 system_io_bus;
u32 slot_number; u32 slot_number;
...@@ -435,8 +480,17 @@ struct mvumi_tag { ...@@ -435,8 +480,17 @@ struct mvumi_tag {
unsigned short size; unsigned short size;
}; };
struct mvumi_device {
struct list_head list;
struct scsi_device *sdev;
u64 wwid;
u8 dev_type;
int id;
};
struct mvumi_hba { struct mvumi_hba {
void *base_addr[MAX_BASE_ADDRESS]; void *base_addr[MAX_BASE_ADDRESS];
u32 pci_base[MAX_BASE_ADDRESS];
void *mmio; void *mmio;
struct list_head cmd_pool; struct list_head cmd_pool;
struct Scsi_Host *shost; struct Scsi_Host *shost;
...@@ -449,6 +503,9 @@ struct mvumi_hba { ...@@ -449,6 +503,9 @@ struct mvumi_hba {
void *ib_list; void *ib_list;
dma_addr_t ib_list_phys; dma_addr_t ib_list_phys;
void *ib_frame;
dma_addr_t ib_frame_phys;
void *ob_list; void *ob_list;
dma_addr_t ob_list_phys; dma_addr_t ob_list_phys;
...@@ -477,12 +534,14 @@ struct mvumi_hba { ...@@ -477,12 +534,14 @@ struct mvumi_hba {
unsigned char hba_total_pages; unsigned char hba_total_pages;
unsigned char fw_flag; unsigned char fw_flag;
unsigned char request_id_enabled; unsigned char request_id_enabled;
unsigned char eot_flag;
unsigned short hba_capability; unsigned short hba_capability;
unsigned short io_seq; unsigned short io_seq;
unsigned int ib_cur_slot; unsigned int ib_cur_slot;
unsigned int ob_cur_slot; unsigned int ob_cur_slot;
unsigned int fw_state; unsigned int fw_state;
struct mutex sas_discovery_mutex;
struct list_head ob_data_list; struct list_head ob_data_list;
struct list_head free_ob_list; struct list_head free_ob_list;
...@@ -491,14 +550,24 @@ struct mvumi_hba { ...@@ -491,14 +550,24 @@ struct mvumi_hba {
struct mvumi_tag tag_pool; struct mvumi_tag tag_pool;
struct mvumi_cmd **tag_cmd; struct mvumi_cmd **tag_cmd;
struct mvumi_hw_regs *regs;
struct mutex device_lock;
struct list_head mhba_dev_list;
struct list_head shost_dev_list;
struct task_struct *dm_thread;
atomic_t pnp_count;
}; };
struct mvumi_instance_template { struct mvumi_instance_template {
void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *); void (*fire_cmd) (struct mvumi_hba *, struct mvumi_cmd *);
void (*enable_intr)(void *) ; void (*enable_intr) (struct mvumi_hba *);
void (*disable_intr)(void *); void (*disable_intr) (struct mvumi_hba *);
int (*clear_intr)(void *); int (*clear_intr) (void *);
unsigned int (*read_fw_status_reg)(void *); unsigned int (*read_fw_status_reg) (struct mvumi_hba *);
unsigned int (*check_ib_list) (struct mvumi_hba *);
int (*check_ob_list) (struct mvumi_hba *, unsigned int *,
unsigned int *);
int (*reset_host) (struct mvumi_hba *);
}; };
extern struct timezone sys_tz; extern struct timezone sys_tz;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册