/* Module parameters */
static int geoid;
-static struct vme_bridge *ca91cx42_bridge;
-static wait_queue_head_t dma_queue;
-static wait_queue_head_t iack_queue;
-#if 0
-static wait_queue_head_t lm_queue;
-#endif
-static wait_queue_head_t mbox_queue;
-
-static void (*lm_callback[4])(int); /* Called in interrupt handler */
-static void *crcsr_kernel;
-static dma_addr_t crcsr_bus;
-
-static struct mutex vme_rmw; /* Only one RMW cycle at a time */
-static struct mutex vme_int; /*
- * Only one VME interrupt can be
- * generated at a time, provide locking
- */
-
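The globals removed above become members of a per-device structure reached through vme_bridge->driver_priv. Its definition is not part of this hunk; the sketch below is only a reconstruction, assumed to live in vme_ca91cx42.h, based on the fields the rest of the patch dereferences.

/* Sketch only: inferred from the accesses made elsewhere in this patch. */
struct ca91cx42_driver {
	void __iomem *base;		/* Most registers mapped here */
	wait_queue_head_t dma_queue;
	wait_queue_head_t iack_queue;
	wait_queue_head_t mbox_queue;
	void (*lm_callback[4])(int);	/* Called in interrupt handler */
	void *crcsr_kernel;
	dma_addr_t crcsr_bus;
	struct mutex vme_rmw;		/* Only one RMW cycle at a time */
	struct mutex vme_int;		/* Only one VME interrupt at a time */
};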
static char driver_name[] = "vme_ca91cx42";
static const struct pci_device_id ca91cx42_ids[] = {
.remove = ca91cx42_remove,
};
-static u32 ca91cx42_DMA_irqhandler(void)
+static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
- wake_up(&dma_queue);
+ wake_up(&(bridge->dma_queue));
return CA91CX42_LINT_DMA;
}
-static u32 ca91cx42_LM_irqhandler(u32 stat)
+static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
int i;
u32 serviced = 0;
for (i = 0; i < 4; i++) {
if (stat & CA91CX42_LINT_LM[i]) {
/* We only enable interrupts if the callback is set */
- lm_callback[i](i);
+ bridge->lm_callback[i](i);
serviced |= CA91CX42_LINT_LM[i];
}
}
}
/* XXX This needs to be split into 4 queues */
-static u32 ca91cx42_MB_irqhandler(int mbox_mask)
+static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
- wake_up(&mbox_queue);
+ wake_up(&(bridge->mbox_queue));
return CA91CX42_LINT_MBOX;
}
-static u32 ca91cx42_IACK_irqhandler(void)
+static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
- wake_up(&iack_queue);
+ wake_up(&(bridge->iack_queue));
return CA91CX42_LINT_SW_IACK;
}
int ca91cx42_bus_error_chk(int clrflag)
{
int tmp;
- tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
+ tmp = ioread32(bridge->base + PCI_COMMAND);
if (tmp & 0x08000000) { /* S_TA is Set */
if (clrflag)
iowrite32(tmp | 0x08000000,
- ca91cx42_bridge->base + PCI_COMMAND);
+ bridge->base + PCI_COMMAND);
return 1;
}
return 0;
}
#endif
-static u32 ca91cx42_VERR_irqhandler(void)
+static u32 ca91cx42_VERR_irqhandler(struct ca91cx42_driver *bridge)
{
int val;
- val = ioread32(ca91cx42_bridge->base + DGCS);
+ val = ioread32(bridge->base + DGCS);
if (!(val & 0x00000800)) {
printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
return CA91CX42_LINT_VERR;
}
-static u32 ca91cx42_LERR_irqhandler(void)
+static u32 ca91cx42_LERR_irqhandler(struct ca91cx42_driver *bridge)
{
int val;
- val = ioread32(ca91cx42_bridge->base + DGCS);
+ val = ioread32(bridge->base + DGCS);
if (!(val & 0x00000800)) {
printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
}
-static u32 ca91cx42_VIRQ_irqhandler(int stat)
+static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
+ int stat)
{
int vec, i, serviced = 0;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
+
for (i = 7; i > 0; i--) {
if (stat & (1 << i)) {
- vec = ioread32(ca91cx42_bridge->base +
+ vec = ioread32(bridge->base +
CA91CX42_V_STATID[i]) & 0xff;
vme_irq_handler(ca91cx42_bridge, i, vec);
return serviced;
}
-static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
+static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
u32 stat, enable, serviced = 0;
+ struct vme_bridge *ca91cx42_bridge;
+ struct ca91cx42_driver *bridge;
- if (dev_id != ca91cx42_bridge->base)
- return IRQ_NONE;
+ ca91cx42_bridge = ptr;
- enable = ioread32(ca91cx42_bridge->base + LINT_EN);
- stat = ioread32(ca91cx42_bridge->base + LINT_STAT);
+ bridge = ca91cx42_bridge->driver_priv;
+
+ enable = ioread32(bridge->base + LINT_EN);
+ stat = ioread32(bridge->base + LINT_STAT);
/* Only look at unmasked interrupts */
stat &= enable;
return IRQ_NONE;
if (stat & CA91CX42_LINT_DMA)
- serviced |= ca91cx42_DMA_irqhandler();
+ serviced |= ca91cx42_DMA_irqhandler(bridge);
if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
CA91CX42_LINT_LM3))
- serviced |= ca91cx42_LM_irqhandler(stat);
+ serviced |= ca91cx42_LM_irqhandler(bridge, stat);
if (stat & CA91CX42_LINT_MBOX)
- serviced |= ca91cx42_MB_irqhandler(stat);
+ serviced |= ca91cx42_MB_irqhandler(bridge, stat);
if (stat & CA91CX42_LINT_SW_IACK)
- serviced |= ca91cx42_IACK_irqhandler();
+ serviced |= ca91cx42_IACK_irqhandler(bridge);
if (stat & CA91CX42_LINT_VERR)
- serviced |= ca91cx42_VERR_irqhandler();
+ serviced |= ca91cx42_VERR_irqhandler(bridge);
if (stat & CA91CX42_LINT_LERR)
- serviced |= ca91cx42_LERR_irqhandler();
+ serviced |= ca91cx42_LERR_irqhandler(bridge);
if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
CA91CX42_LINT_VIRQ7))
- serviced |= ca91cx42_VIRQ_irqhandler(stat);
+ serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
/* Clear serviced interrupts */
- iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);
+ iowrite32(stat, bridge->base + LINT_STAT);
return IRQ_HANDLED;
}
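The handler above depends on request_irq() in ca91cx42_irq_init() registering the struct vme_bridge pointer as the dev_id cookie, so the interrupt path can reach the per-device state without any file-scope variables. Reduced to its essentials (a sketch with hypothetical names, not code from this driver):

/* Hypothetical handler illustrating the dev_id pattern this patch relies on. */
static irqreturn_t example_irqhandler(int irq, void *ptr)
{
	struct vme_bridge *vbridge = ptr;			/* dev_id passed to request_irq() */
	struct ca91cx42_driver *priv = vbridge->driver_priv;	/* per-device state */

	/* Every per-device object is reached through priv; no globals needed. */
	wake_up(&priv->dma_queue);
	return IRQ_HANDLED;
}

/* Registered with the bridge, not the pci_dev, as the cookie:
 *	request_irq(pdev->irq, example_irqhandler, IRQF_SHARED, driver_name, vbridge);
 */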
-static int ca91cx42_irq_init(struct vme_bridge *bridge)
+static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
int result, tmp;
struct pci_dev *pdev;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
/* Need pdev */
- pdev = container_of(bridge->parent, struct pci_dev, dev);
+ pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
/* Initialise list for VME bus errors */
- INIT_LIST_HEAD(&(bridge->vme_errors));
+ INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));
- mutex_init(&(bridge->irq_mtx));
+ mutex_init(&(ca91cx42_bridge->irq_mtx));
/* Disable interrupts from PCI to VME */
iowrite32(0, bridge->base + VINT_EN);
iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
- driver_name, pdev);
+ driver_name, ca91cx42_bridge);
if (result) {
dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
pdev->irq);
return 0;
}
-static void ca91cx42_irq_exit(struct pci_dev *pdev)
+static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
+ struct pci_dev *pdev)
{
/* Disable interrupts from PCI to VME */
- iowrite32(0, ca91cx42_bridge->base + VINT_EN);
+ iowrite32(0, bridge->base + VINT_EN);
/* Disable PCI interrupts */
- iowrite32(0, ca91cx42_bridge->base + LINT_EN);
+ iowrite32(0, bridge->base + LINT_EN);
/* Clear Any Pending PCI Interrupts */
- iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);
+ iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
free_irq(pdev->irq, pdev);
}
/*
* Set up a VME interrupt
*/
-void ca91cx42_irq_set(int level, int state, int sync)
+void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
+ int sync)
{
struct pci_dev *pdev;
u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
/* Enable IRQ level */
- tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
+ tmp = ioread32(bridge->base + LINT_EN);
if (state == 0)
tmp &= ~CA91CX42_LINT_VIRQ[level];
else
tmp |= CA91CX42_LINT_VIRQ[level];
- iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);
+ iowrite32(tmp, bridge->base + LINT_EN);
if ((state == 0) && (sync != 0)) {
pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
}
}
-int ca91cx42_irq_generate(int level, int statid)
+int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
+ int statid)
{
u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
/* Universe can only generate even vectors */
if (statid & 1)
return -EINVAL;
- mutex_lock(&(vme_int));
+ mutex_lock(&(bridge->vme_int));
- tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
+ tmp = ioread32(bridge->base + VINT_EN);
/* Set Status/ID */
- iowrite32(statid << 24, ca91cx42_bridge->base + STATID);
+ iowrite32(statid << 24, bridge->base + STATID);
/* Assert VMEbus IRQ */
tmp = tmp | (1 << (level + 24));
- iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);
+ iowrite32(tmp, bridge->base + VINT_EN);
/* Wait for IACK */
- wait_event_interruptible(iack_queue, 0);
+ wait_event_interruptible(bridge->iack_queue, 0);
/* Return interrupt to low state */
- tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
+ tmp = ioread32(bridge->base + VINT_EN);
tmp = tmp & ~(1 << (level + 24));
- iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);
+ iowrite32(tmp, bridge->base + VINT_EN);
- mutex_unlock(&(vme_int));
+ mutex_unlock(&(bridge->vme_int));
return 0;
}
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
unsigned int vme_bound, pci_offset;
+ struct ca91cx42_driver *bridge;
+
+ bridge = image->parent->driver_priv;
i = image->number;
}
/* Disable while we are mucking around */
- temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
+ temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
temp_ctl &= ~CA91CX42_VSI_CTL_EN;
- iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
+ iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
/* Setup mapping */
- iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
- iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
- iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
+ iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
+ iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
+ iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
/* XXX Prefetch stuff currently unsupported */
#if 0
temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
/* Write ctl reg without enable */
- iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
+ iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
if (enabled)
temp_ctl |= CA91CX42_VSI_CTL_EN;
- iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
+ iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
return 0;
}
{
unsigned int i, granularity = 0, ctl = 0;
unsigned long long vme_bound, pci_offset;
+ struct ca91cx42_driver *bridge;
+
+ bridge = image->parent->driver_priv;
i = image->number;
granularity = 0x10000;
/* Read Registers */
- ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
+ ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
- *vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
- vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
- pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
+ *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
+ vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
+ pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
*pci_base = (dma_addr_t)vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
unsigned long long existing_size;
int retval = 0;
struct pci_dev *pdev;
+ struct vme_bridge *ca91cx42_bridge;
+
+ ca91cx42_bridge = image->parent;
/* Find pci_dev container of dev */
if (ca91cx42_bridge->parent == NULL) {
unsigned int i;
unsigned int temp_ctl = 0;
unsigned long long pci_bound, vme_offset, pci_base;
+ struct ca91cx42_driver *bridge;
+
+ bridge = image->parent->driver_priv;
/* Verify input data */
if (vme_base & 0xFFF) {
i = image->number;
/* Disable while we are mucking around */
- temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
+ temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
temp_ctl &= ~CA91CX42_LSI_CTL_EN;
- iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
+ iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
/* XXX Prefetch stuff currently unsupported */
#if 0
temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
/* Setup mapping */
- iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
- iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
- iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
+ iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
+ iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
+ iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
/* Write ctl reg without enable */
- iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
+ iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
if (enabled)
temp_ctl |= CA91CX42_LSI_CTL_EN;
- iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
+ iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
spin_unlock(&(image->lock));
return 0;
{
unsigned int i, ctl;
unsigned long long pci_base, pci_bound, vme_offset;
+ struct ca91cx42_driver *bridge;
+
+ bridge = image->parent->driver_priv;
i = image->number;
- ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
+ ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
- pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
- vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
- pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
+ pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
+ vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
+ pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
*vme_base = pci_base + vme_offset;
*size = (pci_bound - pci_base) + 0x1000;
return retval;
}
-int ca91cx42_slot_get(void)
+int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
u32 slot = 0;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
if (!geoid) {
- slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
+ slot = ioread32(bridge->base + VCSR_BS);
slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
} else
slot = geoid;
* Auto-ID or Geographic address. This function ensures that the window is
* enabled at an offset consistent with the board's geographic address.
*/
-static int ca91cx42_crcsr_init(struct pci_dev *pdev)
+static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
+ struct pci_dev *pdev)
{
unsigned int crcsr_addr;
int tmp, slot;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
/* XXX We may need to set this somehow as the Universe II does not support
* geographical addressing.
*/
#if 0
if (vme_slotnum != -1)
- iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
+ iowrite32(vme_slotnum << 27, bridge->base + VCSR_BS);
#endif
- slot = ca91cx42_slot_get();
+ slot = ca91cx42_slot_get(ca91cx42_bridge);
dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
if (slot == 0) {
dev_err(&pdev->dev, "Slot number is unset, not configuring "
}
/* Allocate mem for CR/CSR image */
- crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &crcsr_bus);
- if (crcsr_kernel == NULL) {
+ bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
+ &(bridge->crcsr_bus));
+ if (bridge->crcsr_kernel == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
}
- memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
+ memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
crcsr_addr = slot * (512 * 1024);
- iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);
+ iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
- tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
+ tmp = ioread32(bridge->base + VCSR_CTL);
tmp |= CA91CX42_VCSR_CTL_EN;
- iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
+ iowrite32(tmp, bridge->base + VCSR_CTL);
return 0;
}
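For reference, the mapping arithmetic above: each slot owns a 512 KiB CR/CSR window on the VME bus, so slot n starts at n * 0x80000, and VCSR_TO is loaded with crcsr_bus - crcsr_addr so that an inbound CR/CSR access at crcsr_addr + x lands at crcsr_bus + x inside the coherent buffer. A worked example (slot number illustrative only):

/* Illustrative values, assuming the board sits in slot 4. */
crcsr_addr = 4 * (512 * 1024);			/* 0x00200000 */
/* VCSR_TO = crcsr_bus - 0x00200000, so a CR/CSR access to
 * 0x00200000 + x is translated to PCI address crcsr_bus + x,
 * i.e. into the buffer allocated with pci_alloc_consistent(). */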
-static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
+static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
+ struct pci_dev *pdev)
{
u32 tmp;
+ struct ca91cx42_driver *bridge;
+
+ bridge = ca91cx42_bridge->driver_priv;
/* Turn off CR/CSR space */
- tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
+ tmp = ioread32(bridge->base + VCSR_CTL);
tmp &= ~CA91CX42_VCSR_CTL_EN;
- iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
+ iowrite32(tmp, bridge->base + VCSR_CTL);
/* Free image */
- iowrite32(0, ca91cx42_bridge->base + VCSR_TO);
+ iowrite32(0, bridge->base + VCSR_TO);
- pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
+ pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
+ bridge->crcsr_bus);
}
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int retval, i;
u32 data;
struct list_head *pos = NULL;
+ struct vme_bridge *ca91cx42_bridge;
+ struct ca91cx42_driver *ca91cx42_device;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
#if 0
memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
+ ca91cx42_device = kmalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
+
+ if (ca91cx42_device == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_driver;
+ }
+
+ memset(ca91cx42_device, 0, sizeof(struct ca91cx42_driver));
+
+ ca91cx42_bridge->driver_priv = ca91cx42_device;
+
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
}
/* map registers in BAR 0 */
- ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
4096);
- if (!ca91cx42_bridge->base) {
+ if (!ca91cx42_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Check to see if the mapping worked out */
- data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
+ data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
if (data != PCI_VENDOR_ID_TUNDRA) {
dev_err(&pdev->dev, "PCI_ID check failed\n");
retval = -EIO;
}
/* Initialize wait queues & mutual exclusion flags */
- /* XXX These need to be moved to the vme_bridge structure */
- init_waitqueue_head(&dma_queue);
- init_waitqueue_head(&iack_queue);
- mutex_init(&(vme_int));
- mutex_init(&(vme_rmw));
+ init_waitqueue_head(&(ca91cx42_device->dma_queue));
+ init_waitqueue_head(&(ca91cx42_device->iack_queue));
+ mutex_init(&(ca91cx42_device->vme_int));
+ mutex_init(&(ca91cx42_device->vme_rmw));
ca91cx42_bridge->parent = &(pdev->dev);
strcpy(ca91cx42_bridge->name, driver_name);
#endif
ca91cx42_bridge->slot_get = ca91cx42_slot_get;
- data = ioread32(ca91cx42_bridge->base + MISC_CTL);
+ data = ioread32(ca91cx42_device->base + MISC_CTL);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
- dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());
+ dev_info(&pdev->dev, "Slot ID is %d\n",
+ ca91cx42_slot_get(ca91cx42_bridge));
- if (ca91cx42_crcsr_init(pdev)) {
+ if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev)) {
dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
retval = -EINVAL;
#if 0
goto err_reg;
}
+ pci_set_drvdata(pdev, ca91cx42_bridge);
+
return 0;
vme_unregister_bridge(ca91cx42_bridge);
err_reg:
- ca91cx42_crcsr_exit(pdev);
+ ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
#if 0
err_crcsr:
#endif
kfree(master_image);
}
- ca91cx42_irq_exit(pdev);
+ ca91cx42_irq_exit(ca91cx42_device, pdev);
err_irq:
err_test:
- iounmap(ca91cx42_bridge->base);
+ iounmap(ca91cx42_device->base);
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err_enable:
+ kfree(ca91cx42_device);
+err_driver:
kfree(ca91cx42_bridge);
err_struct:
return retval;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
struct vme_lm_resource *lm;
+ struct ca91cx42_driver *bridge;
+ struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
+
+ bridge = ca91cx42_bridge->driver_priv;
+
/* Turn off Ints */
- iowrite32(0, ca91cx42_bridge->base + LINT_EN);
+ iowrite32(0, bridge->base + LINT_EN);
/* Turn off the windows */
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
- iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
- iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);
+ iowrite32(0x00800000, bridge->base + LSI0_CTL);
+ iowrite32(0x00800000, bridge->base + LSI1_CTL);
+ iowrite32(0x00800000, bridge->base + LSI2_CTL);
+ iowrite32(0x00800000, bridge->base + LSI3_CTL);
+ iowrite32(0x00800000, bridge->base + LSI4_CTL);
+ iowrite32(0x00800000, bridge->base + LSI5_CTL);
+ iowrite32(0x00800000, bridge->base + LSI6_CTL);
+ iowrite32(0x00800000, bridge->base + LSI7_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI0_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI1_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI2_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI3_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI4_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI5_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI6_CTL);
+ iowrite32(0x00F00000, bridge->base + VSI7_CTL);
vme_unregister_bridge(ca91cx42_bridge);
#if 0
kfree(master_image);
}
- ca91cx42_irq_exit(pdev);
+ ca91cx42_irq_exit(bridge, pdev);
- iounmap(ca91cx42_bridge->base);
+ iounmap(bridge->base);
pci_release_regions(pdev);
}
/* Find the PCI address that maps to the desired VME address */
for (i = 0; i < 8; i++) {
- temp_ctl = ioread32(ca91cx42_bridge->base +
+ temp_ctl = ioread32(bridge->base +
CA91CX42_LSI_CTL[i]);
if ((temp_ctl & 0x80000000) == 0) {
continue;
if (vmeOut.addrSpace != vmeRmw->addrSpace) {
continue;
}
- tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
- tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
- tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
+ tempBS = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
+ tempBD = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
+ tempTO = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
vmeBS = tempBS + tempTO;
vmeBD = tempBD + tempTO;
if ((vmeRmw->targetAddr >= vmeBS) &&
return -EINVAL;
}
/* Setup the RMW registers. */
- iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
- iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
- iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
+ iowrite32(0, bridge->base + SCYC_CTL);
+ iowrite32(SWIZZLE(vmeRmw->enableMask), bridge->base + SCYC_EN);
+ iowrite32(SWIZZLE(vmeRmw->compareData), bridge->base +
SCYC_CMP);
- iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
- iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
- iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);
+ iowrite32(SWIZZLE(vmeRmw->swapData), bridge->base + SCYC_SWP);
+ iowrite32((int)rmw_pci_data_ptr, bridge->base + SCYC_ADDR);
+ iowrite32(1, bridge->base + SCYC_CTL);
/* Run the RMW cycle until either success or max attempts. */
vmeRmw->numAttempts = 1;
if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
(vmeRmw->swapData & vmeRmw->enableMask)) {
- iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
+ iowrite32(0, bridge->base + SCYC_CTL);
break;
}
/* Setup registers as needed for direct or chained. */
if (dgcsreg & 0x8000000) {
- iowrite32(0, ca91cx42_bridge->base + DTBC);
- iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
+ iowrite32(0, bridge->base + DTBC);
+ iowrite32((unsigned int)vmeLL, bridge->base + DCPP);
} else {
#if 0
printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
ioread32(&vmeLL->dctl));
#endif
/* Write registers */
- iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
- iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
- iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
- iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
- iowrite32(0, ca91cx42_bridge->base + DCPP);
+ iowrite32(ioread32(&vmeLL->dva), bridge->base + DVA);
+ iowrite32(ioread32(&vmeLL->dlv), bridge->base + DLA);
+ iowrite32(ioread32(&vmeLL->dtbc), bridge->base + DTBC);
+ iowrite32(ioread32(&vmeLL->dctl), bridge->base + DCTL);
+ iowrite32(0, bridge->base + DCPP);
}
/* Start the operation */
- iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
+ iowrite32(dgcsreg, bridge->base + DGCS);
val = get_tbl();
- iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
+ iowrite32(dgcsreg | 0x8000000F, bridge->base + DGCS);
return val;
}
}
wait_event_interruptible(dma_queue,
- ioread32(ca91cx42_bridge->base + DGCS) & 0x800);
+ ioread32(bridge->base + DGCS) & 0x800);
- val = ioread32(ca91cx42_bridge->base + DGCS);
- iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);
+ val = ioread32(bridge->base + DGCS);
+ iowrite32(val | 0xF00, bridge->base + DGCS);
vmeDma->vmeDmaStatus = 0;
vmeDma->vmeDmaStatus = val & 0x700;
printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
" DGCS=%08X\n", val);
- val = ioread32(ca91cx42_bridge->base + DCPP);
+ val = ioread32(bridge->base + DCPP);
printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
- val = ioread32(ca91cx42_bridge->base + DCTL);
+ val = ioread32(bridge->base + DCTL);
printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
- val = ioread32(ca91cx42_bridge->base + DTBC);
+ val = ioread32(bridge->base + DTBC);
printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
- val = ioread32(ca91cx42_bridge->base + DLA);
+ val = ioread32(bridge->base + DLA);
printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
- val = ioread32(ca91cx42_bridge->base + DVA);
+ val = ioread32(bridge->base + DVA);
printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
}
}
/* Disable while we are mucking around */
- iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
+ iowrite32(0x00000000, bridge->base + LM_CTL);
- iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);
+ iowrite32(vmeLm->addr, bridge->base + LM_BS);
/* Setup CTL register. */
if (vmeLm->userAccessType & VME_SUPER)
/* Write ctl reg and enable */
- iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
- temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);
+ iowrite32(0x80000000 | temp_ctl, bridge->base + LM_CTL);
+ temp_ctl = ioread32(bridge->base + LM_CTL);
return 0;
}
vmeLm->lmWait = 10;
interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
}
- iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
+ iowrite32(0x00000000, bridge->base + LM_CTL);
return 0;
}
int temp_ctl = 0;
int vbto = 0;
- temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
+ temp_ctl = ioread32(bridge->base + MISC_CTL);
temp_ctl &= 0x00FFFFFF;
if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
if (vmeArb->arbiterTimeoutFlag)
temp_ctl |= 2 << 24;
- iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);
+ iowrite32(temp_ctl, bridge->base + MISC_CTL);
return 0;
}
int temp_ctl = 0;
int vbto = 0;
- temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
+ temp_ctl = ioread32(bridge->base + MISC_CTL);
vbto = (temp_ctl >> 28) & 0xF;
if (vbto != 0)
{
int temp_ctl = 0;
- temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
+ temp_ctl = ioread32(bridge->base + MAST_CTL);
temp_ctl &= 0xFF0FFFFF;
if (vmeReq->releaseMode == 1)
temp_ctl |= (vmeReq->requestLevel << 22);
- iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);
+ iowrite32(temp_ctl, bridge->base + MAST_CTL);
return 0;
}
{
int temp_ctl = 0;
- temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
+ temp_ctl = ioread32(bridge->base + MAST_CTL);
if (temp_ctl & (1 << 20))
vmeReq->releaseMode = 1;
int tsi148_dma_list_exec(struct vme_dma_list *);
int tsi148_dma_list_empty(struct vme_dma_list *);
int tsi148_generate_irq(int, int);
-int tsi148_slot_get(void);
-/* Modue parameter */
+/* Module parameter */
static int err_chk;
static int geoid;
-/* XXX These should all be in a per device structure */
-static struct vme_bridge *tsi148_bridge;
-static wait_queue_head_t dma_queue[2];
-static wait_queue_head_t iack_queue;
-static void (*lm_callback[4])(int); /* Called in interrupt handler */
-static void *crcsr_kernel;
-static dma_addr_t crcsr_bus;
-static struct vme_master_resource *flush_image;
-static struct mutex vme_rmw; /* Only one RMW cycle at a time */
-static struct mutex vme_int; /*
- * Only one VME interrupt can be
- * generated at a time, provide locking
- */
-
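As in the ca91cx42 driver, the globals removed here move into a per-device structure hung off vme_bridge->driver_priv. Its declaration (presumably in vme_tsi148.h) is not shown in this patch; the sketch below merely collects the fields the tsi148 hunks dereference:

/* Sketch only: reconstructed from the accesses below, not the header itself. */
struct tsi148_driver {
	void __iomem *base;		/* Most registers mapped here */
	void *crcsr_kernel;
	dma_addr_t crcsr_bus;
	struct vme_master_resource *flush_image;
	wait_queue_head_t dma_queue[2];
	wait_queue_head_t iack_queue;
	void (*lm_callback[4])(int);	/* Called in interrupt handler */
	struct mutex vme_rmw;		/* Only one RMW cycle at a time */
	struct mutex vme_int;		/* Only one VME interrupt at a time */
};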
static char driver_name[] = "vme_tsi148";
static const struct pci_device_id tsi148_ids[] = {
/*
* Wakes up DMA queue.
*/
-static u32 tsi148_DMA_irqhandler(int channel_mask)
+static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
+ int channel_mask)
{
u32 serviced = 0;
if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
- wake_up(&dma_queue[0]);
+ wake_up(&(bridge->dma_queue[0]));
serviced |= TSI148_LCSR_INTC_DMA0C;
}
if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
- wake_up(&dma_queue[1]);
+ wake_up(&(bridge->dma_queue[1]));
serviced |= TSI148_LCSR_INTC_DMA1C;
}
/*
* Wake up location monitor queue
*/
-static u32 tsi148_LM_irqhandler(u32 stat)
+static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
int i;
u32 serviced = 0;
for (i = 0; i < 4; i++) {
if(stat & TSI148_LCSR_INTS_LMS[i]) {
/* We only enable interrupts if the callback is set */
- lm_callback[i](i);
+ bridge->lm_callback[i](i);
serviced |= TSI148_LCSR_INTC_LMC[i];
}
}
*
* XXX This functionality is not exposed up through the API.
*/
-static u32 tsi148_MB_irqhandler(u32 stat)
+static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
int i;
u32 val;
for (i = 0; i < 4; i++) {
if(stat & TSI148_LCSR_INTS_MBS[i]) {
- val = ioread32be(tsi148_bridge->base +
- TSI148_GCSR_MBOX[i]);
+ val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
printk("VME Mailbox %d received: 0x%x\n", i, val);
serviced |= TSI148_LCSR_INTC_MBC[i];
}
/*
* Display error & status message when PERR (PCI) exception interrupt occurs.
*/
-static u32 tsi148_PERR_irqhandler(void)
+static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
{
printk(KERN_ERR
"PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
- ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAU),
- ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAL),
- ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAT)
+ ioread32be(bridge->base + TSI148_LCSR_EDPAU),
+ ioread32be(bridge->base + TSI148_LCSR_EDPAL),
+ ioread32be(bridge->base + TSI148_LCSR_EDPAT)
);
printk(KERN_ERR
"PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
- ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXA),
- ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXS)
+ ioread32be(bridge->base + TSI148_LCSR_EDPXA),
+ ioread32be(bridge->base + TSI148_LCSR_EDPXS)
);
- iowrite32be(TSI148_LCSR_EDPAT_EDPCL,
- tsi148_bridge->base + TSI148_LCSR_EDPAT);
+ iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
return TSI148_LCSR_INTC_PERRC;
}
/*
* Save address and status when VME error interrupt occurs.
*/
-static u32 tsi148_VERR_irqhandler(void)
+static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
unsigned int error_addr_high, error_addr_low;
unsigned long long error_addr;
u32 error_attrib;
struct vme_bus_error *error;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
- error_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAU);
- error_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAL);
- error_attrib = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAT);
+ error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
+ error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
+ error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
reg_join(error_addr_high, error_addr_low, &error_addr);
}
/* Clear Status */
- iowrite32be(TSI148_LCSR_VEAT_VESCL,
- tsi148_bridge->base + TSI148_LCSR_VEAT);
+ iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
return TSI148_LCSR_INTC_VERRC;
}
/*
* Wake up IACK queue.
*/
-static u32 tsi148_IACK_irqhandler(void)
+static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
- wake_up(&iack_queue);
+ wake_up(&(bridge->iack_queue));
return TSI148_LCSR_INTC_IACKC;
}
/*
* Calling VME bus interrupt callback if provided.
*/
-static u32 tsi148_VIRQ_irqhandler(u32 stat)
+static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
+ u32 stat)
{
int vec, i, serviced = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
for (i = 7; i > 0; i--) {
if (stat & (1 << i)) {
* 8-bit IACK cycles on the bus, read from offset
* 3.
*/
- vec = ioread8(tsi148_bridge->base +
- TSI148_LCSR_VIACK[i] + 3);
+ vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
vme_irq_handler(tsi148_bridge, i, vec);
* Top level interrupt handler. Clears appropriate interrupt status bits and
* then calls appropriate sub handler(s).
*/
-static irqreturn_t tsi148_irqhandler(int irq, void *dev_id)
+static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
u32 stat, enable, serviced = 0;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = ptr;
+
+ bridge = tsi148_bridge->driver_priv;
/* Determine which interrupts are unmasked and set */
- enable = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
- stat = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTS);
+ enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
+ stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
/* Only look at unmasked interrupts */
stat &= enable;
/* Call subhandlers as appropriate */
/* DMA irqs */
if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
- serviced |= tsi148_DMA_irqhandler(stat);
+ serviced |= tsi148_DMA_irqhandler(bridge, stat);
/* Location monitor irqs */
if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
- serviced |= tsi148_LM_irqhandler(stat);
+ serviced |= tsi148_LM_irqhandler(bridge, stat);
/* Mail box irqs */
if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
- serviced |= tsi148_MB_irqhandler(stat);
+ serviced |= tsi148_MB_irqhandler(bridge, stat);
/* PCI bus error */
if (stat & TSI148_LCSR_INTS_PERRS)
- serviced |= tsi148_PERR_irqhandler();
+ serviced |= tsi148_PERR_irqhandler(bridge);
/* VME bus error */
if (stat & TSI148_LCSR_INTS_VERRS)
- serviced |= tsi148_VERR_irqhandler();
+ serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
/* IACK irq */
if (stat & TSI148_LCSR_INTS_IACKS)
- serviced |= tsi148_IACK_irqhandler();
+ serviced |= tsi148_IACK_irqhandler(bridge);
/* VME bus irqs */
if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
TSI148_LCSR_INTS_IRQ1S))
- serviced |= tsi148_VIRQ_irqhandler(stat);
+ serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
/* Clear serviced interrupts */
- iowrite32be(serviced, tsi148_bridge->base + TSI148_LCSR_INTC);
+ iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
return IRQ_HANDLED;
}
-static int tsi148_irq_init(struct vme_bridge *bridge)
+static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
int result;
unsigned int tmp;
struct pci_dev *pdev;
+ struct tsi148_driver *bridge;
+
+ pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
- /* Need pdev */
- pdev = container_of(bridge->parent, struct pci_dev, dev);
+ bridge = tsi148_bridge->driver_priv;
/* Initialise list for VME bus errors */
- INIT_LIST_HEAD(&(bridge->vme_errors));
+ INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
- mutex_init(&(bridge->irq_mtx));
+ mutex_init(&(tsi148_bridge->irq_mtx));
result = request_irq(pdev->irq,
tsi148_irqhandler,
IRQF_SHARED,
- driver_name, pdev);
+ driver_name, tsi148_bridge);
if (result) {
dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
pdev->irq);
TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
TSI148_LCSR_INTEO_IACKEO;
- /* XXX This leaves the following interrupts masked.
+ /* This leaves the following interrupts masked.
* TSI148_LCSR_INTEO_VIEEO
* TSI148_LCSR_INTEO_SYSFLEO
* TSI148_LCSR_INTEO_ACFLEO
return 0;
}
-static void tsi148_irq_exit(struct pci_dev *pdev)
+static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
{
/* Turn off interrupts */
- iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
- iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEN);
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
/* Clear all interrupts */
- iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
+ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
/* Detach interrupt handler */
free_irq(pdev->irq, pdev);
/*
* Check to see if an IACK has been received; return true (1) or false (0).
*/
-int tsi148_iack_received(void)
+int tsi148_iack_received(struct tsi148_driver *bridge)
{
u32 tmp;
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
if (tmp & TSI148_LCSR_VICR_IRQS)
return 0;
/*
* Configure VME interrupt
*/
-void tsi148_irq_set(int level, int state, int sync)
+void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
+ int state, int sync)
{
struct pci_dev *pdev;
u32 tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
/* We need to do the ordering differently for enabling and disabling */
if (state == 0) {
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
if (sync != 0) {
pdev = container_of(tsi148_bridge->parent,
synchronize_irq(pdev->irq);
}
} else {
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
}
}
* Generate a VME bus interrupt at the requested level & vector. Wait for
* interrupt to be acked.
*/
-int tsi148_irq_generate(int level, int statid)
+int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
{
u32 tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
- mutex_lock(&(vme_int));
+ mutex_lock(&(bridge->vme_int));
/* Read VICR register */
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
/* Set Status/ID */
tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
(statid & TSI148_LCSR_VICR_STID_M);
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
/* Assert VMEbus IRQ */
tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
/* XXX Consider implementing a timeout? */
- wait_event_interruptible(iack_queue, tsi148_iack_received());
+ wait_event_interruptible(bridge->iack_queue,
+ tsi148_iack_received(bridge));
- mutex_unlock(&(vme_int));
+ mutex_unlock(&(bridge->vme_int));
return 0;
}
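The wait above pairs with the wake_up() in tsi148_IACK_irqhandler() earlier in the patch; both now operate on the iack_queue embedded in this bridge's own tsi148_driver, so two bridges can generate interrupts independently. The handshake, stripped to its two sides (sketch only):

/* Generator side: after asserting the IRQ, sleep on the per-bridge queue
 * until tsi148_iack_received() reports the interrupt has been acked. */
wait_event_interruptible(bridge->iack_queue, tsi148_iack_received(bridge));

/* Interrupt side, on the IACK status bit: wake any waiting generator. */
wake_up(&bridge->iack_queue);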
/*
* Find the first error in this address range
*/
-static struct vme_bus_error *tsi148_find_error(vme_address_t aspace,
- unsigned long long address, size_t count)
+static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
+ vme_address_t aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos;
struct vme_bus_error *vme_err, *valid = NULL;
/*
* Clear errors in the provided address range.
*/
-static void tsi148_clear_errors(vme_address_t aspace,
- unsigned long long address, size_t count)
+static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
+ vme_address_t aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos, *temp;
struct vme_bus_error *vme_err;
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
+ struct tsi148_driver *bridge;
+
+ bridge = image->parent->driver_priv;
#if 0
printk("Set slave image %d to:\n", image->number);
#endif
/* Disable while we are mucking around */
- temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
temp_ctl &= ~TSI148_LCSR_ITAT_EN;
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
/* Setup mapping */
- iowrite32be(vme_base_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAU);
- iowrite32be(vme_base_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAL);
- iowrite32be(vme_bound_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAU);
- iowrite32be(vme_bound_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAL);
- iowrite32be(pci_offset_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFU);
- iowrite32be(pci_offset_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFL);
/* XXX Prefetch stuff currently unsupported */
temp_ctl |= TSI148_LCSR_ITAT_DATA;
/* Write ctl reg without enable */
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
if (enabled)
temp_ctl |= TSI148_LCSR_ITAT_EN;
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
return 0;
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
+ struct tsi148_driver *bridge;
+ bridge = image->parent->driver_priv;
i = image->number;
/* Read registers */
- ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
- vme_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAU);
- vme_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAL);
- vme_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAU);
- vme_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAL);
- pci_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFU);
- pci_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFL);
/* Convert 64-bit variables to 2x 32-bit variables */
unsigned long long existing_size;
int retval = 0;
struct pci_dev *pdev;
+ struct vme_bridge *tsi148_bridge;
+
+ tsi148_bridge = image->parent;
/* Find pci_dev container of dev */
if (tsi148_bridge->parent == NULL) {
unsigned int pci_bound_low, pci_bound_high;
unsigned int vme_offset_low, vme_offset_high;
unsigned long long pci_bound, vme_offset, pci_base;
+ struct tsi148_driver *bridge;
+
+ bridge = image->parent->driver_priv;
/* Verify input data */
if (vme_base & 0xFFFF) {
i = image->number;
/* Disable while we are mucking around */
- temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
temp_ctl &= ~TSI148_LCSR_OTAT_EN;
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
/* XXX Prefetch stuff currently unsupported */
temp_ctl |= TSI148_LCSR_OTAT_PGM;
/* Setup mapping */
- iowrite32be(pci_base_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
- iowrite32be(pci_base_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAL);
- iowrite32be(pci_bound_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAU);
- iowrite32be(pci_bound_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAL);
- iowrite32be(vme_offset_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFU);
- iowrite32be(vme_offset_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFL);
/* XXX We need to deal with OTBS */
#if 0
- iowrite32be(vmeOut->bcastSelect2esst, tsi148_bridge->base +
+ iowrite32be(vmeOut->bcastSelect2esst, bridge->base +
TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS);
#endif
/* Write ctl reg without enable */
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
if (enabled)
temp_ctl |= TSI148_LCSR_OTAT_EN;
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
spin_unlock(&(image->lock));
unsigned int vme_offset_low, vme_offset_high;
unsigned long long pci_base, pci_bound, vme_offset;
+ struct tsi148_driver *bridge;
+
+ bridge = image->parent->driver_priv;
i = image->number;
- ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
- pci_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
- pci_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAL);
- pci_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAU);
- pci_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAL);
- vme_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFU);
- vme_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFL);
/* Convert 64-bit variables to 2x 32-bit variables */
vme_cycle_t cycle;
vme_width_t dwidth;
struct vme_bus_error *vme_err = NULL;
+ struct vme_bridge *tsi148_bridge;
+
+ tsi148_bridge = image->parent;
spin_lock(&(image->lock));
__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
&dwidth);
- vme_err = tsi148_find_error(aspace, vme_base + offset, count);
+ vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
+ count);
if(vme_err != NULL) {
dev_err(image->parent->parent, "First VME read error detected "
"at address 0x%llx\n", vme_err->address);
retval = vme_err->address - (vme_base + offset);
/* Clear down saved errors in this address range */
- tsi148_clear_errors(aspace, vme_base + offset, count);
+ tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
+ count);
}
skip_chk:
}
-/* XXX We need to change vme_master_resource->mtx to a spinlock so that read
- * and write functions can be used in an interrupt context
- */
ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
size_t count, loff_t offset)
{
vme_width_t dwidth;
struct vme_bus_error *vme_err = NULL;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *bridge;
+
+ tsi148_bridge = image->parent;
+
+ bridge = tsi148_bridge->driver_priv;
spin_lock(&(image->lock));
__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
&dwidth);
- ioread16(flush_image->kern_base + 0x7F000);
+ ioread16(bridge->flush_image->kern_base + 0x7F000);
- vme_err = tsi148_find_error(aspace, vme_base + offset, count);
+ vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
+ count);
if(vme_err != NULL) {
printk("First VME write error detected at address 0x%llx\n",
vme_err->address);
retval = vme_err->address - (vme_base + offset);
/* Clear down saved errors in this address range */
- tsi148_clear_errors(aspace, vme_base + offset, count);
+ tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
+ count);
}
skip_chk:
unsigned int pci_addr_high, pci_addr_low;
u32 tmp, result;
int i;
+ struct tsi148_driver *bridge;
+ bridge = image->parent->driver_priv;
/* Find the PCI address that maps to the desired VME address */
i = image->number;
/* Locking as we can only do one of these at a time */
- mutex_lock(&(vme_rmw));
+ mutex_lock(&(bridge->vme_rmw));
/* Lock image */
spin_lock(&(image->lock));
- pci_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
- pci_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAL);
reg_join(pci_addr_high, pci_addr_low, &pci_addr);
reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
/* Configure registers */
- iowrite32be(mask, tsi148_bridge->base + TSI148_LCSR_RMWEN);
- iowrite32be(compare, tsi148_bridge->base + TSI148_LCSR_RMWC);
- iowrite32be(swap, tsi148_bridge->base + TSI148_LCSR_RMWS);
- iowrite32be(pci_addr_high, tsi148_bridge->base + TSI148_LCSR_RMWAU);
- iowrite32be(pci_addr_low, tsi148_bridge->base + TSI148_LCSR_RMWAL);
+ iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
+ iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
+ iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
+ iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
+ iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
/* Enable RMW */
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
tmp |= TSI148_LCSR_VMCTRL_RMWEN;
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
/* Kick process off with a read to the required address. */
result = ioread32be(image->kern_base + offset);
/* Disable RMW */
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
spin_unlock(&(image->lock));
- mutex_unlock(&(vme_rmw));
+ mutex_unlock(&(bridge->vme_rmw));
return result;
}
/*
* Check to see if the provided DMA channel is busy.
*/
-static int tsi148_dma_busy(int channel)
+static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
{
u32 tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
+ tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
TSI148_LCSR_OFFSET_DSTA);
if (tmp & TSI148_LCSR_DSTA_BSY)
#if 0
int x;
#endif
+ struct tsi148_driver *bridge;
ctrlr = list->parent;
+ bridge = ctrlr->parent->driver_priv;
+
mutex_lock(&(ctrlr->mtx));
channel = ctrlr->number;
reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
- iowrite32be(bus_addr_high, tsi148_bridge->base +
+ iowrite32be(bus_addr_high, bridge->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
- iowrite32be(bus_addr_low, tsi148_bridge->base +
+ iowrite32be(bus_addr_low, bridge->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
/* Start the operation */
- iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, tsi148_bridge->base +
+ iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
- wait_event_interruptible(dma_queue[channel], tsi148_dma_busy(channel));
+ wait_event_interruptible(bridge->dma_queue[channel],
+ tsi148_dma_busy(ctrlr->parent, channel));
/*
* Read status register, this register is valid until we kick off a
* new transfer.
*/
- val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
+ val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
TSI148_LCSR_OFFSET_DSTA);
if (val & TSI148_LCSR_DSTA_VBE) {
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
+ struct tsi148_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
mutex_lock(&(lm->mtx));
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if(lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i] != NULL) {
mutex_unlock(&(lm->mtx));
printk("Location monitor callback attached, can't "
"reset\n");
reg_split(lm_base, &lm_base_high, &lm_base_low);
- iowrite32be(lm_base_high, tsi148_bridge->base + TSI148_LCSR_LMBAU);
- iowrite32be(lm_base_low, tsi148_bridge->base + TSI148_LCSR_LMBAL);
- iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
+ iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
+ iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
+ iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
mutex_unlock(&(lm->mtx));
vme_address_t *aspace, vme_cycle_t *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
mutex_lock(&(lm->mtx));
- lm_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAU);
- lm_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAL);
- lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
+ lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
+ lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
+ lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
reg_join(lm_base_high, lm_base_low, lm_base);
void (*callback)(int))
{
u32 lm_ctl, tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
mutex_lock(&(lm->mtx));
/* Ensure that the location monitor is configured - need PGM or DATA */
- lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
+ lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
mutex_unlock(&(lm->mtx));
printk("Location monitor not properly configured\n");
}
/* Check that a callback isn't already attached */
- if (lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor] != NULL) {
mutex_unlock(&(lm->mtx));
printk("Existing callback attached\n");
return -EBUSY;
}
/* Attach callback */
- lm_callback[monitor] = callback;
+ bridge->lm_callback[monitor] = callback;
/* Enable Location Monitor interrupt */
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
/* Ensure that the global Location Monitor Enable bit is set */
if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
lm_ctl |= TSI148_LCSR_LMAT_EN;
- iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
+ iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
}
mutex_unlock(&(lm->mtx));
int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
u32 lm_en, tmp;
+ struct tsi148_driver *bridge;
+
+ bridge = lm->parent->driver_priv;
mutex_lock(&(lm->mtx));
/* Disable Location Monitor and ensure previous interrupts are clear */
- lm_en = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
+ lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
- iowrite32be(lm_en, tsi148_bridge->base + TSI148_LCSR_INTEN);
+ iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
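+ /* Clear any pending interrupt for this location monitor */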
iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
- tsi148_bridge->base + TSI148_LCSR_INTC);
+ bridge->base + TSI148_LCSR_INTC);
/* Detach callback */
- lm_callback[monitor] = NULL;
+ bridge->lm_callback[monitor] = NULL;
/* If all location monitors disabled, disable global Location Monitor */
if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
+ tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
tmp &= ~TSI148_LCSR_LMAT_EN;
- iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_LMAT);
+ iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
}
mutex_unlock(&(lm->mtx));
/*
* Determine Geographical Addressing
*/
-int tsi148_slot_get(void)
+int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
u32 slot = 0;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
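+ /* A non-zero geoid module parameter overrides the geographic address read from VSTAT */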
if (!geoid) {
- slot = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
+ slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
slot = slot & TSI148_LCSR_VSTAT_GA_M;
} else
slot = geoid;
* boards registers; this means there is a fixed-length 508kB window which must
* be mapped onto PCI memory.
*/
-static int tsi148_crcsr_init(struct pci_dev *pdev)
+static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
+ struct pci_dev *pdev)
{
u32 cbar, crat, vstat;
u32 crcsr_bus_high, crcsr_bus_low;
int retval;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
/* Allocate mem for CR/CSR image */
- crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &crcsr_bus);
- if (crcsr_kernel == NULL) {
+ bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
+ &(bridge->crcsr_bus));
+ if (bridge->crcsr_kernel == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
}
- memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
+ memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
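+ /* Point the chip's CR/CSR base (CROU/CROL) at the bus address of this buffer */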
- reg_split(crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
+ reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
- iowrite32be(crcsr_bus_high, tsi148_bridge->base + TSI148_LCSR_CROU);
- iowrite32be(crcsr_bus_low, tsi148_bridge->base + TSI148_LCSR_CROL);
+ iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
+ iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
/* Ensure that the CR/CSR is configured at the correct offset */
- cbar = ioread32be(tsi148_bridge->base + TSI148_CBAR);
+ cbar = ioread32be(bridge->base + TSI148_CBAR);
cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
- vstat = tsi148_slot_get();
+ vstat = tsi148_slot_get(tsi148_bridge);
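+ /* Each slot owns a 512kB chunk of CR/CSR space, so CBAR should match the slot number */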
if (cbar != vstat) {
cbar = vstat;
dev_info(&pdev->dev, "Setting CR/CSR offset\n");
- iowrite32be(cbar<<3, tsi148_bridge->base + TSI148_CBAR);
+ iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
}
dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
- crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
+ crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
if (crat & TSI148_LCSR_CRAT_EN) {
dev_info(&pdev->dev, "Enabling CR/CSR space\n");
iowrite32be(crat | TSI148_LCSR_CRAT_EN,
- tsi148_bridge->base + TSI148_LCSR_CRAT);
+ bridge->base + TSI148_LCSR_CRAT);
} else
dev_info(&pdev->dev, "CR/CSR already enabled\n");
* through VME writes.
*/
if(err_chk) {
- retval = tsi148_master_set(flush_image, 1, (vstat * 0x80000),
- 0x80000, VME_CRCSR, VME_SCT, VME_D16);
+ retval = tsi148_master_set(bridge->flush_image, 1,
+ (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
+ VME_D16);
if (retval)
dev_err(&pdev->dev, "Configuring flush image failed\n");
}
}
-static void tsi148_crcsr_exit(struct pci_dev *pdev)
+static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
+ struct pci_dev *pdev)
{
u32 crat;
+ struct tsi148_driver *bridge;
+
+ bridge = tsi148_bridge->driver_priv;
/* Turn off CR/CSR space */
- crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
+ crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
- tsi148_bridge->base + TSI148_LCSR_CRAT);
+ bridge->base + TSI148_LCSR_CRAT);
/* Free image */
- iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROU);
- iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROL);
+ iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
+ iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
- pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
+ pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
+ bridge->crcsr_bus);
}
static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int retval, i, master_num;
u32 data;
struct list_head *pos = NULL;
+ struct vme_bridge *tsi148_bridge;
+ struct tsi148_driver *tsi148_device;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
+ tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
+ if (tsi148_device == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate memory for device "
+ "structure\n");
+ retval = -ENOMEM;
+ goto err_driver;
+ }
+
+ memset(tsi148_device, 0, sizeof(struct tsi148_driver));
+
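+ /* State that used to live in file-scope globals now hangs off the bridge */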
+ tsi148_bridge->driver_priv = tsi148_device;
+
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
}
/* map registers in BAR 0 */
- tsi148_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096);
- if (!tsi148_bridge->base) {
+ tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
+ 4096);
+ if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Check to see if the mapping worked out */
- data = ioread32(tsi148_bridge->base + TSI148_PCFS_ID) & 0x0000FFFF;
+ data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
if (data != PCI_VENDOR_ID_TUNDRA) {
dev_err(&pdev->dev, "CRG region check failed\n");
retval = -EIO;
}
/* Initialize wait queues & mutual exclusion flags */
- /* XXX These need to be moved to the vme_bridge structure */
- init_waitqueue_head(&dma_queue[0]);
- init_waitqueue_head(&dma_queue[1]);
- init_waitqueue_head(&iack_queue);
- mutex_init(&(vme_int));
- mutex_init(&(vme_rmw));
+ init_waitqueue_head(&(tsi148_device->dma_queue[0]));
+ init_waitqueue_head(&(tsi148_device->dma_queue[1]));
+ init_waitqueue_head(&(tsi148_device->iack_queue));
+ mutex_init(&(tsi148_device->vme_int));
+ mutex_init(&(tsi148_device->vme_rmw));
tsi148_bridge->parent = &(pdev->dev);
strcpy(tsi148_bridge->name, driver_name);
master_num = TSI148_MAX_MASTER;
if(err_chk){
master_num--;
- /* XXX */
- flush_image = (struct vme_master_resource *)kmalloc(
- sizeof(struct vme_master_resource), GFP_KERNEL);
- if (flush_image == NULL) {
+
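+ /* When error checking is enabled, reserve the last master window as a flush image */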
+ tsi148_device->flush_image =
+ kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
+ if (tsi148_device->flush_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"flush resource structure\n");
retval = -ENOMEM;
goto err_master;
}
- flush_image->parent = tsi148_bridge;
- spin_lock_init(&(flush_image->lock));
- flush_image->locked = 1;
- flush_image->number = master_num;
- flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
- VME_A64;
- flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
- VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
- VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
- VME_PROG | VME_DATA;
- flush_image->width_attr = VME_D16 | VME_D32;
- memset(&(flush_image->pci_resource), 0,
+ tsi148_device->flush_image->parent = tsi148_bridge;
+ spin_lock_init(&(tsi148_device->flush_image->lock));
+ tsi148_device->flush_image->locked = 1;
+ tsi148_device->flush_image->number = master_num;
+ tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
+ VME_A32 | VME_A64;
+ tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
+ VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
+ VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
+ VME_USER | VME_PROG | VME_DATA;
+ tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
+ memset(&(tsi148_device->flush_image->pci_resource), 0,
sizeof(struct resource));
- flush_image->kern_base = NULL;
+ tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
tsi148_bridge->lm_detach = tsi148_lm_detach;
tsi148_bridge->slot_get = tsi148_slot_get;
- data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
+ data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
(data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
- if (!geoid) {
+ if (!geoid)
dev_info(&pdev->dev, "VME geographical address is %d\n",
data & TSI148_LCSR_VSTAT_GA_M);
- } else {
+ else
dev_info(&pdev->dev, "VME geographical address is set to %d\n",
geoid);
- }
+
dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
err_chk ? "enabled" : "disabled");
- if(tsi148_crcsr_init(pdev)) {
+ if (tsi148_crcsr_init(tsi148_bridge, pdev)) {
dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
goto err_crcsr;
}
-
- /* Need to save tsi148_bridge pointer locally in link list for use in
- * tsi148_remove()
- */
retval = vme_register_bridge(tsi148_bridge);
if (retval != 0) {
dev_err(&pdev->dev, "Chip Registration failed.\n");
goto err_reg;
}
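+ /* Stash the bridge in drvdata so tsi148_remove() can retrieve it */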
+ pci_set_drvdata(pdev, tsi148_bridge);
+
/* Clear VME bus "board fail", and "power-up reset" lines */
- data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
+ data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
data &= ~TSI148_LCSR_VSTAT_BRDFL;
data |= TSI148_LCSR_VSTAT_CPURST;
- iowrite32be(data, tsi148_bridge->base + TSI148_LCSR_VSTAT);
+ iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
return 0;
vme_unregister_bridge(tsi148_bridge);
err_reg:
- tsi148_crcsr_exit(pdev);
+ tsi148_crcsr_exit(tsi148_bridge, pdev);
err_crcsr:
err_lm:
/* resources are stored in link list */
kfree(master_image);
}
- tsi148_irq_exit(pdev);
+ tsi148_irq_exit(tsi148_device, pdev);
err_irq:
err_test:
- iounmap(tsi148_bridge->base);
+ iounmap(tsi148_device->base);
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err_enable:
+ kfree(tsi148_device);
+err_driver:
kfree(tsi148_bridge);
err_struct:
return retval;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
int i;
+ struct tsi148_driver *bridge;
+ struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
+
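+ /* Recover the per-device state saved by tsi148_probe() */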
+ bridge = tsi148_bridge->driver_priv;
- dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
- /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
+ dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
/*
* Shutdown all inbound and outbound windows.
*/
for (i = 0; i < 8; i++) {
- iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_IT[i] +
+ iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
- iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_OT[i] +
+ iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
}
/*
* Shutdown Location monitor.
*/
- iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_LMAT);
+ iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
/*
* Shutdown CRG map.
*/
- iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CSRAT);
+ iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
/*
* Clear error status.
*/
- iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_EDPAT);
- iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_VEAT);
- iowrite32be(0x07000700, tsi148_bridge->base + TSI148_LCSR_PSTAT);
+ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
+ iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
+ iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
/*
* Remove VIRQ interrupt (if any)
*/
- if (ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR) & 0x800) {
- iowrite32be(0x8000, tsi148_bridge->base + TSI148_LCSR_VICR);
- }
+ if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
+ iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
/*
* Map all Interrupts to PCI INTA
*/
- iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM1);
- iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM2);
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
+ iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
- tsi148_irq_exit(pdev);
+ tsi148_irq_exit(bridge, pdev);
vme_unregister_bridge(tsi148_bridge);
- tsi148_crcsr_exit(pdev);
+ tsi148_crcsr_exit(tsi148_bridge, pdev);
/* resources are stored in link list */
list_for_each(pos, &(tsi148_bridge->dma_resources)) {
kfree(master_image);
}
- tsi148_irq_exit(pdev);
+ tsi148_irq_exit(bridge, pdev);
- iounmap(tsi148_bridge->base);
+ iounmap(bridge->base);
pci_release_regions(pdev);
pci_disable_device(pdev);
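+ /* Free the per-device state allocated in tsi148_probe() */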
+ kfree(tsi148_bridge->driver_priv);
+
kfree(tsi148_bridge);
}
}
/* Program registers for DMA transfer */
- iowrite32be(dmaLL->dsau, tsi148_bridge->base +
+ iowrite32be(dmaLL->dsau, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
- iowrite32be(dmaLL->dsal, tsi148_bridge->base +
+ iowrite32be(dmaLL->dsal, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
- iowrite32be(dmaLL->ddau, tsi148_bridge->base +
+ iowrite32be(dmaLL->ddau, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
- iowrite32be(dmaLL->ddal, tsi148_bridge->base +
+ iowrite32be(dmaLL->ddal, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
- iowrite32be(dmaLL->dsat, tsi148_bridge->base +
+ iowrite32be(dmaLL->dsat, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
- iowrite32be(dmaLL->ddat, tsi148_bridge->base +
+ iowrite32be(dmaLL->ddat, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
- iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
+ iowrite32be(dmaLL->dcnt, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
- iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
+ iowrite32be(dmaLL->ddbs, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);
/* Start the operation */
- iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
+ iowrite32be(dctlreg | 0x2000000, tsi148_bridge->driver_priv->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
- tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
- TSI148_LCSR_OFFSET_DSTA);
+ tmp = ioread32be(tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSTA);
wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);
/*
* handler rather than here so that we can be sure we haven't kicked off
* another DMA transfer.
*/
- val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
- TSI148_LCSR_OFFSET_DSTA);
+ val = ioread32be(tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSTA);
vmeDma->vmeDmaStatus = 0;
if (val & 0x10000000) {
int temp_ctl = 0;
int gto = 0;
- temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
+ temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_VCTRL);
temp_ctl &= 0xFFEFFF00;
if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
if (vmeArb->noEarlyReleaseFlag) {
temp_ctl |= 1 << 20;
}
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);
+ iowrite32be(temp_ctl, tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_VCTRL);
return (0);
}
int gto = 0;
- temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
+ temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_VCTRL);
gto = temp_ctl & 0xF;
if (gto != 0) {
{
int temp_ctl = 0;
- temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_VMCTRL);
temp_ctl &= 0xFFFF0000;
if (vmeReq->releaseMode == 1) {
temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
temp_ctl |= vmeReq->requestLevel;
- iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ iowrite32be(temp_ctl, tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_VMCTRL);
return (0);
}
{
int temp_ctl = 0;
- temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
+ temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
+ TSI148_LCSR_VMCTRL);
if (temp_ctl & 0x18) {
vmeReq->releaseMode = 1;