ret = -EBUSY;
dev->irq = gsc_claim_irq(&gsc_irq, ASP_GSC_IRQ);
if (dev->irq < 0) {
- printk(KERN_ERR "%s(): cannot get GSC irq\n", __FUNCTION__);
+ printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__);
goto out;
}
BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
DBG_RES("%s() size: %d pages_needed %d\n",
- __FUNCTION__, size, pages_needed);
+ __func__, size, pages_needed);
/*
** "seek and ye shall find"...praying never hurts either...
#endif
} else {
panic("%s: %s() Too many pages to map. pages_needed: %u\n",
- __FILE__, __FUNCTION__, pages_needed);
+ __FILE__, __func__, pages_needed);
}
panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
- __FUNCTION__);
+ __func__);
resource_found:
DBG_RES("%s() res_idx %d res_hint: %d\n",
- __FUNCTION__, res_idx, ioc->res_hint);
+ __func__, res_idx, ioc->res_hint);
#ifdef CCIO_SEARCH_TIME
{
BUG_ON(pages_mapped > BITS_PER_LONG);
DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
- __FUNCTION__, res_idx, pages_mapped);
+ __func__, res_idx, pages_mapped);
#ifdef CCIO_MAP_STATS
ioc->used_pages -= pages_mapped;
#endif
} else {
panic("%s:%s() Too many pages to unmap.\n", __FILE__,
- __FUNCTION__);
+ __func__);
}
}
pdir_start = &(ioc->pdir_base[idx]);
DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
- __FUNCTION__, addr, (long)iovp | offset, size);
+ __func__, addr, (long)iovp | offset, size);
/* If not cacheline aligned, force SAFE_DMA on the whole mess */
if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
ioc = GET_IOC(dev);
DBG_RUN("%s() iovp 0x%lx/%x\n",
- __FUNCTION__, (long)iova, size);
+ __func__, (long)iova, size);
iova ^= offset; /* clear offset bits */
size += offset;
BUG_ON(!dev);
ioc = GET_IOC(dev);
- DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+ DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
/* Fast path single entry scatterlists. */
if (nents == 1) {
BUG_ON(coalesced != filled);
- DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+ DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
for (i = 0; i < filled; i++)
current_len += sg_dma_len(sglist + i);
ioc = GET_IOC(dev);
DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
- __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
+ __func__, nents, sg_virt_addr(sglist), sglist->length);
#ifdef CCIO_MAP_STATS
ioc->usg_calls++;
++sglist;
}
- DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+ DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
static struct hppa_dma_ops ccio_ops = {
ccio_get_iotlb_size(struct parisc_device *dev)
{
if (dev->spa_shift == 0) {
- panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
+ panic("%s() : Can't determine I/O TLB size.\n", __func__);
}
return (1 << dev->spa_shift);
}
BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));
DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
- __FUNCTION__, ioc->ioc_regs,
+ __func__, ioc->ioc_regs,
(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
iova_space_size>>20,
iov_order + PAGE_SHIFT);
ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if(NULL == ioc->pdir_base) {
- panic("%s() could not allocate I/O Page Table\n", __FUNCTION__);
+ panic("%s() could not allocate I/O Page Table\n", __func__);
}
memset(ioc->pdir_base, 0, ioc->pdir_size);
/* resource map size dictated by pdir_size */
ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
- DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
+ DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
get_order(ioc->res_size));
if(NULL == ioc->res_map) {
- panic("%s() could not allocate resource map\n", __FUNCTION__);
+ panic("%s() could not allocate resource map\n", __func__);
}
memset(ioc->res_map, 0, ioc->res_size);
result = insert_resource(&iomem_resource, res);
if (result < 0) {
printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
- __FUNCTION__, res->start, res->end);
+ __func__, res->start, res->end);
}
}
void __iomem *base_addr = d->hba.base_addr;
unsigned long flags;
- DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where,
+ DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where,
size);
spin_lock_irqsave(&d->dinosaur_pen, flags);
void __iomem *base_addr = d->hba.base_addr;
unsigned long flags;
- DBG("%s: %p, %d, %d, %d\n", __FUNCTION__, base_addr, devfn, where,
+ DBG("%s: %p, %d, %d, %d\n", __func__, base_addr, devfn, where,
size);
spin_lock_irqsave(&d->dinosaur_pen, flags);
struct dino_device *dino_dev = irq_desc[irq].chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
+ DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
- DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
+ DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
/*
** clear pending IRQ bits
tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR);
if (tmp & DINO_MASK_IRQ(local_irq)) {
DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
- __FUNCTION__, tmp);
+ __func__, tmp);
gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
}
}
int local_irq = __ffs(mask);
int irq = dino_dev->global_irq[local_irq];
DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
- __FUNCTION__, irq, intr_dev, mask);
+ __func__, irq, intr_dev, mask);
__do_IRQ(irq);
mask &= ~(1 << local_irq);
} while (mask);
int port_base = HBA_PORT_BASE(dino_dev->hba.hba_num);
DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
- __FUNCTION__, bus, bus->secondary,
+ __func__, bus, bus->secondary,
bus->bridge->platform_data);
/* Firmware doesn't set up card-mode dino, so we have to */
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
- DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __FUNCTION__, irq,
+ DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
irq_dev->name, imr);
/* Disable the IRQ line by clearing the bit in the IMR */
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
- DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __FUNCTION__, irq,
+ DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
irq_dev->name, imr);
/* Enable the IRQ line by setting the bit in the IMR */
dev->irq = gsc_alloc_irq(&gsc_irq);
if (dev->irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
- __FUNCTION__);
+ __func__);
kfree(lasi);
return -EBUSY;
}
/* original - Generate config cycle on broken elroy
with risk we will miss PCI bus errors. */
*data = lba_rd_cfg(d, tok, pos, size);
- DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data);
+ DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
return 0;
}
if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
- DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos);
+ DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
/* either don't want to look or know device isn't present. */
*data = ~0U;
return(0);
case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
case 4: *data = READ_REG32(data_reg); break;
}
- DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
+ DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
return 0;
}
if (!LBA_SKIP_PROBE(d)) {
/* Original Workaround */
lba_wr_cfg(d, tok, pos, (u32) data, size);
- DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
+ DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data);
return 0;
}
if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
- DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data);
+ DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
return 1; /* New Workaround */
}
- DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
+ DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);
/* Basic Algorithm */
LBA_CFG_ADDR_SETUP(d, tok | pos);
if ((pos > 255) || (devfn > 255))
return -EINVAL;
- DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data);
+ DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);
LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
switch(size) {
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
- DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
+ DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
WRITE_REG##size(val, astro_iop_base + addr); \
if (LBA_DEV(d)->hw_rev < 3) \
lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
u##size t; \
- DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
+ DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
DBG_PORT(" 0x%x\n", t); \
return (t); \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
- DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
+ DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
WRITE_REG##size(val, where); \
/* flush the I/O down to the elroy at least */ \
lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
WARN_ON((ibase & 0x001fffff) != 0);
WARN_ON((imask & 0x001fffff) != 0);
- DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
+ DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
WRITE_REG32( imask, base_addr + LBA_IMASK);
WRITE_REG32( ibase, base_addr + LBA_IBASE);
iounmap(base_addr);
default:
printk(KERN_ERR "%s: Wrong LCD/LED model %d !\n",
- __FUNCTION__, lcd_info.model);
+ __func__, lcd_info.model);
return 1;
}
}
mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
- DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
+ DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
while(res_ptr < res_end)
{
DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
#endif
DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
- __FUNCTION__, size, pages_needed, pide,
+ __func__, size, pages_needed, pide,
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
ioc->res_bitshift );
unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
- __FUNCTION__, (uint) iova, size,
+ __func__, (uint) iova, size,
bits_not_wanted, m, pide, res_ptr, *res_ptr);
#ifdef SBA_COLLECT_STATS
iovp = (dma_addr_t) pide << IOVP_SHIFT;
DBG_RUN("%s() 0x%p -> 0x%lx\n",
- __FUNCTION__, addr, (long) iovp | offset);
+ __func__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
unsigned long flags;
dma_addr_t offset;
- DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);
+ DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
offset = iova & ~IOVP_MASK;
int coalesced, filled = 0;
unsigned long flags;
- DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+ DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
spin_unlock_irqrestore(&ioc->res_lock, flags);
- DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+ DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
return filled;
}
#endif
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
- __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
+ __func__, nents, sg_virt_addr(sglist), sglist->length);
ioc = GET_IOC(dev);
++sglist;
}
- DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+ DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
if (NULL == (void *) pdir_base) {
panic("%s() could not allocate I/O Page Table\n",
- __FUNCTION__);
+ __func__);
}
/* If this is not PA8700 (PCX-W2)
ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
- __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
+ __func__, ioc->ioc_hpa, iova_space_size >> 20,
iov_order + PAGE_SHIFT);
ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
memset(ioc->pdir_base, 0, ioc->pdir_size);
DBG_INIT("%s() pdir %p size %x\n",
- __FUNCTION__, ioc->pdir_base, ioc->pdir_size);
+ __func__, ioc->pdir_base, ioc->pdir_size);
#ifdef SBA_HINT_SUPPORT
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
if (agp_found && sba_reserve_agpgart) {
printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
- __FUNCTION__, (iova_space_size/2) >> 20);
+ __func__, (iova_space_size/2) >> 20);
ioc->pdir_size /= 2;
ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
}
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
- __FUNCTION__,
+ __func__,
ioc->ioc_hpa,
(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
iova_space_size>>20,
ioc->pdir_base = sba_alloc_pdir(pdir_size);
DBG_INIT("%s() pdir %p size %x\n",
- __FUNCTION__, ioc->pdir_base, pdir_size);
+ __func__, ioc->pdir_base, pdir_size);
#ifdef SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
#endif
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
- __FUNCTION__, ioc->ibase, ioc->imask);
+ __func__, ioc->ibase, ioc->imask);
/*
** FIXME: Hint registers are programmed with default hint
ioc->ibase = 0; /* used by SBA_IOVA and related macros */
- DBG_INIT("%s() DONE\n", __FUNCTION__);
+ DBG_INIT("%s() DONE\n", __func__);
}
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
- __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
+ __func__, sba_dev->sba_hpa, ioc_ctl);
ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
/* j6700 v1.6 firmware sets 0x294f */
res_size >>= 3; /* convert bit count to byte count */
DBG_INIT("%s() res_size 0x%x\n",
- __FUNCTION__, res_size);
+ __func__, res_size);
sba_dev->ioc[i].res_size = res_size;
sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
if (NULL == sba_dev->ioc[i].res_map)
{
panic("%s:%s() could not allocate resource map\n",
- __FILE__, __FUNCTION__ );
+ __FILE__, __func__ );
}
memset(sba_dev->ioc[i].res_map, 0, res_size);
#endif
DBG_INIT("%s() %d res_map %x %p\n",
- __FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
+ __func__, i, res_size, sba_dev->ioc[i].res_map);
}
spin_lock_init(&sba_dev->sba_lock);
dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
if (dev->irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
- __FUNCTION__);
+ __func__);
kfree(wax);
return -EBUSY;
}
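
Background on the change (a minimal sketch, not part of the patch itself): __FUNCTION__ is a GCC-specific predefined identifier, while __func__ is the equivalent identifier standardized in C99, so the substitution is a drop-in replacement wherever the enclosing function's name is printed. The report() function below is hypothetical, for illustration only; both identifiers expand to the same string:

#include <stdio.h>

/* Hypothetical example: both identifiers expand to the enclosing
 * function's name ("report"). __func__ is standard C99; __FUNCTION__
 * is a GCC extension, so it is kept behind the __GNUC__ guard here. */
static void report(void)
{
	printf("C99: %s\n", __func__);
#ifdef __GNUC__
	printf("GCC: %s\n", __FUNCTION__);
#endif
}

int main(void)
{
	report();
	return 0;
}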