if (addr == 0x72)
(void)R_REG(osh, &regs->phyregdata);
#else
- W_REG(osh, (volatile u32 *)(&regs->phyregaddr),
+ W_REG(osh, (u32 *)(&regs->phyregaddr),
addr | (val << 16));
if (pi->sh->bustype == PCI_BUS) {
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
#else
struct sbpcieregs;
-extern u8 pcicore_find_pci_capability(struct osl_info *osh, u8 req_cap_id,
+extern u8 pcicore_find_pci_capability(void *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen);
extern uint pcie_readreg(struct osl_info *osh, struct sbpcieregs *pcieregs,
uint addrtype, uint offset);
extern u32 pcicore_pciereg(void *pch, u32 offset, u32 mask,
u32 val, uint type);
-extern bool pcicore_pmecap_fast(struct osl_info *osh);
+extern bool pcicore_pmecap_fast(void *pch);
extern void pcicore_pmeen(void *pch);
extern void pcicore_pmeclr(void *pch);
extern bool pcicore_pmestat(void *pch);
#endif
#if defined(BCMSDIO)
-#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) \
- if ((osh)->mmbus) \
- mmap_op else bus_op
-#define SELECT_BUS_READ(osh, mmap_op, bus_op) \
- ((osh)->mmbus) ? mmap_op : bus_op
+#define SELECT_BUS_WRITE(mmap_op, bus_op) bus_op
+#define SELECT_BUS_READ(mmap_op, bus_op) bus_op
#else
-#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) mmap_op
-#define SELECT_BUS_READ(osh, mmap_op, bus_op) mmap_op
+#define SELECT_BUS_WRITE(mmap_op, bus_op) mmap_op
+#define SELECT_BUS_READ(mmap_op, bus_op) mmap_op
#endif
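For reference, a minimal usage sketch of how the R_REG()/W_REG() macros below behave after this change; the bus selection that used to be made at run time through (osh)->mmbus is now fixed at compile time by BCMSDIO. The function name is illustrative only, not part of the patch.

static void example_reg_access(struct osl_info *osh, volatile u32 *reg)
{
	u32 v = R_REG(osh, reg);	/* readl() on memory-mapped buses, OSL_READ_REG() under BCMSDIO */
	W_REG(osh, reg, v | 0x1);	/* writel() on memory-mapped buses */
}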
/* the largest reasonable packet buffer the driver uses for an Ethernet MTU, in bytes */
#ifndef IL_BIGENDIAN
#ifndef __mips__
#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, sizeof(*(r)) == sizeof(u8) ? \
+ SELECT_BUS_READ(sizeof(*(r)) == sizeof(u8) ? \
readb((volatile u8*)(r)) : \
sizeof(*(r)) == sizeof(u16) ? readw((volatile u16*)(r)) : \
readl((volatile u32*)(r)), OSL_READ_REG(osh, r)) \
)
#else /* __mips__ */
#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, \
+ SELECT_BUS_READ( \
({ \
__typeof(*(r)) __osl_v; \
__asm__ __volatile__("sync"); \
#endif /* __mips__ */
#define W_REG(osh, r, v) do { \
- SELECT_BUS_WRITE(osh, \
+ SELECT_BUS_WRITE( \
switch (sizeof(*(r))) { \
case sizeof(u8): \
writeb((u8)(v), (volatile u8*)(r)); break; \
} while (0)
#else /* IL_BIGENDIAN */
#define R_REG(osh, r) (\
- SELECT_BUS_READ(osh, \
+ SELECT_BUS_READ( \
({ \
__typeof(*(r)) __osl_v; \
switch (sizeof(*(r))) { \
OSL_READ_REG(osh, r)) \
)
#define W_REG(osh, r, v) do { \
- SELECT_BUS_WRITE(osh, \
+ SELECT_BUS_WRITE( \
switch (sizeof(*(r))) { \
case sizeof(u8): \
writeb((u8)(v), \
/* misc si info needed by some of the routines */
typedef struct si_info {
- struct si_pub pub; /* back plane public state (must be first field) */
+ struct si_pub pub; /* back plane public state (must be first) */
struct osl_info *osh; /* osl os handle */
- void *sdh; /* bcmsdh handle */
+ void *pbus; /* handle to bus (pci/sdio/..) */
uint dev_coreid; /* the core that provides driver functions */
void *intr_arg; /* interrupt callback function arg */
si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */
sii->curwrap = (void *)((unsigned long)regs + SI_CORE_SIZE);
/* Now point the window at the erom */
- pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, erombase);
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
eromptr = regs;
break;
case PCI_BUS:
/* point bar0 window */
- pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN, addr);
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
regs = sii->curmap;
/* point bar0 2nd 4KB window */
- pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN2, wrap);
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
break;
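A hedged sketch of the access pattern these config writes set up: the 4 KB BAR0 window is slid onto the target core's backplane address, after which that core's registers are reachable through the existing BAR0 mapping. The helper name and arguments are illustrative, not part of the patch.

#include <linux/pci.h>

/* illustrative only: point the BAR0 window at a core and read one of its
 * registers through the already ioremap()ed BAR0 mapping (curmap) */
static u32 example_core_read(struct pci_dev *pbus, void __iomem *curmap,
			     u32 coreaddr, u32 regoff)
{
	pci_write_config_dword(pbus, PCI_BAR0_WIN, coreaddr);
	return readl((u8 __iomem *)curmap + regoff);
}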
case SPI_BUS:
char name[MAXNAMEL]; /* caller's name for diag msgs */
struct osl_info *osh; /* os handle */
+ void *pbus; /* bus handle */
si_t *sih; /* sb handle */
bool dma64; /* this dma engine is operating in 64-bit mode */
static void _dma_fifoloopbackenable(dma_info_t *di);
static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
-static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
+static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
u16 *alignbits, uint *alloced,
dmaaddr_t *descpa, osldma_t **dmah);
di->osh = osh;
di->sih = sih;
+ di->pbus = osh->pdev;
/* save tunables */
di->ntxd = (u16) ntxd;
return dma64_alloc(di, direction);
}
-void *dma_alloc_consistent(struct osl_info *osh, uint size, u16 align_bits,
+void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
uint *alloced, unsigned long *pap)
{
if (align_bits) {
size += align;
*alloced = size;
}
- return pci_alloc_consistent(osh->pdev, size, (dma_addr_t *) pap);
+ return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
}
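A hypothetical caller sketch, showing why the function pads size by the alignment when align_bits is nonzero: the extra bytes let the caller round the returned buffer up to the requested alignment without running past the allocation. Names below are illustrative.

static void *example_aligned_alloc(struct pci_dev *pdev, uint size,
				   u16 align_bits, unsigned long *pap)
{
	uint alloced = 0;
	void *va = dma_alloc_consistent(pdev, size, align_bits, &alloced, pap);

	if (va == NULL)
		return NULL;
	/* the physical address in *pap would need the same rounding */
	return (void *)roundup((unsigned long)va, 1 << align_bits);
}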
/* !! may be called with core in reset */
/* free dma descriptor rings */
if (di->txd64)
- pci_free_consistent(di->osh->pdev, di->txdalloc,
+ pci_free_consistent(di->pbus, di->txdalloc,
((s8 *)di->txd64 - di->txdalign),
(di->txdpaorig));
if (di->rxd64)
- pci_free_consistent(di->osh->pdev, di->rxdalloc,
+ pci_free_consistent(di->pbus, di->rxdalloc,
((s8 *)di->rxd64 - di->rxdalign),
(di->rxdpaorig));
memset(&di->rxp_dmah[rxout], 0,
sizeof(hnddma_seg_map_t));
- pa = pci_map_single(di->osh->pdev, p->data,
+ pa = pci_map_single(di->pbus, p->data,
di->rxbufsize, PCI_DMA_FROMDEVICE);
ASSERT(IS_ALIGNED(PHYSADDRLO(pa), 4));
* descriptor ring size aligned location. This will ensure that the ring will
* not cross a page boundary
*/
-static void *dma_ringalloc(struct osl_info *osh, u32 boundary, uint size,
+static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
u16 *alignbits, uint *alloced,
dmaaddr_t *descpa, osldma_t **dmah)
{
u32 desc_strtaddr;
u32 alignbytes = 1 << *alignbits;
- va = dma_alloc_consistent(osh, size, *alignbits, alloced, descpa);
+ va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
if (NULL == va)
return NULL;
if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
& boundary)) {
*alignbits = dma_align_sizetobits(size);
- pci_free_consistent(osh->pdev, size, va, *descpa);
- va = dma_alloc_consistent(osh, size, *alignbits,
+ pci_free_consistent(di->pbus, size, va, *descpa);
+ va = dma_alloc_consistent(di->pbus, size, *alignbits,
alloced, descpa);
}
return va;
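A worked illustration of the boundary test above, assuming an 8 KB ring boundary (D64RINGALIGN == 0x2000); the helper is illustrative, not part of the patch.

/* true when a ring of 'size' bytes starting at 'start' straddles the
 * boundary selected by the mask, e.g. start 0x1a00 with size 0x1800 ends
 * at 0x31ff, whose 0x2000 bit differs from the start's, so dma_ringalloc()
 * frees the buffer and retries with *alignbits raised to the ring size */
static bool example_ring_crosses(u32 start, uint size, u32 boundary)
{
	return ((start + size - 1) & boundary) != (start & boundary);
}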
align = (1 << align_bits);
if (direction == DMA_TX) {
- va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
+ va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig, &di->tx_dmah);
if (va == NULL) {
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
di->txdalloc = alloced;
ASSERT(IS_ALIGNED((unsigned long)di->txd64, align));
} else {
- va = dma_ringalloc(di->osh, D64RINGALIGN, size, &align_bits,
+ va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig, &di->rx_dmah);
if (va == NULL) {
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
if (len == 0)
return 0;
- pa = pci_map_single(di->osh->pdev, buf, len, PCI_DMA_TODEVICE);
+ pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);
flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
memset(&di->txp_dmah[txout], 0,
sizeof(hnddma_seg_map_t));
- pa = pci_map_single(di->osh->pdev, data, len, PCI_DMA_TODEVICE);
+ pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
if (DMASGLIST_ENAB) {
map = &di->txp_dmah[txout];
i = NEXTTXD(i);
}
- pci_unmap_single(di->osh->pdev, pa, size, PCI_DMA_TODEVICE);
+ pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
}
di->txin = i;
di->dataoffsethigh));
/* clear this packet from the descriptor ring */
- pci_unmap_single(di->osh->pdev, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
} regs; /* Memory mapped register to the core */
si_t *sih; /* System interconnect handle */
+ struct pci_dev *dev;
struct osl_info *osh; /* OSL handle */
u8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
bool pcie_pr42767;
pi->sih = sih;
pi->osh = osh;
+ pi->dev = osh->pdev;
if (sih->buscoretype == PCIE_CORE_ID) {
u8 cap_ptr;
pi->regs.pcieregs = (sbpcieregs_t *) regs;
cap_ptr =
- pcicore_find_pci_capability(pi->osh, PCI_CAP_PCIECAP_ID,
+ pcicore_find_pci_capability(pi->dev, PCI_CAP_PCIECAP_ID,
NULL, NULL);
ASSERT(cap_ptr);
pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
/* return cap_offset if requested capability exists in the PCI config space */
/* Note that it's the caller's responsibility to make sure it's a pci bus */
u8
-pcicore_find_pci_capability(struct osl_info *osh, u8 req_cap_id,
+pcicore_find_pci_capability(void *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen)
{
u8 cap_id;
u8 byte_val;
/* check for Header type 0 */
- pci_read_config_byte(osh->pdev, PCI_CFG_HDR, &byte_val);
+ pci_read_config_byte(dev, PCI_CFG_HDR, &byte_val);
if ((byte_val & 0x7f) != PCI_HEADER_NORMAL)
goto end;
/* check if the capability pointer field exists */
- pci_read_config_byte(osh->pdev, PCI_CFG_STAT, &byte_val);
+ pci_read_config_byte(dev, PCI_CFG_STAT, &byte_val);
if (!(byte_val & PCI_CAPPTR_PRESENT))
goto end;
- pci_read_config_byte(osh->pdev, PCI_CFG_CAPPTR, &cap_ptr);
+ pci_read_config_byte(dev, PCI_CFG_CAPPTR, &cap_ptr);
/* check if the capability pointer is 0x00 */
if (cap_ptr == 0x00)
goto end;
/* loop through the capability list and see if the pcie capability exists */
- pci_read_config_byte(osh->pdev, cap_ptr, &cap_id);
+ pci_read_config_byte(dev, cap_ptr, &cap_id);
while (cap_id != req_cap_id) {
- pci_read_config_byte(osh->pdev, cap_ptr + 1, &cap_ptr);
+ pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
if (cap_ptr == 0x00)
break;
- pci_read_config_byte(osh->pdev, cap_ptr, &cap_id);
+ pci_read_config_byte(dev, cap_ptr, &cap_id);
}
if (cap_id != req_cap_id) {
goto end;
bufsize = SZPCR - cap_data;
*buflen = bufsize;
while (bufsize--) {
- pci_read_config_byte(osh->pdev, cap_data, buf);
+ pci_read_config_byte(dev, cap_data, buf);
cap_data++;
buf++;
}
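Usage sketch for the reworked helper, which now takes the struct pci_dev directly instead of the osl handle; when only the offset is needed, the PCI core's pci_find_capability() performs the same list walk. The wrapper name is illustrative.

static u8 example_find_pcie_cap(struct pci_dev *dev)
{
	/* returns 0 when the capability is absent */
	return pcicore_find_pci_capability(dev, PCI_CAP_PCIECAP_ID,
					   NULL, NULL);
}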
if (!offset)
return 0;
- pci_read_config_dword(pi->osh->pdev, offset, &reg_val);
+ pci_read_config_dword(pi->dev, offset, &reg_val);
/* set operation */
if (mask) {
if (val)
reg_val |= PCIE_CLKREQ_ENAB;
else
reg_val &= ~PCIE_CLKREQ_ENAB;
- pci_write_config_dword(pi->osh->pdev, offset, reg_val);
- pci_read_config_dword(pi->osh->pdev, offset, &reg_val);
+ pci_write_config_dword(pi->dev, offset, reg_val);
+ pci_read_config_dword(pi->dev, offset, &reg_val);
}
if (reg_val & PCIE_CLKREQ_ENAB)
return 1;
W_REG(pi->osh, reg16, val16);
- pci_read_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset,
+ pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset,
&w);
w &= ~PCIE_ASPM_ENAB;
w |= pi->pcie_war_aspm_ovr;
- pci_write_config_dword(pi->osh->pdev,
+ pci_write_config_dword(pi->dev,
pi->pciecap_lcreg_offset, w);
}
if (!pi || !PCIE_ASPM(pi->sih))
return;
- pci_read_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset, &w);
+ pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
w &= ~PCIE_CAP_LCREG_ASPML1;
- pci_write_config_dword(pi->osh->pdev, pi->pciecap_lcreg_offset, w);
+ pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
pi->pcie_pr42767 = false;
}
/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
/* Uses only PCI config accesses to find out, for use before sb_attach is done */
-bool pcicore_pmecap_fast(struct osl_info *osh)
+bool pcicore_pmecap_fast(void *pch)
{
+ pcicore_info_t *pi = (pcicore_info_t *) pch;
u8 cap_ptr;
u32 pmecap;
cap_ptr =
- pcicore_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID, NULL,
+ pcicore_find_pci_capability(pi->dev, PCI_CAP_POWERMGMTCAP_ID, NULL,
NULL);
if (!cap_ptr)
return false;
- pci_read_config_dword(osh->pdev, cap_ptr, &pmecap);
+ pci_read_config_dword(pi->dev, cap_ptr, &pmecap);
return (pmecap & PME_CAP_PM_STATES) != 0;
}
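Usage sketch, illustrative only: with the new signature the WOWL setup path passes the opaque pcicore handle rather than the osl handle.

static void example_arm_wowl(void *pch)
{
	if (pcicore_pmecap_fast(pch))	/* at least one PM state can assert PME */
		pcicore_pmeen(pch);	/* enable PME generation */
}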
if (!pi->pmecap_offset) {
cap_ptr =
- pcicore_find_pci_capability(pi->osh,
+ pcicore_find_pci_capability(pi->dev,
PCI_CAP_POWERMGMTCAP_ID, NULL,
NULL);
if (!cap_ptr)
pi->pmecap_offset = cap_ptr;
- pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset,
+ pci_read_config_dword(pi->dev, pi->pmecap_offset,
&pmecap);
/* At least one state can generate PME */
if (!pcicore_pmecap(pi))
return;
- pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+ pci_read_config_dword(pi->dev, pi->pmecap_offset + PME_CSR_OFFSET,
&w);
w |= (PME_CSR_PME_EN);
- pci_write_config_dword(pi->osh->pdev,
+ pci_write_config_dword(pi->dev,
pi->pmecap_offset + PME_CSR_OFFSET, w);
}
if (!pcicore_pmecap(pi))
return false;
- pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+ pci_read_config_dword(pi->dev, pi->pmecap_offset + PME_CSR_OFFSET,
&w);
return (w & PME_CSR_PME_STAT) == PME_CSR_PME_STAT;
if (!pcicore_pmecap(pi))
return;
- pci_read_config_dword(pi->osh->pdev, pi->pmecap_offset + PME_CSR_OFFSET,
+ pci_read_config_dword(pi->dev, pi->pmecap_offset + PME_CSR_OFFSET,
&w);
PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
/* PMESTAT is cleared by writing 1 to it */
w &= ~(PME_CSR_PME_EN);
- pci_write_config_dword(pi->osh->pdev,
+ pci_write_config_dword(pi->dev,
pi->pmecap_offset + PME_CSR_OFFSET, w);
}
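Illustrative note: PME_Status is write-1-to-clear, so the read-modify-write above both drops PME_CSR_PME_EN and, by writing back the set status bit, acknowledges the pending event. A hypothetical resume-path caller:

static void example_wowl_resume(void *pch)
{
	if (pcicore_pmestat(pch))	/* did the chip assert PME? */
		pcicore_pmeclr(pch);	/* ack the status and disable further PMEs */
}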
/* set operation */
if (mask)
- pci_write_config_dword(pi->osh->pdev, offset, val);
+ pci_write_config_dword(pi->dev, offset, val);
- pci_read_config_dword(pi->osh->pdev, offset, &tmpval);
+ pci_read_config_dword(pi->dev, offset, &tmpval);
return tmpval;
}
switch (sii->pub.bustype) {
case PCI_BUS:
/* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->osh->pdev, PCI_CFG_SVID, &w);
+ pci_read_config_dword(sii->pbus, PCI_CFG_SVID, &w);
/* Let nvram variables override subsystem Vend/ID */
sii->pub.boardvendor = (u16)si_getdevpathintvar(&sii->pub,
"boardvendor");
/* this has been customized for the bcm 4329 ONLY */
#ifdef BCMSDIO
static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
- void *regs, uint bustype, void *sdh,
+ void *regs, uint bustype, void *pbus,
char **vars, uint *varsz)
{
struct si_pub *sih = &sii->pub;
sih->buscoreidx = BADIDX;
sii->curmap = regs;
- sii->sdh = sdh;
+ sii->pbus = pbus;
sii->osh = osh;
/* find Chipcommon address */
sih->bustype = bustype;
/* bus/core/clk setup for register access */
- if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ if (!si_buscore_prep(sii, bustype, devid, pbus)) {
SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
bustype));
return NULL;
#else /* BCMSDIO */
static si_info_t *si_doattach(si_info_t *sii, uint devid, struct osl_info *osh,
- void *regs, uint bustype, void *sdh,
+ void *regs, uint bustype, void *pbus,
char **vars, uint *varsz)
{
struct si_pub *sih = &sii->pub;
sih->buscoreidx = BADIDX;
sii->curmap = regs;
- sii->sdh = sdh;
+ sii->pbus = pbus;
sii->osh = osh;
/* check to see if we are a si core mimicking a pci core */
if (bustype == PCI_BUS) {
- pci_read_config_dword(sii->osh->pdev, PCI_SPROM_CONTROL, &w);
+ pci_read_config_dword(sii->pbus, PCI_SPROM_CONTROL, &w);
if (w == 0xffffffff) {
SI_ERROR(("%s: incoming bus is PCI but it's a lie, "
" switching to SI devid:0x%x\n",
/* find Chipcommon address */
if (bustype == PCI_BUS) {
- pci_read_config_dword(sii->osh->pdev, PCI_BAR0_WIN, &savewin);
+ pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
savewin = SI_ENUM_BASE;
- pci_write_config_dword(sii->osh->pdev, PCI_BAR0_WIN,
+ pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
SI_ENUM_BASE);
cc = (chipcregs_t *) regs;
} else {
sih->bustype = bustype;
/* bus/core/clk setup for register access */
- if (!si_buscore_prep(sii, bustype, devid, sdh)) {
+ if (!si_buscore_prep(sii, bustype, devid, pbus)) {
SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
bustype));
return NULL;
if (sii->pub.ccrev < 6) {
if (sii->pub.bustype == PCI_BUS) {
- pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUT,
+ pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
&val);
if (val & PCI_CFG_GPIO_SCS)
return SCC_SS_PCI;
if (PCIE(sii))
return -1;
- pci_read_config_dword(sii->osh->pdev, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->osh->pdev, PCI_GPIO_OUTEN, &outen);
+ pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
/*
* Avoid glitching the clock if GPRS is already using it.
out |= PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->osh->pdev,
+ pci_write_config_dword(sii->pbus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->osh->pdev,
+ pci_write_config_dword(sii->pbus,
PCI_GPIO_OUTEN, outen);
udelay(XTAL_ON_DELAY);
}
/* turn pll on */
if (what & PLL) {
out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->osh->pdev,
+ pci_write_config_dword(sii->pbus,
PCI_GPIO_OUT, out);
mdelay(2);
}
out &= ~PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->osh->pdev,
+ pci_write_config_dword(sii->pbus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->osh->pdev,
+ pci_write_config_dword(sii->pbus,
PCI_GPIO_OUTEN, outen);
}
case PCI_BUS:
ASSERT((SI_INFO(sih))->osh != NULL);
slen = snprintf(path, (size_t) size, "pci/%u/%u/",
- OSL_PCI_BUS((SI_INFO(sih))->osh),
- OSL_PCI_SLOT((SI_INFO(sih))->osh));
+ ((struct pci_dev *)((SI_INFO(sih))->pbus))->bus->number,
+ PCI_SLOT(
+ ((struct pci_dev *)((SI_INFO(sih))->pbus))->devfn));
break;
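An illustrative equivalent of the snprintf above, assuming the pbus handle holds a struct pci_dev: the OSL_PCI_BUS()/OSL_PCI_SLOT() wrappers are replaced by direct field access.

#include <linux/pci.h>

static int example_pci_devpath(struct pci_dev *pdev, char *path, size_t size)
{
	/* builds the nvram device path prefix, e.g. "pci/1/0/" */
	return snprintf(path, size, "pci/%u/%u/",
			pdev->bus->number, PCI_SLOT(pdev->devfn));
}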
#ifdef BCMSDIO
return false;
cap_ptr =
- pcicore_find_pci_capability(sii->osh, PCI_CAP_PCIECAP_ID, NULL,
+ pcicore_find_pci_capability(sii->pbus, PCI_CAP_PCIECAP_ID, NULL,
NULL);
if (!cap_ptr)
return false;
}
/* enable interrupts */
- bcmsdh_intr_enable(sii->sdh);
+ bcmsdh_intr_enable(sii->pbus);
}
#endif /* BCMSDIO */
*/
if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
/* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->osh->pdev, PCI_INT_MASK, &w);
+ pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->osh->pdev, PCI_INT_MASK, w);
+ pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
} else {
/* set sbintvec bit for our flag number */
si_setint(sih, siflag);
switch (sih->bustype) {
case PCI_BUS:
ASSERT(sii->osh != NULL);
- pci_read_config_dword(sii->osh->pdev, PCI_CFG_VID, &w);
+ pci_read_config_dword(sii->pbus, PCI_CFG_VID, &w);
if ((w & 0xFFFF) != VENDOR_BROADCOM)
return true;
break;