#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
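In each architecture header touched by this patch, phys_to_gart() and gart_to_phys() were defined as pure identity macros (visible in the removed lines), so deleting them changes no behaviour: the GART simply takes the physical address as-is. A minimal illustrative sketch of the pattern being dropped (example_gatt_entry is a hypothetical name, not taken from the patch):

/* old: identity conversions every caller had to route through */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)

/* new: callers use the physical address directly */
static inline unsigned long example_gatt_entry(struct page *page)
{
	return page_to_phys(page);	/* was phys_to_gart(page_to_phys(page)) */
}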
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
*/
#define flush_agp_cache() wbinvd()
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define AGP_GENERIC_SIZES_ENTRIES 11
extern const struct aper_size_info_16 agp3_generic_sizes[];
-#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
-#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
-
extern int agp_off;
extern int agp_try_unsupported_boot;
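With the per-architecture identity conversions gone, the virt_to_gart()/gart_to_virt() helpers deleted above collapse to the ordinary virt/phys conversions, which is exactly the substitution made in every driver hunk that follows:

/* removed helpers, expanded with phys_to_gart(x) == x and gart_to_phys(x) == x */
/*   virt_to_gart(x) == phys_to_gart(virt_to_phys(x)) == virt_to_phys(x)        */
/*   gart_to_virt(x) == phys_to_virt(gart_to_phys(x)) == phys_to_virt(x)        */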
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN ));
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
return page;
}
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN));
+ page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
}
agp_generic_destroy_page(page, flags);
}
#ifndef CONFIG_X86
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
- page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
+ page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
- writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
+ writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_generic_mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])),
+ page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])),
+ page_to_phys(mem->pages[i]),
mask_type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
static int amd_8151_configure(void)
{
- unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
+ unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
int i;
/* Configure AGP regs in each x86-64 host bridge. */
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
- release_mem_region(virt_to_gart(bridge->gatt_table_real),
+ release_mem_region(virt_to_phys(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])),
+ page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
- writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
+ writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
goto err_out_nounmap;
}
} else {
- bridge->scratch_page_dma = phys_to_gart(page_to_phys(page));
+ bridge->scratch_page_dma = page_to_phys(page);
}
bridge->scratch_page = bridge->driver->mask_memory(bridge,
/* This function does the same thing as mask_memory() for this chipset... */
static inline unsigned long efficeon_mask_memory(struct page *page)
{
- unsigned long addr = phys_to_gart(page_to_phys(page));
+ unsigned long addr = page_to_phys(page);
return addr | 0x00000001;
}
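The low bit OR'd in here appears to serve as the entry's valid flag, the same role as the `| 1` written into the page-directory entries elsewhere in this patch; only the address conversion changes, the flag handling stays as it was. A minimal sketch of the idea, with make_present_entry as a hypothetical name:

static inline unsigned long make_present_entry(struct page *page)
{
	/* physical address of the backing page, bit 0 set to mark the entry in use */
	return page_to_phys(page) | 0x1;
}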
efficeon_private.l1_table[index] = page;
- value = virt_to_gart((unsigned long *)page) | pati | present | index;
+ value = virt_to_phys((unsigned long *)page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);
set_memory_uc((unsigned long)table, 1 << page_order);
bridge->gatt_table = (void *)table;
#else
- bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+ bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
#endif
return -ENOMEM;
}
- bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+ bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) {
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(bridge->driver->mask_memory(bridge,
- phys_to_gart(page_to_phys(mem->pages[i])),
+ page_to_phys(mem->pages[i]),
mask_type),
bridge->gatt_table+j);
}
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
- hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+ hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
- writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+ writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
io_page_size = 1UL << I460_IO_PAGE_SHIFT;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- paddr = phys_to_gart(page_to_phys(mem->pages[i]));
+ paddr = page_to_phys(mem->pages[i]);
for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
}
return -ENOMEM;
}
- lp->paddr = phys_to_gart(page_to_phys(lp->page));
+ lp->paddr = page_to_phys(lp->page);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.gtt+j);
}
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])),
- mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])), mask_type),
+ page_to_phys(mem->pages[i]), mask_type),
agp_bridge->gatt_table+nvidia_private.pg_offset+j);
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
table[j] =
bridge->driver->mask_memory(bridge,
- phys_to_gart(page_to_phys(mem->pages[i])),
+ page_to_phys(mem->pages[i]),
mem->type);
}
/* Create a fake scratch directory */
for (i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
- writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
}
retval = serverworks_create_gatt_pages(value->num_entries / 1024);
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
- agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
+ agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++)
- writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+ writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0;
}
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(agp_bridge,
- phys_to_gart(page_to_phys(mem->pages[i])), mem->type),
+ page_to_phys(mem->pages[i]), mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
serverworks_tlbflush(mem);
bridge->gatt_table_real = (u32 *) table;
bridge->gatt_table = (u32 *)table;
- bridge->gatt_bus_addr = virt_to_gart(table);
+ bridge->gatt_bus_addr = virt_to_phys(table);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;