 	struct iommu_domain domain;
 	struct tegra_smmu *smmu;
 	unsigned int use_count;
-	struct page *count;
+	u32 *count;
 	struct page **pts;
 	struct page *pd;
 	unsigned id;
 		return NULL;
 	}
-	as->count = alloc_page(GFP_KERNEL);
+	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
 	if (!as->count) {
 		__free_page(as->pd);
 		kfree(as);
 	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
 	if (!as->pts) {
-		__free_page(as->count);
+		kfree(as->count);
 		__free_page(as->pd);
 		kfree(as);
 		return NULL;
 	for (i = 0; i < SMMU_NUM_PDE; i++)
 		pd[i] = 0;
-	/* clear PDE usage counters */
-	pd = page_address(as->count);
-	SetPageReserved(as->count);
-
-	for (i = 0; i < SMMU_NUM_PDE; i++)
-		pd[i] = 0;
-
 	/* setup aperture */
 	as->domain.geometry.aperture_start = 0;
 	as->domain.geometry.aperture_end = 0xffffffff;
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
 		       struct page **pagep)
 {
-	u32 *pd = page_address(as->pd), *pt, *count;
+	u32 *pd = page_address(as->pd), *pt;
 	unsigned int pde = iova_pd_index(iova);
 	struct tegra_smmu *smmu = as->smmu;
 	struct page *page;
 	pt = page_address(page);
 	/* Keep track of entries in this page table. */
-	count = page_address(as->count);
 	if (pt[iova_pt_index(iova)] == 0)
-		count[pde]++;
+		as->count[pde]++;
 	return tegra_smmu_pte_offset(page, iova);
 }
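/*
 * A minimal sketch (not the driver's actual code) of the bookkeeping
 * pattern the hunks above and below switch to: a kcalloc()'d array of
 * per-PDE use counters replacing the spare page of counters.  Names
 * prefixed with "sketch_" and SKETCH_NUM_PDE are hypothetical stand-ins;
 * only kcalloc(), kfree() and __free_page() are real kernel APIs here.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

#define SKETCH_NUM_PDE 1024			/* stand-in for SMMU_NUM_PDE */

struct sketch_as {
	u32 *count;				/* one live-PTE counter per PDE */
	struct page *pts[SKETCH_NUM_PDE];	/* page table pages */
};

static int sketch_as_init(struct sketch_as *as)
{
	/* kcalloc() zero-initialises, so no explicit clearing loop is needed */
	as->count = kcalloc(SKETCH_NUM_PDE, sizeof(*as->count), GFP_KERNEL);
	return as->count ? 0 : -ENOMEM;
}

/* Called when a previously empty PTE slot in page table @pde is filled. */
static void sketch_pte_installed(struct sketch_as *as, unsigned int pde)
{
	as->count[pde]++;
}

/* Called when a PTE in page table @pde is torn down. */
static void sketch_pte_removed(struct sketch_as *as, unsigned int pde)
{
	/* Last user gone: return the page table page to the system. */
	if (--as->count[pde] == 0) {
		__free_page(as->pts[pde]);
		as->pts[pde] = NULL;
	}
}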
 {
 	struct tegra_smmu *smmu = as->smmu;
 	unsigned int pde = iova_pd_index(iova);
-	u32 *count = page_address(as->count);
 	u32 *pd = page_address(as->pd);
 	struct page *page = as->pts[pde];
 	 * When no entries in this page table are used anymore, return the
 	 * memory page to the system.
 	 */
-	if (--count[pde] == 0) {
+	if (--as->count[pde] == 0) {
 		unsigned int offset = pde * sizeof(*pd);
 		/* Clear the page directory entry first */