#endif /* arch */
-#if ! defined(__arm__)
-#define NEED_RESERVE_PAGES
-#endif
-
/*
 *
 * Generic memory allocators
	snd_allocated_pages -= 1 << order;
}
-static void mark_pages(struct page *page, int order)
-{
-	struct page *last_page = page + (1 << order);
-	while (page < last_page)
-		SetPageReserved(page++);
-}
-
-static void unmark_pages(struct page *page, int order)
-{
-	struct page *last_page = page + (1 << order);
-	while (page < last_page)
-		ClearPageReserved(page++);
-}
-
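
The two helpers removed above existed only to walk every page of a higher-order allocation and set or clear PG_reserved; older kernels needed that so the VM would not treat buffer pages mapped into user space as ordinary swappable pages, and the reworked VM no longer does. What survives is the inc_snd_pages()/dec_snd_pages() accounting. A minimal sketch of the order arithmetic both relied on, assuming 4 KB pages (the variable names are illustrative only):

	size_t size = 10 * 1024;		/* 10 KB request */
	int pg = get_order(size);		/* rounds up: order 2, i.e. 16 KB */
	unsigned long npages = 1UL << pg;	/* 4 pages walked/accounted */
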
/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
	snd_assert(gfp_flags != 0, return NULL);
	gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
	pg = get_order(size);
-	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL) {
-		mark_pages(virt_to_page(res), pg);
+	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
		inc_snd_pages(pg);
-	}
	return res;
}
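
With mark_pages() gone, the one subtlety left in snd_malloc_pages() is the __GFP_COMP flag: treating the 1 << order pages as a single compound page keeps reference counting coherent when user space later maps only part of the buffer, which is what the "compound page lets parts be mapped" comment is about. A minimal usage sketch (hypothetical driver code, assuming GFP_KERNEL context and 4 KB pages):

	int order = get_order(32 * 1024);	/* 32 KB buffer: order 3 */
	unsigned long buf;

	/* one compound allocation; 4 KB pieces of it may be mmap'ed */
	buf = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (!buf)
		return -ENOMEM;
	/* ... export parts of buf to user space, then ... */
	free_pages(buf, order);
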
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
-	unmark_pages(virt_to_page(ptr), pg);
	free_pages((unsigned long) ptr, pg);
}
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
-	if (res != NULL) {
-#ifdef NEED_RESERVE_PAGES
-		mark_pages(virt_to_page(res), pg); /* should be dma_to_page() */
-#endif
+	if (res != NULL)
		inc_snd_pages(pg);
-	}
	return res;
}
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
-#ifdef NEED_RESERVE_PAGES
-	unmark_pages(virt_to_page(ptr), pg); /* should be dma_to_page() */
-#endif
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
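
The deleted "should be dma_to_page()" remarks point at a second problem this change sidesteps: virt_to_page() is not guaranteed to be meaningful for a dma_alloc_coherent() buffer on every platform, so never touching struct page for these buffers is the safer contract. For reference, a sketch of the paired calls as they look after the patch (hypothetical caller; dev is the device doing the DMA):

	dma_addr_t dma;
	int pg = get_order(8192);	/* two 4 KB pages */
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << pg, &dma, GFP_KERNEL);

	if (buf) {
		/* program dma into the hardware, use buf from the CPU */
		dma_free_coherent(dev, PAGE_SIZE << pg, buf, dma);
	}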