}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
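+/*
+ * Pages from the unpopulated allocator are ZONE_DEVICE memory: their
+ * page->lru overlaps fields used by the device pagemap code and must
+ * not be touched, so chain cached pages through page->zone_device_data,
+ * which is free for driver use.
+ */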
+static inline void cache_init(struct gnttab_page_cache *cache)
+{
+ cache->pages = NULL;
+}
+
+static inline bool cache_empty(struct gnttab_page_cache *cache)
+{
+ return !cache->pages;
+}
+
+static inline struct page *cache_deq(struct gnttab_page_cache *cache)
+{
+ struct page *page;
+
+ page = cache->pages;
+ cache->pages = page->zone_device_data;
+
+ return page;
+}
+
+static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
+{
+ page->zone_device_data = cache->pages;
+ cache->pages = page;
+}
+#else
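+/*
+ * Without CONFIG_XEN_UNPOPULATED_ALLOC the cached pages are ordinary
+ * pages, so a list_head based list via page->lru works fine.
+ */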
+static inline void cache_init(struct gnttab_page_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->pages);
+}
+
+static inline bool cache_empty(struct gnttab_page_cache *cache)
+{
+ return list_empty(&cache->pages);
+}
+
+static inline struct page *cache_deq(struct gnttab_page_cache *cache)
+{
+ struct page *page;
+
+ page = list_first_entry(&cache->pages, struct page, lru);
+ list_del(&page->lru);
+
+ return page;
+}
+
+static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
+{
+ list_add(&page->lru, &cache->pages);
+}
+#endif
+
void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
spin_lock_init(&cache->lock);
- INIT_LIST_HEAD(&cache->pages);
+ cache_init(cache);
cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
spin_lock_irqsave(&cache->lock, flags);
- if (list_empty(&cache->pages)) {
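+ /* Cache empty: fall back to allocating a fresh page. */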
+ if (cache_empty(cache)) {
spin_unlock_irqrestore(&cache->lock, flags);
return gnttab_alloc_pages(1, page);
}
- page[0] = list_first_entry(&cache->pages, struct page, lru);
- list_del(&page[0]->lru);
+ page[0] = cache_deq(cache);
cache->num_pages--;
spin_unlock_irqrestore(&cache->lock, flags);
spin_lock_irqsave(&cache->lock, flags);
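+ /* Put the returned pages into the cache. */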
for (i = 0; i < num; i++)
- list_add(&page[i]->lru, &cache->pages);
+ cache_enq(cache, page[i]);
cache->num_pages += num;
spin_unlock_irqrestore(&cache->lock, flags);
spin_lock_irqsave(&cache->lock, flags);
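+ /* Trim the cache down to num pages, in batches of ARRAY_SIZE(page). */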
while (cache->num_pages > num) {
- page[i] = list_first_entry(&cache->pages, struct page, lru);
- list_del(&page[i]->lru);
+ page[i] = cache_deq(cache);
cache->num_pages--;
if (++i == ARRAY_SIZE(page)) {
spin_unlock_irqrestore(&cache->lock, flags);
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
#include <xen/xen.h>
static DEFINE_MUTEX(list_lock);
-static LIST_HEAD(page_list);
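+/*
+ * The free pages are ZONE_DEVICE memory, so keep them on a singly
+ * linked list chained through page->zone_device_data instead of a
+ * list_head based list via page->lru, which ZONE_DEVICE pages
+ * cannot use.
+ */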
+static struct page *page_list;
static unsigned int list_count;
static int fill_list(unsigned int nr_pages)
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
- list_add(&pg->lru, &page_list);
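+ /* Push the new page onto the head of the free list. */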
+ pg->zone_device_data = page_list;
+ page_list = pg;
list_count++;
}
}
for (i = 0; i < nr_pages; i++) {
- struct page *pg = list_first_entry_or_null(&page_list,
- struct page,
- lru);
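+ /* Pop a page off the head of the free list. */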
+ struct page *pg = page_list;
BUG_ON(!pg);
- list_del(&pg->lru);
+ page_list = pg->zone_device_data;
list_count--;
pages[i] = pg;
unsigned int j;
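+ /* Error: return the pages claimed so far to the free list. */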
for (j = 0; j <= i; j++) {
- list_add(&pages[j]->lru, &page_list);
+ pages[j]->zone_device_data = page_list;
+ page_list = pages[j];
list_count++;
}
goto out;
mutex_lock(&list_lock);
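+ /* Give the pages back to the head of the free list. */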
for (i = 0; i < nr_pages; i++) {
- list_add(&pages[i]->lru, &page_list);
+ pages[i]->zone_device_data = page_list;
+ page_list = pages[i];
list_count++;
}
mutex_unlock(&list_lock);
struct page *pg =
pfn_to_page(xen_extra_mem[i].start_pfn + j);
- list_add(&pg->lru, &page_list);
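+ /* Seed the free list with the pages of the extra memory ranges. */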
+ pg->zone_device_data = page_list;
+ page_list = pg;
list_count++;
}
}