projects
/
platform
/
adaptation
/
renesas_rcar
/
renesas_kernel.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mm: page_alloc: use word-based accesses for get/set pageblock bitmaps
[platform/adaptation/renesas_rcar/renesas_kernel.git]
/
mm
/
hugetlb.c
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 04306b9..06a9bc0 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -584,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 				1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1 << PG_writeback);
 	}
-	VM_BUG_ON(hugetlb_cgroup_from_page(page));
+	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
 	arch_release_hugepage(page);
@@ -1089,7 +1089,7 @@ retry:
 		 * no users -- drop the buddy allocator's reference.
 		 */
 		put_page_testzero(page);
-		VM_BUG_ON(page_count(page));
+		VM_BUG_ON_PAGE(page_count(page), page);
 		enqueue_huge_page(h, page);
 	}
 free:
@@ -1134,6 +1134,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	while (nr_pages--) {
 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
 			break;
+		cond_resched_lock(&hugetlb_lock);
 	}
 }
@@ -1509,6 +1510,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 	while (min_count < persistent_huge_pages(h)) {
 		if (!free_pool_huge_page(h, nodes_allowed, 0))
 			break;
+		cond_resched_lock(&hugetlb_lock);
 	}
 	while (count < persistent_huge_pages(h)) {
 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
@@ -3503,7 +3505,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	if (!get_page_unless_zero(page))
 		return false;
 	spin_lock(&hugetlb_lock);
@@ -3514,7 +3516,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 void putback_active_hugepage(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	spin_lock(&hugetlb_lock);
 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
@@ -3523,7 +3525,7 @@ void putback_active_hugepage(struct page *page)
 bool is_hugepage_active(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	/*
 	 * This function can be called for a tail page because the caller,
 	 * scan_movable_pages, scans through a given pfn-range which typically