 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
-
+		/*
+		 * __GFP_NORETRY flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is
+		 * not destabilized.
+		 */
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+				    GFP_KERNEL | __GFP_NORETRY,
+				    cpu_to_node(cpu_buffer->cpu));
 		if (!bpage)
 			goto free_pages;
 		list_add(&bpage->list, &pages);
 		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-					GFP_KERNEL, 0);
+					GFP_KERNEL | __GFP_NORETRY, 0);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
 			struct page *page;
+			/*
+			 * __GFP_NORETRY flag makes sure that the allocation
+			 * fails gracefully without invoking oom-killer and
+			 * the system is not destabilized.
+			 */
 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
 						   cache_line_size()),
-					     GFP_KERNEL, cpu_to_node(cpu));
+					     GFP_KERNEL | __GFP_NORETRY,
+					     cpu_to_node(cpu));
 			if (!bpage)
 				goto free_pages;
 			list_add(&bpage->list, &pages);
-			page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+			page = alloc_pages_node(cpu_to_node(cpu),
+						GFP_KERNEL | __GFP_NORETRY, 0);
 			if (!page)
 				goto free_pages;
 			bpage->page = page_address(page);
 	struct buffer_data_page *bpage;
 	struct page *page;
-	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	page = alloc_pages_node(cpu_to_node(cpu),
+				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
 		return NULL;
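
All three hunks apply the same pattern: the allocations are made with GFP_KERNEL | __GFP_NORETRY so that, when memory is tight, the allocator returns NULL instead of retrying hard or invoking the OOM killer, and the caller unwinds through the existing free_pages / return NULL error paths. The fragment below is a minimal standalone sketch of that pattern, not part of the patch; the helper name alloc_one_buffer_page is hypothetical, while alloc_pages_node(), page_address() and cpu_to_node() are the same kernel APIs used above.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

/* Hypothetical helper, for illustration only: allocate one zeroed page
 * near @cpu without risking an OOM kill when the request cannot be met. */
static void *alloc_one_buffer_page(int cpu)
{
	struct page *page;

	/* __GFP_NORETRY: give up and return NULL rather than retrying
	 * aggressively or waking the OOM killer. */
	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO, 0);
	if (!page)
		return NULL;	/* caller releases what it already holds */

	return page_address(page);
}

In the patch itself the flags are passed directly at each call site: a failed allocation in the two loops jumps to the existing free_pages label so pages allocated earlier are released, and the read-page helper simply returns NULL to its caller.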