ring-buffer: Set __GFP_NORETRY flag for ring buffer allocating process
author Vaibhav Nagarnaik <vnagarnaik@google.com>
Wed, 8 Jun 2011 00:01:42 +0000 (17:01 -0700)
committer Steven Rostedt <rostedt@goodmis.org>
Wed, 15 Jun 2011 02:48:51 +0000 (22:48 -0400)
The tracing ring buffer is allocated from kernel memory. Allocating a
large chunk of memory can trigger the OOM killer and destabilize the
system, so random processes may be killed while the allocation is in
progress.

This patch adds the __GFP_NORETRY flag to the ring buffer allocation
calls so that they fail gracefully when the system cannot satisfy the
allocation request.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Link: http://lkml.kernel.org/r/1307491302-9236-1-git-send-email-vnagarnaik@google.com
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/ring_buffer.c

index f00ede3..731201b 100644
@@ -1004,9 +1004,14 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
        for (i = 0; i < nr_pages; i++) {
                struct page *page;
-
+               /*
+                * __GFP_NORETRY flag makes sure that the allocation fails
+                * gracefully without invoking oom-killer and the system is
+                * not destabilized.
+                */
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-                                   GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+                                   GFP_KERNEL | __GFP_NORETRY,
+                                   cpu_to_node(cpu_buffer->cpu));
                if (!bpage)
                        goto free_pages;
 
@@ -1015,7 +1020,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                list_add(&bpage->list, &pages);
 
                page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-                                       GFP_KERNEL, 0);
+                                       GFP_KERNEL | __GFP_NORETRY, 0);
                if (!page)
                        goto free_pages;
                bpage->page = page_address(page);
@@ -1377,13 +1382,20 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        for_each_buffer_cpu(buffer, cpu) {
                for (i = 0; i < new_pages; i++) {
                        struct page *page;
+                       /*
+                        * __GFP_NORETRY flag makes sure that the allocation
+                        * fails gracefully without invoking oom-killer and
+                        * the system is not destabilized.
+                        */
                        bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                                                  cache_line_size()),
-                                           GFP_KERNEL, cpu_to_node(cpu));
+                                           GFP_KERNEL | __GFP_NORETRY,
+                                           cpu_to_node(cpu));
                        if (!bpage)
                                goto free_pages;
                        list_add(&bpage->list, &pages);
-                       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+                       page = alloc_pages_node(cpu_to_node(cpu),
+                                               GFP_KERNEL | __GFP_NORETRY, 0);
                        if (!page)
                                goto free_pages;
                        bpage->page = page_address(page);
@@ -3737,7 +3749,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
        struct buffer_data_page *bpage;
        struct page *page;
 
-       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+       page = alloc_pages_node(cpu_to_node(cpu),
+                               GFP_KERNEL | __GFP_NORETRY, 0);
        if (!page)
                return NULL;