Commit d7ec4bfe authored by Vaibhav Nagarnaik, committed by Steven Rostedt

ring-buffer: Set __GFP_NORETRY flag for ring buffer allocating process



The tracing ring buffer is allocated from kernel memory. While
allocating a large chunk of memory, the system can run out of memory
and invoke the OOM killer, which destabilizes the system: random
processes may get killed while the allocation is in progress.

This patch adds the __GFP_NORETRY flag to the ring buffer allocation
calls so that they fail gracefully, instead of triggering the OOM
killer, when the system cannot satisfy the allocation request.
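
As an aside, here is a minimal sketch of the allocation pattern the
patch converts the call sites to; the helper name below is illustrative
and not part of this patch:

	#include <linux/gfp.h>
	#include <linux/mm.h>		/* page_address() */
	#include <linux/topology.h>	/* cpu_to_node() */

	/*
	 * Illustrative helper, not from this patch: allocate one page
	 * on the given CPU's node.  __GFP_NORETRY tells the page
	 * allocator to give up early instead of retrying hard and
	 * invoking the OOM killer, so failure comes back as NULL and
	 * the caller can unwind gracefully.
	 */
	static void *rb_alloc_page_noretry(int cpu)
	{
		struct page *page;

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			return NULL;
		return page_address(page);
	}

Without __GFP_NORETRY, a GFP_KERNEL allocation under memory pressure
retries aggressively and may invoke the OOM killer; with the flag, the
allocator bails out and the caller's existing error path (goto
free_pages / return NULL) handles the failure.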

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Link: http://lkml.kernel.org/r/1307491302-9236-1-git-send-email-vnagarnaik@google.com


Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 22fe9b54
+19 −6
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1004,9 +1004,14 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
-
+		/*
+		 * __GFP_NORETRY flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is
+		 * not destabilized.
+		 */
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+				    GFP_KERNEL | __GFP_NORETRY,
+				    cpu_to_node(cpu_buffer->cpu));
 		if (!bpage)
 			goto free_pages;
 
@@ -1015,7 +1020,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add(&bpage->list, &pages);
 
 		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-					GFP_KERNEL, 0);
+					GFP_KERNEL | __GFP_NORETRY, 0);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
@@ -1377,13 +1382,20 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
 			struct page *page;
+			/*
+			 * __GFP_NORETRY flag makes sure that the allocation
+			 * fails gracefully without invoking oom-killer and
+			 * the system is not destabilized.
+			 */
 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						  cache_line_size()),
-					    GFP_KERNEL, cpu_to_node(cpu));
+					    GFP_KERNEL | __GFP_NORETRY,
+					    cpu_to_node(cpu));
 			if (!bpage)
 				goto free_pages;
 			list_add(&bpage->list, &pages);
-			page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+			page = alloc_pages_node(cpu_to_node(cpu),
+						GFP_KERNEL | __GFP_NORETRY, 0);
 			if (!page)
 				goto free_pages;
 			bpage->page = page_address(page);
@@ -3737,7 +3749,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 	struct buffer_data_page *bpage;
 	struct page *page;
 
-	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	page = alloc_pages_node(cpu_to_node(cpu),
+				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
 		return NULL;