diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b0c7aa4..731201b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                             unsigned nr_pages)
 {
        struct buffer_page *bpage, *tmp;
-       unsigned long addr;
        LIST_HEAD(pages);
        unsigned i;
 
        WARN_ON(!nr_pages);
 
        for (i = 0; i < nr_pages; i++) {
+               struct page *page;
+               /*
+                * __GFP_NORETRY flag makes sure that the allocation fails
+                * gracefully without invoking oom-killer and the system is
+                * not destabilized.
+                */
                bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-                                   GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+                                   GFP_KERNEL | __GFP_NORETRY,
+                                   cpu_to_node(cpu_buffer->cpu));
                if (!bpage)
                        goto free_pages;
 
@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
                list_add(&bpage->list, &pages);
 
-               addr = __get_free_page(GFP_KERNEL);
-               if (!addr)
+               page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+                                       GFP_KERNEL | __GFP_NORETRY, 0);
+               if (!page)
                        goto free_pages;
-               bpage->page = (void *)addr;
+               bpage->page = page_address(page);
                rb_init_page(bpage->page);
        }
 
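Note on the pattern above, which recurs throughout this patch: buffer pages are now taken from alloc_pages_node(), so each page lands on the NUMA node of the CPU that owns the per-cpu buffer, and __GFP_NORETRY lets the allocation fail cleanly under memory pressure instead of invoking the OOM killer. A minimal sketch of the pattern in isolation (kernel context assumed; rb_try_alloc_page() is a hypothetical helper, not part of this patch):

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/topology.h>

	/*
	 * Hypothetical helper: allocate one page on the memory node
	 * backing @cpu, failing fast under pressure (__GFP_NORETRY)
	 * rather than retrying until the OOM killer fires.
	 */
	static void *rb_try_alloc_page(int cpu)
	{
		struct page *page;

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			return NULL;	/* caller unwinds its partial allocations */

		return page_address(page);
	}
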
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
-       unsigned long addr;
+       struct page *page;
        int ret;
 
        cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
        rb_check_bpage(cpu_buffer, bpage);
 
        cpu_buffer->reader_page = bpage;
-       addr = __get_free_page(GFP_KERNEL);
-       if (!addr)
+       page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+       if (!page)
                goto fail_free_reader;
-       bpage->page = (void *)addr;
+       bpage->page = page_address(page);
        rb_init_page(bpage->page);
 
        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
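Note that the reader page allocated here at buffer-creation time keeps plain GFP_KERNEL: this hunk gains the node-aware alloc_pages_node() call, but unlike the bulk allocation and resize paths it does not add __GFP_NORETRY.
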
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        unsigned nr_pages, rm_pages, new_pages;
        struct buffer_page *bpage, *tmp;
        unsigned long buffer_size;
-       unsigned long addr;
        LIST_HEAD(pages);
        int i, cpu;
 
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
        for_each_buffer_cpu(buffer, cpu) {
                for (i = 0; i < new_pages; i++) {
+                       struct page *page;
+                       /*
+                        * __GFP_NORETRY flag makes sure that the allocation
+                        * fails gracefully without invoking oom-killer and
+                        * the system is not destabilized.
+                        */
                        bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                                                  cache_line_size()),
-                                           GFP_KERNEL, cpu_to_node(cpu));
+                                           GFP_KERNEL | __GFP_NORETRY,
+                                           cpu_to_node(cpu));
                        if (!bpage)
                                goto free_pages;
                        list_add(&bpage->list, &pages);
-                       addr = __get_free_page(GFP_KERNEL);
-                       if (!addr)
+                       page = alloc_pages_node(cpu_to_node(cpu),
+                                               GFP_KERNEL | __GFP_NORETRY, 0);
+                       if (!page)
                                goto free_pages;
-                       bpage->page = (void *)addr;
+                       bpage->page = page_address(page);
                        rb_init_page(bpage->page);
                }
        }
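For contrast, the call being replaced in the resize loop, __get_free_page(GFP_KERNEL), had two drawbacks that the new code addresses: the page came from whatever node the resizing task happened to be running on, and a plain GFP_KERNEL allocation keeps retrying under pressure, potentially OOM-killing other tasks when a user requests a very large buffer. Side by side (sketch, kernel context assumed):

	/* old: node chosen by the allocating task's location; retries hard */
	addr = __get_free_page(GFP_KERNEL);

	/* new: targets the node of the CPU that owns the buffer;
	 * fails gracefully under memory pressure */
	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
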
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or NULL on error.
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
        struct buffer_data_page *bpage;
-       unsigned long addr;
+       struct page *page;
 
-       addr = __get_free_page(GFP_KERNEL);
-       if (!addr)
+       page = alloc_pages_node(cpu_to_node(cpu),
+                               GFP_KERNEL | __GFP_NORETRY, 0);
+       if (!page)
                return NULL;
 
-       bpage = (void *)addr;
+       bpage = page_address(page);
 
        rb_init_page(bpage);
 
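This hunk also changes the exported API: ring_buffer_alloc_read_page() now takes the cpu whose buffer the page will be swapped with, so the staging page can be allocated on the same node, and existing callers must be updated to pass it. A hypothetical caller would look like this (sketch; error handling abbreviated):

	void *data;
	int ret;

	data = ring_buffer_alloc_read_page(buffer, cpu);
	if (!data)
		return -ENOMEM;

	/* pull up to a page of data from @cpu's buffer into @data;
	 * ret >= 0 means the page was filled, ret < 0 nothing to read */
	ret = ring_buffer_read_page(buffer, &data, PAGE_SIZE, cpu, 0);

	ring_buffer_free_read_page(buffer, data);
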
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
        unsigned long *p = filp->private_data;
-       char buf[64];
        unsigned long val;
        int ret;
 
-       if (cnt >= sizeof(buf))
-               return -EINVAL;
-
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-
-       buf[cnt] = 0;
-
-       ret = strict_strtoul(buf, 10, &val);
-       if (ret < 0)
+       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+       if (ret)
                return ret;
 
        if (val)
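
The last hunk replaces an open-coded copy_from_user()-plus-strict_strtoul() sequence with kstrtoul_from_user(), which performs the bounded copy, NUL termination, and base-10 parse in one call and returns 0 on success or a negative errno; the check accordingly tightens from ret < 0 to ret. A minimal sketch of the same pattern in a standalone write handler (hypothetical file op, kernel context assumed):

	#include <linux/fs.h>
	#include <linux/kernel.h>

	static ssize_t example_write(struct file *filp, const char __user *ubuf,
				     size_t cnt, loff_t *ppos)
	{
		unsigned long val;
		int ret;

		/* at most cnt bytes are copied and parsed; no local
		 * buffer or explicit length check is needed */
		ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
		if (ret)
			return ret;

		/* act on val here */

		*ppos += cnt;
		return cnt;
	}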