Revert "ring-buffer: Use long for nr_pages to avoid overflow failures"
author: Marek Szyprowski <m.szyprowski@samsung.com>
Wed, 25 Apr 2018 10:20:32 +0000 (12:20 +0200)
committer: Marek Szyprowski <m.szyprowski@samsung.com>
Wed, 25 Apr 2018 10:24:55 +0000 (12:24 +0200)
This reverts commit 453babf108cade06562021a1ce8787f28e5fee08.

kernel/trace/ring_buffer.c

index 07d70d1dc9b93909b8ff107ab59ccb2c23f0849d..d503a73f3736560c604fc1cf6702d1b20fb7163c 100644 (file)
@@ -461,7 +461,7 @@ struct ring_buffer_per_cpu {
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
-       unsigned long                   nr_pages;
+       unsigned int                    nr_pages;
        unsigned int                    current_context;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
@@ -482,7 +482,7 @@ struct ring_buffer_per_cpu {
        u64                             write_stamp;
        u64                             read_stamp;
        /* ring buffer pages to update, > 0 to add, < 0 to remove */
-       long                            nr_pages_to_update;
+       int                             nr_pages_to_update;
        struct list_head                new_pages; /* new pages to add */
        struct work_struct              update_pages_work;
        struct completion               update_done;
@@ -1161,10 +1161,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
        return 0;
 }
 
-static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
 {
+       int i;
        struct buffer_page *bpage, *tmp;
-       long i;
 
        for (i = 0; i < nr_pages; i++) {
                struct page *page;
@@ -1201,7 +1201,7 @@ free_pages:
 }
 
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-                            unsigned long nr_pages)
+                            unsigned nr_pages)
 {
        LIST_HEAD(pages);
 
@@ -1226,7 +1226,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
@@ -1326,9 +1326,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
 {
        struct ring_buffer *buffer;
-       long nr_pages;
        int bsize;
-       int cpu;
+       int cpu, nr_pages;
 
        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1454,12 +1453,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
 }
 
 static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
 {
        struct list_head *tail_page, *to_remove, *next_page;
        struct buffer_page *to_remove_page, *tmp_iter_page;
        struct buffer_page *last_page, *first_page;
-       unsigned long nr_removed;
+       unsigned int nr_removed;
        unsigned long head_bit;
        int page_entries;
 
@@ -1676,7 +1675,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
                        int cpu_id)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned long nr_pages;
+       unsigned nr_pages;
        int cpu, err = 0;
 
        /*
@@ -4649,9 +4648,8 @@ static int rb_cpu_notify(struct notifier_block *self,
        struct ring_buffer *buffer =
                container_of(self, struct ring_buffer, cpu_notify);
        long cpu = (long)hcpu;
-       long nr_pages_same;
-       int cpu_i;
-       unsigned long nr_pages;
+       int cpu_i, nr_pages_same;
+       unsigned int nr_pages;
 
        switch (action) {
        case CPU_UP_PREPARE: