writer: reduce atomic context usage in swap_msg
author Vyacheslav Cherkashin <v.cherkashin@samsung.com>
Thu, 24 Nov 2016 09:04:07 +0000 (12:04 +0300)
committer Vyacheslav Cherkashin <v.cherkashin@samsung.com>
Thu, 24 Nov 2016 12:24:19 +0000 (15:24 +0300)
Use a pool buffer in swap_msg. If the pool buffer is empty, the
per-cpu buffer is used as a fallback, which requires atomic context.
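
Previously every message was composed in a per-cpu buffer, so the
whole swap_msg_get()/swap_msg_put() window ran with preemption
disabled via get_cpu()/put_cpu(). With the pool, a message occupies
a free-list subbuffer and the spinlock is held only inside get/put
themselves. Caller usage is unchanged; a sketch, where MSG_SAMPLE
stands in for a real enum swap_msg_id value:

    struct swap_msg *m = swap_msg_get(MSG_SAMPLE);

    /* ... fill in and flush the message payload ... */

    swap_msg_put(m);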

Change-Id: Ica59721ed12ae74bda1bc486b6dfe1928e0c1f1f
Signed-off-by: Vyacheslav Cherkashin <v.cherkashin@samsung.com>
writer/swap_msg.c

index 025f9a4..fcd0a82 100644
@@ -26,6 +26,8 @@
 #include <linux/errno.h>
 #include <linux/atomic.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
 #include <kprobe/swap_kprobes.h>
 #include <buffer/swap_buffer_module.h>
 #include <swap-asm/swap_kprobes.h>
 #define MSG_PREFIX     "[SWAP_MSG] "
 
 
+/* simple buffer */
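+/* A flat vmalloc() area: 'count' subbuffers of 'subbuf_size' bytes each. */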
+struct sb_struct {
+       size_t subbuf_size;
+
+       size_t count;
+       void *data;
+};
+
+static int sb_init(struct sb_struct *sb, size_t count, size_t subbuf_size)
+{
+       sb->data = vmalloc(count * subbuf_size);
+       if (!sb->data)
+               return -ENOMEM;
+
+       sb->count = count;
+       sb->subbuf_size = subbuf_size;
+
+       return 0;
+}
+
+static void sb_uninit(struct sb_struct *sb)
+{
+       vfree(sb->data);
+}
+
+static void *sb_data(struct sb_struct *sb, size_t idx)
+{
+       return sb->data + sb->subbuf_size * idx;
+}
+
+static size_t sb_idx(struct sb_struct *sb, void *data)
+{
+       return (data - sb->data) / sb->subbuf_size;
+}
+
+static bool sb_contains(struct sb_struct *sb, void *data)
+{
+       void *begin = sb->data;
+       void *end = sb->data + sb->count * sb->subbuf_size;
+
+       return data >= begin && data < end;
+}
+
+static size_t sb_count(struct sb_struct *sb)
+{
+       return sb->count;
+}
+
+
+/* pool buffer */
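+/*
+ * Each free subbuffer stores, in its first size_t, the index of the
+ * next free subbuffer, forming a singly linked free list, so both
+ * pb_buf_get() and pb_buf_put() are O(1) under the spinlock.
+ */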
+struct pb_struct {
+       spinlock_t lock;
+       size_t free_first;
+       size_t free_count;
+
+       struct sb_struct buf;
+};
+
+static void *pb_data(struct pb_struct *pb, size_t idx)
+{
+       return sb_data(&pb->buf, idx);
+}
+
+static size_t pb_idx(struct pb_struct *pb, void *data)
+{
+       return sb_idx(&pb->buf, data);
+}
+
+static void pb_val_set(struct pb_struct *pb, size_t idx, size_t val)
+{
+       *(size_t *)pb_data(pb, idx) = val;
+}
+
+static size_t pb_val_get(struct pb_struct *pb, size_t idx)
+{
+       return *(size_t *)pb_data(pb, idx);
+}
+
+static int pb_init(struct pb_struct *pb, size_t count, size_t subbuf_size)
+{
+       int ret;
+       size_t idx;
+
+       ret = sb_init(&pb->buf, count, subbuf_size);
+       if (ret)
+               return ret;
+
+       spin_lock_init(&pb->lock);
+       pb->free_first = 0;
+       pb->free_count = count;
+
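+       /* Chain all subbuffers into the free list: 0 -> 1 -> ... */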
+       for (idx = 0; idx < count; ++idx)
+               pb_val_set(pb, idx, idx + 1);
+
+       return 0;
+}
+
+static void pb_uninit(struct pb_struct *pb)
+{
+       WARN(sb_count(&pb->buf) != pb->free_count,
+            "count=%zu free_count=%zu\n", sb_count(&pb->buf), pb->free_count);
+
+       sb_uninit(&pb->buf);
+}
+
+static void *pb_buf_get(struct pb_struct *pb)
+{
+       void *data = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pb->lock, flags);
+       /* free_count must be checked under the lock */
+       if (pb->free_count) {
+               data = pb_data(pb, pb->free_first);
+               pb->free_first = pb_val_get(pb, pb->free_first);
+               --pb->free_count;
+       }
+       spin_unlock_irqrestore(&pb->lock, flags);
+
+       return data;
+}
+
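+/* Return a subbuffer to the head of the free list. */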
+static void pb_buf_put(struct pb_struct *pb, void *data)
+{
+       unsigned long flags;
+       size_t idx = pb_idx(pb, data);
+
+       spin_lock_irqsave(&pb->lock, flags);
+       pb_val_set(pb, idx, pb->free_first);
+       pb->free_first = idx;
+       ++pb->free_count;
+       spin_unlock_irqrestore(&pb->lock, flags);
+}
+
+
 struct swap_msg {
        u32 msg_id;
        u32 seq_num;
@@ -45,38 +182,35 @@ struct swap_msg {
 } __packed;
 
 
-static char *cpu_buf[NR_CPUS];
+static struct sb_struct cpu_buf;
+static struct pb_struct pool_buffer;
 static atomic_t seq_num = ATOMIC_INIT(-1);
 static atomic_t discarded = ATOMIC_INIT(0);
 
 
 int swap_msg_init(void)
 {
-       size_t i;
-       const size_t end = ((size_t) 0) - 1;
+       int ret;
 
-       for (i = 0; i < NR_CPUS; ++i) {
-               cpu_buf[i] = kmalloc(SWAP_MSG_BUF_SIZE, GFP_KERNEL);
-               if (cpu_buf[i] == NULL)
-                       goto no_mem;
+       ret = sb_init(&cpu_buf, NR_CPUS, SWAP_MSG_BUF_SIZE);
+       if (ret) {
+               pr_err(MSG_PREFIX "Cannot init cpu_buf, ret=%d\n", ret);
+               return ret;
        }
 
-       return 0;
-
-no_mem:
-       --i;
-       for (; i != end; --i)
-               kfree(cpu_buf[i]);
+       ret = pb_init(&pool_buffer, NR_CPUS * 32, SWAP_MSG_BUF_SIZE);
+       if (ret) {
+               sb_uninit(&cpu_buf);
+               pr_err(MSG_PREFIX "Cannot init pool_buffer, ret=%d\n", ret);
+       }
 
-       return -ENOMEM;
+       return ret;
 }
 
 void swap_msg_exit(void)
 {
-       int i;
-
-       for (i = 0; i < NR_CPUS; ++i)
-               kfree(cpu_buf[i]);
+       pb_uninit(&pool_buffer);
+       sb_uninit(&cpu_buf);
 }
 
 void swap_msg_seq_num_reset(void)
@@ -111,7 +245,9 @@ struct swap_msg *swap_msg_get(enum swap_msg_id id)
 {
        struct swap_msg *m;
 
-       m = (struct swap_msg *)cpu_buf[get_cpu()];
+       m = pb_buf_get(&pool_buffer);
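+       /* Pool exhausted: fall back to the per-cpu buffer. */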
+       if (!m)
+               m = sb_data(&cpu_buf, get_cpu());
 
        m->msg_id = (u32)id;
        m->seq_num = atomic_inc_return(&seq_num);
@@ -151,7 +287,10 @@ EXPORT_SYMBOL_GPL(swap_msg_flush_wakeupoff);
 
 void swap_msg_put(struct swap_msg *m)
 {
-       put_cpu();
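+       /* Only the per-cpu fallback took the CPU via get_cpu(). */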
+       if (unlikely(sb_contains(&cpu_buf, m)))
+               put_cpu();
+       else
+               pb_buf_put(&pool_buffer, m);
 }
 EXPORT_SYMBOL_GPL(swap_msg_put);