block: replace g_new0 with g_new for bottom half allocation.
author    Paolo Bonzini <pbonzini@redhat.com>
Wed, 17 Dec 2014 15:10:00 +0000 (16:10 +0100)
committer Stefan Hajnoczi <stefanha@redhat.com>
Tue, 13 Jan 2015 11:47:56 +0000 (11:47 +0000)
This saves about 15% of the clock cycles spent on bottom half
allocation.  Using the slice allocator instead does not add a visible
improvement: its allocation path is faster than malloc's, but freeing
appears to be slower.
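
(For illustration, a minimal sketch of the before/after pattern, assuming
GLib's g_new0/g_new; the struct Item and its fields are hypothetical, not
QEMU's:)

    #include <glib.h>

    typedef struct Item {
        void *ctx;
        void (*cb)(void *opaque);
    } Item;

    /* Before: g_new0 zeroes the entire allocation, then the caller
     * fills in each field individually. */
    Item *item_new_zeroed(void *ctx, void (*cb)(void *))
    {
        Item *it = g_new0(Item, 1);
        it->ctx = ctx;
        it->cb = cb;
        return it;
    }

    /* After: g_new skips the up-front memset; a compound literal
     * assigns the fields that need defined values in one step. */
    Item *item_new(void *ctx, void (*cb)(void *))
    {
        Item *it = g_new(Item, 1);
        *it = (Item){ .ctx = ctx, .cb = cb };
        return it;
    }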

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
async.c

diff --git a/async.c b/async.c
index 572f239..2be88cc 100644
--- a/async.c
+++ b/async.c
@@ -44,10 +44,12 @@ struct QEMUBH {
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
-    bh = g_new0(QEMUBH, 1);
-    bh->ctx = ctx;
-    bh->cb = cb;
-    bh->opaque = opaque;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
     qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
     /* Make sure that the members are ready before putting bh into list */
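
(Why dropping g_new0 is safe here: per the C standard's initialization
rules, members not named in a designated initializer are implicitly
zero-initialized, so the compound literal above still clears the
remaining QEMUBH fields that g_new0's memset used to cover.  A small
self-contained demonstration, using an illustrative struct:)

    #include <assert.h>

    struct S { int a; int b; };

    int main(void)
    {
        struct S s = (struct S){ .a = 1 };  /* .b is implicitly zeroed */
        assert(s.b == 0);
        return 0;
    }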