[net/9p] Add gup/zero_copy support to VirtIO transport layer.
author    Venkateswararao Jujjuri (JV) <jvrao@linux.vnet.ibm.com>
Fri, 28 Jan 2011 23:22:36 +0000 (15:22 -0800)
committer Eric Van Hensbergen <ericvh@gmail.com>
Tue, 15 Mar 2011 14:57:35 +0000 (09:57 -0500)
Modify the p9_virtio_request() and req_done() functions to support an
additional payload sent down to the transport layer through tc->pubuf
and tc->pkbuf.
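
A minimal sketch of the intended call pattern (illustrative only: the
client-side code that fills these fields is not part of this patch, and
the names count/from_user/udata/kdata are assumptions; only the
tc->pbuf_size/pubuf/pkbuf fields come from the transport changes below):

    /* Hand the payload straight to the transport instead of copying it
     * into the PDU; p9_virtio_request() maps it into the virtio ring,
     * pinning user pages via p9_payload_gup() when needed. */
    req->tc->pbuf_size = count;          /* bytes of extra payload */
    if (from_user)
            req->tc->pubuf = udata;      /* user buffer: gup/zero-copy path */
    else
            req->tc->pkbuf = kdata;      /* kernel buffer: packed with pack_sg_list() */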

Signed-off-by: Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
net/9p/trans_common.h
net/9p/trans_virtio.c

diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 04977e0..7630922 100644
@@ -12,6 +12,9 @@
  *
  */
 
+/* TRUE if we are in user context, i.e. get_fs() is not KERNEL_DS */
+#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
+
 /**
  * struct trans_rpage_info - To store mapped page information in PDU.
  * @rp_alloc: Set if this structure was kmalloc'd rather than carved out of unused space in the PDU.
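
The @rp_alloc flag records where this bookkeeping structure lives: when the
request PDU still has enough spare capacity, trans_rpage_info is carved out of
the unused tail of tc->sdata and nothing needs to be freed on completion;
otherwise it is kmalloc'd and req_done() must kfree it. A minimal sketch of
that decision (mirroring the logic added to p9_virtio_request() in
trans_virtio.c below):

    int rpinfo_size = sizeof(struct trans_rpage_info) +
                      sizeof(struct page *) * nr_pages;

    if (rpinfo_size <= req->tc->capacity - req->tc->size) {
            /* reuse the spare space at the end of the PDU buffer */
            rpinfo = (struct trans_rpage_info *)(req->tc->sdata + req->tc->size);
            rpinfo->rp_alloc = 0;        /* nothing to kfree on completion */
    } else {
            /* NULL check omitted here for brevity */
            rpinfo = kmalloc(rpinfo_size, GFP_NOFS);
            rpinfo->rp_alloc = 1;        /* req_done() must kfree this */
    }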
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index c8f3f72..4b236de 100644
@@ -45,6 +45,7 @@
 #include <linux/scatterlist.h>
 #include <linux/virtio.h>
 #include <linux/virtio_9p.h>
+#include "trans_common.h"
 
 #define VIRTQUEUE_NUM  128
 
@@ -155,6 +156,14 @@ static void req_done(struct virtqueue *vq)
                                        rc->tag);
                        req = p9_tag_lookup(chan->client, rc->tag);
                        req->status = REQ_STATUS_RCVD;
+                       if (req->tc->private) {
+                               struct trans_rpage_info *rp = req->tc->private;
+                       /* Release pages */
+                               p9_release_req_pages(rp);
+                               if (rp->rp_alloc)
+                                       kfree(rp);
+                               req->tc->private = NULL;
+                       }
                        p9_client_cb(chan->client, req);
                } else {
                        spin_unlock_irqrestore(&chan->lock, flags);
@@ -203,6 +212,38 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 }
 
 /**
+ * pack_sg_list_p - Like pack_sg_list(), but packs a list of pages instead of a buffer
+ * @sg: scatter/gather list to pack into
+ * @start: which segment of the sg_list to start at
+ * @limit: maximum number of segments that may be used in the sg_list
+ * @pdata_off: offset into the first page
+ * @pdata: list of pages to pack into the sg list
+ * @count: amount of data to pack into the scatter/gather list
+ */
+static int
+pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
+               struct page **pdata, int count)
+{
+       int s;
+       int i = 0;
+       int index = start;
+
+       if (pdata_off) {
+               s = min((int)(PAGE_SIZE - pdata_off), count);
+               sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
+               count -= s;
+       }
+
+       while (count) {
+               BUG_ON(index >= limit);
+               s = min((int)PAGE_SIZE, count);
+               sg_set_page(&sg[index++], pdata[i++], s, 0);
+               count -= s;
+       }
+       return index - start;
+}
+
+/**
  * p9_virtio_request - issue a request
  * @client: client instance issuing the request
  * @req: request to be issued
@@ -212,22 +253,97 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 static int
 p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
-       int in, out;
+       int in, out, inp, outp;
        struct virtio_chan *chan = client->trans;
        char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
        unsigned long flags;
-       int err;
+       size_t pdata_off = 0;
+       struct trans_rpage_info *rpinfo = NULL;
+       int err, pdata_len = 0;
 
        P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
 
 req_retry:
        req->status = REQ_STATUS_SENT;
 
+       if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
+               int nr_pages = p9_nr_pages(req);
+               int rpinfo_size = sizeof(struct trans_rpage_info) +
+                       sizeof(struct page *) * nr_pages;
+
+               if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
+                       /* We can use sdata */
+                       req->tc->private = req->tc->sdata + req->tc->size;
+                       rpinfo = (struct trans_rpage_info *)req->tc->private;
+                       rpinfo->rp_alloc = 0;
+               } else {
+                       req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
+                       if (!req->tc->private) {
+                               P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
+                                       "private kmalloc returned NULL");
+                               return -ENOMEM;
+                       }
+                       rpinfo = (struct trans_rpage_info *)req->tc->private;
+                       rpinfo->rp_alloc = 1;
+               }
+
+               err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
+                               req->tc->id == P9_TREAD ? 1 : 0);
+               if (err < 0) {
+                       if (rpinfo->rp_alloc)
+                               kfree(rpinfo);
+                       return err;
+               }
+       }
+
        spin_lock_irqsave(&chan->lock, flags);
+
+       /* Handle out VirtIO ring buffers */
        out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
-                                                               req->tc->size);
-       in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
-                                                               client->msize);
+                       req->tc->size);
+
+       if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
+               /* We have an additional write payload buffer to take care of */
+               if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
+                       outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+                                       pdata_off, rpinfo->rp_data, pdata_len);
+               } else {
+                       char *pbuf = req->tc->pubuf ? req->tc->pubuf :
+                                                               req->tc->pkbuf;
+                       outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
+                                       req->tc->pbuf_size);
+               }
+               out += outp;
+       }
+
+       /* Handle in VirtIO ring buffers */
+       if (req->tc->pbuf_size &&
+               ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
+               /*
+                * Take care of the additional read payload.
+                * 11 is the read/write header: PDU header (7) + IO size (4).
+                * Arrange things so that the server places the header in the
+                * allocated memory and the payload directly in the user buffer.
+                */
+               inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+                /*
+                 * Running executables from the filesystem may result in
+                 * a read request with a kernel buffer rather than a user buffer.
+                 */
+               if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
+                       in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
+                                       pdata_off, rpinfo->rp_data, pdata_len);
+               } else {
+                       char *pbuf = req->tc->pubuf ? req->tc->pubuf :
+                                                               req->tc->pkbuf;
+                       in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
+                                       pbuf, req->tc->pbuf_size);
+               }
+               in += inp;
+       } else {
+               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
+                               client->msize);
+       }
 
        err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
        if (err < 0) {
@@ -246,6 +362,8 @@ req_retry:
                        P9_DPRINTK(P9_DEBUG_TRANS,
                                        "9p debug: "
                                        "virtio rpc add_buf returned failure");
+                       if (rpinfo && rpinfo->rp_alloc)
+                               kfree(rpinfo);
                        return -EIO;
                }
        }
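
With these changes a zero-copy read ends up in the virtio ring with the reply
header separated from the data, so the server writes the payload straight into
the caller's pages. A rough sketch of the resulting scatter/gather layout
(segment counts vary with the request; this is an illustration, not part of
the patch):

    chan->sg[0 .. out-1]             Tread PDU built in req->tc->sdata       (driver-readable)
    chan->sg[out .. out+inp-1]       11-byte reply header into rdata         (driver-writable)
    chan->sg[out+inp .. out+in-1]    pinned user pages from rpinfo->rp_data  (driver-writable)

The 11 bytes are the 9p read-reply header: size[4] + type[1] + tag[2] (the
7-byte PDU header) plus count[4] (the IO size). The header lands in kernel
memory while data[count] is placed by the server directly into the pages
pinned by p9_payload_gup().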