nvme-tcp: fix possible crash in write_zeroes processing
author     Sagi Grimberg <sagi@grimberg.me>
           Mon, 23 Mar 2020 22:06:30 +0000 (15:06 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 29 Apr 2020 14:32:59 +0000 (16:32 +0200)
[ Upstream commit 25e5cb780e62bde432b401f312bb847edc78b432 ]

We cannot look at blk_rq_payload_bytes without first checking that
the request has mappable physical segments (i.e.
blk_rq_nr_phys_segments(rq) != 0), and only then take the request
payload bytes. Not doing so caused us to send a wrong SGL to the
target, or even dereference a non-existing buffer in case we
actually got to the data send sequence (if the data was in-capsule).
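
For reference, the guarded pattern this patch establishes, condensed
from the two hunks below (a sketch only; the unchanged else branch
that sets up a regular host-data SGL is not visible in the hunk and
is left elided here):

    /* nvme_tcp_setup_cmd_pdu(): only consult the payload size when
     * the request actually has mappable segments; write-zeroes has
     * none, so its data_len becomes 0. */
    req->data_len = blk_rq_nr_phys_segments(rq) ?
                            blk_rq_payload_bytes(rq) : 0;

    /* nvme_tcp_map_data(): a request without segments now gets a
     * NULL SGL instead of a bogus inline/host-data descriptor. */
    if (!blk_rq_nr_phys_segments(rq))
            nvme_tcp_set_sg_null(c);
    else if (rq_data_dir(rq) == WRITE &&
        req->data_len <= nvme_tcp_inline_data_size(queue))
            nvme_tcp_set_sg_inline(queue, c, req->data_len);
    else
            /* regular host-data SGL, unchanged by this patch */;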

Reported-by: Tony Asleson <tasleson@redhat.com>
Suggested-by: Chaitanya Kulkarni <Chaitanya.Kulkarni@wdc.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/nvme/host/tcp.c

index 2449844..11e84ed 100644
@@ -164,16 +164,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 {
        struct request *rq;
-       unsigned int bytes;
 
        if (unlikely(nvme_tcp_async_req(req)))
                return false; /* async events don't have a request */
 
        rq = blk_mq_rq_from_pdu(req);
-       bytes = blk_rq_payload_bytes(rq);
 
-       return rq_data_dir(rq) == WRITE && bytes &&
-               bytes <= nvme_tcp_inline_data_size(req->queue);
+       return rq_data_dir(rq) == WRITE && req->data_len &&
+               req->data_len <= nvme_tcp_inline_data_size(req->queue);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -2090,7 +2088,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
-       if (rq_data_dir(rq) == WRITE && req->data_len &&
+       if (!blk_rq_nr_phys_segments(rq))
+               nvme_tcp_set_sg_null(c);
+       else if (rq_data_dir(rq) == WRITE &&
            req->data_len <= nvme_tcp_inline_data_size(queue))
                nvme_tcp_set_sg_inline(queue, c, req->data_len);
        else
@@ -2117,7 +2117,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
        req->data_sent = 0;
        req->pdu_len = 0;
        req->pdu_sent = 0;
-       req->data_len = blk_rq_payload_bytes(rq);
+       req->data_len = blk_rq_nr_phys_segments(rq) ?
+                               blk_rq_payload_bytes(rq) : 0;
        req->curr_bio = rq->bio;
 
        if (rq_data_dir(rq) == WRITE &&