/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
#define DRV_PFX "xen-blkback:"
#define DPRINTK(fmt, args...)				\
	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
		 __func__, __LINE__, ##args)
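/*
 * Usage sketch (illustrative only; the error value is hypothetical):
 *
 *	DPRINTK("grant map failed, err %d", err);
 *
 * expands to a pr_debug() call prefixed with DRV_PFX plus the calling
 * function and line number, so the message only appears when dynamic
 * debug enables this site.
 */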
/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
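/*
 * Worked example (assuming 4 KiB pages and the 8-byte
 * struct blkif_request_segment_aligned): SEGS_PER_INDIRECT_FRAME is
 * 4096 / 8 = 512, so the default MAX_INDIRECT_SEGMENTS of 256 fits in
 * a single indirect page: INDIRECT_PAGES(256) == (256 + 511)/512 == 1.
 */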
/* Not a real protocol. Used to generate ring structs which contain
 * the elements common to all protocols only. This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places. */
struct blkif_common_request {
	char dummy;
};
struct blkif_common_response {
	char dummy;
};
struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS. This value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));
/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
#pragma pack(pop)

/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS. This value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));
struct blkif_x86_64_response {
	uint64_t       __attribute__((__aligned__(8))) id;
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_common_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_x86_64_response);
union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};
enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};
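/*
 * Sketch of how a backend typically picks one of these (illustrative,
 * not part of this header): the frontend's xenstore "protocol" key is
 * compared against the ABI names from <xen/interface/io/protocols.h>:
 *
 *	if (strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE) == 0)
 *		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
 *	else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
 *		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
 *	else if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
 *		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
 */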
struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;
/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE	2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE		0
/*
 * This persistent grant has been used recently. The flag is set when
 * PERSISTENT_GNT_ACTIVE is cleared, so the purge logic knows the grant
 * was in use not long ago.
 */
#define PERSISTENT_GNT_WAS_ACTIVE	1

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS			32
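/*
 * Note (illustrative arithmetic, assuming a 4 KiB shared ring page):
 * a native blkif request is 112 bytes and the ring macros round the
 * slot count down to a power of two, so (4096 - 64)/112 = 36 becomes
 * 32 ring slots; the pending_req pool is sized to match.
 */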
struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node node;
	struct list_head remove_node;
};
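/*
 * Illustrative lifecycle of the flag bits above (a sketch of what the
 * backend's LRU logic does, not a definitive sequence):
 *
 *	set_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);
 *		... grant is mapped into a request ...
 *	set_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags);
 *	clear_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);
 *
 * The purge scan skips grants that are ACTIVE or WAS_ACTIVE, clearing
 * WAS_ACTIVE as it goes, so only grants idle since the previous scan
 * are reclaimed.
 */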
struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	/* Private fields. */
	spinlock_t		blk_ring_lock;
	atomic_t		refcnt;

	wait_queue_head_t	wq;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;
	/* One thread per blkif. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* tree to store persistent grants */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* used by the kworker that offloads work from the persistent purge */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* buffer of free pages to map grant refs */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* statistics */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	wait_queue_head_t	waiting_to_free;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
};
struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
};
#define vbd_sz(_v)	((_v)->bdev->bd_part ?				\
			 (_v)->bdev->bd_part->nr_sects :		\
			 get_capacity((_v)->bdev->bd_disk))
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			wake_up(&(_b)->waiting_to_free);\
	} while (0)
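/*
 * Usage sketch (illustrative only): any context that holds a blkif
 * pointer beyond the current call chain pins it first; the final
 * xen_blkif_put() wakes waiting_to_free so teardown can complete:
 *
 *	xen_blkif_get(blkif);
 *	... hand blkif to another thread or work item ...
 *	xen_blkif_put(blkif);
 */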
struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = src->operation;
	switch (src->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}
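/*
 * Sketch of how these helpers are meant to be used by a ring consumer
 * (illustrative; 'rc' is the request-consumer index in the backend's
 * dispatch loop):
 *
 *	struct blkif_request req;
 *
 *	switch (blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *		       sizeof(req));
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		blkif_get_x86_32_req(&req,
 *				RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		blkif_get_x86_64_req(&req,
 *				RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *		break;
 *	}
 */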
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */