/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation. Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <linux/lcm.h>

#include "ore_raid.h"

MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
MODULE_LICENSE("GPL");

/* ore_verify_layout does a couple of things:
 * 1. Given a minimum number of needed parameters fixes up the rest of the
 *    members to be operational for the ore. The needed parameters are those
 *    that are defined by the pnfs-objects layout STD.
 * 2. Check to see if the current ore code actually supports these parameters
 *    for example stripe_unit must be a multiple of the system PAGE_SIZE,
 *    and so on.
 * 3. Cache some heavily used calculations that will be needed by users.
 */

enum { BIO_MAX_PAGES_KMALLOC =
	(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};

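/* Illustrative arithmetic (not from the original file): bio_kmalloc()
 * co-allocates the struct bio and its bio_vec array, so this is the
 * largest vector that still fits in a single page. E.g. with 4K pages,
 * an approximately 100-byte struct bio and 16-byte bio_vecs this comes
 * to roughly (4096 - 100) / 16 = ~249 pages per BIO.
 */
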
int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
{
	u64 stripe_length;

	switch (layout->raid_algorithm) {
	case PNFS_OSD_RAID_0:
		layout->parity = 0;
		break;
	case PNFS_OSD_RAID_5:
		layout->parity = 1;
		break;
	case PNFS_OSD_RAID_PQ:
	case PNFS_OSD_RAID_4:
	default:
		ORE_ERR("Only RAID_0/5 for now\n");
		return -EINVAL;
	}
	if (0 != (layout->stripe_unit & ~PAGE_MASK)) {
		ORE_ERR("Stripe Unit(0x%llx)"
			" must be Multiples of PAGE_SIZE(0x%lx)\n",
			_LLU(layout->stripe_unit), PAGE_SIZE);
		return -EINVAL;
	}
	if (layout->group_width) {
		if (!layout->group_depth) {
			ORE_ERR("group_depth == 0 && group_width != 0\n");
			return -EINVAL;
		}
		if (total_comps < (layout->group_width * layout->mirrors_p1)) {
			ORE_ERR("Data Map wrong, "
				"numdevs=%d < group_width=%d * mirrors=%d\n",
				total_comps, layout->group_width,
				layout->mirrors_p1);
			return -EINVAL;
		}
		layout->group_count = total_comps / layout->mirrors_p1 /
						layout->group_width;
	} else {
		if (layout->group_depth) {
			printk(KERN_NOTICE "Warning: group_depth ignored "
				"group_width == 0 && group_depth == %lld\n",
				_LLU(layout->group_depth));
		}
		layout->group_width = total_comps / layout->mirrors_p1;
		layout->group_depth = -1;
		layout->group_count = 1;
	}

	stripe_length = (u64)layout->group_width * layout->stripe_unit;
	if (stripe_length >= (1ULL << 32)) {
		ORE_ERR("Stripe_length(0x%llx) >= 32bit is not supported\n",
			_LLU(stripe_length));
		return -EINVAL;
	}

	layout->max_io_length =
		(BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
					(layout->group_width - layout->parity);
	if (layout->parity) {
		unsigned stripe_length =
				(layout->group_width - layout->parity) *
				layout->stripe_unit;

		/* Round max_io_length down to a whole number of stripes */
		layout->max_io_length /= stripe_length;
		layout->max_io_length *= stripe_length;
	}
	return 0;
}
EXPORT_SYMBOL(ore_verify_layout);

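/* Illustrative caller sketch (not part of the original file; the field
 * values are made up). A filesystem would fill the on-wire layout
 * parameters and let ore_verify_layout() derive the cached members:
 *
 *	struct ore_layout layout = {
 *		.stripe_unit	= PAGE_SIZE,
 *		.group_width	= 6,
 *		.group_depth	= 0x10000,
 *		.mirrors_p1	= 1,
 *		.raid_algorithm	= PNFS_OSD_RAID_5,
 *	};
 *	int ret = ore_verify_layout(total_comps, &layout);
 *
 * On success layout.parity is set from raid_algorithm, the group_width/
 * group_depth/group_count members are made consistent, and max_io_length
 * is cached for later ore_get_rw_state() calls.
 */
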
static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
{
	return ios->oc->comps[index & ios->oc->single_comp].cred;
}

static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
{
	return &ios->oc->comps[index & ios->oc->single_comp].obj;
}

static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
{
	ORE_DBGMSG2("oc->first_dev=%d oc->numdevs=%d i=%d oc->ods=%p\n",
		    ios->oc->first_dev, ios->oc->numdevs, index,
		    ios->oc->ods);

	return ore_comp_dev(ios->oc, index);
}

int _ore_get_io_state(struct ore_layout *layout,
		      struct ore_components *oc, unsigned numdevs,
		      unsigned sgs_per_dev, unsigned num_par_pages,
		      struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	struct page **pages;
	struct osd_sg_entry *sgilist;
	struct __alloc_all_io_state {
		struct ore_io_state ios;
		struct ore_per_dev_state per_dev[numdevs];
		union {
			struct osd_sg_entry sglist[sgs_per_dev * numdevs];
			struct page *pages[num_par_pages];
		};
	} *_aios;

	if (likely(sizeof(*_aios) <= PAGE_SIZE)) {
		_aios = kzalloc(sizeof(*_aios), GFP_KERNEL);
		if (unlikely(!_aios)) {
			ORE_DBGMSG("Failed kzalloc bytes=%zd\n",
				   sizeof(*_aios));
			*pios = NULL;
			return -ENOMEM;
		}
		pages = num_par_pages ? _aios->pages : NULL;
		sgilist = sgs_per_dev ? _aios->sglist : NULL;
		ios = &_aios->ios;
	} else {
		struct __alloc_small_io_state {
			struct ore_io_state ios;
			struct ore_per_dev_state per_dev[numdevs];
		} *_aio_small;
		union __extra_part {
			struct osd_sg_entry sglist[sgs_per_dev * numdevs];
			struct page *pages[num_par_pages];
		} *extra_part;

		_aio_small = kzalloc(sizeof(*_aio_small), GFP_KERNEL);
		if (unlikely(!_aio_small)) {
			ORE_DBGMSG("Failed alloc first part bytes=%zd\n",
				   sizeof(*_aio_small));
			*pios = NULL;
			return -ENOMEM;
		}
		extra_part = kzalloc(sizeof(*extra_part), GFP_KERNEL);
		if (unlikely(!extra_part)) {
			ORE_DBGMSG("Failed alloc second part bytes=%zd\n",
				   sizeof(*extra_part));
			kfree(_aio_small);
			*pios = NULL;
			return -ENOMEM;
		}

		pages = num_par_pages ? extra_part->pages : NULL;
		sgilist = sgs_per_dev ? extra_part->sglist : NULL;
		/* In this case the per_dev[0].sglist holds the pointer to
		 * be freed
		 */
		ios = &_aio_small->ios;
		ios->extra_part_alloc = true;
	}

	if (pages) {
		ios->parity_pages = pages;
		ios->max_par_pages = num_par_pages;
	}
	if (sgilist) {
		unsigned d;

		for (d = 0; d < numdevs; ++d) {
			ios->per_dev[d].sglist = sgilist;
			sgilist += sgs_per_dev;
		}
		ios->sgs_per_dev = sgs_per_dev;
	}

	ios->layout = layout;
	ios->oc = oc;
	*pios = ios;
	return 0;
}

/* Allocate an io_state for only a single group of devices
 *
 * If a user needs to call ore_read/write() this version must be used
 * because it allocates extra stuff for striping and raid.
 * The ore might decide to only IO less than @length bytes due to
 * alignments and constraints as follows:
 * - The IO cannot cross group boundary.
 * - In raid5/6 the end of the IO must align at the end of a stripe, e.g.
 *   (@offset + @length) % strip_size == 0. Or the complete range is within
 *   a single stripe.
 * - Memory condition only permitted a shorter IO. (A user can use @length=~0
 *   and check the returned ios->length for max_io_size.)
 *
 * The caller must check returned ios->length (and/or ios->nr_pages) and
 * re-issue these pages that fall outside of ios->length
 */
int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
		     bool is_reading, u64 offset, u64 length,
		     struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	unsigned numdevs = layout->group_width * layout->mirrors_p1;
	unsigned sgs_per_dev = 0, max_par_pages = 0;
	int ret;

	if (layout->parity && length) {
		unsigned data_devs = layout->group_width - layout->parity;
		unsigned stripe_size = layout->stripe_unit * data_devs;
		unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
		u32 remainder;
		u64 num_stripes;
		u64 num_raid_units;

		num_stripes = div_u64_rem(length, stripe_size, &remainder);
		if (remainder)
			++num_stripes;

		num_raid_units = num_stripes * layout->parity;

		if (is_reading) {
			/* For reads add per_dev sglist array */
			/* TODO: Raid 6 we need twice more. Actually:
			 *	num_stripes / LCMdP(W,P);
			 *	if (W%P != 0) num_stripes *= parity;
			 */

			/* first/last seg is split */
			num_raid_units += layout->group_width;
			sgs_per_dev = div_u64(num_raid_units, data_devs) + 2;
		} else {
			/* For Writes add parity pages array. */
			max_par_pages = num_raid_units * pages_in_unit *
						sizeof(struct page *);
		}
	}

	ret = _ore_get_io_state(layout, oc, numdevs, sgs_per_dev, max_par_pages,
				pios);
	if (unlikely(ret))
		return ret;

	ios = *pios;
	ios->reading = is_reading;
	ios->offset = offset;

	if (length) {
		ore_calc_stripe_info(layout, offset, length, &ios->si);
		ios->length = ios->si.length;
		ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
				 ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
		if (layout->parity)
			_ore_post_alloc_raid_stuff(ios);
	}

	return 0;
}
EXPORT_SYMBOL(ore_get_rw_state);

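/* Illustrative caller sketch (not part of the original file; names other
 * than the ore_* calls are made up). Note how a short IO is detected via
 * the returned ios->length, per the comment above:
 *
 *	struct ore_io_state *ios;
 *	int ret = ore_get_rw_state(layout, oc, false, offset, length, &ios);
 *
 *	if (unlikely(ret))
 *		return ret;
 *
 *	ios->pages = pages;
 *	ios->pgbase = offset & ~PAGE_MASK;
 *	ret = ore_write(ios);
 *	ore_put_io_state(ios);
 *
 * Pages covering bytes beyond ios->length were not issued and must be
 * resubmitted in a follow-up call starting at offset + ios->length.
 */
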
/* Allocate an io_state for all the devices in the comps array
 *
 * This version of io_state allocation is used mostly by create/remove
 * and trunc where we currently need all the devices. The only wasteful
 * bit is the read/write_attributes with no IO. Those sites should
 * be converted to use ore_get_rw_state() with length=0
 */
int ore_get_io_state(struct ore_layout *layout, struct ore_components *oc,
		     struct ore_io_state **pios)
{
	return _ore_get_io_state(layout, oc, oc->numdevs, 0, 0, pios);
}
EXPORT_SYMBOL(ore_get_io_state);

void ore_put_io_state(struct ore_io_state *ios)
{
	if (ios) {
		unsigned i;

		for (i = 0; i < ios->numdevs; i++) {
			struct ore_per_dev_state *per_dev = &ios->per_dev[i];

			if (per_dev->or)
				osd_end_request(per_dev->or);
			if (per_dev->bio)
				bio_put(per_dev->bio);
		}

		_ore_free_raid_stuff(ios);
		kfree(ios);
	}
}
EXPORT_SYMBOL(ore_put_io_state);

static void _sync_done(struct ore_io_state *ios, void *p)
{
	struct completion *waiting = p;

	complete(waiting);
}

static void _last_io(struct kref *kref)
{
	struct ore_io_state *ios = container_of(
					kref, struct ore_io_state, kref);

	ios->done(ios, ios->private);
}

static void _done_io(struct osd_request *or, void *p)
{
	struct ore_io_state *ios = p;

	kref_put(&ios->kref, _last_io);
}

int ore_io_execute(struct ore_io_state *ios)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	bool sync = (ios->done == NULL);
	int i, ret;

	if (sync) {
		ios->done = _sync_done;
		ios->private = &wait;
	}

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;

		if (unlikely(!or))
			continue;

		ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
		if (unlikely(ret)) {
			ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
				   ret);
			return ret;
		}
	}

	kref_init(&ios->kref);

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;

		if (unlikely(!or))
			continue;

		kref_get(&ios->kref);
		osd_execute_request_async(or, _done_io, ios);
	}

	kref_put(&ios->kref, _last_io);
	ret = 0;

	if (sync) {
		wait_for_completion(&wait);
		ret = ore_check_io(ios, NULL);
	}
	return ret;
}

static void _clear_bio(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	bio_for_each_segment_all(bv, bio, i) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
			clear_highpage(bv->bv_page);
		else
			zero_user(bv->bv_page, bv->bv_offset, this_count);
	}
}

int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
{
	enum osd_err_priority accumulated_osd_err = 0;
	int accumulated_lin_err = 0;
	int i;

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
		struct ore_per_dev_state *per_dev = &ios->per_dev[i];
		struct osd_request *or = per_dev->or;
		int ret;

		if (unlikely(!or))
			continue;

		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;

		if ((OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) &&
		    per_dev->bio) {
			/* start read offset passed end of file.
			 * Note: if we do not have a bio it means
			 * read-attributes. In this case we should return
			 * error to caller.
			 */
			_clear_bio(per_dev->bio);
			ORE_DBGMSG("start read offset passed end of file "
				   "offset=0x%llx, length=0x%llx\n",
				   _LLU(per_dev->offset),
				   _LLU(per_dev->length));

			continue; /* we recovered */
		}

		if (on_dev_error) {
			u64 residual = ios->reading ?
					or->in.residual : or->out.residual;
			u64 offset = (ios->offset + ios->length) - residual;
			unsigned dev = per_dev->dev - ios->oc->first_dev;
			struct ore_dev *od = ios->oc->ods[dev];

			on_dev_error(ios, od, dev, osi.osd_err_pri,
				     offset, residual);
		}
		if (osi.osd_err_pri >= accumulated_osd_err) {
			accumulated_osd_err = osi.osd_err_pri;
			accumulated_lin_err = ret;
		}
	}

	return accumulated_lin_err;
}
EXPORT_SYMBOL(ore_check_io);

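/* Illustrative on_dev_error handler sketch (not part of the original
 * file). The parameter list below mirrors the call site above; the
 * authoritative ore_on_dev_error typedef lives in ore.h. A user such as
 * a pNFS objects-layout client could use it to report the failing OSD:
 *
 *	static void my_on_dev_error(struct ore_io_state *ios,
 *				    struct ore_dev *od, unsigned dev_index,
 *				    enum osd_err_priority oep,
 *				    u64 dev_offset, u64 dev_len)
 *	{
 *		pr_err("osd dev=%u failed oep=%d offset=0x%llx len=0x%llx\n",
 *		       dev_index, oep, _LLU(dev_offset), _LLU(dev_len));
 *	}
 *
 *	ret = ore_check_io(ios, my_on_dev_error);
 */
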
/*
 * L - logical offset into the file
 *
 * D - number of Data devices
 *	D = group_width - parity
 *
 * U - The number of bytes in a stripe within a group
 *	U = stripe_unit * D
 *
 * T - The number of bytes striped within a group of component objects
 *     (before advancing to the next group)
 *	T = U * group_depth
 *
 * S - The number of bytes striped across all component objects
 *     before the pattern repeats
 *	S = T * group_count
 *
 * M - The "major" (i.e., across all components) cycle number
 *	M = L / S
 *
 * G - Counts the groups from the beginning of the major cycle
 *	G = (L - (M * S)) / T [or (L % S) / T]
 *
 * H - The byte offset within the group
 *	H = (L - (M * S)) % T [or (L % S) % T]
 *
 * N - The "minor" (i.e., across the group) stripe number
 *	N = H / U
 *
 * C - The component index corresponding to L
 *
 *	C = (H - (N * U)) / stripe_unit + G * D
 *	[or (L % U) / stripe_unit + G * D]
 *
 * O - The component offset corresponding to L
 *	O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
 *
 * LCMdP - Parity cycle: Lowest Common Multiple of group_width, parity
 *	   divided by parity
 *	LCMdP = lcm(group_width, parity) / parity
 *
 * R - The parity Rotation stripe
 *     (Note parity cycle always starts at a group's boundary)
 *	R = N % LCMdP
 *
 * I - the first parity device index
 *	I = (group_width + group_width - R*parity - parity) % group_width
 *
 * Craid - The component index Rotated
 *	Craid = (group_width + C - R*parity) % group_width
 *	(We add the group_width to avoid negative numbers modulo math)
 */

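/* Worked example with illustrative numbers (not from the original file):
 * stripe_unit = 64K, group_width = 6, parity = 1, so D = 5 and U = 320K;
 * group_depth = 4, so T = 1280K; group_count = 2, so S = 2560K.
 * For L = 3000K:
 *	M = 3000K / 2560K = 1
 *	L % S = 440K, so G = 440K / 1280K = 0 and H = 440K
 *	N = 440K / 320K = 1
 *	C = (440K - 1*320K) / 64K + G*D = 1
 *	O = (3000K % 64K) + 1*64K + 1*4*64K = 56K + 64K + 256K = 376K
 * I.e. byte 3000K of the file lands at offset 376K of component 1,
 * before the raid5/6 parity rotation below is applied.
 */
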
void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
			  u64 length, struct ore_striping_info *si)
{
	u32 stripe_unit = layout->stripe_unit;
	u32 group_width = layout->group_width;
	u64 group_depth = layout->group_depth;
	u32 parity = layout->parity;

	u32 D = group_width - parity;
	u32 U = D * stripe_unit;
	u64 T = U * group_depth;
	u64 S = T * layout->group_count;
	u64 M = div64_u64(file_offset, S);

	/*
	G = (L - (M * S)) / T
	H = (L - (M * S)) % T
	*/
	u64 LmodS = file_offset - M * S;
	u32 G = div64_u64(LmodS, T);
	u64 H = LmodS - G * T;

	u32 N = div_u64(H, U);
	u32 Nlast;

	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
	u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;

	div_u64_rem(file_offset, stripe_unit, &si->unit_off);

	si->obj_offset = si->unit_off + (N * stripe_unit) +
				  (M * group_depth * stripe_unit);

	if (parity) {
		u32 LCMdP = lcm(group_width, parity) / parity;
		/* R = N % LCMdP; */
		u32 RxP = (N % LCMdP) * parity;
		u32 first_dev = C - C % group_width;

		si->par_dev = (group_width + group_width - parity - RxP) %
			      group_width + first_dev;
		si->dev = (group_width + C - RxP) % group_width + first_dev;
		si->bytes_in_stripe = U;
		si->first_stripe_start = M * S + G * T + N * U;
	} else {
		/* Make the math correct see _prepare_one_group */
		si->par_dev = group_width;
		si->dev = C;
	}

	si->dev *= layout->mirrors_p1;
	si->par_dev *= layout->mirrors_p1;
	si->offset = file_offset;
	si->length = T - H;
	if (si->length > length)
		si->length = length;

	Nlast = div_u64(H + si->length + U - 1, U);
	si->maxdevUnits = Nlast - N;
	si->M = M;
}
EXPORT_SYMBOL(ore_calc_stripe_info);

int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
			 unsigned pgbase, struct page **pages,
			 struct ore_per_dev_state *per_dev, int cur_len)
{
	unsigned pg = *cur_pg;
	struct request_queue *q =
			osd_request_queue(_ios_od(ios, per_dev->dev));
	unsigned len = cur_len;
	int ret;

	if (per_dev->bio == NULL) {
		unsigned bio_size;

		if (!ios->reading) {
			bio_size = ios->si.maxdevUnits;
		} else {
			bio_size = (ios->si.maxdevUnits + 1) *
			     (ios->layout->group_width - ios->layout->parity) /
			     ios->layout->group_width;
		}
		bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);

		per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
		if (unlikely(!per_dev->bio)) {
			ORE_DBGMSG("Failed to allocate BIO size=%u\n",
				   bio_size);
			ret = -ENOMEM;
			goto out;
		}
	}

	while (cur_len > 0) {
		unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
		unsigned added_len;

		cur_len -= pglen;

		added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
					    pglen, pgbase);
		if (unlikely(pglen != added_len)) {
			/* If bi_vcnt == bi_max then this is a SW BUG */
			ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
				   "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
				   per_dev->bio->bi_vcnt,
				   per_dev->bio->bi_max_vecs,
				   BIO_MAX_PAGES_KMALLOC, cur_len);
			ret = -ENOMEM;
			goto out;
		}
		_add_stripe_page(ios->sp2d, &ios->si, pages[pg]);

		pgbase = 0;
		++pg;
	}
	BUG_ON(cur_len);

	per_dev->length += len;
	*cur_pg = pg;
	ret = 0;
out:	/* we fail the complete unit on an error eg don't advance
	 * per_dev->length and cur_pg. This means that we might have a bigger
	 * bio than the CDB requested length (per_dev->length). That's fine
	 * only the opposite is fatal.
	 */
	return ret;
}

static int _prepare_for_striping(struct ore_io_state *ios)
{
	struct ore_striping_info *si = &ios->si;
	unsigned stripe_unit = ios->layout->stripe_unit;
	unsigned mirrors_p1 = ios->layout->mirrors_p1;
	unsigned group_width = ios->layout->group_width;
	unsigned devs_in_group = group_width * mirrors_p1;
	unsigned dev = si->dev;
	unsigned first_dev = dev - (dev % devs_in_group);
	unsigned dev_order;
	unsigned cur_pg = ios->pages_consumed;
	u64 length = ios->length;
	int ret = 0;

	if (!ios->pages) {
		ios->numdevs = ios->layout->mirrors_p1;
		return 0;
	}

	BUG_ON(length > si->length);

	dev_order = _dev_order(devs_in_group, mirrors_p1, si->par_dev, dev);
	si->cur_comp = dev_order;
	si->cur_pg = si->unit_off / PAGE_SIZE;

	while (length) {
		unsigned comp = dev - first_dev;
		struct ore_per_dev_state *per_dev = &ios->per_dev[comp];
		unsigned cur_len, page_off = 0;

		if (!per_dev->length) {
			per_dev->dev = dev;
			if (dev == si->dev) {
				WARN_ON(dev == si->par_dev);
				per_dev->offset = si->obj_offset;
				cur_len = stripe_unit - si->unit_off;
				page_off = si->unit_off & ~PAGE_MASK;
				BUG_ON(page_off && (page_off != ios->pgbase));
			} else {
				if (si->cur_comp > dev_order)
					per_dev->offset =
						si->obj_offset - si->unit_off;
				else /* si->cur_comp < dev_order */
					per_dev->offset =
						si->obj_offset + stripe_unit -
								si->unit_off;
				cur_len = stripe_unit;
			}
		} else {
			cur_len = stripe_unit;
		}
		if (cur_len >= length)
			cur_len = length;

		ret = _ore_add_stripe_unit(ios, &cur_pg, page_off, ios->pages,
					   per_dev, cur_len);
		if (unlikely(ret))
			goto out;

		dev += mirrors_p1;
		dev = (dev % devs_in_group) + first_dev;

		length -= cur_len;

		si->cur_comp = (si->cur_comp + 1) % group_width;
		if (unlikely((dev == si->par_dev) || (!length && ios->sp2d))) {
			if (!length && ios->sp2d) {
				/* If we are writing and this is the very last
				 * stripe, then operate on the parity dev.
				 */
				dev = si->par_dev;
			}
			if (ios->sp2d)
				/* In writes cur_len just means if it's the
				 * last one. See _ore_add_parity_unit.
				 */
				cur_len = length;
			per_dev = &ios->per_dev[dev - first_dev];
			if (!per_dev->length) {
				/* Only/always the parity unit of the first
				 * stripe will be empty. So this is a chance to
				 * initialize the per_dev info.
				 */
				per_dev->dev = dev;
				per_dev->offset =
					si->obj_offset - si->unit_off;
			}

			ret = _ore_add_parity_unit(ios, si, per_dev, cur_len);
			if (unlikely(ret))
				goto out;

			/* Rotate next par_dev backwards with wrapping */
			si->par_dev = (devs_in_group + si->par_dev -
				       ios->layout->parity * mirrors_p1) %
				      devs_in_group + first_dev;
			/* Next stripe, start fresh */
			si->cur_comp = 0;
			si->cur_pg = 0;
		}
	}
out:
	ios->numdevs = devs_in_group;
	ios->pages_consumed = cur_pg;
	return ret;
}

int ore_create(struct ore_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_create_object(or, _ios_obj(ios, i));
	}
	ret = ore_io_execute(ios);

out:
	return ret;
}
EXPORT_SYMBOL(ore_create);

int ore_remove(struct ore_io_state *ios)
{
	int i, ret;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, i), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		ios->per_dev[i].or = or;
		ios->numdevs++;

		osd_req_remove_object(or, _ios_obj(ios, i));
	}
	ret = ore_io_execute(ios);

out:
	return ret;
}
EXPORT_SYMBOL(ore_remove);

static int _write_mirror(struct ore_io_state *ios, int cur_comp)
{
	struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
	unsigned dev = ios->per_dev[cur_comp].dev;
	unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
	int ret = 0;

	if (ios->pages && !master_dev->length)
		return 0; /* Just an empty slot */

	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		per_dev->or = or;

		if (ios->pages) {
			struct bio *bio;

			if (per_dev != master_dev) {
				bio = bio_clone_kmalloc(master_dev->bio,
							GFP_KERNEL);
				if (unlikely(!bio)) {
					ORE_DBGMSG(
					      "Failed to allocate BIO size=%u\n",
					      master_dev->bio->bi_max_vecs);
					ret = -ENOMEM;
					goto out;
				}

				bio->bi_bdev = NULL;
				bio->bi_next = NULL;
				per_dev->offset = master_dev->offset;
				per_dev->length = master_dev->length;
				per_dev->bio = bio;
				per_dev->dev = dev;
			} else {
				bio = master_dev->bio;
				/* FIXME: bio_set_dir() */
				bio->bi_rw |= REQ_WRITE;
			}

			osd_req_write(or, _ios_obj(ios, cur_comp),
				      per_dev->offset, bio, per_dev->length);
			ORE_DBGMSG("write(0x%llx) offset=0x%llx "
				   "length=0x%llx dev=%d\n",
				   _LLU(_ios_obj(ios, cur_comp)->id),
				   _LLU(per_dev->offset),
				   _LLU(per_dev->length), dev);
		} else if (ios->kern_buff) {
			per_dev->offset = ios->si.obj_offset;
			per_dev->dev = ios->si.dev + dev;

			/* no cross device without page array */
			BUG_ON((ios->layout->group_width > 1) &&
			       (ios->si.unit_off + ios->length >
				ios->layout->stripe_unit));

			ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
						 per_dev->offset,
						 ios->kern_buff, ios->length);
			if (unlikely(ret))
				goto out;
			ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
				    "length=0x%llx dev=%d\n",
				    _LLU(_ios_obj(ios, cur_comp)->id),
				    _LLU(per_dev->offset),
				    _LLU(ios->length), per_dev->dev);
		} else {
			osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
			ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
				    _LLU(_ios_obj(ios, cur_comp)->id),
				    ios->out_attr_len, dev);
		}

		if (ios->out_attr)
			osd_req_add_set_attr_list(or, ios->out_attr,
						  ios->out_attr_len);

		if (ios->in_attr)
			osd_req_add_get_attr_list(or, ios->in_attr,
						  ios->in_attr_len);
	}

out:
	return ret;
}

int ore_write(struct ore_io_state *ios)
{
	int i;
	int ret;

	if (unlikely(ios->sp2d && !ios->r4w)) {
		/* A library is attempting a RAID-write without providing
		 * a pages lock interface.
		 */
		WARN_ON_ONCE(1);
		return -ENOTSUPP;
	}

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _write_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios);
	return ret;
}
EXPORT_SYMBOL(ore_write);

int _ore_read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
	struct osd_request *or;
	struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
	struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
	unsigned first_dev = (unsigned)obj->id;

	if (ios->pages && !per_dev->length)
		return 0; /* Just an empty slot */

	first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
	or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
	if (unlikely(!or)) {
		ORE_ERR("%s: osd_start_request failed\n", __func__);
		return -ENOMEM;
	}
	per_dev->or = or;

	if (ios->pages) {
		if (per_dev->cur_sg) {
			/* finalize the last sg_entry */
			_ore_add_sg_seg(per_dev, 0, false);
			if (unlikely(!per_dev->cur_sg))
				return 0; /* Skip parity only device */

			osd_req_read_sg(or, obj, per_dev->bio,
					per_dev->sglist, per_dev->cur_sg);
		} else {
			/* The no raid case */
			osd_req_read(or, obj, per_dev->offset,
				     per_dev->bio, per_dev->length);
		}

		ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
			   " dev=%d sg_len=%d\n", _LLU(obj->id),
			   _LLU(per_dev->offset), _LLU(per_dev->length),
			   first_dev, per_dev->cur_sg);
	} else {
		BUG_ON(ios->kern_buff);

		osd_req_get_attributes(or, obj);
		ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
			    _LLU(obj->id),
			    ios->in_attr_len, first_dev);
	}

	if (ios->out_attr)
		osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);

	if (ios->in_attr)
		osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

	return 0;
}

int ore_read(struct ore_io_state *ios)
{
	int i;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
		ret = _ore_read_mirror(ios, i);
		if (unlikely(ret))
			return ret;
	}

	ret = ore_io_execute(ios);
	return ret;
}
EXPORT_SYMBOL(ore_read);

int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
{
	struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
	void *iter = NULL;
	int nelem;

	do {
		nelem = 1;
		osd_req_decode_get_attr_list(ios->per_dev[0].or,
					     &cur_attr, &nelem, &iter);
		if ((cur_attr.attr_page == attr->attr_page) &&
		    (cur_attr.attr_id == attr->attr_id)) {
			attr->len = cur_attr.len;
			attr->val_ptr = cur_attr.val_ptr;
			return 0;
		}
	} while (iter);

	return -EIO;
}
EXPORT_SYMBOL(extract_attr_from_ios);

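/* Illustrative use (not part of the original file): after an
 * attributes-read (e.g. ore_get_io_state() + ios->in_attr pointing at
 * g_attr_logical_length, defined at the bottom of this file), fetch the
 * object's logical length:
 *
 *	struct osd_attr attr = g_attr_logical_length;
 *	int ret = extract_attr_from_ios(ios, &attr);
 *
 *	if (likely(!ret))
 *		obj_size = get_unaligned_be64(attr.val_ptr);
 */
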
static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
			     struct osd_attr *attr)
{
	int last_comp = cur_comp + ios->layout->mirrors_p1;

	for (; cur_comp < last_comp; ++cur_comp) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, cur_comp), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}
		per_dev->or = or;

		osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
		osd_req_add_set_attr_list(or, attr, 1);
	}

	return 0;
}

struct _trunc_info {
	struct ore_striping_info si;
	u64 prev_group_obj_off;
	u64 next_group_obj_off;

	unsigned first_group_dev;
	unsigned nex_group_dev;
};

static void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
			     struct _trunc_info *ti)
{
	unsigned stripe_unit = layout->stripe_unit;

	ore_calc_stripe_info(layout, file_offset, 0, &ti->si);

	ti->prev_group_obj_off = ti->si.M * stripe_unit;
	ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;

	ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
	ti->nex_group_dev = ti->first_group_dev + layout->group_width;
}

int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
		 u64 size)
{
	struct ore_io_state *ios;
	struct exofs_trunc_attr {
		struct osd_attr attr;
		__be64 newsize;
	} *size_attrs;
	struct _trunc_info ti;
	int i, ret;

	ret = ore_get_io_state(layout, oc, &ios);
	if (unlikely(ret))
		return ret;

	_calc_trunk_info(ios->layout, size, &ti);

	size_attrs = kcalloc(ios->oc->numdevs, sizeof(*size_attrs),
			     GFP_KERNEL);
	if (unlikely(!size_attrs)) {
		ret = -ENOMEM;
		goto out;
	}

	ios->numdevs = ios->oc->numdevs;

	for (i = 0; i < ios->numdevs; ++i) {
		struct exofs_trunc_attr *size_attr = &size_attrs[i];
		u64 obj_size;

		if (i < ti.first_group_dev)
			obj_size = ti.prev_group_obj_off;
		else if (i >= ti.nex_group_dev)
			obj_size = ti.next_group_obj_off;
		else if (i < ti.si.dev) /* dev within this group */
			obj_size = ti.si.obj_offset +
				   ios->layout->stripe_unit - ti.si.unit_off;
		else if (i == ti.si.dev)
			obj_size = ti.si.obj_offset;
		else /* i > ti.si.dev */
			obj_size = ti.si.obj_offset - ti.si.unit_off;

		size_attr->newsize = cpu_to_be64(obj_size);
		size_attr->attr = g_attr_logical_length;
		size_attr->attr.val_ptr = &size_attr->newsize;

		ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
			    _LLU(oc->comps->obj.id), _LLU(obj_size), i);
		ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
					&size_attr->attr);
		if (unlikely(ret))
			goto out;
	}
	ret = ore_io_execute(ios);

out:
	kfree(size_attrs);
	ore_put_io_state(ios);
	return ret;
}
EXPORT_SYMBOL(ore_truncate);

const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
EXPORT_SYMBOL(g_attr_logical_length);