#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

char *ceph_osdmap_state_str(char *str, int len, int state)
{
	if (!len)
		goto done;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

done:
	return str;
}

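/*
 * Example usage (illustrative sketch, not from this file): callers hand
 * in a small scratch buffer when logging OSD state, e.g.:
 *
 *	char sbuf[64];
 *
 *	pr_info("osd%d state %s\n", osd,
 *		ceph_osdmap_state_str(sbuf, sizeof(sbuf),
 *				      map->osd_state[osd]));
 */
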
/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
	pi->pgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
	pi->lpg_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
	pi->lpgp_num_mask =
		(1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
}

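/*
 * Worked example (illustrative): pg_num = 12 gives
 * calc_bits_of(12 - 1 = 0b1011) = 4, so pg_num_mask = (1 << 4) - 1 = 15,
 * the smallest 2^n-1 >= 12.  ceph_stable_mod() later uses these masks to
 * fold a placement seed onto [0, pg_num) without remapping every pg when
 * pg_num is not a power of two.
 */
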
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_32_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

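/*
 * Note on the decoding pattern above (a sketch of the helpers from
 * linux/ceph/decode.h): ceph_decode_need(p, end, n, bad) jumps to the
 * "bad" label unless at least n bytes remain, and the *_safe variants
 * bundle that check with the read.  A typical length-prefixed array
 * decode looks like:
 *
 *	u32 n;
 *
 *	ceph_decode_32_safe(p, end, n, bad);		// checked read
 *	ceph_decode_need(p, end, n * sizeof(u32), bad);	// size the batch
 *	while (n--)
 *		v = ceph_decode_32(p);			// unchecked, pre-sized
 */
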
static int skip_name_map(void **p, void *end)
{
	int len;

	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		int strlen;

		*p += sizeof(u32);
		ceph_decode_32_safe(p, end, strlen, bad);
		*p += strlen;
	}
	return 0;
bad:
	return -EINVAL;
}

static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err = -EINVAL;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;
	u32 num_name_maps;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		default:
			err = -EINVAL;
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;
		b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
		if (b->perm == NULL)
			goto badmem;
		b->perm_n = 0;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				(struct crush_bucket_uniform *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
				(struct crush_bucket_list *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto bad;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto bad;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		err = -EINVAL;
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = c->rules[i] = kmalloc(sizeof(*r) +
					  yes*sizeof(struct crush_rule_step),
					  GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	/* ignore trailing name maps. */
	for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
		err = skip_name_map(p, end);
		if (err < 0)
			goto done;
	}

	/* tunables: a map from an older server may simply end here,
	 * which is success with the defaults set above, not an error */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

done:
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
bad:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
{
	if (l.pool < r.pool)
		return -1;
	if (l.pool > r.pool)
		return 1;
	if (l.seed < r.seed)
		return -1;
	if (l.seed > r.seed)
		return 1;
	return 0;
}

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
			       struct rb_root *root)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_mapping *pg = NULL;
	int c;

	dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
	while (*p) {
		parent = *p;
		pg = rb_entry(parent, struct ceph_pg_mapping, node);
		c = pgid_cmp(new->pgid, pg->pgid);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

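/*
 * Sketch of how pg_temp is consumed (names as in this file): an explicit
 * mapping, when present, short-circuits CRUSH in calc_pg_raw() below:
 *
 *	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
 *	if (pg) {
 *		*num = pg->len;
 *		return pg->osds;
 *	}
 */
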
static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
						   struct ceph_pg pgid)
{
	struct rb_node *n = root->rb_node;
	struct ceph_pg_mapping *pg;
	int c;

	while (n) {
		pg = rb_entry(n, struct ceph_pg_mapping, node);
		c = pgid_cmp(pgid, pg->pgid);
		if (c < 0) {
			n = n->rb_left;
		} else if (c > 0) {
			n = n->rb_right;
		} else {
			dout("__lookup_pg_mapping %lld.%x got %p\n",
			     pgid.pool, pgid.seed, pg);
			return pg;
		}
	}
	return NULL;
}

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
{
	struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

	if (pg) {
		dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
		     pg);
		rb_erase(&pg->node, root);
		kfree(pg);
		return 0;
	}
	dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
	return -ENOENT;
}

/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);

	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}

static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	unsigned int n, m;

	ceph_decode_copy(p, &pi->v, sizeof(pi->v));
	calc_pg_masks(pi);

	/* num_snaps * snap_info_t */
	n = le32_to_cpu(pi->v.num_snaps);
	while (n--) {
		ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
				 sizeof(struct ceph_timespec), bad);
		*p += sizeof(u64) +       /* key */
			1 + sizeof(u64) + /* u8, snapid */
			sizeof(struct ceph_timespec);
		m = ceph_decode_32(p);    /* snap name */
		*p += m;
	}

	/* skip removed snap intervals (pairs of u64) */
	*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
	return 0;

bad:
	return -EINVAL;
}

static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len, pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %d len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * osd map
 */
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);
		rb_erase(&pg->node, &map->pg_temp);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);
		__remove_pg_pool(&map->pg_pools, pi);
	}
	kfree(map->osd_state);
	kfree(map->osd_weight);
	kfree(map->osd_addr);
	kfree(map);
}

/*
 * adjust max osd value.  reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
	u8 *state;
	struct ceph_entity_addr *addr;
	u32 *weight;

	state = kcalloc(max, sizeof(*state), GFP_NOFS);
	addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
	weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
	if (state == NULL || addr == NULL || weight == NULL) {
		kfree(state);
		kfree(addr);
		kfree(weight);
		return -ENOMEM;
	}

	/* copy old? */
	if (map->osd_state) {
		memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
		memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
		memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
		kfree(map->osd_state);
		kfree(map->osd_addr);
		kfree(map->osd_weight);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	map->max_osd = max;
	return 0;
}

/*
 * decode a full map.
 */
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	u16 version;
	u32 len, max, i;
	u8 ev;
	int err = -EINVAL;
	void *start = *p;
	struct ceph_pg_pool_info *pi;

	dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (map == NULL)
		return ERR_PTR(-ENOMEM);
	map->pg_temp = RB_ROOT;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_VERSION) {
		pr_warning("got unknown v %d > %d of osdmap\n", version,
			   CEPH_OSDMAP_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	ceph_decode_32_safe(p, end, max, bad);
	while (max--) {
		ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
		err = -ENOMEM;
		pi = kzalloc(sizeof(*pi), GFP_NOFS);
		if (!pi)
			goto bad;
		pi->id = ceph_decode_32(p);
		err = -EINVAL;
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			kfree(pi);
			goto bad;
		}
		err = __decode_pool(p, end, pi);
		if (err < 0) {
			kfree(pi);
			goto bad;
		}
		__insert_pg_pool(&map->pg_pools, pi);
	}

	if (version >= 5) {
		err = __decode_pool_names(p, end, map);
		if (err < 0) {
			dout("fail to decode pool names\n");
			goto bad;
		}
	}

	ceph_decode_32_safe(p, end, map->pool_max, bad);

	ceph_decode_32_safe(p, end, map->flags, bad);

	ceph_decode_32_safe(p, end, max, bad);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err < 0)
		goto bad;
	dout("osdmap_decode max_osd = %d\n", map->max_osd);

	/* osds */
	err = -EINVAL;
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(1 + sizeof(*map->osd_weight) +
				       sizeof(*map->osd_addr)), bad);
	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_state, map->max_osd);

	*p += 4; /* skip length field (should match max) */
	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	*p += 4; /* skip length field (should match max) */
	ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
	for (i = 0; i < map->max_osd; i++)
		ceph_decode_addr(&map->osd_addr[i]);

	/* pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	for (i = 0; i < len; i++) {
		int n, j;
		struct ceph_pg pgid;
		struct ceph_pg_v1 pgid_v1;
		struct ceph_pg_mapping *pg;

		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
		pgid.pool = le32_to_cpu(pgid_v1.pool);
		pgid.seed = le16_to_cpu(pgid_v1.ps);
		n = ceph_decode_32(p);
		err = -EINVAL;
		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
			goto bad;
		ceph_decode_need(p, end, n * sizeof(u32), bad);
		err = -ENOMEM;
		pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
		if (!pg)
			goto bad;
		pg->pgid = pgid;
		pg->len = n;
		for (j = 0; j < n; j++)
			pg->osds[j] = ceph_decode_32(p);

		err = __insert_pg_mapping(pg, &map->pg_temp);
		if (err)
			goto bad;
		dout(" added pg_temp %lld.%x len %d\n", pgid.pool, pgid.seed,
		     n);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, bad);
	dout("osdmap_decode crush len %d from off 0x%x\n", len,
	     (int)(*p - start));
	ceph_decode_need(p, end, len, bad);
	map->crush = crush_decode(*p, end);
	*p += len;
	if (IS_ERR(map->crush)) {
		err = PTR_ERR(map->crush);
		map->crush = NULL;
		goto bad;
	}

	/* ignore the rest of the map */
	*p = end;

	dout("osdmap_decode done %p %p\n", *p, end);
	return map;

bad:
	dout("osdmap_decode fail err %d\n", err);
	ceph_osdmap_destroy(map);
	return ERR_PTR(err);
}

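/*
 * Illustrative caller sketch (assumed, lives outside this file): the
 * mon/osd client decodes a full map from a message payload and swaps
 * it in:
 *
 *	void *q = msg->front.iov_base;
 *	struct ceph_osdmap *newmap;
 *
 *	newmap = osdmap_decode(&q, q + msg->front.iov_len);
 *	if (IS_ERR(newmap))
 *		return PTR_ERR(newmap);
 *	if (oldmap)
 *		ceph_osdmap_destroy(oldmap);
 */
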
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map,
					     struct ceph_messenger *msgr)
{
	struct crush_map *newcrush = NULL;
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	u32 len, pool;
	__s32 new_pool_max, new_flags, max;
	void *start = *p;
	int err = -EINVAL;
	u16 version;

	ceph_decode_16_safe(p, end, version, bad);
	if (version > CEPH_OSDMAP_INC_VERSION) {
		pr_warning("got unknown v %d > %d of inc osdmap\n", version,
			   CEPH_OSDMAP_INC_VERSION);
		goto bad;
	}

	ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
			 bad);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_32(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, bad);
	if (len > 0) {
		dout("apply_incremental new crush map len %d, %p to %p\n",
		     len, *p, end);
		newcrush = crush_decode(*p, min(*p+len, end));
		if (IS_ERR(newcrush))
			return ERR_CAST(newcrush);
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	ceph_decode_need(p, end, 5*sizeof(u32), bad);

	/* new max? */
	max = ceph_decode_32(p);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err < 0)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;
	if (newcrush) {
		if (map->crush)
			crush_destroy(map->crush);
		map->crush = newcrush;
		newcrush = NULL;
	}

	/* new_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u8 ev;
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
		ev = ceph_decode_8(p); /* encoding version */
		if (ev > CEPH_PG_POOL_VERSION) {
			pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
				   ev, CEPH_PG_POOL_VERSION);
			err = -EINVAL;
			goto bad;
		}
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi) {
				err = -ENOMEM;
				goto bad;
			}
			pi->id = pool;
			__insert_pg_pool(&map->pg_pools, pi);
		}
		err = __decode_pool(p, end, pi);
		if (err < 0)
			goto bad;
	}
	if (version >= 5) {
		err = __decode_pool_names(p, end, map);
		if (err < 0)
			goto bad;
	}

	/* old_pool */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_32_safe(p, end, pool, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up */
	err = -EINVAL;
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		struct ceph_entity_addr addr;

		ceph_decode_32_safe(p, end, osd, bad);
		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
		ceph_decode_addr(&addr);
		pr_info("osd%d up\n", osd);
		BUG_ON(osd >= map->max_osd);
		map->osd_state[osd] |= CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	/* new_state */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd;
		u8 xorstate;

		ceph_decode_32_safe(p, end, osd, bad);
		xorstate = **(u8 **)p;
		(*p)++; /* clean flag */
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP; /* old encoding: plain down */
		if (xorstate & CEPH_OSD_UP)
			pr_info("osd%d down\n", osd);
		if (osd < map->max_osd)
			map->osd_state[osd] ^= xorstate;
	}

	/* new_weight */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		u32 osd, off;

		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		osd = ceph_decode_32(p);
		off = ceph_decode_32(p);
		pr_info("osd%d weight 0x%x %s\n", osd, off,
			off == CEPH_OSD_IN ? "(in)" :
			(off == CEPH_OSD_OUT ? "(out)" : ""));
		if (osd < map->max_osd)
			map->osd_weight[osd] = off;
	}

	/* new_pg_temp */
	ceph_decode_32_safe(p, end, len, bad);
	while (len--) {
		struct ceph_pg_mapping *pg;
		int j;
		struct ceph_pg_v1 pgid_v1;
		struct ceph_pg pgid;
		u32 pglen;

		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1));
		pgid.pool = le32_to_cpu(pgid_v1.pool);
		pgid.seed = le16_to_cpu(pgid_v1.ps);
		pglen = ceph_decode_32(p);

		if (pglen) {
			ceph_decode_need(p, end, pglen*sizeof(u32), bad);

			/* removing existing (if any) */
			(void) __remove_pg_mapping(&map->pg_temp, pgid);

			/* insert */
			err = -EINVAL;
			if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
				goto bad;
			err = -ENOMEM;
			pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
			if (!pg)
				goto bad;
			pg->pgid = pgid;
			pg->len = pglen;
			for (j = 0; j < pglen; j++)
				pg->osds[j] = ceph_decode_32(p);
			err = __insert_pg_mapping(pg, &map->pg_temp);
			if (err) {
				kfree(pg);
				goto bad;
			}
			dout(" added pg_temp %lld.%x len %d\n", pgid.pool,
			     pgid.seed, pglen);
		} else {
			/* remove */
			__remove_pg_mapping(&map->pg_temp, pgid);
		}
	}

	/* ignore the rest */
	*p = end;
	return map;

bad:
	pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
	       epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	if (newcrush)
		crush_destroy(newcrush);
	return ERR_PTR(err);
}

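/*
 * Illustrative caller sketch (assumed): incrementals are applied in
 * epoch order; an embedded full map returns a different pointer, and a
 * decode error makes the caller fall back to requesting a full map:
 *
 *	newmap = osdmap_apply_incremental(&q, q + len, map, msgr);
 *	if (IS_ERR(newmap))
 *		goto bad;		// e.g. ask for a full map instead
 *	if (newmap != map) {
 *		ceph_osdmap_destroy(map);
 *		map = newmap;		// a full map was embedded
 *	}
 */
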
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * based on file layout.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
				  u64 off, u64 len,
				  u64 *ono,
				  u64 *oxoff, u64 *oxlen)
{
	u32 osize = le32_to_cpu(layout->fl_object_size);
	u32 su = le32_to_cpu(layout->fl_stripe_unit);
	u32 sc = le32_to_cpu(layout->fl_stripe_count);
	u32 bl, stripeno, stripepos, objsetno;
	u32 su_per_object;
	u64 t, su_offset;

	dout("mapping %llu~%llu  osize %u fl_su %u\n", off, len,
	     osize, su);
	if (su == 0 || sc == 0)
		goto invalid;
	su_per_object = osize / su;
	if (su_per_object == 0)
		goto invalid;
	dout("osize %u / su %u = su_per_object %u\n", osize, su,
	     su_per_object);

	if ((su & ~PAGE_MASK) != 0)
		goto invalid;

	/* bl = *off / su; */
	t = off;
	do_div(t, su);
	bl = t;
	dout("off %llu / su %u = bl %u\n", off, su, bl);

	stripeno = bl / sc;
	stripepos = bl % sc;
	objsetno = stripeno / su_per_object;

	*ono = objsetno * sc + stripepos;
	dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

	/* *oxoff = *off % layout->fl_stripe_unit;  # offset in su */
	t = off;
	su_offset = do_div(t, su);
	*oxoff = su_offset + (stripeno % su_per_object) * su;

	/*
	 * Calculate the length of the extent being written to the selected
	 * object. This is the minimum of the full length requested (len) or
	 * the remainder of the current stripe being written to.
	 */
	*oxlen = min_t(u64, len, su - su_offset);

	dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
	return 0;

invalid:
	dout(" invalid layout\n");
	*ono = 0;
	*oxoff = 0;
	*oxlen = 0;
	return -EINVAL;
}
EXPORT_SYMBOL(ceph_calc_file_object_mapping);

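/*
 * Worked example (illustrative numbers): su = 64K, sc = 3, osize = 256K,
 * so su_per_object = 4.  For off = 200000, len = 100000:
 *
 *	bl = 200000 / 65536 = 3			(stripe block)
 *	stripeno = 3 / 3 = 1, stripepos = 3 % 3 = 0, objsetno = 1 / 4 = 0
 *	*ono = 0 * 3 + 0 = 0
 *	su_offset = 200000 % 65536 = 3392
 *	*oxoff = 3392 + (1 % 4) * 65536 = 68928
 *	*oxlen = min(100000, 65536 - 3392) = 62144
 *
 * i.e. only the remainder of the current stripe unit is mapped; the
 * caller is expected to loop over the rest of the range.
 */
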
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
			    const char *oid,
			    struct ceph_file_layout *fl,
			    struct ceph_osdmap *osdmap)
{
	unsigned int num, num_mask;
	struct ceph_pg pgid;
	struct ceph_pg_pool_info *pool;

	BUG_ON(!osdmap);
	pgid.pool = le32_to_cpu(fl->fl_pg_pool);
	pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
	if (!pool)
		return -EIO;
	pgid.seed = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
	num = le32_to_cpu(pool->v.pg_num);
	num_mask = pool->pg_num_mask;

	dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pgid.pool,
	     pgid.seed);

	ol->ol_pgid.ps = cpu_to_le16(pgid.seed);
	ol->ol_pgid.pool = fl->fl_pg_pool;
	ol->ol_pgid.preferred = cpu_to_le16(-1);
	ol->ol_stripe_unit = fl->fl_object_stripe_unit;
	return 0;
}
EXPORT_SYMBOL(ceph_calc_object_layout);

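/*
 * Illustrative sketch: the resulting ol_pgid is later mapped onto OSDs
 * by calc_pg_raw() below, roughly (field names as in this file):
 *
 *	struct ceph_pg pgid;
 *
 *	pgid.pool = le32_to_cpu(ol->ol_pgid.pool);
 *	pgid.seed = le16_to_cpu(ol->ol_pgid.ps);
 *	primary = ceph_calc_pg_primary(osdmap, pgid);
 */
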
/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *osds, int *num)
{
	struct ceph_pg_mapping *pg;
	struct ceph_pg_pool_info *pool;
	int ruleno;
	unsigned int poolid, ps, pps, t;
	int r;

	poolid = pgid.pool;
	ps = pgid.seed;

	pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
	if (!pool)
		return NULL;

	/* pg_temp? */
	t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
			    pool->pg_num_mask);
	pgid.seed = t;
	pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		*num = pg->len;
		return pg->osds;
	}

	/* crush */
	ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
				 pool->v.type, pool->v.size);
	if (ruleno < 0) {
		pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
		       poolid, pool->v.crush_ruleset, pool->v.type,
		       pool->v.size);
		return NULL;
	}

	pps = ceph_stable_mod(ps,
			      le32_to_cpu(pool->v.pgp_num),
			      pool->pgp_num_mask);
	pps += poolid;
	r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
			  min_t(int, pool->v.size, *num),
			  osdmap->osd_weight);
	if (r < 0) {
		pr_err("error %d from crush rule: pool %d ruleset %d type %d"
		       " size %d\n", r, poolid, pool->v.crush_ruleset,
		       pool->v.type, pool->v.size);
		return NULL;
	}
	*num = r;
	return osds;
}

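/*
 * Worked example of ceph_stable_mod (illustrative): with pgp_num = 12
 * and pgp_num_mask = 15, a seed x maps to (x & 15) when that is already
 * < 12, and to (x & 7) otherwise; e.g. x = 13 -> 13 & 15 = 13 >= 12, so
 * the result is 13 & 7 = 5.  This keeps most placements stable as
 * pgp_num grows toward the next power of two.
 */
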
/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
			int *acting)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, o, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	o = 0;
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			acting[o++] = osds[i];
	return o;
}

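/*
 * Example usage (illustrative sketch): pick the primary as the first
 * entry of the acting set:
 *
 *	int osds[CEPH_PG_MAX_SIZE];
 *	int num = ceph_calc_pg_acting(osdmap, pgid, osds);
 *
 *	if (num > 0)
 *		primary = osds[0];	// first up osd
 */
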
/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
{
	int rawosds[CEPH_PG_MAX_SIZE], *osds;
	int i, num = CEPH_PG_MAX_SIZE;

	osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
	if (!osds)
		return -1;

	/* primary is first up osd */
	for (i = 0; i < num; i++)
		if (ceph_osd_is_up(osdmap, osds[i]))
			return osds[i];
	return -1;
}
EXPORT_SYMBOL(ceph_calc_pg_primary);