#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
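
/*
 * This file decodes full and incremental osdmaps from their wire
 * encoding (including the embedded CRUSH map) and provides the
 * helpers that map an object name to a placement group (PG) and a
 * PG to its set of OSDs.
 */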
char *ceph_osdmap_state_str(char *str, int len, int state)
        if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
                snprintf(str, len, "exists, up");
        else if (state & CEPH_OSD_EXISTS)
                snprintf(str, len, "exists");
        else if (state & CEPH_OSD_UP)
                snprintf(str, len, "up");
        else
                snprintf(str, len, "doesn't exist");
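
/*
 * calc_bits_of(t) returns the number of bits needed to represent t,
 * so calc_pg_masks() below turns each pool's pg_num/pgp_num/lpg_num/
 * lpgp_num into the smallest 2^n-1 mask covering it, e.g. pg_num = 10
 * gives calc_bits_of(9) = 4 and pg_num_mask = (1 << 4) - 1 = 0xf.
 * These masks are later handed to ceph_stable_mod().
 */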
static int calc_bits_of(unsigned int t)

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
        pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
        pi->pgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
        pi->lpg_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
        pi->lpgp_num_mask =
                (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;

static int crush_decode_uniform_bucket(void **p, void *end,
                                       struct crush_bucket_uniform *b)
        dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
        ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
        b->item_weight = ceph_decode_32(p);
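
/*
 * The remaining per-algorithm bucket decoders follow the same
 * pattern: allocate the per-item arrays sized by b->h.size, bounds
 * check the remaining input with ceph_decode_need(), then pull the
 * raw little-endian u32 values with ceph_decode_32().
 */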
static int crush_decode_list_bucket(void **p, void *end,
                                    struct crush_bucket_list *b)
        dout("crush_decode_list_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->sum_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->sum_weights[j] = ceph_decode_32(p);

static int crush_decode_tree_bucket(void **p, void *end,
                                    struct crush_bucket_tree *b)
        dout("crush_decode_tree_bucket %p to %p\n", *p, end);
        ceph_decode_32_safe(p, end, b->num_nodes, bad);
        b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
        if (b->node_weights == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
        for (j = 0; j < b->num_nodes; j++)
                b->node_weights[j] = ceph_decode_32(p);

static int crush_decode_straw_bucket(void **p, void *end,
                                     struct crush_bucket_straw *b)
        dout("crush_decode_straw_bucket %p to %p\n", *p, end);
        b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->item_weights == NULL)
                return -ENOMEM;
        b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
        if (b->straws == NULL)
                return -ENOMEM;
        ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
        for (j = 0; j < b->h.size; j++) {
                b->item_weights[j] = ceph_decode_32(p);
                b->straws[j] = ceph_decode_32(p);

static int skip_name_map(void **p, void *end)
        ceph_decode_32_safe(p, end, len, bad);
        ceph_decode_32_safe(p, end, strlen, bad);
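
/*
 * crush_decode() walks the encoded CRUSH map roughly in this order:
 * magic, max_buckets/max_rules/max_devices, one entry per bucket
 * (algorithm, common header, item ids, per-algorithm payload), one
 * entry per rule (mask plus op/arg1/arg2 steps), trailing name maps
 * that are skipped, and finally the optional tunables, which keep
 * their defaults when the encoding ends early.
 */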
static struct crush_map *crush_decode(void *pbyval, void *end)
        void *start = pbyval;

        dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        c = kzalloc(sizeof(*c), GFP_NOFS);
        if (c == NULL)
                return ERR_PTR(-ENOMEM);

        /* set tunables to default values */
        c->choose_local_tries = 2;
        c->choose_local_fallback_tries = 5;
        c->choose_total_tries = 19;
        c->chooseleaf_descend_once = 0;

        ceph_decode_need(p, end, 4*sizeof(u32), bad);
        magic = ceph_decode_32(p);
        if (magic != CRUSH_MAGIC) {
                pr_err("crush_decode magic %x != current %x\n",
                       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
        c->max_buckets = ceph_decode_32(p);
        c->max_rules = ceph_decode_32(p);
        c->max_devices = ceph_decode_32(p);

        c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
        if (c->buckets == NULL)
                goto badmem;
        c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
        if (c->rules == NULL)
                goto badmem;

        for (i = 0; i < c->max_buckets; i++) {
                struct crush_bucket *b;

                ceph_decode_32_safe(p, end, alg, bad);
                c->buckets[i] = NULL;
                dout("crush_decode bucket %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                case CRUSH_BUCKET_UNIFORM:
                        size = sizeof(struct crush_bucket_uniform);
                case CRUSH_BUCKET_LIST:
                        size = sizeof(struct crush_bucket_list);
                case CRUSH_BUCKET_TREE:
                        size = sizeof(struct crush_bucket_tree);
                case CRUSH_BUCKET_STRAW:
                        size = sizeof(struct crush_bucket_straw);

                b = c->buckets[i] = kzalloc(size, GFP_NOFS);

                ceph_decode_need(p, end, 4*sizeof(u32), bad);
                b->id = ceph_decode_32(p);
                b->type = ceph_decode_16(p);
                b->alg = ceph_decode_8(p);
                b->hash = ceph_decode_8(p);
                b->weight = ceph_decode_32(p);
                b->size = ceph_decode_32(p);

                dout("crush_decode bucket size %d off %x %p to %p\n",
                     b->size, (int)(*p-start), *p, end);

                b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
                if (b->items == NULL)
                        goto badmem;
                b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);

                ceph_decode_need(p, end, b->size*sizeof(u32), bad);
                for (j = 0; j < b->size; j++)
                        b->items[j] = ceph_decode_32(p);

                case CRUSH_BUCKET_UNIFORM:
                        err = crush_decode_uniform_bucket(p, end,
                                  (struct crush_bucket_uniform *)b);
                case CRUSH_BUCKET_LIST:
                        err = crush_decode_list_bucket(p, end,
                                  (struct crush_bucket_list *)b);
                case CRUSH_BUCKET_TREE:
                        err = crush_decode_tree_bucket(p, end,
                                  (struct crush_bucket_tree *)b);
                case CRUSH_BUCKET_STRAW:
                        err = crush_decode_straw_bucket(p, end,
                                  (struct crush_bucket_straw *)b);

        dout("rule vec is %p\n", c->rules);
        for (i = 0; i < c->max_rules; i++) {
                struct crush_rule *r;

                ceph_decode_32_safe(p, end, yes, bad);
                dout("crush_decode NO rule %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                dout("crush_decode rule %d off %x %p to %p\n",
                     i, (int)(*p-start), *p, end);

                ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
                if (yes > (ULONG_MAX - sizeof(*r))
                    / sizeof(struct crush_rule_step))
                        goto bad;
#endif
                r = c->rules[i] = kmalloc(sizeof(*r) +
                                          yes*sizeof(struct crush_rule_step),
                                          GFP_NOFS);
                dout(" rule %d is at %p\n", i, r);
                ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
                ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
                for (j = 0; j < r->len; j++) {
                        r->steps[j].op = ceph_decode_32(p);
                        r->steps[j].arg1 = ceph_decode_32(p);
                        r->steps[j].arg2 = ceph_decode_32(p);

        /* ignore trailing name maps. */
        for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
                err = skip_name_map(p, end);

        ceph_decode_need(p, end, 3*sizeof(u32), done);
        c->choose_local_tries = ceph_decode_32(p);
        c->choose_local_fallback_tries = ceph_decode_32(p);
        c->choose_total_tries = ceph_decode_32(p);
        dout("crush decode tunable choose_local_tries = %d\n",
             c->choose_local_tries);
        dout("crush decode tunable choose_local_fallback_tries = %d\n",
             c->choose_local_fallback_tries);
        dout("crush decode tunable choose_total_tries = %d\n",
             c->choose_total_tries);

        ceph_decode_need(p, end, sizeof(u32), done);
        c->chooseleaf_descend_once = ceph_decode_32(p);
        dout("crush decode tunable chooseleaf_descend_once = %d\n",
             c->chooseleaf_descend_once);

done:
        dout("crush_decode success\n");
        return c;

badmem:
        err = -ENOMEM;
bad:
        dout("crush_decode fail %d\n", err);
/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * into a set of osds)
 */
static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)

static int __insert_pg_mapping(struct ceph_pg_mapping *new,
                               struct rb_root *root)
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_mapping *pg = NULL;

        dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
                pg = rb_entry(parent, struct ceph_pg_mapping, node);
                c = pgid_cmp(new->pgid, pg->pgid);

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);

static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
                                                   struct ceph_pg pgid)
        struct rb_node *n = root->rb_node;
        struct ceph_pg_mapping *pg;

                pg = rb_entry(n, struct ceph_pg_mapping, node);
                c = pgid_cmp(pgid, pg->pgid);
                        dout("__lookup_pg_mapping %llx got %p\n",
                             *(u64 *)&pgid, pg);

static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
        struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);

                dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg);
                rb_erase(&pg->node, root);

        dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid);
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct ceph_pg_pool_info *pi = NULL;

                pi = rb_entry(parent, struct ceph_pg_pool_info, node);
                if (new->id < pi->id)
                        p = &(*p)->rb_left;
                else if (new->id > pi->id)
                        p = &(*p)->rb_right;

        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);

static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
        struct ceph_pg_pool_info *pi;
        struct rb_node *n = root->rb_node;

                pi = rb_entry(n, struct ceph_pg_pool_info, node);
                if (id < pi->id)
                        n = n->rb_left;
                else if (id > pi->id)
                        n = n->rb_right;

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
        struct ceph_pg_pool_info *pi;

        if (id == CEPH_NOPOOL)
                return NULL;

        if (WARN_ON_ONCE(id > (u64) INT_MAX))
                return NULL;

        pi = __lookup_pg_pool(&map->pg_pools, (int) id);

        return pi ? pi->name : NULL;
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
        for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rbp, struct ceph_pg_pool_info, node);
                if (pi->name && strcmp(pi->name, name) == 0)
                        return pi->id;
        }
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
        rb_erase(&pi->node, root);
static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
        ceph_decode_copy(p, &pi->v, sizeof(pi->v));

        /* num_snaps * snap_info_t */
        n = le32_to_cpu(pi->v.num_snaps);
                ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
                                 sizeof(struct ceph_timespec), bad);
                *p += sizeof(u64) +       /* key */
                        1 + sizeof(u64) + /* u8, snapid */
                        sizeof(struct ceph_timespec);
                m = ceph_decode_32(p);    /* snap name */

        *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
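
/*
 * Pool names arrive as a count followed by (pool id, length-prefixed
 * name) pairs; __decode_pool_names() looks up each pool that was just
 * decoded and attaches the name to it with kstrndup().
 */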
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
        struct ceph_pg_pool_info *pi;

        ceph_decode_32_safe(p, end, num, bad);
        dout(" %d pool names\n", num);
                ceph_decode_32_safe(p, end, pool, bad);
                ceph_decode_32_safe(p, end, len, bad);
                dout(" pool %d len %d\n", pool, len);
                ceph_decode_need(p, end, len, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                        char *name = kstrndup(*p, len, GFP_NOFS);

                        dout(" name is %s\n", pi->name);
void ceph_osdmap_destroy(struct ceph_osdmap *map)
        dout("osdmap_destroy %p\n", map);
                crush_destroy(map->crush);
        while (!RB_EMPTY_ROOT(&map->pg_temp)) {
                struct ceph_pg_mapping *pg =
                        rb_entry(rb_first(&map->pg_temp),
                                 struct ceph_pg_mapping, node);
                rb_erase(&pg->node, &map->pg_temp);
        while (!RB_EMPTY_ROOT(&map->pg_pools)) {
                struct ceph_pg_pool_info *pi =
                        rb_entry(rb_first(&map->pg_pools),
                                 struct ceph_pg_pool_info, node);
                __remove_pg_pool(&map->pg_pools, pi);
        kfree(map->osd_state);
        kfree(map->osd_weight);
        kfree(map->osd_addr);

/*
 * adjust max osd value. reallocate arrays.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
        struct ceph_entity_addr *addr;

        state = kcalloc(max, sizeof(*state), GFP_NOFS);
        addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
        weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
        if (state == NULL || addr == NULL || weight == NULL) {

        if (map->osd_state) {
                memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
                memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
                memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
                kfree(map->osd_state);
                kfree(map->osd_addr);
                kfree(map->osd_weight);

        map->osd_state = state;
        map->osd_weight = weight;
        map->osd_addr = addr;
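
/*
 * osdmap_decode() below consumes a full osdmap in encoding order:
 * version, fsid, epoch, created/modified stamps, the pool table and
 * pool names, pool_max, flags, max_osd plus the per-osd state, weight
 * and address arrays, the pg_temp mappings, and finally the embedded
 * CRUSH map; anything after that is ignored.
 */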
struct ceph_osdmap *osdmap_decode(void **p, void *end)
        struct ceph_osdmap *map;
        struct ceph_pg_pool_info *pi;

        dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));

        map = kzalloc(sizeof(*map), GFP_NOFS);
        if (map == NULL)
                return ERR_PTR(-ENOMEM);
        map->pg_temp = RB_ROOT;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_VERSION) {
                pr_warning("got unknown v %d > %d of osdmap\n", version,
                           CEPH_OSDMAP_VERSION);

        ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
        ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
        map->epoch = ceph_decode_32(p);
        ceph_decode_copy(p, &map->created, sizeof(map->created));
        ceph_decode_copy(p, &map->modified, sizeof(map->modified));

        ceph_decode_32_safe(p, end, max, bad);
                ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
                pi = kzalloc(sizeof(*pi), GFP_NOFS);
                pi->id = ceph_decode_32(p);
                ev = ceph_decode_8(p);  /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
                err = __decode_pool(p, end, pi);
                __insert_pg_pool(&map->pg_pools, pi);

        err = __decode_pool_names(p, end, map);
                dout("failed to decode pool names\n");

        ceph_decode_32_safe(p, end, map->pool_max, bad);

        ceph_decode_32_safe(p, end, map->flags, bad);

        max = ceph_decode_32(p);

        /* (re)alloc osd arrays */
        err = osdmap_set_max_osd(map, max);
        dout("osdmap_decode max_osd = %d\n", map->max_osd);

        ceph_decode_need(p, end, 3*sizeof(u32) +
                         map->max_osd*(1 + sizeof(*map->osd_weight) +
                                       sizeof(*map->osd_addr)), bad);
        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_state, map->max_osd);

        *p += 4; /* skip length field (should match max) */
        for (i = 0; i < map->max_osd; i++)
                map->osd_weight[i] = ceph_decode_32(p);

        *p += 4; /* skip length field (should match max) */
        ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
        for (i = 0; i < map->max_osd; i++)
                ceph_decode_addr(&map->osd_addr[i]);

        ceph_decode_32_safe(p, end, len, bad);
        for (i = 0; i < len; i++) {
                struct ceph_pg_mapping *pg;

                ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                n = ceph_decode_32(p);
                if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
                        goto bad;
                ceph_decode_need(p, end, n * sizeof(u32), bad);
                pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
                for (j = 0; j < n; j++)
                        pg->osds[j] = ceph_decode_32(p);

                err = __insert_pg_mapping(pg, &map->pg_temp);
                dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);

        ceph_decode_32_safe(p, end, len, bad);
        dout("osdmap_decode crush len %d from off 0x%x\n", len,
             (int)(*p - start));
        ceph_decode_need(p, end, len, bad);
        map->crush = crush_decode(*p, end);
        if (IS_ERR(map->crush)) {
                err = PTR_ERR(map->crush);

        /* ignore the rest of the map */

        dout("osdmap_decode done %p %p\n", *p, end);
        return map;

bad:
        dout("osdmap_decode fail err %d\n", err);
        ceph_osdmap_destroy(map);
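
/*
 * An incremental update either carries a complete new map (in which
 * case osdmap_apply_incremental() simply hands off to osdmap_decode())
 * or a series of deltas: an optional new CRUSH map, new flags and
 * pool_max, new and removed pools, new pool names, newly up osds with
 * their addresses, osds going down, weight changes, and new or removed
 * pg_temp mappings.
 */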
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                                             struct ceph_osdmap *map,
                                             struct ceph_messenger *msgr)
        struct crush_map *newcrush = NULL;
        struct ceph_fsid fsid;
        struct ceph_timespec modified;
        __s32 new_pool_max, new_flags, max;

        ceph_decode_16_safe(p, end, version, bad);
        if (version > CEPH_OSDMAP_INC_VERSION) {
                pr_warning("got unknown v %d > %d of inc osdmap\n", version,
                           CEPH_OSDMAP_INC_VERSION);

        ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
                         bad);
        ceph_decode_copy(p, &fsid, sizeof(fsid));
        epoch = ceph_decode_32(p);
        BUG_ON(epoch != map->epoch+1);
        ceph_decode_copy(p, &modified, sizeof(modified));
        new_pool_max = ceph_decode_32(p);
        new_flags = ceph_decode_32(p);

        ceph_decode_32_safe(p, end, len, bad);
                dout("apply_incremental full map len %d, %p to %p\n",
                     len, *p, end);
                return osdmap_decode(p, min(*p+len, end));

        ceph_decode_32_safe(p, end, len, bad);
                dout("apply_incremental new crush map len %d, %p to %p\n",
                     len, *p, end);
                newcrush = crush_decode(*p, min(*p+len, end));
                if (IS_ERR(newcrush))
                        return ERR_CAST(newcrush);

        if (new_flags >= 0)
                map->flags = new_flags;
        if (new_pool_max >= 0)
                map->pool_max = new_pool_max;

        ceph_decode_need(p, end, 5*sizeof(u32), bad);

        max = ceph_decode_32(p);
                err = osdmap_set_max_osd(map, max);

        map->modified = modified;
                crush_destroy(map->crush);
                map->crush = newcrush;

        ceph_decode_32_safe(p, end, len, bad);
                struct ceph_pg_pool_info *pi;

                ceph_decode_32_safe(p, end, pool, bad);
                ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
                ev = ceph_decode_8(p);  /* encoding version */
                if (ev > CEPH_PG_POOL_VERSION) {
                        pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
                                   ev, CEPH_PG_POOL_VERSION);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                        pi = kzalloc(sizeof(*pi), GFP_NOFS);
                        __insert_pg_pool(&map->pg_pools, pi);
                err = __decode_pool(p, end, pi);
                err = __decode_pool_names(p, end, map);

        ceph_decode_32_safe(p, end, len, bad);
                struct ceph_pg_pool_info *pi;

                ceph_decode_32_safe(p, end, pool, bad);
                pi = __lookup_pg_pool(&map->pg_pools, pool);
                        __remove_pg_pool(&map->pg_pools, pi);

        ceph_decode_32_safe(p, end, len, bad);
                struct ceph_entity_addr addr;
                ceph_decode_32_safe(p, end, osd, bad);
                ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
                ceph_decode_addr(&addr);
                pr_info("osd%d up\n", osd);
                BUG_ON(osd >= map->max_osd);
                map->osd_state[osd] |= CEPH_OSD_UP;
                map->osd_addr[osd] = addr;

        ceph_decode_32_safe(p, end, len, bad);
                ceph_decode_32_safe(p, end, osd, bad);
                xorstate = **(u8 **)p;
                (*p)++; /* clean flag */
                if (xorstate == 0)
                        xorstate = CEPH_OSD_UP;
                if (xorstate & CEPH_OSD_UP)
                        pr_info("osd%d down\n", osd);
                if (osd < map->max_osd)
                        map->osd_state[osd] ^= xorstate;

        ceph_decode_32_safe(p, end, len, bad);
                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                osd = ceph_decode_32(p);
                off = ceph_decode_32(p);
                pr_info("osd%d weight 0x%x %s\n", osd, off,
                        off == CEPH_OSD_IN ? "(in)" :
                        (off == CEPH_OSD_OUT ? "(out)" : ""));
                if (osd < map->max_osd)
                        map->osd_weight[osd] = off;

        ceph_decode_32_safe(p, end, len, bad);
                struct ceph_pg_mapping *pg;

                ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
                ceph_decode_copy(p, &pgid, sizeof(pgid));
                pglen = ceph_decode_32(p);

                        ceph_decode_need(p, end, pglen*sizeof(u32), bad);

                        /* removing existing (if any) */
                        (void) __remove_pg_mapping(&map->pg_temp, pgid);

                        if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
                                goto bad;
                        pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
                        for (j = 0; j < pglen; j++)
                                pg->osds[j] = ceph_decode_32(p);
                        err = __insert_pg_mapping(pg, &map->pg_temp);
                        dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
                             pglen);
                        __remove_pg_mapping(&map->pg_temp, pgid);

        /* ignore the rest */
        return map;

bad:
        pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
               epoch, (int)(*p - start), *p, start, end);
        print_hex_dump(KERN_DEBUG, "osdmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
        if (newcrush)
                crush_destroy(newcrush);
/*
 * calculate file layout from given offset, length.
 * fill in correct oid, logical length, and object extent
 * offset within object.
 *
 * for now, we write only a single su, until we can
 * pass a stride back to the caller.
 */
int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
                                  u64 off, u64 len, u64 *ono,
                                  u64 *oxoff, u64 *oxlen)
        u32 osize = le32_to_cpu(layout->fl_object_size);
        u32 su = le32_to_cpu(layout->fl_stripe_unit);
        u32 sc = le32_to_cpu(layout->fl_stripe_count);
        u32 bl, stripeno, stripepos, objsetno;

        dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
             osize, su);
        if (su == 0 || sc == 0)
                goto invalid;
        su_per_object = osize / su;
        if (su_per_object == 0)
                goto invalid;
        dout("osize %u / su %u = su_per_object %u\n", osize, su,
             su_per_object);

        if ((su & ~PAGE_MASK) != 0)
                goto invalid;

        /* bl = *off / su; */
        dout("off %llu / su %u = bl %u\n", off, su, bl);

        stripepos = bl % sc;
        objsetno = stripeno / su_per_object;

        *ono = objsetno * sc + stripepos;
        dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);

        /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
        su_offset = do_div(t, su);
        *oxoff = su_offset + (stripeno % su_per_object) * su;

        /*
         * Calculate the length of the extent being written to the selected
         * object. This is the minimum of the full length requested (len) or
         * the remainder of the current stripe being written to.
         */
        *oxlen = min_t(u64, len, su - su_offset);

        dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
        return 0;

invalid:
        dout(" invalid layout\n");
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
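
/*
 * For example (hypothetical values): with stripe_unit su = 64K,
 * stripe_count sc = 2 and object_size 256K (so su_per_object = 4),
 * a write at file offset 320K maps to stripe block bl = 5, stripeno 2,
 * stripepos 1, objsetno 0, hence object ono = 1, object offset
 * oxoff = 0 + (2 % 4) * 64K = 128K, and oxlen = min(len, 64K).
 */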
/*
 * calculate an object layout (i.e. pgid) from an oid,
 * file_layout, and osdmap
 */
int ceph_calc_object_layout(struct ceph_object_layout *ol,
                            const char *oid,
                            struct ceph_file_layout *fl,
                            struct ceph_osdmap *osdmap)
        unsigned int num, num_mask;
        struct ceph_pg pgid;
        int poolid = le32_to_cpu(fl->fl_pg_pool);
        struct ceph_pg_pool_info *pool;

        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);

        ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
        num = le32_to_cpu(pool->v.pg_num);
        num_mask = pool->pg_num_mask;

        pgid.ps = cpu_to_le16(ps);
        pgid.preferred = cpu_to_le16(-1);
        pgid.pool = fl->fl_pg_pool;
        dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);

        ol->ol_stripe_unit = fl->fl_object_stripe_unit;
EXPORT_SYMBOL(ceph_calc_object_layout);
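
/*
 * Note on ceph_stable_mod(x, b, bmask) as used below: it maps x into
 * [0, b) using the precomputed 2^n-1 mask, in a way that keeps most
 * placements stable when a pool's pg count grows, rather than a plain
 * x % b.  calc_pg_raw() first honours any pg_temp override for the pg
 * and only then falls back to running the pool's CRUSH rule.
 */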
/*
 * Calculate raw osd vector for the given pgid.  Return pointer to osd
 * array, or NULL on failure.
 */
static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *osds, int *num)
        struct ceph_pg_mapping *pg;
        struct ceph_pg_pool_info *pool;
        unsigned int poolid, ps, pps, t, r;

        poolid = le32_to_cpu(pgid.pool);
        ps = le16_to_cpu(pgid.ps);

        pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);

        t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num),
                            pool->pgp_num_mask);
        pgid.ps = cpu_to_le16(t);
        pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);

        ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
                                 pool->v.type, pool->v.size);
                pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
                       poolid, pool->v.crush_ruleset, pool->v.type,
                       pool->v.size);

        pps = ceph_stable_mod(ps,
                              le32_to_cpu(pool->v.pgp_num),
                              pool->pgp_num_mask);
        r = crush_do_rule(osdmap->crush, ruleno, pps, osds,
                          min_t(int, pool->v.size, *num),
                          osdmap->osd_weight);
                pr_err("error %d from crush rule: pool %d ruleset %d type %d"
                       " size %d\n", r, poolid, pool->v.crush_ruleset,
                       pool->v.type, pool->v.size);

/*
 * Return acting set for given pgid.
 */
int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
                        int *acting)
        int rawosds[CEPH_PG_MAX_SIZE], *osds;
        int i, o, num = CEPH_PG_MAX_SIZE;

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);

        /* primary is first up osd */
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        acting[o++] = osds[i];

/*
 * Return primary osd for given pgid, or -1 if none.
 */
int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
        int rawosds[CEPH_PG_MAX_SIZE], *osds;
        int i, num = CEPH_PG_MAX_SIZE;

        osds = calc_pg_raw(osdmap, pgid, rawosds, &num);

        /* primary is first up osd */
        for (i = 0; i < num; i++)
                if (ceph_osd_is_up(osdmap, osds[i]))
                        return osds[i];
EXPORT_SYMBOL(ceph_calc_pg_primary);