2 * Copyright (C) 2012 Fusion-io All rights reserved.
3 * Copyright (C) 2012 Intel Corp. All rights reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/wait.h>
21 #include <linux/bio.h>
22 #include <linux/slab.h>
23 #include <linux/buffer_head.h>
24 #include <linux/blkdev.h>
25 #include <linux/random.h>
26 #include <linux/iocontext.h>
27 #include <linux/capability.h>
28 #include <linux/ratelimit.h>
29 #include <linux/kthread.h>
30 #include <linux/raid/pq.h>
31 #include <linux/hash.h>
32 #include <linux/list_sort.h>
33 #include <linux/raid/xor.h>
34 #include <asm/div64.h>
37 #include "extent_map.h"
39 #include "transaction.h"
40 #include "print-tree.h"
43 #include "async-thread.h"
44 #include "check-integrity.h"
45 #include "rcu-string.h"
47 /* set when additional merges to this rbio are not allowed */
48 #define RBIO_RMW_LOCKED_BIT 1
51 * set when this rbio is sitting in the hash, but it is just a cache of past RMW
54 #define RBIO_CACHE_BIT 2
57 * set when it is safe to trust the stripe_pages for caching
59 #define RBIO_CACHE_READY_BIT 3
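/* cap on the number of rbios kept on the LRU stripe cache; cache_rbio() evicts the least recently used entry once this is exceeded */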
62 #define RBIO_CACHE_SIZE 1024
64 struct btrfs_raid_bio {
65 struct btrfs_fs_info *fs_info;
66 struct btrfs_bio *bbio;
69 * logical block numbers for the start of each stripe
70 * The last one or two are p/q. These are sorted,
71 * so raid_map[0] is the start of our full stripe
75 /* while we're doing rmw on a stripe
76 * we put it into a hash table so we can
77 * lock the stripe and merge more rbios into it
80 struct list_head hash_list;
83 * LRU list for the stripe cache
85 struct list_head stripe_cache;
88 * for scheduling work in the helper threads
90 struct btrfs_work work;
93 * bio list and bio_list_lock are used
94 * to add more bios into the stripe
95 * in hopes of avoiding the full rmw
97 struct bio_list bio_list;
98 spinlock_t bio_list_lock;
100 /* also protected by the bio_list_lock, the
101 * plug list is used by the plugging code
102 * to collect partial bios while plugged. The
103 * stripe locking code also uses it to hand off
104 * the stripe lock to the next pending IO
106 struct list_head plug_list;
109 * flags that tell us if it is safe to
110 * merge with this bio
114 /* size of each individual stripe on disk */
117 /* number of data stripes (no p/q) */
121 * set if we're doing a parity rebuild
122 * for a read from higher up, which is handled
123 * differently from a parity rebuild as part of rmw
128 /* first bad stripe */
131 /* second bad stripe (for raid6 use) */
135 * number of pages needed to represent the full stripe
141 * size of all the bios in the bio_list. This
142 * helps us decide if the rbio maps to a full stripe or not
150 * these are two arrays of pointers. We allocate the
151 * rbio big enough to hold them both and setup their
152 * locations when the rbio is allocated
155 /* pointers to pages that we allocated for
156 * reading/writing stripes directly from the disk (including P/Q)
158 struct page **stripe_pages;
161 * pointers to the pages in the bio_list. Stored
162 * here for faster lookup
164 struct page **bio_pages;
167 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
168 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
169 static void rmw_work(struct btrfs_work *work);
170 static void read_rebuild_work(struct btrfs_work *work);
171 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
172 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
173 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
174 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
175 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
176 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
177 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
180 * the stripe hash table is used for locking, and to collect
181 * bios in hopes of making a full stripe
183 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
185 struct btrfs_stripe_hash_table *table;
186 struct btrfs_stripe_hash_table *x;
187 struct btrfs_stripe_hash *cur;
188 struct btrfs_stripe_hash *h;
189 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
192 if (info->stripe_hash_table)
195 table = kzalloc(sizeof(*table) + sizeof(*h) * num_entries, GFP_NOFS);
199 spin_lock_init(&table->cache_lock);
200 INIT_LIST_HEAD(&table->stripe_cache);
204 for (i = 0; i < num_entries; i++) {
206 INIT_LIST_HEAD(&cur->hash_list);
207 spin_lock_init(&cur->lock);
208 init_waitqueue_head(&cur->wait);
211 x = cmpxchg(&info->stripe_hash_table, NULL, table);
218 * caching an rbio means to copy anything from the
219 * bio_pages array into the stripe_pages array. We
220 * use the page uptodate bit in the stripe cache array
221 * to indicate if it has valid data
223 * once the caching is done, we set the cache ready bit
226 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
233 ret = alloc_rbio_pages(rbio);
237 for (i = 0; i < rbio->nr_pages; i++) {
238 if (!rbio->bio_pages[i])
241 s = kmap(rbio->bio_pages[i]);
242 d = kmap(rbio->stripe_pages[i]);
244 memcpy(d, s, PAGE_CACHE_SIZE);
246 kunmap(rbio->bio_pages[i]);
247 kunmap(rbio->stripe_pages[i]);
248 SetPageUptodate(rbio->stripe_pages[i]);
250 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
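/* the cache ready bit tells steal_rbio() and cache_rbio() that the copies in stripe_pages can now be trusted */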
254 * we hash on the first logical address of the stripe
256 static int rbio_bucket(struct btrfs_raid_bio *rbio)
258 u64 num = rbio->raid_map[0];
261 * we shift down quite a bit. We're using byte
262 * addressing, and most of the lower bits are zeros.
263 * This tends to upset hash_64, and it consistently
264 * returns just one or two different values.
266 * shifting off the lower bits fixes things.
268 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
272 * stealing an rbio means taking all the uptodate pages from the stripe
273 * array in the source rbio and putting them into the destination rbio
275 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
281 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
284 for (i = 0; i < dest->nr_pages; i++) {
285 s = src->stripe_pages[i];
286 if (!s || !PageUptodate(s)) {
290 d = dest->stripe_pages[i];
294 dest->stripe_pages[i] = s;
295 src->stripe_pages[i] = NULL;
300 * merging means we take the bio_list from the victim and
301 * splice it into the destination. The victim should
302 * be discarded afterwards.
304 * must be called with dest->bio_list_lock held
306 static void merge_rbio(struct btrfs_raid_bio *dest,
307 struct btrfs_raid_bio *victim)
309 bio_list_merge(&dest->bio_list, &victim->bio_list);
310 dest->bio_list_bytes += victim->bio_list_bytes;
311 bio_list_init(&victim->bio_list);
315 * used to prune items that are in the cache. The caller
316 * must hold the hash table lock.
318 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
320 int bucket = rbio_bucket(rbio);
321 struct btrfs_stripe_hash_table *table;
322 struct btrfs_stripe_hash *h;
326 * check the bit again under the hash table lock.
328 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
331 table = rbio->fs_info->stripe_hash_table;
332 h = table->table + bucket;
334 /* hold the lock for the bucket because we may be
335 * removing it from the hash table
340 * hold the lock for the bio list because we need
341 * to make sure the bio list is empty
343 spin_lock(&rbio->bio_list_lock);
345 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
346 list_del_init(&rbio->stripe_cache);
347 table->cache_size -= 1;
350 /* if the bio list isn't empty, this rbio is
351 * still involved in an IO. We take it out
352 * of the cache list, and drop the ref that
353 * was held for the list.
355 * If the bio_list was empty, we also remove
356 * the rbio from the hash_table, and drop
357 * the corresponding ref
359 if (bio_list_empty(&rbio->bio_list)) {
360 if (!list_empty(&rbio->hash_list)) {
361 list_del_init(&rbio->hash_list);
362 atomic_dec(&rbio->refs);
363 BUG_ON(!list_empty(&rbio->plug_list));
368 spin_unlock(&rbio->bio_list_lock);
369 spin_unlock(&h->lock);
372 __free_raid_bio(rbio);
376 * prune a given rbio from the cache
378 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
380 struct btrfs_stripe_hash_table *table;
383 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
386 table = rbio->fs_info->stripe_hash_table;
388 spin_lock_irqsave(&table->cache_lock, flags);
389 __remove_rbio_from_cache(rbio);
390 spin_unlock_irqrestore(&table->cache_lock, flags);
394 * remove everything in the cache
396 void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
398 struct btrfs_stripe_hash_table *table;
400 struct btrfs_raid_bio *rbio;
402 table = info->stripe_hash_table;
404 spin_lock_irqsave(&table->cache_lock, flags);
405 while (!list_empty(&table->stripe_cache)) {
406 rbio = list_entry(table->stripe_cache.next,
407 struct btrfs_raid_bio,
409 __remove_rbio_from_cache(rbio);
411 spin_unlock_irqrestore(&table->cache_lock, flags);
415 * remove all cached entries and free the hash table (used at unmount)
418 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
420 if (!info->stripe_hash_table)
422 btrfs_clear_rbio_cache(info);
423 kfree(info->stripe_hash_table);
424 info->stripe_hash_table = NULL;
428 * insert an rbio into the stripe cache. It
429 * must have already been prepared by calling cache_rbio_pages
432 * If this rbio was already cached, it gets
433 * moved to the front of the lru.
435 * If the size of the rbio cache is too big, we prune an item
438 static void cache_rbio(struct btrfs_raid_bio *rbio)
440 struct btrfs_stripe_hash_table *table;
443 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
446 table = rbio->fs_info->stripe_hash_table;
448 spin_lock_irqsave(&table->cache_lock, flags);
449 spin_lock(&rbio->bio_list_lock);
451 /* bump our ref if we were not in the list before */
452 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
453 atomic_inc(&rbio->refs);
455 if (!list_empty(&rbio->stripe_cache)){
456 list_move(&rbio->stripe_cache, &table->stripe_cache);
458 list_add(&rbio->stripe_cache, &table->stripe_cache);
459 table->cache_size += 1;
462 spin_unlock(&rbio->bio_list_lock);
464 if (table->cache_size > RBIO_CACHE_SIZE) {
465 struct btrfs_raid_bio *found;
467 found = list_entry(table->stripe_cache.prev,
468 struct btrfs_raid_bio,
472 __remove_rbio_from_cache(found);
475 spin_unlock_irqrestore(&table->cache_lock, flags);
480 * helper function to run the xor_blocks api. It is only
481 * able to do MAX_XOR_BLOCKS at a time, so we need to loop through
484 static void run_xor(void **pages, int src_cnt, ssize_t len)
488 void *dest = pages[src_cnt];
491 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
492 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
494 src_cnt -= xor_src_cnt;
495 src_off += xor_src_cnt;
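/* advance past the sources we just consumed; xor_blocks() runs again until every source page has been folded into dest */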
500 * returns true if the bio list inside this rbio
501 * covers an entire stripe (no rmw required).
502 * Must be called with the bio list lock held, or
503 * at a time when you know it is impossible to add
504 * new bios into the list
506 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
508 unsigned long size = rbio->bio_list_bytes;
511 if (size != rbio->nr_data * rbio->stripe_len)
514 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
518 static int rbio_is_full(struct btrfs_raid_bio *rbio)
523 spin_lock_irqsave(&rbio->bio_list_lock, flags);
524 ret = __rbio_is_full(rbio);
525 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
530 * returns 1 if it is safe to merge two rbios together.
531 * The merging is safe if the two rbios correspond to
532 * the same stripe and if they are both going in the same
533 * direction (read vs write), and if neither one is
534 * locked for final IO
536 * The caller is responsible for locking such that
537 * rmw_locked is safe to test
539 static int rbio_can_merge(struct btrfs_raid_bio *last,
540 struct btrfs_raid_bio *cur)
542 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
543 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
547 * we can't merge with cached rbios, since the
548 * idea is that when we merge the destination
549 * rbio is going to run our IO for us. We can
550 * steal from cached rbio's though, other functions handle that
553 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
554 test_bit(RBIO_CACHE_BIT, &cur->flags))
557 if (last->raid_map[0] !=
561 /* reads can't merge with writes */
562 if (last->read_rebuild !=
571 * helper to index into the pstripe
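 * (stripe_pages holds stripe_len >> PAGE_CACHE_SHIFT pages per stripe, data stripes first and parity last, so the P pages start at nr_data * pages_per_stripe)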
573 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
575 index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
576 return rbio->stripe_pages[index];
580 * helper to index into the qstripe, returns null
581 * if there is no qstripe
583 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
585 if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
588 index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
590 return rbio->stripe_pages[index];
594 * The first stripe in the table for a logical address
595 * has the lock. rbios are added in one of three ways:
597 * 1) Nobody has the stripe locked yet. The rbio is given
598 * the lock and 0 is returned. The caller must start the IO themselves
601 * 2) Someone has the stripe locked, but we're able to merge
602 * with the lock owner. The rbio is freed and the IO will
603 * start automatically along with the existing rbio. 1 is returned.
605 * 3) Someone has the stripe locked, but we're not able to merge.
606 * The rbio is added to the lock owner's plug list, or merged into
607 * an rbio already on the plug list. When the lock owner unlocks,
608 * the next rbio on the list is run and the IO is started automatically.
611 * If we return 0, the caller still owns the rbio and must continue with
612 * IO submission. If we return 1, the caller must assume the rbio has
613 * already been freed.
615 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
617 int bucket = rbio_bucket(rbio);
618 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
619 struct btrfs_raid_bio *cur;
620 struct btrfs_raid_bio *pending;
623 struct btrfs_raid_bio *freeit = NULL;
624 struct btrfs_raid_bio *cache_drop = NULL;
628 spin_lock_irqsave(&h->lock, flags);
629 list_for_each_entry(cur, &h->hash_list, hash_list) {
631 if (cur->raid_map[0] == rbio->raid_map[0]) {
632 spin_lock(&cur->bio_list_lock);
634 /* can we steal this cached rbio's pages? */
635 if (bio_list_empty(&cur->bio_list) &&
636 list_empty(&cur->plug_list) &&
637 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
638 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
639 list_del_init(&cur->hash_list);
640 atomic_dec(&cur->refs);
642 steal_rbio(cur, rbio);
644 spin_unlock(&cur->bio_list_lock);
649 /* can we merge into the lock owner? */
650 if (rbio_can_merge(cur, rbio)) {
651 merge_rbio(cur, rbio);
652 spin_unlock(&cur->bio_list_lock);
660 * we couldn't merge with the running
661 * rbio, see if we can merge with the
662 * pending ones. We don't have to
663 * check for rmw_locked because there
664 * is no way they are inside finish_rmw right now
667 list_for_each_entry(pending, &cur->plug_list,
669 if (rbio_can_merge(pending, rbio)) {
670 merge_rbio(pending, rbio);
671 spin_unlock(&cur->bio_list_lock);
678 /* no merging, put us on the tail of the plug list,
679 * our rbio will be started when the currently
680 * running rbio unlocks
682 list_add_tail(&rbio->plug_list, &cur->plug_list);
683 spin_unlock(&cur->bio_list_lock);
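/* nobody else is actively locking this stripe: take the lock by adding our rbio to the bucket's hash list */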
689 atomic_inc(&rbio->refs);
690 list_add(&rbio->hash_list, &h->hash_list);
692 spin_unlock_irqrestore(&h->lock, flags);
694 remove_rbio_from_cache(cache_drop);
696 __free_raid_bio(freeit);
701 * called as rmw or parity rebuild is completed. If the plug list has more
702 * rbios waiting for this stripe, the next one on the list will be started
704 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
707 struct btrfs_stripe_hash *h;
711 bucket = rbio_bucket(rbio);
712 h = rbio->fs_info->stripe_hash_table->table + bucket;
714 if (list_empty(&rbio->plug_list))
717 spin_lock_irqsave(&h->lock, flags);
718 spin_lock(&rbio->bio_list_lock);
720 if (!list_empty(&rbio->hash_list)) {
722 * if we're still cached and there is no other IO
723 * to perform, just leave this rbio here for others
724 * to steal from later
726 if (list_empty(&rbio->plug_list) &&
727 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
729 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
730 BUG_ON(!bio_list_empty(&rbio->bio_list));
734 list_del_init(&rbio->hash_list);
735 atomic_dec(&rbio->refs);
738 * we use the plug list to hold all the rbios
739 * waiting for the chance to lock this stripe.
740 * hand the lock over to one of them.
742 if (!list_empty(&rbio->plug_list)) {
743 struct btrfs_raid_bio *next;
744 struct list_head *head = rbio->plug_list.next;
746 next = list_entry(head, struct btrfs_raid_bio,
749 list_del_init(&rbio->plug_list);
751 list_add(&next->hash_list, &h->hash_list);
752 atomic_inc(&next->refs);
753 spin_unlock(&rbio->bio_list_lock);
754 spin_unlock_irqrestore(&h->lock, flags);
756 if (next->read_rebuild)
757 async_read_rebuild(next);
759 steal_rbio(rbio, next);
760 async_rmw_stripe(next);
764 } else if (waitqueue_active(&h->wait)) {
765 spin_unlock(&rbio->bio_list_lock);
766 spin_unlock_irqrestore(&h->lock, flags);
772 spin_unlock(&rbio->bio_list_lock);
773 spin_unlock_irqrestore(&h->lock, flags);
777 remove_rbio_from_cache(rbio);
780 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
784 WARN_ON(atomic_read(&rbio->refs) < 0);
785 if (!atomic_dec_and_test(&rbio->refs))
788 WARN_ON(!list_empty(&rbio->stripe_cache));
789 WARN_ON(!list_empty(&rbio->hash_list));
790 WARN_ON(!bio_list_empty(&rbio->bio_list));
792 for (i = 0; i < rbio->nr_pages; i++) {
793 if (rbio->stripe_pages[i]) {
794 __free_page(rbio->stripe_pages[i]);
795 rbio->stripe_pages[i] = NULL;
798 kfree(rbio->raid_map);
803 static void free_raid_bio(struct btrfs_raid_bio *rbio)
806 __free_raid_bio(rbio);
810 * this frees the rbio and runs through all the bios in the
811 * bio_list and calls end_io on them
813 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
815 struct bio *cur = bio_list_get(&rbio->bio_list);
823 set_bit(BIO_UPTODATE, &cur->bi_flags);
830 * end io function used by finish_rmw. When we finally
831 * get here, we've written a full stripe
833 static void raid_write_end_io(struct bio *bio, int err)
835 struct btrfs_raid_bio *rbio = bio->bi_private;
838 fail_bio_stripe(rbio, bio);
842 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
847 /* OK, we have written all the stripes we need to. */
848 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
851 rbio_orig_end_io(rbio, err, 0);
856 * the read/modify/write code wants to use the original bio for
857 * any pages it included, and then use the rbio for everything
858 * else. This function decides if a given index (stripe number)
859 * and page number in that stripe fall inside the original bio or not
862 * if you set bio_list_only, you'll get a NULL back for any ranges
863 * that are outside the bio_list
865 * This doesn't take any refs on anything, you get a bare page pointer
866 * and the caller must bump refs as required.
868 * You must call index_rbio_pages once before you can trust
869 * the answers from this function.
871 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
872 int index, int pagenr, int bio_list_only)
875 struct page *p = NULL;
877 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
879 spin_lock_irq(&rbio->bio_list_lock);
880 p = rbio->bio_pages[chunk_page];
881 spin_unlock_irq(&rbio->bio_list_lock);
883 if (p || bio_list_only)
886 return rbio->stripe_pages[chunk_page];
890 * number of pages we need for the entire stripe across all the drives
893 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
895 unsigned long nr = stripe_len * nr_stripes;
896 return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
900 * allocation and initial setup for the btrfs_raid_bio. Note that
901 * this does not allocate any pages for rbio->pages.
903 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
904 struct btrfs_bio *bbio, u64 *raid_map,
907 struct btrfs_raid_bio *rbio;
909 int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
912 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
917 return ERR_PTR(-ENOMEM);
920 bio_list_init(&rbio->bio_list);
921 INIT_LIST_HEAD(&rbio->plug_list);
922 spin_lock_init(&rbio->bio_list_lock);
923 INIT_LIST_HEAD(&rbio->stripe_cache);
924 INIT_LIST_HEAD(&rbio->hash_list);
926 rbio->raid_map = raid_map;
927 rbio->fs_info = root->fs_info;
928 rbio->stripe_len = stripe_len;
929 rbio->nr_pages = num_pages;
932 atomic_set(&rbio->refs, 1);
935 * the stripe_pages and bio_pages array point to the extra
936 * memory we allocated past the end of the rbio
939 rbio->stripe_pages = p;
940 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
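/* the last raid_map entry is RAID6_Q_STRIPE only for raid6, which carries two parity stripes instead of one */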
942 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
943 nr_data = bbio->num_stripes - 2;
945 nr_data = bbio->num_stripes - 1;
947 rbio->nr_data = nr_data;
951 /* allocate pages for all the stripes in the bio, including parity */
952 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
957 for (i = 0; i < rbio->nr_pages; i++) {
958 if (rbio->stripe_pages[i])
960 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
963 rbio->stripe_pages[i] = page;
964 ClearPageUptodate(page);
969 /* allocate pages for just the p/q stripes */
970 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
975 i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
977 for (; i < rbio->nr_pages; i++) {
978 if (rbio->stripe_pages[i])
980 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
983 rbio->stripe_pages[i] = page;
989 * add a single page from a specific stripe into our list of bios for IO
990 * this will try to merge into existing bios if possible, and returns
991 * zero if all went well.
993 int rbio_add_io_page(struct btrfs_raid_bio *rbio,
994 struct bio_list *bio_list,
997 unsigned long page_index,
998 unsigned long bio_max_len)
1000 struct bio *last = bio_list->tail;
1004 struct btrfs_bio_stripe *stripe;
1007 stripe = &rbio->bbio->stripes[stripe_nr];
1008 disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
1010 /* if the device is missing, just fail this stripe */
1011 if (!stripe->dev->bdev)
1012 return fail_rbio_index(rbio, stripe_nr);
1014 /* see if we can add this page onto our existing bio */
1016 last_end = (u64)last->bi_sector << 9;
1017 last_end += last->bi_size;
1020 * we can't merge these if they are from different
1021 * devices or if they are not contiguous
1023 if (last_end == disk_start && stripe->dev->bdev &&
1024 test_bit(BIO_UPTODATE, &last->bi_flags) &&
1025 last->bi_bdev == stripe->dev->bdev) {
1026 ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
1027 if (ret == PAGE_CACHE_SIZE)
1032 /* put a new bio on the list */
1033 bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
1038 bio->bi_bdev = stripe->dev->bdev;
1039 bio->bi_sector = disk_start >> 9;
1040 set_bit(BIO_UPTODATE, &bio->bi_flags);
1042 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
1043 bio_list_add(bio_list, bio);
1048 * while we're doing the read/modify/write cycle, we could
1049 * have errors in reading pages off the disk. This checks
1050 * for errors and if we're not able to read the page it'll
1051 * trigger parity reconstruction. The rmw will be finished
1052 * after we've reconstructed the failed stripes
1054 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1056 if (rbio->faila >= 0 || rbio->failb >= 0) {
1057 BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
1058 __raid56_parity_recover(rbio);
1065 * these are just the pages from the rbio array, not from anything
1066 * the FS sent down to us
1068 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
1071 index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
1073 return rbio->stripe_pages[index];
1077 * helper function to walk our bio list and populate the bio_pages array with
1078 * the result. This seems expensive, but it is faster than constantly
1079 * searching through the bio list as we setup the IO in finish_rmw or stripe reconstruction
1082 * This must be called before you trust the answers from page_in_rbio
1084 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1088 unsigned long stripe_offset;
1089 unsigned long page_index;
1093 spin_lock_irq(&rbio->bio_list_lock);
1094 bio_list_for_each(bio, &rbio->bio_list) {
1095 start = (u64)bio->bi_sector << 9;
1096 stripe_offset = start - rbio->raid_map[0];
1097 page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1099 for (i = 0; i < bio->bi_vcnt; i++) {
1100 p = bio->bi_io_vec[i].bv_page;
1101 rbio->bio_pages[page_index + i] = p;
1104 spin_unlock_irq(&rbio->bio_list_lock);
1108 * this is called from one of two situations. We either
1109 * have a full stripe from the higher layers, or we've read all
1110 * the missing bits off disk.
1112 * This will calculate the parity and then send down any writes that are required
1115 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1117 struct btrfs_bio *bbio = rbio->bbio;
1118 void *pointers[bbio->num_stripes];
1119 int stripe_len = rbio->stripe_len;
1120 int nr_data = rbio->nr_data;
1125 struct bio_list bio_list;
1127 int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
1130 bio_list_init(&bio_list);
1132 if (bbio->num_stripes - rbio->nr_data == 1) {
1133 p_stripe = bbio->num_stripes - 1;
1134 } else if (bbio->num_stripes - rbio->nr_data == 2) {
1135 p_stripe = bbio->num_stripes - 2;
1136 q_stripe = bbio->num_stripes - 1;
1141 /* at this point we either have a full stripe,
1142 * or we've read the full stripe from the drive.
1143 * recalculate the parity and write the new results.
1145 * We're not allowed to add any new bios to the
1146 * bio list here, anyone else that wants to
1147 * change this stripe needs to do their own rmw.
1149 spin_lock_irq(&rbio->bio_list_lock);
1150 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1151 spin_unlock_irq(&rbio->bio_list_lock);
1153 atomic_set(&rbio->bbio->error, 0);
1156 * now that we've set rmw_locked, run through the
1157 * bio list one last time and map the page pointers
1159 * We don't cache full rbios because we're assuming
1160 * the higher layers are unlikely to use this area of
1161 * the disk again soon. If they do use it again,
1162 * hopefully they will send another full bio.
1164 index_rbio_pages(rbio);
1165 if (!rbio_is_full(rbio))
1166 cache_rbio_pages(rbio);
1168 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1170 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1172 /* first collect one page from each data stripe */
1173 for (stripe = 0; stripe < nr_data; stripe++) {
1174 p = page_in_rbio(rbio, stripe, pagenr, 0);
1175 pointers[stripe] = kmap(p);
1178 /* then add the parity stripe */
1179 p = rbio_pstripe_page(rbio, pagenr);
1181 pointers[stripe++] = kmap(p);
1183 if (q_stripe != -1) {
1186 * raid6, add the qstripe and call the
1187 * library function to fill in our p/q
1189 p = rbio_qstripe_page(rbio, pagenr);
1191 pointers[stripe++] = kmap(p);
1193 raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
1197 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
1198 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
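/* for raid5, P was seeded with a copy of the first data page and run_xor folded the remaining nr_data - 1 data pages into it */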
1202 for (stripe = 0; stripe < bbio->num_stripes; stripe++)
1203 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1207 * time to start writing. Make bios for everything from the
1208 * higher layers (the bio_list in our rbio) and our p/q. Ignore everything else
1211 for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
1212 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1214 if (stripe < rbio->nr_data) {
1215 page = page_in_rbio(rbio, stripe, pagenr, 1);
1219 page = rbio_stripe_page(rbio, stripe, pagenr);
1222 ret = rbio_add_io_page(rbio, &bio_list,
1223 page, stripe, pagenr, rbio->stripe_len);
1229 atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
1230 BUG_ON(atomic_read(&bbio->stripes_pending) == 0);
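/* each write completion drops stripes_pending; the last bio to finish ends io on the whole rbio */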
1233 bio = bio_list_pop(&bio_list);
1237 bio->bi_private = rbio;
1238 bio->bi_end_io = raid_write_end_io;
1239 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1240 submit_bio(WRITE, bio);
1245 rbio_orig_end_io(rbio, -EIO, 0);
1249 * helper to find the stripe number for a given bio. Used to figure out which
1250 * stripe has failed. This expects the bio to correspond to a physical disk,
1251 * so it looks up based on physical sector numbers.
1253 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1256 u64 physical = bio->bi_sector;
1259 struct btrfs_bio_stripe *stripe;
1263 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1264 stripe = &rbio->bbio->stripes[i];
1265 stripe_start = stripe->physical;
1266 if (physical >= stripe_start &&
1267 physical < stripe_start + rbio->stripe_len) {
1275 * helper to find the stripe number for a given
1276 * bio (before mapping). Used to figure out which stripe has
1277 * failed. This looks up based on logical block numbers.
1279 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1282 u64 logical = bio->bi_sector;
1288 for (i = 0; i < rbio->nr_data; i++) {
1289 stripe_start = rbio->raid_map[i];
1290 if (logical >= stripe_start &&
1291 logical < stripe_start + rbio->stripe_len) {
1299 * returns -EIO if we had too many failures
1301 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1303 unsigned long flags;
1306 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1308 /* we already know this stripe is bad, move on */
1309 if (rbio->faila == failed || rbio->failb == failed)
1312 if (rbio->faila == -1) {
1313 /* first failure on this rbio */
1314 rbio->faila = failed;
1315 atomic_inc(&rbio->bbio->error);
1316 } else if (rbio->failb == -1) {
1317 /* second failure on this rbio */
1318 rbio->failb = failed;
1319 atomic_inc(&rbio->bbio->error);
1324 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1330 * helper to fail a stripe based on a physical disk bio
1333 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1336 int failed = find_bio_stripe(rbio, bio);
1341 return fail_rbio_index(rbio, failed);
1345 * this sets each page in the bio uptodate. It should only be used on private
1346 * rbio pages, nothing that comes in from the higher layers
1348 static void set_bio_pages_uptodate(struct bio *bio)
1353 for (i = 0; i < bio->bi_vcnt; i++) {
1354 p = bio->bi_io_vec[i].bv_page;
1360 * end io for the read phase of the rmw cycle. All the bios here are physical
1361 * stripe bios we've read from the disk so we can recalculate the parity of the
1364 * This will usually kick off finish_rmw once all the bios are read in, but it
1365 * may trigger parity reconstruction if we had any errors along the way
1367 static void raid_rmw_end_io(struct bio *bio, int err)
1369 struct btrfs_raid_bio *rbio = bio->bi_private;
1372 fail_bio_stripe(rbio, bio);
1374 set_bio_pages_uptodate(bio);
1378 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
1382 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
1386 * this will normally call finish_rmw to start our write
1387 * but if there are any failed stripes we'll reconstruct
1390 validate_rbio_for_rmw(rbio);
1395 rbio_orig_end_io(rbio, -EIO, 0);
1398 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1400 rbio->work.flags = 0;
1401 rbio->work.func = rmw_work;
1403 btrfs_queue_worker(&rbio->fs_info->rmw_workers,
1407 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1409 rbio->work.flags = 0;
1410 rbio->work.func = read_rebuild_work;
1412 btrfs_queue_worker(&rbio->fs_info->rmw_workers,
1417 * the stripe must be locked by the caller. It will
1418 * unlock after all the writes are done
1420 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1422 int bios_to_read = 0;
1423 struct btrfs_bio *bbio = rbio->bbio;
1424 struct bio_list bio_list;
1426 int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1431 bio_list_init(&bio_list);
1433 ret = alloc_rbio_pages(rbio);
1437 index_rbio_pages(rbio);
1439 atomic_set(&rbio->bbio->error, 0);
1441 * build a list of bios to read all the missing parts of this stripe
1444 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1445 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1448 * we want to find all the pages missing from
1449 * the rbio and read them from the disk. If
1450 * page_in_rbio finds a page in the bio list
1451 * we don't need to read it off the stripe.
1453 page = page_in_rbio(rbio, stripe, pagenr, 1);
1457 page = rbio_stripe_page(rbio, stripe, pagenr);
1459 * the bio cache may have handed us an uptodate
1460 * page. If so, be happy and use it
1462 if (PageUptodate(page))
1465 ret = rbio_add_io_page(rbio, &bio_list, page,
1466 stripe, pagenr, rbio->stripe_len);
1472 bios_to_read = bio_list_size(&bio_list);
1473 if (!bios_to_read) {
1475 * this can happen if others have merged with
1476 * us, it means there is nothing left to read.
1477 * But if there are missing devices it may not be
1478 * safe to do the full stripe write yet.
1484 * the bbio may be freed once we submit the last bio. Make sure
1485 * not to touch it after that
1487 atomic_set(&bbio->stripes_pending, bios_to_read);
1489 bio = bio_list_pop(&bio_list);
1493 bio->bi_private = rbio;
1494 bio->bi_end_io = raid_rmw_end_io;
1496 btrfs_bio_wq_end_io(rbio->fs_info, bio,
1497 BTRFS_WQ_ENDIO_RAID56);
1499 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1500 submit_bio(READ, bio);
1502 /* the actual write will happen once the reads are done */
1506 rbio_orig_end_io(rbio, -EIO, 0);
1510 validate_rbio_for_rmw(rbio);
1515 * if the upper layers pass in a full stripe, we thank them by only allocating
1516 * enough pages to hold the parity, and sending it all down quickly.
1518 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1522 ret = alloc_rbio_parity_pages(rbio);
1526 ret = lock_stripe_add(rbio);
1533 * partial stripe writes get handed over to async helpers.
1534 * We're really hoping to merge a few more writes into this
1535 * rbio before calculating new parity
1537 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1541 ret = lock_stripe_add(rbio);
1543 async_rmw_stripe(rbio);
1548 * sometimes while we were reading from the drive to
1549 * recalculate parity, enough new bios come in to create
1550 * a full stripe. So we do a check here to see if we can
1551 * go directly to finish_rmw
1553 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1555 /* head off into rmw land if we don't have a full stripe */
1556 if (!rbio_is_full(rbio))
1557 return partial_stripe_write(rbio);
1558 return full_stripe_write(rbio);
1562 * We use plugging call backs to collect full stripes.
1563 * Any time we get a partial stripe write while plugged
1564 * we collect it into a list. When the unplug comes down,
1565 * we sort the list by logical block number and merge
1566 * everything we can into the same rbios
1568 struct btrfs_plug_cb {
1569 struct blk_plug_cb cb;
1570 struct btrfs_fs_info *info;
1571 struct list_head rbio_list;
1572 struct btrfs_work work;
1576 * rbios on the plug list are sorted for easier merging.
1578 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1580 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1582 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1584 u64 a_sector = ra->bio_list.head->bi_sector;
1585 u64 b_sector = rb->bio_list.head->bi_sector;
1587 if (a_sector < b_sector)
1589 if (a_sector > b_sector)
1594 static void run_plug(struct btrfs_plug_cb *plug)
1596 struct btrfs_raid_bio *cur;
1597 struct btrfs_raid_bio *last = NULL;
1600 * sort our plug list then try to merge
1601 * everything we can in hopes of creating full stripes
1604 list_sort(NULL, &plug->rbio_list, plug_cmp);
1605 while (!list_empty(&plug->rbio_list)) {
1606 cur = list_entry(plug->rbio_list.next,
1607 struct btrfs_raid_bio, plug_list);
1608 list_del_init(&cur->plug_list);
1610 if (rbio_is_full(cur)) {
1611 /* we have a full stripe, send it down */
1612 full_stripe_write(cur);
1616 if (rbio_can_merge(last, cur)) {
1617 merge_rbio(last, cur);
1618 __free_raid_bio(cur);
1622 __raid56_parity_write(last);
1627 __raid56_parity_write(last);
1633 * if the unplug comes from schedule, we have to push the
1634 * work off to a helper thread
1636 static void unplug_work(struct btrfs_work *work)
1638 struct btrfs_plug_cb *plug;
1639 plug = container_of(work, struct btrfs_plug_cb, work);
1643 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1645 struct btrfs_plug_cb *plug;
1646 plug = container_of(cb, struct btrfs_plug_cb, cb);
1648 if (from_schedule) {
1649 plug->work.flags = 0;
1650 plug->work.func = unplug_work;
1651 btrfs_queue_worker(&plug->info->rmw_workers,
1659 * our main entry point for writes from the rest of the FS.
1661 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1662 struct btrfs_bio *bbio, u64 *raid_map,
1665 struct btrfs_raid_bio *rbio;
1666 struct btrfs_plug_cb *plug = NULL;
1667 struct blk_plug_cb *cb;
1669 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
1673 return PTR_ERR(rbio);
1675 bio_list_add(&rbio->bio_list, bio);
1676 rbio->bio_list_bytes = bio->bi_size;
1679 * don't plug on full rbios, just get them out the door
1680 * as quickly as we can
1682 if (rbio_is_full(rbio))
1683 return full_stripe_write(rbio);
1685 cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
1688 plug = container_of(cb, struct btrfs_plug_cb, cb);
1690 plug->info = root->fs_info;
1691 INIT_LIST_HEAD(&plug->rbio_list);
1693 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1695 return __raid56_parity_write(rbio);
1701 * all parity reconstruction happens here. We've read in everything
1702 * we can find from the drives and this does the heavy lifting of
1703 * sorting the good from the bad.
1705 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1709 int faila = -1, failb = -1;
1710 int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1715 pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
1722 faila = rbio->faila;
1723 failb = rbio->failb;
1725 if (rbio->read_rebuild) {
1726 spin_lock_irq(&rbio->bio_list_lock);
1727 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1728 spin_unlock_irq(&rbio->bio_list_lock);
1731 index_rbio_pages(rbio);
1733 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1734 /* setup our array of pointers with pages from each stripe
1737 for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
1739 * if we're rebuilding a read, we have to use
1740 * pages from the bio list
1742 if (rbio->read_rebuild &&
1743 (stripe == faila || stripe == failb)) {
1744 page = page_in_rbio(rbio, stripe, pagenr, 0);
1746 page = rbio_stripe_page(rbio, stripe, pagenr);
1748 pointers[stripe] = kmap(page);
1751 /* all raid6 handling here */
1752 if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
1756 * single failure, rebuild from parity raid5 style
1760 if (faila == rbio->nr_data) {
1762 * Just the P stripe has failed, without
1763 * a bad data or Q stripe.
1764 * TODO, we should redo the xor here.
1770 * a single failure in raid6 is rebuilt
1771 * in the pstripe code below
1776 /* make sure our ps and qs are in order */
1777 if (faila > failb) {
1783 /* if the q stripe is failed, do a pstripe reconstruction from the xors.
1785 * If both the q stripe and the P stripe are failed, we're
1786 * here due to a crc mismatch and we can't give them the data they want
1789 if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
1790 if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
1795 * otherwise we have one bad data stripe and
1796 * a good P stripe. raid5!
1801 if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
1802 raid6_datap_recov(rbio->bbio->num_stripes,
1803 PAGE_SIZE, faila, pointers);
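/* a failed data stripe plus a failed P stripe is rebuilt from Q via raid6_datap_recov; two failed data stripes use raid6_2data_recov below */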
1805 raid6_2data_recov(rbio->bbio->num_stripes,
1806 PAGE_SIZE, faila, failb,
1812 /* rebuild from P stripe here (raid5 or raid6) */
1813 BUG_ON(failb != -1);
1815 /* Copy parity block into failed block to start with */
1816 memcpy(pointers[faila],
1817 pointers[rbio->nr_data],
1820 /* rearrange the pointer array */
1821 p = pointers[faila];
1822 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1823 pointers[stripe] = pointers[stripe + 1];
1824 pointers[rbio->nr_data - 1] = p;
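/* the failed page, seeded with the parity copy above, now sits in the xor destination slot so its data is rebuilt in place */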
1826 /* xor in the rest */
1827 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
1829 /* if we're doing this rebuild as part of an rmw, go through
1830 * and set all of our private rbio pages in the
1831 * failed stripes as uptodate. This way finish_rmw will
1832 * know they can be trusted. If this was a read reconstruction,
1833 * other endio functions will fiddle the uptodate bits
1835 if (!rbio->read_rebuild) {
1836 for (i = 0; i < nr_pages; i++) {
1838 page = rbio_stripe_page(rbio, faila, i);
1839 SetPageUptodate(page);
1842 page = rbio_stripe_page(rbio, failb, i);
1843 SetPageUptodate(page);
1847 for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
1849 * if we're rebuilding a read, we have to use
1850 * pages from the bio list
1852 if (rbio->read_rebuild &&
1853 (stripe == faila || stripe == failb)) {
1854 page = page_in_rbio(rbio, stripe, pagenr, 0);
1856 page = rbio_stripe_page(rbio, stripe, pagenr);
1868 if (rbio->read_rebuild) {
1870 cache_rbio_pages(rbio);
1872 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1874 rbio_orig_end_io(rbio, err, err == 0);
1875 } else if (err == 0) {
1880 rbio_orig_end_io(rbio, err, 0);
1885 * This is called only for stripes we've read from disk to
1886 * reconstruct the parity.
1888 static void raid_recover_end_io(struct bio *bio, int err)
1890 struct btrfs_raid_bio *rbio = bio->bi_private;
1893 * we only read stripe pages off the disk, set them
1894 * up to date if there were no errors
1897 fail_bio_stripe(rbio, bio);
1899 set_bio_pages_uptodate(bio);
1902 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
1905 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
1906 rbio_orig_end_io(rbio, -EIO, 0);
1908 __raid_recover_end_io(rbio);
1912 * reads everything we need off the disk to reconstruct
1913 * the parity. endio handlers trigger final reconstruction
1914 * when the IO is done.
1916 * This is used both for reads from the higher layers and for
1917 * parity construction required to finish a rmw cycle.
1919 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1921 int bios_to_read = 0;
1922 struct btrfs_bio *bbio = rbio->bbio;
1923 struct bio_list bio_list;
1925 int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1930 bio_list_init(&bio_list);
1932 ret = alloc_rbio_pages(rbio);
1936 atomic_set(&rbio->bbio->error, 0);
1939 * read everything that hasn't failed. Thanks to the
1940 * stripe cache, it is possible that some or all of these
1941 * pages are going to be uptodate.
1943 for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
1944 if (rbio->faila == stripe ||
1945 rbio->failb == stripe)
1948 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1952 * the rmw code may have already read this page in
1955 p = rbio_stripe_page(rbio, stripe, pagenr);
1956 if (PageUptodate(p))
1959 ret = rbio_add_io_page(rbio, &bio_list,
1960 rbio_stripe_page(rbio, stripe, pagenr),
1961 stripe, pagenr, rbio->stripe_len);
1967 bios_to_read = bio_list_size(&bio_list);
1968 if (!bios_to_read) {
1970 * we might have no bios to read just because the pages
1971 * were up to date, or we might have no bios to read because
1972 * the devices were gone.
1974 if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
1975 __raid_recover_end_io(rbio);
1983 * the bbio may be freed once we submit the last bio. Make sure
1984 * not to touch it after that
1986 atomic_set(&bbio->stripes_pending, bios_to_read);
1988 bio = bio_list_pop(&bio_list);
1992 bio->bi_private = rbio;
1993 bio->bi_end_io = raid_recover_end_io;
1995 btrfs_bio_wq_end_io(rbio->fs_info, bio,
1996 BTRFS_WQ_ENDIO_RAID56);
1998 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
1999 submit_bio(READ, bio);
2005 if (rbio->read_rebuild)
2006 rbio_orig_end_io(rbio, -EIO, 0);
2011 * the main entry point for reads from the higher layers. This
2012 * is really only called when the normal read path had a failure,
2013 * so we assume the bio they send down corresponds to a failed part of the drive
2016 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2017 struct btrfs_bio *bbio, u64 *raid_map,
2018 u64 stripe_len, int mirror_num)
2020 struct btrfs_raid_bio *rbio;
2023 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
2025 return PTR_ERR(rbio);
2028 rbio->read_rebuild = 1;
2029 bio_list_add(&rbio->bio_list, bio);
2030 rbio->bio_list_bytes = bio->bi_size;
2032 rbio->faila = find_logical_bio_stripe(rbio, bio);
2033 if (rbio->faila == -1) {
2040 * reconstruct from the q stripe if they are
2041 * asking for mirror 3
2043 if (mirror_num == 3)
2044 rbio->failb = bbio->num_stripes - 2;
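/* with P (num_stripes - 2) marked failed as well, recovery is forced to rebuild the data from the Q stripe */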
2046 ret = lock_stripe_add(rbio);
2049 * __raid56_parity_recover will end the bio with
2050 * any errors it hits. We don't want to return
2051 * its error value up the stack because our caller
2052 * will end up calling bio_endio with any nonzero return
2056 __raid56_parity_recover(rbio);
2058 * our rbio has been added to the list of
2059 * rbios that will be handled after the
2060 * current lock owner is done
2066 static void rmw_work(struct btrfs_work *work)
2068 struct btrfs_raid_bio *rbio;
2070 rbio = container_of(work, struct btrfs_raid_bio, work);
2071 raid56_rmw_stripe(rbio);
2074 static void read_rebuild_work(struct btrfs_work *work)
2076 struct btrfs_raid_bio *rbio;
2078 rbio = container_of(work, struct btrfs_raid_bio, work);
2079 __raid56_parity_recover(rbio);