// SPDX-License-Identifier: GPL-2.0

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)

/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* for managing resync I/O pages */
struct resync_pages {
	void		*raid_bio;
	struct page	*pages[RESYNC_PAGES];
};

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
};

/* mempool free callback: the pool elements are plain kmalloc'ed raid bios */
static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}

/* drop the page references taken in resync_alloc_pages() */
static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}

/* take an extra reference on every resync page */
static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}

/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}

/* generally called after bio_reset() for resetting bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
				      int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		/*
		 * won't fail because the vec table is big
		 * enough to hold all these pages
		 */
		bio_add_page(bio, page, len, 0);
		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
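
/*
 * Illustrative sketch, not part of the original file: one plausible way the
 * helpers above fit together when backing a resync bio with pages. The
 * function name example_prepare_resync_bio and its parameters are
 * hypothetical; the real call sites live in raid1.c and raid10.c.
 */
static int example_prepare_resync_bio(struct bio *bio,
				      struct resync_pages *rp,
				      void *raid_bio, int size)
{
	int ret;

	/* back the bio with RESYNC_PAGES freshly allocated pages */
	ret = resync_alloc_pages(rp, GFP_KERNEL);
	if (ret)
		return ret;

	/* record the owning raid bio and let get_resync_pages() find rp */
	rp->raid_bio = raid_bio;
	bio->bi_private = rp;

	/* fill the bvec table with up to 'size' bytes of those pages */
	md_bio_reset_resync_pages(bio, rp, size);
	return 0;
}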