/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
	if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
		return;

	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
	page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
	struct zone *zone = page_zone(page);

	if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
		return;

	BUG_ON(zone->nr_pageblock_isolate <= 0);
	set_pageblock_migratetype(page, migratetype);
	zone->nr_pageblock_isolate--;
}

int set_migratetype_isolate(struct page *page)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: memory hotplug doesn't call shrink_slab() by itself yet.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found))
		ret = 0;

	/*
	 * Here, "immobile" means pages that are not on the LRU. If there
	 * are more immobile pages than removable-by-driver pages reported
	 * by the notifier, we will fail.
	 */

out:
	if (!ret) {
		set_pageblock_isolate(page);
		move_freepages_block(zone, page, MIGRATE_ISOLATE);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages();
	return ret;
}
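/*
 * Illustrative sketch (not part of this file): the MEM_ISOLATE_COUNT
 * request above is answered by balloon drivers that register with
 * register_memory_isolate_notifier(). A callback might, roughly, count
 * how many of its ballooned pages fall inside the queried range and
 * report them via pages_found. The details below are assumptions; the
 * balloon_owns_pfn() helper is hypothetical:
 *
 *	static int balloon_isolate_cb(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		struct memory_isolate_notify *arg = data;
 *		unsigned long pfn;
 *
 *		if (action != MEM_ISOLATE_COUNT)
 *			return NOTIFY_OK;
 *
 *		for (pfn = arg->start_pfn;
 *		     pfn < arg->start_pfn + arg->nr_pages; pfn++)
 *			if (balloon_owns_pfn(pfn))	// hypothetical helper
 *				arg->pages_found++;
 *		return NOTIFY_OK;
 *	}
 */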
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;
	move_freepages_block(zone, page, migratetype);
	restore_pageblock_isolate(page, migratetype);
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid_within(pfn + i))
			break;
	if (unlikely(i == nr_pages))
		return NULL;
	return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- make the page-allocation-type of a range
 * of pages MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means free pages in
 * the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page)) {
			undo_pfn = pfn;
			goto undo;
		}
	}
	return 0;
undo:
	for (pfn = start_pfn; pfn < undo_pfn; pfn += pageblock_nr_pages)
		unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			    unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
	BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
	return 0;
}
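/*
 * Illustrative sketch (an assumption about a typical caller, not code from
 * this file): both functions above BUG_ON() unaligned bounds, so a caller
 * starting from an arbitrary pfn range would first round it out to
 * pageblock boundaries, e.g.:
 *
 *	unsigned long start = round_down(pfn, pageblock_nr_pages);
 *	unsigned long end = ALIGN(pfn + nr_pages, pageblock_nr_pages);
 *
 *	if (start_isolate_page_range(start, end, MIGRATE_MOVABLE))
 *		return -EBUSY;
 */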
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
			 page_private(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free pages
	 * is not necessarily aligned to pageblock_nr_pages.
	 * Check the pageblock migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret ? 0 : -EBUSY;
}
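/*
 * Illustrative sketch (an assumption about typical callers such as memory
 * offlining or CMA, not code from this file): the exported helpers are
 * normally used together -- isolate the range, migrate or free whatever is
 * still in use, check the result, and undo the isolation if the operation
 * is abandoned:
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE))
 *		return -EBUSY;
 *	// ... migrate/reclaim pages still in use in [start_pfn, end_pfn) ...
 *	if (test_pages_isolated(start_pfn, end_pfn)) {
 *		// some pages are still busy; give the range back
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *		return -EBUSY;
 *	}
 */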