// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}
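/*
 * Editorial note (a sketch, not part of the original source): a worked
 * example of the accounting in set_migratetype_isolate() above, assuming
 * the common x86-64 configuration of pageblock_order == 9, i.e.
 * pageblock_nr_pages == 512.
 *
 * set_pageblock_migratetype(page, MIGRATE_ISOLATE) only influences pages
 * freed *afterwards*: the freeing path looks up the pageblock's
 * migratetype and files such pages on the MIGRATE_ISOLATE freelist.
 * Pages that are already free must be moved over explicitly, which is
 * what move_freepages_block() does. If, say, 128 of the 512 pages in
 * the pageblock are currently free:
 *
 *	nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, NULL);
 *						// nr_pages == 128
 *	__mod_zone_freepage_state(zone, -128, mt);
 *
 * NR_FREE_PAGES drops by 128, so the watermark checks stop counting
 * these pages as allocatable even though they still sit on an (isolated)
 * free list.
 */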
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page of order >= pageblock_order on an isolated
	 * pageblock is restricted from merging (due to the freepage
	 * accounting), an unmerged free buddy page may exist.
	 * move_freepages_block() does not handle merging, so we need
	 * another approach: isolating the page and then freeing it again
	 * will make the buddies merge.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of order >= pageblock_order, there
	 * should be no other free page in the range, so we can avoid a
	 * costly pageblock scan for freepage moving.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}
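/*
 * Editorial note (a sketch, not part of the original source): a worked
 * example of the PageBuddy() special case in unset_migratetype_isolate()
 * above, assuming the x86-64 defaults pageblock_order == 9 and
 * MAX_ORDER == 11.
 *
 * With those values, (order >= pageblock_order && order < MAX_ORDER - 1)
 * matches only order == 9: a free page covering exactly one pageblock
 * whose equally sized buddy could merge with it up to order 10, a merge
 * that was suppressed while this pageblock was isolated. For example
 * (the PFN values are hypothetical):
 *
 *	pfn = page_to_pfn(page);		// e.g. 0x1200
 *	buddy_pfn = __find_buddy_pfn(pfn, 9);	// 0x1200 ^ (1 << 9) == 0x1000
 *
 * If the buddy's pageblock is not isolated, __isolate_free_page() takes
 * the page off its freelist, and __putback_isolated_page() later frees
 * it under the restored migratetype; the normal buddy freeing path then
 * merges the two order-9 pages into a single order-10 page at 0x1000.
 */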
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated (exclusive).
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask)
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page-allocation-type MIGRATE_ISOLATE means that free pages in
 * the range will never be allocated. Any free pages, and any pages freed in
 * the future, will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages
 * in it. test_pages_isolated() can be used to test this.
 *
 * There is no high-level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), and set_migratetype_isolate()
 * returns an error. We then clean up by restoring the migration type on the
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplists before isolation and to
 * enable them again after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
		}
	}
	return 0;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Check the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
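/*
 * Editorial note (a sketch, not part of the original source): how the
 * three exported entry points fit together, modeled on callers such as
 * alloc_contig_range() and memory offlining. Error handling and the
 * page-migration step are elided.
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn,
 *				     MIGRATE_MOVABLE, 0))
 *		return -EBUSY;	// raced with another isolation attempt
 *
 *	drain_all_pages(zone);	// flush pcplists back into the buddy
 *	... migrate any still-allocated pages out of the range ...
 *
 *	if (test_pages_isolated(start_pfn, end_pfn, 0)) {
 *		// something in the range is still allocated: back out
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *		return -EBUSY;
 *	}
 *	// the whole range is now free and unallocatable; claim it, then
 *	// call undo_isolate_page_range() once done with it
 *
 * As the comment above start_isolate_page_range() notes, callers that
 * need a hard guarantee should bracket this sequence with
 * zone_pcp_disable()/zone_pcp_enable() rather than rely on
 * drain_all_pages() alone.
 */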