/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_index = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
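/*
 * For scale: VM_MAX_READAHEAD defaults to 128 kbytes, so assuming 4k
 * pages, the ra_pages initialiser above starts every file off with a
 * 32-page maximum readahead window.
 */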
/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

static inline void reset_ahead_window(struct file_ra_state *ra)
{
	/*
	 * ... but preserve ahead_start + ahead_size value,
	 * see 'recheck:' label in page_cache_readahead().
	 * Note: We never use ->ahead_size as rvalue without
	 * checking ->ahead_start != 0 first.
	 */
	ra->ahead_size += ra->ahead_start;
	ra->ahead_start = 0;
}

static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	reset_ahead_window(ra);
	return;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then go x 4 for small sizes and x 2 for medium ones, capping at max.
 * For the default 128k (32 page) max readahead this gives a 16k-64k initial
 * window for reads of up to 8 pages, and the full 128k beyond that.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, else increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
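/*
 * For illustration, assuming 4k pages and the 128k default (max = 32
 * pages, min = 4 pages): a 3-page request rounds up to 4 and doubles
 * into an 8-page (32k) initial window; a sequential stream then ramps
 * 8 -> 16 -> 32 pages through get_next_ra_size(), while a cache miss
 * at size 16 shrinks the next window to max(16 - 2, 4) = 14 pages.
 */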
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);
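/*
 * Illustrative note: a filesystem whose ->readpages() cannot batch its
 * I/O may simply hand the whole page list to read_cache_pages() with a
 * per-page filler callback; NFS, for one, fills each page through an
 * asynchronous read request this way.
 */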
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *		Together, these form the "current window".
 * prev_index:	The page which the readahead algorithm most-recently inspected.
 *		It is mainly used to detect sequential file reading.
 *		If page_cache_readahead sees that it is again being called for
 *		a page which it just looked at, it can return immediately
 *		without making any state changes.
 * offset:	Offset in the prev_index where the last read ended - used for
 *		detection of sequential file reading.
 * ahead_start,
 * ahead_size:	Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the "off" state (size == 0), readahead is disabled.
 * In this state, prev_index is used to detect the resumption of sequential
 * I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So we submit a new
 * batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which
 * is the next sequential page.  Ahead window calculations are done only when
 * it is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will
 * resume at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on I/O request size and the max_readahead.
 *
 * This function is to be called for every read request, rather than when
 * it is time to perform readahead.  It is called only once for the entire I/O
 * regardless of size unless readahead is unable to start enough I/O to
 * satisfy the request (I/O request > max_readahead).
 */

/*
 * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}
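/*
 * For scale, assuming 4k pages and the usual VM_MAX_CACHE_HIT of 256
 * pages: roughly one megabyte of consecutive requests that start no
 * new I/O will trip RA_FLAG_INCACHE and switch readahead off.
 */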
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait until the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1 ('success') if the read completes without readahead mode being
 * switched off; otherwise returns 0 ('failure').
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}

static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_index >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/*
		 * A read failure in blocking mode implies the pages are
		 * all cached, so we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call, so
		 * we can safely assume we have taken care of all the
		 * pages requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will anyway be closed
		 * in case we failed due to excessive page cache hits.
		 */
		reset_ahead_window(ra);
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *   PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * We avoid doing extra work and bogusly perturbing the readahead
	 * window expansion logic.
	 */
	if (offset == ra->prev_index && --req_size)
		++offset;

	/* Note that prev_index == -1 if it is a first read */
	sequential = (offset == ra->prev_index + 1);
	ra->prev_index = offset;
	ra->prev_offset = 0;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_index += newsize - 1;

	/*
	 * Special case - first read at start of file (or the first detected
	 * sequential access).  We'll assume it's a whole-file read and grow
	 * the window fast.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to overlap
		 * IOs, thus preventing stalls.  So issue the ahead window
		 * immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above,
	 * so if this is not the next sequential page, it is random.
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
						 newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the
	 * first occurrence (i.e. we have an existing window)
	 */
	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto recheck;
	}

	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_index >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
recheck:
		/* prev_index shouldn't overrun the ahead window */
		ra->prev_index = min(ra->prev_index,
			ra->ahead_start + ra->ahead_size - 1);
	}

out:
	return ra->prev_index + 1;
}
EXPORT_SYMBOL_GPL(page_cache_readahead);
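/*
 * A short trace of the above, assuming 4k pages, a 32-page max and
 * single-page read requests: the read of page 0 sets up a 4-page
 * current window (pages 0-3); the read of page 1 opens an 8-page
 * ahead window (pages 4-11); the read of page 4 crosses into it, so
 * the windows shift (start = 4, size = 8) and a 16-page ahead window
 * is submitted (pages 12-27); each later crossing doubles the ahead
 * window until it pins at the 32-page maximum.
 */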
/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in
 * fact not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
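/*
 * Sketch of the typical caller, simplified from the generic read path
 * in mm/filemap.c (do_generic_mapping_read); details abridged for
 * illustration:
 *
 *	page_cache_readahead(mapping, &ra, filp, index, nr_pages);
 *	page = find_get_page(mapping, index);
 *	if (!page)
 *		handle_ra_miss(mapping, &ra, index);
 */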