/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * linux/Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/pagemap.h>
#include <linux/init.h>

#include <asm/dma.h>
#include <asm/uaccess.h> /* for copy_to/from_user */
#include <asm/pgtable.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Tunables controlling kswapd's scanning/IO behaviour. */
pager_daemon_t pager_daemon = {
	512,	/* base number for calculating the number of tries */
	SWAP_CLUSTER_MAX,	/* minimum number of tries */
	8,	/* do swap I/O in clusters of this size */
};

/**
 * (de)activate_page - move pages from/to active and inactive lists
 * @page: the page we want to move
 * @nolock - are we already holding the pagemap_lru_lock?
 *
 * Deactivate_page will move an active page to the right
 * inactive list, while activate_page will move a page back
 * from one of the inactive lists to the active list. If
 * called on a page which is not on any of the lists, the
 * page is left alone.
 */
static inline void deactivate_page_nolock(struct page * page)
{
	/*
	 * Only pages currently on the active list are moved;
	 * pages not on any list are deliberately left alone.
	 * Caller must hold pagemap_lru_lock.
	 */
	if (PageActive(page)) {
		del_page_from_active_list(page);
		add_page_to_inactive_list(page);
	}
}

/* Locked wrapper around deactivate_page_nolock(). */
void deactivate_page(struct page * page)
{
	spin_lock(&pagemap_lru_lock);
	deactivate_page_nolock(page);
	spin_unlock(&pagemap_lru_lock);
}

/*
 * Move an inactive page to the active list.
 * Caller must hold pagemap_lru_lock; pages not on the
 * inactive list are left untouched.
 */
static inline void activate_page_nolock(struct page * page)
{
	if (PageInactive(page)) {
		del_page_from_inactive_list(page);
		add_page_to_active_list(page);
	}
}

/* Locked wrapper around activate_page_nolock(). */
void activate_page(struct page * page)
{
	spin_lock(&pagemap_lru_lock);
	activate_page_nolock(page);
	spin_unlock(&pagemap_lru_lock);
}

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 *
 * New pages start out on the inactive list; a page already on
 * either LRU list is left where it is.
 * NOTE(review): the list-membership test runs before the lock is
 * taken — presumably callers guarantee no concurrent list
 * transition for a page being added; confirm against callers.
 */
void lru_cache_add(struct page * page)
{
	if (!PageActive(page) && !PageInactive(page)) {
		spin_lock(&pagemap_lru_lock);
		add_page_to_inactive_list(page);
		spin_unlock(&pagemap_lru_lock);
	}
}

/**
 * __lru_cache_del: remove a page from the page lists
 * @page: the page to remove
 *
 * This function is for when the caller already holds
 * the pagemap_lru_lock. A page on neither list is silently
 * ignored (debug printk left disabled below).
 */
void __lru_cache_del(struct page * page)
{
	if (PageActive(page)) {
		del_page_from_active_list(page);
	} else if (PageInactive(page)) {
		del_page_from_inactive_list(page);
	} else {
//		printk("VM: __lru_cache_del, found unknown page ?!\n");
	}
}

/**
 * lru_cache_del: remove a page from the page lists
 * @page: the page to remove
 *
 * Locked wrapper around __lru_cache_del().
 */
void lru_cache_del(struct page * page)
{
	spin_lock(&pagemap_lru_lock);
	__lru_cache_del(page);
	spin_unlock(&pagemap_lru_lock);
}

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	/*
	 * Physical memory in megabytes: num_physpages pages of
	 * 2^PAGE_SHIFT bytes each, hence the (20 - PAGE_SHIFT) shift.
	 */
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system means that we
	 * _really_ don't want to cluster much more
	 */
}