/*
 * $Id: idle.c,v 1.61.2.2 1999/06/15 16:54:14 cort Exp $
 *
 * Idle daemon for PowerPC.  Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>

void zero_paged(void);
void power_save(void);
void inline htab_reclaim(void);

unsigned long htab_reclaim_on = 0;
unsigned long zero_paged_on = 0;
unsigned long powersave_nap = 0;

unsigned long *zero_cache;     /* head of linked list of pre-zero'd pages */
unsigned long zero_sz;         /* # of currently pre-zero'd pages */
unsigned long zeropage_hits;   /* # of zero'd page requests that we've satisfied */
unsigned long zeropage_calls;  /* # of zero'd page requests that've been made */
unsigned long zerototal;       /* # of pages zero'd over time */

int idled(void *unused)
{
        /* endless loop with no priority at all */
        current->priority = 0;
        current->counter = -100;
        init_idle();
        for (;;)
        {
                __sti();

                check_pgt_cache();

                if ( !current->need_resched && zero_paged_on ) zero_paged();
                if ( !current->need_resched && htab_reclaim_on ) htab_reclaim();
                if ( !current->need_resched ) power_save();

#ifdef __SMP__
                if (current->need_resched)
#endif
                        schedule();
        }
        return 0;
}

#ifdef __SMP__
/*
 * SMP entry into the idle task - calls the same thing as the
 * non-SMP versions. -- Cort
 */
int cpu_idle(void *unused)
{
        idled(unused);
        return 0;
}
#endif /* __SMP__ */

/*
 * Syscall entry into the idle task. -- Cort
 */
asmlinkage int sys_idle(void)
{
        if(current->pid != 0)
                return -EPERM;

        idled(NULL);
        return 0; /* should never execute this but it makes gcc happy -- Cort */
}

/*
 * Mark 'zombie' pte's in the hash table as invalid.
 * This improves performance for the hash table reload code
 * a bit since we don't consider unused pages as valid.
 *  -- Cort
 */
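/*
 * The comparison in the scan below treats the upper bits of a hash
 * table entry's VSID (vsid >> 4) as the owning mm context: an entry
 * is only invalidated when no live task's mm->context matches it.
 */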
PTE *reclaim_ptr = 0;

void inline htab_reclaim(void)
{
#ifndef CONFIG_8xx
#if 0
        PTE *ptr, *start;
        static int dir = 1;
#endif
        struct task_struct *p;
        unsigned long valid = 0;
        extern PTE *Hash, *Hash_end;
        extern unsigned long Hash_size;

        /* if we don't have a htab */
        if ( Hash_size == 0 )
                return;
#if 0
        /* find a random place in the htab to start each time */
        start = &Hash[jiffies%(Hash_size/sizeof(PTE))];
        /* go a different direction each time */
        dir *= -1;
        for ( ptr = start;
              !current->need_resched && (ptr != Hash_end) && (ptr != Hash);
              ptr += dir)
        {
#else
        if ( !reclaim_ptr ) reclaim_ptr = Hash;
        while ( !current->need_resched )
        {
                reclaim_ptr++;
                if ( reclaim_ptr == Hash_end )
                        reclaim_ptr = Hash;
#endif
                if (!reclaim_ptr->v)
                        continue;
                valid = 0;
                for_each_task(p)
                {
                        if ( current->need_resched )
                                goto out;
                        /* if this vsid/context is in use */
                        if ( (reclaim_ptr->vsid >> 4) == p->mm->context )
                        {
                                valid = 1;
                                break;
                        }
                }
                if ( valid )
                        continue;
                /* this pte isn't used */
                reclaim_ptr->v = 0;
        }
out:
        if ( current->need_resched )
                printk("need_resched: %lx\n", current->need_resched);
#endif /* CONFIG_8xx */
}

/*
 * Returns a pre-zero'd page from the list, otherwise returns NULL.
 */
unsigned long get_zero_page_fast(void)
{
        unsigned long page = 0;

        atomic_inc((atomic_t *)&zero_cache_calls);
        if ( zero_quicklist )
        {
                /* atomically remove this page from the list */
                asm (   "101:lwarx  %1,0,%2\n"  /* reserve zero_cache */
                        "    lwz    %0,0(%1)\n" /* get next -- new zero_cache */
                        "    stwcx. %0,0,%2\n"  /* update zero_cache */
                        "    bne-   101b\n"     /* if lost reservation try again */
                        : "=&r" (zero_quicklist), "=&r" (page)
                        : "r" (&zero_quicklist)
                        : "cc" );
#ifdef __SMP__
                /* if another cpu beat us above this can happen -- Cort */
                if ( page == 0 )
                        return 0;
#endif /* __SMP__ */
                /* we can update zerocount after the fact since it is not
                 * used for anything but control of a loop which doesn't
                 * matter since it won't affect anything if it zeros one
                 * less page -- Cort
                 */
                atomic_inc((atomic_t *)&zero_cache_hits);
                atomic_dec((atomic_t *)&zero_cache_sz);

                /* zero out the pointer to next in the page */
                *(unsigned long *)page = 0;
                return page;
        }
        return 0;
}

/*
 * Experimental stuff to zero out pages in the idle task
 * to speed up get_free_pages().  Zeros out pages until
 * we've reached the limit of zero'd pages.  We handle
 * reschedule()'s in here so when we return we know we've
 * zero'd all we need to for now.
 */
int zero_cache_water[2] = { 25, 96 }; /* low and high water marks for zero cache */

void zero_paged(void)
{
        unsigned long pageptr = 0;      /* current page being zero'd */
        unsigned long bytecount = 0;
        pte_t *pte;

        if ( zero_cache_sz >= zero_cache_water[0] )
                return;
        while ( (zero_cache_sz < zero_cache_water[1]) && (!current->need_resched) )
        {
                /*
                 * Mark a page as reserved so we can mess with it.
                 * If we're interrupted we keep this page and our place in it
                 * since we validly hold it and it's reserved for us.
                 */
                pageptr = __get_free_pages(GFP_ATOMIC, 0);
                if ( !pageptr )
                        return;

                if ( current->need_resched )
                        schedule();

                /*
                 * Make the page no-cache so we don't blow our cache with 0's.
                 */
                pte = find_pte(init_task.mm, pageptr);
                if ( !pte )
                {
                        printk("pte NULL in zero_paged()\n");
                        return;
                }

                pte_uncache(*pte);
                flush_tlb_page(find_vma(init_task.mm,pageptr),pageptr);
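                /*
                 * The mapping is now cache-inhibited, so the stores in the
                 * zeroing loop below go straight to memory instead of
                 * displacing live data from the data cache.
                 */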
                /*
                 * Important here to not take time away from real processes.
                 */
                for ( bytecount = 0; bytecount < PAGE_SIZE ; bytecount += 4 )
                {
                        if ( current->need_resched )
                                schedule();
                        *(unsigned long *)(bytecount + pageptr) = 0;
                }

                /*
                 * If we finished zeroing out a page, add this page to
                 * the zero_cache atomically -- we can't use
                 * down/up since we can't sleep in idle.
                 * Disabling interrupts is also a bad idea since we would
                 * steal time away from real processes.
                 * We can also have several zero_paged's running
                 * on different processors so we can't interfere with them.
                 * So we update the list atomically without locking it.
                 * -- Cort
                 */

                /* turn cache on for this page */
                pte_cache(*pte);
                flush_tlb_page(find_vma(init_task.mm,pageptr),pageptr);

                /* atomically add this page to the list */
                asm (   "101:lwarx  %0,0,%1\n"  /* reserve zero_cache */
                        "    stw    %0,0(%2)\n" /* update *pageptr */
#ifdef __SMP__
                        "    sync\n"            /* let store settle */
#endif
                        "    mr     %0,%2\n"    /* update zero_cache in reg */
                        "    stwcx. %2,0,%1\n"  /* update zero_cache in mem */
                        "    bne-   101b\n"     /* if lost reservation try again */
                        : "=&r" (zero_quicklist)
                        : "r" (&zero_quicklist), "r" (pageptr)
                        : "cc" );
                /*
                 * This variable is used in the above loop and nowhere
                 * else so the worst that could happen is we would
                 * zero out one more or one less page than we want
                 * per processor on the machine.  This is because
                 * we could add our page to the list but not have
                 * zerocount updated yet when another processor
                 * reads it.  -- Cort
                 */
                atomic_inc((atomic_t *)&zero_cache_sz);
                atomic_inc((atomic_t *)&zero_cache_total);
        }
}

void power_save(void)
{
        unsigned long msr, hid0;

        /* only sleep on the 603-family/750 processors */
        switch (_get_PVR() >> 16) {
        case 3:                 /* 603 */
        case 6:                 /* 603e */
        case 7:                 /* 603ev */
        case 8:                 /* 750 */
                save_flags(msr);
                __cli();
                if (!current->need_resched) {
                        asm("mfspr %0,1008" : "=r" (hid0) :);
                        hid0 &= ~(HID0_NAP | HID0_SLEEP | HID0_DOZE);
                        hid0 |= (powersave_nap? HID0_NAP: HID0_DOZE) | HID0_DPM;
                        asm("mtspr 1008,%0" : : "r" (hid0));

                        /* set the POW bit in the MSR, and enable interrupts
                         * so we wake up sometime! */
                        _nmask_and_or_msr(0, MSR_POW | MSR_EE);

                        /* Disable interrupts again so restore_flags will
                         * work. */
                        _nmask_and_or_msr(MSR_EE, 0);
                }
                restore_flags(msr);
        default:
                return;
        }
}