/*
 * BK Id: SCCS/s.hashtable.S 1.18 08/15/01 22:43:07 paulus
 */
/*
 *  arch/ppc/kernel/hashtable.S
 *
 *  $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include "../kernel/ppc_asm.h"
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>

#ifdef CONFIG_SMP
	.comm	hash_table_lock,4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r23 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r2 - r7, ctr, lr.
 */
	.text
	.globl	hash_page
hash_page:
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1			/* make sure it's in 32-bit mode */
	MTMSRD(r0)
	isync
#endif
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r2,r7,hash_table_lock@h
	ori	r2,r2,hash_table_lock@l
	mfspr	r5,SPRG3
	lwz	r0,PROCESSOR-THREAD(r5)
	oris	r0,r0,0x0fff
	b	10f
11:	lwz	r6,0(r2)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r2
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r2
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r2,SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r2)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r23,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r5,0(r5)		/* get pmd entry */
	rlwinm.	r5,r5,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch to
	   the address following the rfi. */
	beqlr-
#endif
	add	r2,r5,r7		/* convert to phys addr */
	rlwimi	r2,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

/*
 * Update the linux PTE atomically.  We do the lwarx up-front
 * because almost always, there won't be a permission violation
 * and there won't already be an HPTE, and thus we will have
 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
 */
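/*
 * For reference, the retry loop below behaves roughly like this C
 * sketch (illustrative only; ptep/old/new and store_conditional()
 * are invented names, not kernel APIs):
 *
 *	do {
 *		old = *ptep;				// lwarx
 *		if (access & ~old)			// a needed permission bit is clear
 *			return;				// bail out, let the C fault path decide
 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE;
 *		if (access & _PAGE_RW)
 *			new |= _PAGE_DIRTY;		// a write access also sets dirty
 *	} while (!store_conditional(ptep, new));	// stwcx., retry if we lost the race
 */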
retry:
	lwarx	r6,0,r2			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r2			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mr	r2,r8			/* we have saved r2 but not r8 */
	bl	create_hpte		/* add the hash table entry */
	mr	r8,r2

/*
 * htab_reloads counts the number of times we have to fault an
 * HPTE into the hash table.  This should only happen after a
 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
 * Where a page is faulted into a process's address space,
 * update_mmu_cache gets called to put the HPTE into the hash table
 * and those are counted as preloads rather than reloads.
 */
	addis	r2,r7,htab_reloads@ha
	lwz	r3,htab_reloads@l(r2)
	addi	r3,r3,1
	stw	r3,htab_reloads@l(r2)

#ifdef CONFIG_SMP
	eieio
	addis	r2,r7,hash_table_lock@ha
	li	r0,0
	stw	r0,hash_table_lock@l(r2)
#endif

	/* Return from the exception */
	lwz	r3,_CCR(r21)
	lwz	r4,_LINK(r21)
	lwz	r5,_CTR(r21)
	mtcrf	0xff,r3
	mtlr	r4
	mtctr	r5
	lwz	r0,GPR0(r21)
	lwz	r1,GPR1(r21)
	lwz	r2,GPR2(r21)
	lwz	r3,GPR3(r21)
	lwz	r4,GPR4(r21)
	lwz	r5,GPR5(r21)
	lwz	r6,GPR6(r21)
	lwz	r7,GPR7(r21)
	/* we haven't used xer */
	mtspr	SRR1,r23
	mtspr	SRR0,r22
	lwz	r20,GPR20(r21)
	lwz	r22,GPR22(r21)
	lwz	r23,GPR23(r21)
	lwz	r21,GPR21(r21)
	RFI

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r2,r7,hash_table_lock@ha
	li	r0,0
	stw	r0,hash_table_lock@l(r2)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, pte_t pte)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	mtmsr	r0
	SYNC

#ifdef CONFIG_SMP
	lis	r9,hash_table_lock@h
	ori	r9,r9,hash_table_lock@l
	lwz	r8,PROCESSOR(r2)
	oris	r8,r8,10
10:	lwarx	r7,0,r9
	cmpi	0,r7,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r7,0(r9)
	cmpi	0,r7,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r7,r5
1:	lwarx	r6,0,r7
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_ACCESSED|_PAGE_HASHPTE
	stwcx.	r5,0,r7
	bne-	1b

	li	r7,0			/* no address offset needed */
	bl	create_hpte

	lis	r8,htab_preloads@ha
	lwz	r3,htab_preloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_preloads@l(r8)

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear hash_table_lock */
#endif

	lwz	r0,4(r1)
	mtlr	r0

	/* reenable interrupts */
	mtmsr	r10
	SYNC
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the hash_table_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
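/*
 * For reference, the patched hash computation used by create_hpte and
 * flush_hash_page amounts to roughly this C sketch (illustrative only;
 * the variable names are invented, and Hash_base/Hash_bits/Hash_msk are
 * the example values above, patched at boot to match the real table):
 *
 *	hash      = (vsid ^ ((va >> 12) & 0xffff)) & ((1 << Hash_bits) - 1);
 *	primary   = Hash_base + hash * PTEG_SIZE;	// a group of 8 PTEs
 *	secondary = primary ^ Hash_msk;			// i.e. the complemented hash
 */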
#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#else
/* defines for the PTE format for 64-bit PPCs */
#define PTE_SIZE	16
#define PTEG_SIZE	128
#define LG_PTEG_SIZE	7
#define LDPTEu		ldu
#define STPTE		std
#define CMPPTE		cmpd
#define PTE_H		2
#define PTE_V		1
#define TST_V(r)	andi. r,r,PTE_V
#define SET_V(r)	ori r,r,PTE_V
#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
#endif /* CONFIG_PPC64BRIDGE */

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
#ifdef CONFIG_SMP
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
#endif
#ifdef CONFIG_POWER4
	/*
	 * XXX hack hack hack - translate 32-bit "physical" addresses
	 * in the linux page tables to 42-bit real addresses in such
	 * a fashion that we can get at the I/O we need to access.
	 *	-- paulus
	 */
	cmpwi	r8,0
	rlwinm	r0,r8,16,16,30
	bge	57f
	cmplwi	r0,0xfe00
	li	r0,0x3fd
	bne	56f
	li	r0,0x3ff
56:	sldi	r0,r0,32
	or	r8,r8,r0
57:
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r5,r3,12		/* shift vsid into position */
	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
	.globl	hash_page_patch_A
hash_page_patch_A:
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */
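/*
 * Each of the searches below walks the 8 PTEs of one group.  In rough C
 * (an illustrative sketch only; struct and variable names are invented):
 *
 *	for (i = 0; i < 8; i++)
 *		if (pteg[i].hi == new_hi)	// match search: V, VSID, H, API equal
 *			goto found_slot;
 *	for (i = 0; i < 8; i++)
 *		if (!(pteg[i].hi & PTE_V))	// empty-slot search: valid bit clear
 *			goto found_empty;
 */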
/*
 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
 * if it is clear, meaning that the HPTE isn't there already...
 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
	.globl	hash_page_patch_B
hash_page_patch_B:
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
	.globl	hash_page_patch_C
hash_page_patch_C:
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
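	/*
	 * In rough C, the round-robin victim choice below is (sketch only,
	 * names invented; on POWER4 slot 0 is additionally never evicted):
	 *
	 *	next_slot = (next_slot + PTE_SIZE) & (7 * PTE_SIZE);
	 *	victim    = primary_pteg + next_slot;
	 */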
	addis	r4,r7,next_slot@ha
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE
	andi.	r6,r6,7*PTE_SIZE
#ifdef CONFIG_POWER4
	/*
	 * Since we don't have BATs on POWER4, we rely on always having
	 * PTEs in the hash table to map the hash table and the code
	 * that manipulates it in virtual mode, namely flush_hash_page and
	 * flush_hash_segments.  Otherwise we can get a DSI inside those
	 * routines which leads to a deadlock on the hash_table_lock on
	 * SMP machines.  We avoid this by never overwriting the first
	 * PTE of each PTEG if it is already valid.
	 *	-- paulus.
	 */
	bne	102f
	li	r6,PTE_SIZE
102:
#endif /* CONFIG_POWER4 */
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6

	/* update counter of evicted pages */
	addis	r6,r7,htab_evicts@ha
	lwz	r3,htab_evicts@l(r6)
	addi	r3,r3,1
	stw	r3,htab_evicts@l(r6)

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr

	.comm	next_slot,4
	.comm	primary_pteg_full,4
	.comm	htab_hash_searches,4

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_page(unsigned context, unsigned long va, pte_t *ptep)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_page)
	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists. -- paulus
	 */
	mfmsr	r10
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	SYNC
	mtmsr	r0
	SYNC

#ifdef CONFIG_SMP
	lis	r9,hash_table_lock@h
	ori	r9,r9,hash_table_lock@l
	lwz	r8,PROCESSOR(r2)
	oris	r8,r8,9
10:	lwarx	r7,0,r9
	cmpi	0,r7,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r7,0(r9)
	cmpi	0,r7,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done.  If not, clear it (atomically)
	 * and proceed. -- paulus.
	 */
1:	lwarx	r6,0,r5			/* fetch the pte */
	andi.	r0,r6,_PAGE_HASHPTE
	beq	9f			/* done if HASHPTE is already clear */
	rlwinm	r6,r6,0,31,29		/* clear HASHPTE bit */
	stwcx.	r6,0,r5			/* update the pte */
	bne-	1b

	/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r5,r3,12		/* shift vsid into position */
	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
	.globl	flush_hash_patch_A
flush_hash_patch_A:
	lis	r8,Hash_base@h		/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r8		/* make primary hash */
	li	r8,8			/* PTEs/group */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r8
	addi	r7,r3,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r7)		/* get next PTE */
	CMPPTE	0,r0,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
	.globl	flush_hash_patch_B
flush_hash_patch_B:
	xoris	r7,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r7,r7,(-PTEG_SIZE & 0xffff)
	addi	r7,r7,-PTE_SIZE
	mtctr	r8
2:	LDPTEu	r0,PTE_SIZE(r7)
	CMPPTE	0,r0,r5
	bdnzf	2,2b
	bne-	4f			/* should never fail to find it */

3:	li	r0,0
	STPTE	r0,0(r7)		/* invalidate entry */

4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

#ifdef CONFIG_SMP
	TLBSYNC
9:	li	r0,0
	stw	r0,0(r9)		/* clear hash_table_lock */
#endif

9:	mtmsr	r10
	SYNC
	blr