/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001 by Ralf Baechle et alii
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <asm/addrspace.h>
#include <asm/page.h>

#ifndef _LANGUAGE_ASSEMBLY

#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/cachectl.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_page_to_ram(page) write back kernel page to ram
 */
extern void (*_flush_cache_mm)(struct mm_struct *mm);
extern void (*_flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
extern void (*_flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
extern void (*_flush_page_to_ram)(struct page *page);

#define flush_cache_all()		do { } while (0)
#define flush_dcache_page(page)		do { } while (0)

#ifndef CONFIG_CPU_R10000
#define flush_cache_mm(mm)		_flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) _flush_cache_range(vma, start, end)
#define flush_cache_page(vma, page)	_flush_cache_page(vma, page)
#define flush_page_to_ram(page)		_flush_page_to_ram(page)

#define flush_icache_range(start, end)	_flush_cache_l1()
#define flush_icache_user_range(vma, page, addr, len) \
	flush_icache_page((vma), (page))

#define flush_icache_page(vma, page)					\
do {									\
	unsigned long addr;						\
	addr = (unsigned long) page_address(page);			\
	_flush_cache_page(vma, addr);					\
} while (0)

#else /* CONFIG_CPU_R10000 */

/*
 * Since the r10k handles VCEs in hardware, most of the flush cache
 * routines are not needed.  Only the icache on a processor is not
 * coherent with the dcache of the _same_ processor, so we must flush
 * the icache so that it does not contain stale contents of physical
 * memory.  No flushes are needed for DMA coherency, since the O200s
 * are I/O coherent.  The only place where we might be overoptimizing
 * out icache flushes is mprotect (when PROT_EXEC is added).
 */
extern void andes_flush_icache_page(unsigned long);

#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, page)	do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)
#define flush_icache_range(start, end)	_flush_cache_l1()
#define flush_icache_user_range(vma, page, addr, len) \
	flush_icache_page((vma), (page))
#define flush_icache_page(vma, page)					\
do {									\
	if ((vma)->vm_flags & VM_EXEC)					\
		andes_flush_icache_page(page_address(page));		\
} while (0)

#endif /* CONFIG_CPU_R10000 */

/*
 * The following cache flushing routines are MIPS specific.
 * flush_cache_l2 is needed only during initialization.
 */
extern void (*_flush_cache_sigtramp)(unsigned long addr);
extern void (*_flush_cache_l2)(void);
extern void (*_flush_cache_l1)(void);

#define flush_cache_sigtramp(addr)	_flush_cache_sigtramp(addr)
#define flush_cache_l2()		_flush_cache_l2()
#define flush_cache_l1()		_flush_cache_l1()

/*
 * Each address space has two 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8-byte pointers to pmd tables.  Each pmd table is a
 * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8-byte pointers to
 * page tables.  Each page table is a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8-byte ptes.  Each pgd entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, and each pte is initialized to 0.  When memory is
 * low and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher layer code, so that
 * the failure is recognized later on.  Linux does not seem to handle
 * these failures very well, though.  The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Vmalloc handling: vmalloc uses swapper_pg_dir[0] (returned by
 * pgd_offset_k), which is initialized to point to kpmdtbl.  kpmdtbl is
 * the only single-page pmd in the system.  kpmdtbl entries point into
 * the kptbl[] array.  We reserve 1 << KPTBL_PAGE_ORDER pages to hold the
 * vmalloc range translations, which the fault handler looks at.
 */
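/*
 * Worked example of the geometry above (a sketch, assuming the usual
 * PAGE_SHIFT of 12, i.e. 4K pages):
 *
 *	pgd:  2 pages = 8K = 1024 entries of 8 bytes (PTRS_PER_PGD)
 *	pmd:  2 pages = 8K = 1024 entries of 8 bytes (PTRS_PER_PMD)
 *	pte:  1 page  = 4K =  512 entries of 8 bytes (PTRS_PER_PTE)
 *
 * Total virtual space mappable through one page directory:
 *
 *	1024 * 1024 * 512 * 4K = 2^10 * 2^10 * 2^9 * 2^12 = 2^41 = 2TB
 */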
#endif /* !defined (_LANGUAGE_ASSEMBLY) */

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + 1 - 3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/*
 * Entries per page directory level: we use a two-level setup, so we
 * don't really have any PMD directory physically.
 */
#define PTRS_PER_PGD	1024
#define PTRS_PER_PMD	1024
#define PTRS_PER_PTE	512
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define KPTBL_PAGE_ORDER	1

#define VMALLOC_START		XKSEG
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END \
	(VMALLOC_START + ((1 << KPTBL_PAGE_ORDER) * PTRS_PER_PTE * PAGE_SIZE))

/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left.  That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "you may read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this gives us a mod bit via the
 * hardware dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the cpu can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
#define _PAGE_PRESENT		(1 << 0)  /* implemented in software */
#define _PAGE_READ		(1 << 1)  /* implemented in software */
#define _PAGE_WRITE		(1 << 2)  /* implemented in software */
#define _PAGE_ACCESSED		(1 << 3)  /* implemented in software */
#define _PAGE_MODIFIED		(1 << 4)  /* implemented in software */
#define _PAGE_R4KBUG		(1 << 5)  /* workaround for r4k bug */
#define _PAGE_GLOBAL		(1 << 6)
#define _PAGE_VALID		(1 << 7)
#define _PAGE_SILENT_READ	(1 << 7)  /* synonym for _PAGE_VALID */
#define _PAGE_DIRTY		(1 << 8)  /* the MIPS dirty bit */
#define _PAGE_SILENT_WRITE	(1 << 8)  /* synonym for _PAGE_DIRTY */

#define _CACHE_CACHABLE_NO_WA		(0 << 9)  /* R4600 only */
#define _CACHE_CACHABLE_WA		(1 << 9)  /* R4600 only */
#define _CACHE_UNCACHED			(2 << 9)  /* R4[0246]00 */
#define _CACHE_CACHABLE_NONCOHERENT	(3 << 9)  /* R4[0246]00 */
#define _CACHE_CACHABLE_CE		(4 << 9)  /* R4[04]00 only */
#define _CACHE_CACHABLE_COW		(5 << 9)  /* R4[04]00 only */
#define _CACHE_CACHABLE_CUW		(6 << 9)  /* R4[04]00 only */
#define _CACHE_CACHABLE_ACCELERATED	(7 << 9)  /* R10000 only */
#define _CACHE_MASK			(7 << 9)
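/*
 * The resulting pte layout, read straight off the definitions above
 * (a sketch, assuming 4K pages so all flags fit below PAGE_SHIFT):
 *
 *	bits 63..12	physical address of the page
 *	bits 11..9	cache attribute (_CACHE_MASK)
 *	bit  8		hardware dirty / silent-write
 *	bit  7		hardware valid / silent-read
 *	bit  6		global
 *	bits 5..0	software bits: r4k bug workaround, modified,
 *			accessed, write, read, present
 */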
#define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)

#ifdef CONFIG_MIPS_UNCACHED
#define PAGE_CACHABLE_DEFAULT	_CACHE_UNCACHED
#else /* ! CONFIG_MIPS_UNCACHED */
#ifdef CONFIG_SGI_IP22
#define PAGE_CACHABLE_DEFAULT	_CACHE_CACHABLE_NONCOHERENT
#else /* ! CONFIG_SGI_IP22 */
#define PAGE_CACHABLE_DEFAULT	_CACHE_CACHABLE_COW
#endif /* CONFIG_SGI_IP22 */
#endif /* CONFIG_MIPS_UNCACHED */

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | PAGE_CACHABLE_DEFAULT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			PAGE_CACHABLE_DEFAULT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			_CACHE_UNCACHED)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_CACHE_UNCACHED)

/*
 * MIPS can't do page protection for execute and considers it the same
 * as read.  Also, write permission implies read permission.  This is
 * the closest we can get by reasonable means.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

#if !defined (_LANGUAGE_ASSEMBLY)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
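/*
 * Example of how the maps above are consumed (illustrative only): for
 * mmap(..., PROT_READ | PROT_WRITE, MAP_PRIVATE, ...) the generic mm
 * code indexes the private map with the rwx bits and gets __P011 ==
 * PAGE_COPY: the mapping starts out read-only so that the first write
 * faults and triggers copy-on-write.  The same bits for a MAP_SHARED
 * mapping index __S011 and yield a directly writable PAGE_SHARED.
 */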
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8 * sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void *) - 1))

/* sizeof(void *) == (1 << SIZEOF_PTR_LOG2) */
#define SIZEOF_PTR_LOG2	3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
	((unsigned long)(address) >> (PAGE_SHIFT - SIZEOF_PTR_LOG2) & PTR_MASK & ~PAGE_MASK)

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline unsigned long pmd_page(pmd_t pmd)
{
	return pmd_val(pmd);
}

extern inline unsigned long pgd_page(pgd_t pgd)
{
	return pgd_val(pgd);
}

extern inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

extern inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	pgd_val(*pgdp) = (((unsigned long) pmdp) & PAGE_MASK);
}

extern inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

extern inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Certain architectures need to do special things when ptes
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
extern inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

extern inline void pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
extern inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

extern inline int pmd_bad(pmd_t pmd)
{
	return pmd_val(pmd) & ~PAGE_MASK;
}

extern inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

extern inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

/*
 * Empty pgd entries point to the invalid_pmd_table.
 */
extern inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long) invalid_pmd_table;
}

extern inline int pgd_bad(pgd_t pgd)
{
	return pgd_val(pgd) & ~PAGE_MASK;
}

extern inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long) invalid_pmd_table;
}

extern inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
}

#ifndef CONFIG_DISCONTIGMEM
#define pte_page(x)	(mem_map + (unsigned long)(pte_val(x) >> PAGE_SHIFT))
#else
#define pte_page(x) \
	(NODE_MEM_MAP(PHYSADDR_TO_NID(pte_val(x))) + \
	 PLAT_NODE_DATA_LOCALNR(pte_val(x), PHYSADDR_TO_NID(pte_val(x))))
#endif
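/*
 * Illustration of the invalid-table convention above (a sketch, not
 * real kernel code): a cleared pmd still points at a real, all-invalid
 * page table, so a walker can always dereference one level further
 * without a NULL check and then fault cleanly on the invalid pte.
 */
#if 0	/* example only */
	pmd_clear(pmdp);
	/* pmd_none() is now true and pmd_present() false ... */
	BUG_ON(!pmd_none(*pmdp) || pmd_present(*pmdp));
	/* ... yet pmd_page() still yields a pointer that is safe to walk. */
	ptep = (pte_t *) pmd_page(*pmdp);	/* == invalid_pte_table */
#endif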
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
extern inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

extern inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

extern inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_MODIFIED;
}

extern inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

extern inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

extern inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_READ | _PAGE_SILENT_READ);
	return pte;
}

extern inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

extern inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

extern inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_READ;
	if (pte_val(pte) & _PAGE_ACCESSED)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

extern inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define PAGE_TO_PA(page)	(((page) - mem_map) << PAGE_SHIFT)
#else
#define PAGE_TO_PA(page) \
	((((page) - (page)->zone->zone_mem_map) + \
	  (page)->zone->zone_start_pfn) << PAGE_SHIFT)
#endif

#define mk_pte(page, pgprot)						\
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((unsigned long)(PAGE_TO_PA(page))) |		\
	                 pgprot_val(pgprot);				\
									\
	__pte;								\
})

extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pte(physpage | pgprot_val(pgprot));
}

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define page_pte(page) page_pte_prot(page, __pgprot(0))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, 0)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* to find an entry in a page-table-directory */
extern inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) +
	       ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) +
	       ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
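/*
 * A minimal sketch of a software page-table walk using the helpers
 * above (example only; no locking or error handling shown):
 */
#if 0	/* example only */
static pte_t *walk_page_table(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;		/* still points at invalid_pmd_table */
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;		/* still points at invalid_pte_table */
	return pte_offset(pmd, address);
}
#endif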
/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

extern void (*update_mmu_cache)(struct vm_area_struct *vma,
	unsigned long address, pte_t pte);

/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 32) | (offset << 40);

	return pte;
}

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, offset) \
	((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

/* TLB operations. */
extern inline void tlb_probe(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbp\n\t"
		".set reorder");
}

extern inline void tlb_read(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbr\n\t"
		".set reorder");
}

extern inline void tlb_write_indexed(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwi\n\t"
		".set reorder");
}

extern inline void tlb_write_random(void)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"tlbwr\n\t"
		".set reorder");
}

/* Dealing with various CP0 mmu/cache related registers. */

/* CP0_PAGEMASK register */
extern inline unsigned long get_pagemask(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $5\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_pagemask(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $5\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_ENTRYLO0 and CP0_ENTRYLO1 registers */
extern inline unsigned long get_entrylo0(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $2\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_entrylo0(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $2\n\t"
		".set reorder"
		: : "Jr" (val));
}

extern inline unsigned long get_entrylo1(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $3\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_entrylo1(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $3\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_ENTRYHI register */
extern inline unsigned long get_entryhi(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $10\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_entryhi(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $10\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_INDEX register */
extern inline unsigned int get_index(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $0\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_index(unsigned int val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $0\n\t"
		".set reorder\n\t"
		: : "Jr" (val));
}

/* CP0_WIRED register */
extern inline unsigned long get_wired(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $6\n\t"
		".set reorder\n\t"
		: "=r" (val));

	return val;
}

extern inline void set_wired(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $6\n\t"
		".set reorder"
		: : "Jr" (val));
}
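/*
 * Sketch of how the TLB ops and CP0 accessors combine to read one TLB
 * entry by index (example only; the caller must disable interrupts and
 * save/restore EntryHi around this, since EntryHi carries the ASID):
 */
#if 0	/* example only */
static void dump_tlb_entry(unsigned int idx)
{
	set_index(idx);
	tlb_read();		/* tlbr: loads EntryHi/EntryLo0/EntryLo1 */
	printk("TLB[%u]: hi=%016lx lo0=%016lx lo1=%016lx\n",
	       idx, get_entryhi(), get_entrylo0(), get_entrylo1());
}
#endif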
extern inline unsigned long get_info(void)
{
	unsigned long val;

	__asm__(
		".set push\n\t"
		".set reorder\n\t"
		"mfc0 %0, $7\n\t"
		".set pop"
		: "=r" (val));

	return val;
}

/* CP0_TAGLO and CP0_TAGHI registers */
extern inline unsigned long get_taglo(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $28\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_taglo(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $28\n\t"
		".set reorder"
		: : "Jr" (val));
}

extern inline unsigned long get_taghi(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"mfc0 %0, $29\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_taghi(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"mtc0 %z0, $29\n\t"
		".set reorder"
		: : "Jr" (val));
}

/* CP0_CONTEXT register */
extern inline unsigned long get_context(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmfc0 %0, $4\n\t"
		".set reorder"
		: "=r" (val));

	return val;
}

extern inline void set_context(unsigned long val)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"dmtc0 %z0, $4\n\t"
		".set reorder"
		: : "Jr" (val));
}

#include <asm-generic/pgtable.h>

typedef pte_t *pte_addr_t;

#endif /* !defined (_LANGUAGE_ASSEMBLY) */

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */