/*
 *  include/asm-s390/page.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <asm/setup.h>
#include <asm/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE-1))

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

static inline void clear_page(void *page)
{
        register_pair rp;

        rp.subreg.even = (unsigned long) page;
        rp.subreg.odd = (unsigned long) 4096;
        /* With the source length in gpr 1 cleared, MVCL pads the
         * whole destination page with zero bytes. */
        asm volatile ("   slr  1,1\n"
                      "   mvcl %0,0"
                      : "+&a" (rp) : : "memory", "cc", "1" );
}

static inline void copy_page(void *to, void *from)
{
        if (MACHINE_HAS_MVPG)
                /* MVPG copies a whole 4KB page in one instruction;
                 * gpr 0 is cleared first as it holds the control bits. */
                asm volatile ("   sr   0,0\n"
                              "   mvpg %0,%1"
                              : : "a" ((void *)(to)), "a" ((void *)(from))
                              : "memory", "cc", "0" );
        else
                /* Fall back to sixteen 256-byte MVC moves (256 bytes is
                 * the maximum MVC length). */
                asm volatile ("   mvc  0(256,%0),0(%1)\n"
                              "   mvc  256(256,%0),256(%1)\n"
                              "   mvc  512(256,%0),512(%1)\n"
                              "   mvc  768(256,%0),768(%1)\n"
                              "   mvc  1024(256,%0),1024(%1)\n"
                              "   mvc  1280(256,%0),1280(%1)\n"
                              "   mvc  1536(256,%0),1536(%1)\n"
                              "   mvc  1792(256,%0),1792(%1)\n"
                              "   mvc  2048(256,%0),2048(%1)\n"
                              "   mvc  2304(256,%0),2304(%1)\n"
                              "   mvc  2560(256,%0),2560(%1)\n"
                              "   mvc  2816(256,%0),2816(%1)\n"
                              "   mvc  3072(256,%0),3072(%1)\n"
                              "   mvc  3328(256,%0),3328(%1)\n"
                              "   mvc  3584(256,%0),3584(%1)\n"
                              "   mvc  3840(256,%0),3840(%1)\n"
                              : : "a"((void *)(to)),"a"((void *)(from))
                              : "memory" );
}

#define clear_user_page(page, vaddr, pg)        clear_page(page)
#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)

#define BUG() do { \
        printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
        __asm__ __volatile__(".word 0x0000"); \
} while (0)

#define PAGE_BUG(page) do { \
        BUG(); \
} while (0)

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
        int order;

        size = (size-1) >> (PAGE_SHIFT-1);
        order = -1;
        do {
                size >>= 1;
                order++;
        } while (size);
        return order;
}
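/*
 * Illustrative note, not part of the original header: get_order()
 * above returns the smallest 'order' such that (PAGE_SIZE << order)
 * covers 'size' bytes, i.e. the power-of-two page count the buddy
 * allocator works with. Some worked values for the 4KB pages
 * defined by PAGE_SHIFT above:
 *
 *      get_order(1)     == 0   (one 4KB page)
 *      get_order(4096)  == 0
 *      get_order(4097)  == 1   (two pages)
 *      get_order(8192)  == 1
 *      get_order(16384) == 2   (four pages)
 */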
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct {
        unsigned long pgd0;
        unsigned long pgd1;
        unsigned long pgd2;
        unsigned long pgd3;
} pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)      ((x).pte)
#define pmd_val(x)      ((x).pmd)
#define pgd_val(x)      ((x).pgd0)
#define pgprot_val(x)   ((x).pgprot)

#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)        (((addr)+PAGE_SIZE-1)&PAGE_MASK)

#define __PAGE_OFFSET           0x0UL
#define PAGE_OFFSET             0x0UL
#define __pa(x)                 (unsigned long)(x)
#define __va(x)                 (void *)(unsigned long)(x)
#define pfn_to_page(pfn)        (mem_map + (pfn))
#define page_to_pfn(page)       ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define pfn_valid(pfn)          ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */

#endif /* _S390_PAGE_H */
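/*
 * Illustrative sketch, not part of the original header: because
 * PAGE_OFFSET is 0 here, __pa() and __va() are identity casts, and
 * the macros above compose as one would expect. Assuming 'kaddr' is
 * a valid kernel virtual address:
 *
 *      unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;
 *      struct page *pg   = pfn_to_page(pfn);    (equals mem_map + pfn)
 *      void *round_trip  = __va(page_to_pfn(pg) << PAGE_SHIFT);
 *
 * PAGE_ALIGN() rounds up to the next page boundary, e.g.
 * PAGE_ALIGN(0x1001) == 0x2000 while PAGE_ALIGN(0x1000) stays 0x1000.
 */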