/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright 2007 IBM Corp.
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;
	pte_t shadow_pteval = pteval;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/*
		 * No hardware large-page support: point the segment entry
		 * at the page table pre-allocated by arch_prepare_hugepage()
		 * and stored in page[1].index, preserving the invalid and
		 * read-only bits of the original pte value.
		 */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		if (mm->context.noexec) {
			/* Shadow page table follows the primary one. */
			pteptr += PTRS_PER_PTE;
			pte_val(shadow_pteval) =
					(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		}
	}

	pmd_val(*pmdp) = pte_val(pteval);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		pmd_val(*pmdp) = pte_val(shadow_pteval);
	}
}

int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	/*
	 * Emulate a huge page: allocate a full page table mapping the
	 * huge page contiguously and remember it in page[1].index.
	 */
	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	/* Free the page table allocated by arch_prepare_hugepage(). */
	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	pte_free(&init_mm, ptep);
	page[1].index = 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}