/*
 *  arch/arm/include/asm/pgalloc.h
 *
 *  Copyright (C) 2000-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGALLOC_H
#define _ASMARM_PGALLOC_H

#include <asm/domain.h>
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define check_pgt_cache()		do { } while (0)

#ifdef CONFIG_MMU

#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

/*
 * Since we have only two-level page tables, these are trivial
 */
#define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd)		do { } while (0)
#define pgd_populate(mm,pmd,pte)	BUG()

extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);

#define pgd_alloc(mm)			get_pgd_slow(mm)
#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)

/*
 * Allocate one PTE table.
 *
 * This actually allocates two hardware PTE tables, but we wrap this up
 * into one table thus:
 *
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (pte) {
		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
		pte += PTRS_PER_PTE;
	}

	return pte;
}

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	if (pte) {
		void *page = page_address(pte);
		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
		pgtable_page_ctor(pte);
	}

	return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	if (pte) {
		pte -= PTRS_PER_PTE;
		free_page((unsigned long)pte);
	}
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
{
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_MMU */

#endif
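
For readers tracing the "h/w pt / Linux pt" layout comment above, the following stand-alone user-space sketch works through the offsets that pte_alloc_one_kernel(), pmd_populate_kernel() and __pmd_populate() rely on. It is not part of this header; the values PTE_SIZE, HW_PTRS and PTRS_PER_PTE below are assumptions (4-byte PTE entries, 256-entry hardware tables, PTRS_PER_PTE == 512) chosen to match the arithmetic this file uses.

/*
 * Illustrative sketch only, not kernel code: mirrors the offset
 * arithmetic implied by the layout comment in
 * arch/arm/include/asm/pgalloc.h, under the assumed values noted above.
 */
#include <stdio.h>

#define PTE_SIZE      4UL   /* assumed sizeof(pte_t) == sizeof(void *) on ARM32 */
#define HW_PTRS       256UL /* assumed entries per hardware PTE table */
#define PTRS_PER_PTE  512UL /* assumed Linux view spanning both hardware tables */

int main(void)
{
	unsigned long page = 0;                                  /* start of the allocated page */
	unsigned long hw_pt0 = page;                             /* hardware PTE table 0 */
	unsigned long hw_pt1 = page + HW_PTRS * PTE_SIZE;        /* hardware PTE table 1 */
	unsigned long linux_pt = page + PTRS_PER_PTE * PTE_SIZE; /* pointer handed back to generic code */

	printf("h/w pt 0 at +%lu, h/w pt 1 at +%lu, Linux pt 0 at +%lu\n",
	       hw_pt0, hw_pt1, linux_pt);

	/*
	 * pmd_populate_kernel() steps back from the Linux tables to the
	 * hardware tables; __pmd_populate() then writes two PMD entries
	 * spaced 256 * sizeof(pte_t) (1 KiB) apart.
	 */
	unsigned long hw_base = linux_pt - PTRS_PER_PTE * PTE_SIZE;
	printf("pmd[0] -> +%lu, pmd[1] -> +%lu\n",
	       hw_base, hw_base + HW_PTRS * PTE_SIZE);
	return 0;
}

Run as a normal C program, this prints offsets +0, +1024 and +2048: the two hardware tables occupy the first 2 KiB of the page, the pointer returned by pte_alloc_one_kernel() starts at the Linux tables at +2048, and the two PMD entries point back at the hardware tables at +0 and +1024.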