#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */
extern struct kmem_cache *pgtable_cache;

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache,
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache, pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	pgtable_page_ctor(page);
	return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM, PMD, PTE_PAGE)		\
	pmd_populate_kernel(MM, PMD, page_address(PTE_PAGE))
#define pmd_pgtable(pmd)			pmd_page(pmd)

#define check_pgt_cache()	do { } while (0)

/* PTE tables come from whole pages, PGD/PMD tables from pgtable_cache,
 * so freeing must dispatch on which kind of table this is.
 */
static inline void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		free_page((unsigned long)table);
	else
		kmem_cache_free(pgtable_cache, table);
}

#ifdef CONFIG_SMP

struct mmu_gather;
extern void tlb_remove_table(struct mmu_gather *, void *);

/* Defer the free via tlb_remove_table(), encoding the "is_page"
 * flag in the low bit of the (page-aligned) table pointer.
 */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	unsigned long pgf = (unsigned long)table;

	if (is_page)
		pgf |= 0x1UL;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~0x1UL);
	bool is_page = false;

	if ((unsigned long)_table & 0x1UL)
		is_page = true;
	pgtable_free(table, is_page);
}
#else /* CONFIG_SMP */
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
{
	pgtable_free(table, is_page);
}
#endif /* !CONFIG_SMP */

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,
				  unsigned long address)
{
	pgtable_page_dtor(ptepage);
	pgtable_free_tlb(tlb, page_address(ptepage), true);
}

#define __pmd_free_tlb(tlb, pmd, addr)		\
	pgtable_free_tlb(tlb, pmd, false)

#endif /* _SPARC64_PGALLOC_H */