/* $Id: mmu_context.h,v 1.40 1999/09/10 10:44:37 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

#ifndef __ASSEMBLY__

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

#define CTX_VERSION_SHIFT	(PAGE_SHIFT - 3)
#define CTX_VERSION_MASK	((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION	((1UL << CTX_VERSION_SHIFT) + 1UL)
#define CTX_VALID(__ctx)	\
	(!(((__ctx) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx)	((__ctx) & ~CTX_VERSION_MASK)

extern void get_new_mmu_context(struct mm_struct *mm);

/* Initialize a new mmu context.  This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 * A fresh mm_struct is cleared out to zeros, so this need not
 * do anything on Sparc64 since the only thing we care about
 * is that mm->context is an invalid context (ie. zero).
 */
#define init_new_context(__tsk, __mm)	do { } while(0)

/* Destroy a dead context.  This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed.  Our job is to destroy
 * any remaining processor-specific state, and in the sparc64
 * case this just means freeing up the mmu context ID held by
 * this task if valid.
 */
#define destroy_context(__mm)						\
do {	spin_lock(&ctx_alloc_lock);					\
	if (CTX_VALID((__mm)->context)) {				\
		unsigned long nr = CTX_HWBITS((__mm)->context);		\
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));		\
	}								\
	spin_unlock(&ctx_alloc_lock);					\
} while(0)

/* Reload the two core values used by TLB miss handler
 * processing on sparc64.  They are:
 * 1) The physical address of mm->pgd, when full page
 *    table walks are necessary, this is where the
 *    search begins.
 * 2) A "PGD cache".  For 32-bit tasks only pgd[0] is
 *    ever used since that maps the entire low 4GB
 *    completely.  To speed up TLB miss processing we
 *    make this value available to the handlers.  This
 *    decreases the amount of memory traffic incurred.
 */
#define reload_tlbmiss_state(__tsk, __mm)				\
do {									\
	register unsigned long paddr asm("o5");				\
	register unsigned long pgd_cache asm("o4");			\
	paddr = __pa((__mm)->pgd);					\
	pgd_cache = 0UL;						\
	if ((__tsk)->thread.flags & SPARC_FLAG_32BIT)			\
		pgd_cache = pgd_val((__mm)->pgd[0]) << 11UL;		\
	__asm__ __volatile__("wrpr	%%g0, 0x494, %%pstate\n\t"	\
			     "mov	%3, %%g4\n\t"			\
			     "mov	%0, %%g7\n\t"			\
			     "stxa	%1, [%%g4] %2\n\t"		\
			     "wrpr	%%g0, 0x096, %%pstate"		\
			     : /* no outputs */				\
			     : "r" (paddr), "r" (pgd_cache),		\
			       "i" (ASI_DMMU), "i" (TSB_REG));		\
} while(0)

/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm)					\
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"		\
			     "flush	%%g6"				\
			     : /* No outputs */				\
			     : "r" (CTX_HWBITS((__mm)->context)),	\
			       "r" (0x10), "i" (0x58))

/* Clean out potential stale TLB entries due to previous
 * users of this TLB context.  We flush TLB contexts
 * lazily on sparc64.
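 *
 * That is, nothing gets demapped when a context dies; switch_mm()
 * and activate_mm() below demap the secondary context right after
 * loading it whenever the mm received a fresh context number or is
 * running on this cpu for the first time.  The literal constants
 * used here are the UltraSPARC "demap context" operation aimed at
 * the secondary context (virtual address 0x50), pushed through both
 * the D-MMU demap ASI (0x5f) and the I-MMU demap ASI (0x57), just
 * as load_secondary_context() above writes the secondary context
 * register (virtual address 0x10) in ASI_DMMU (0x58).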
 */
#define clean_secondary_context()					\
	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"		\
			     "stxa	%%g0, [%0] %2\n\t"		\
			     "flush	%%g6"				\
			     : /* No outputs */				\
			     : "r" (0x50), "i" (0x5f), "i" (0x57))

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			     struct task_struct *tsk, int cpu)
{
	long dirty;

	spin_lock(&mm->page_table_lock);
	if (CTX_VALID(mm->context))
		dirty = 0;
	else
		dirty = 1;
	if (dirty || (old_mm != mm)) {
		unsigned long vm_mask;

		if (dirty)
			get_new_mmu_context(mm);
		vm_mask = (1UL << cpu);
		if (!(mm->cpu_vm_mask & vm_mask)) {
			mm->cpu_vm_mask |= vm_mask;
			dirty = 1;
		}

		load_secondary_context(mm);
		if (dirty != 0)
			clean_secondary_context();
		reload_tlbmiss_state(tsk, mm);
	}
	spin_unlock(&mm->page_table_lock);
}

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	unsigned long vm_mask;

	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	vm_mask = (1UL << smp_processor_id());
	if (!(mm->cpu_vm_mask & vm_mask))
		mm->cpu_vm_mask |= vm_mask;
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	clean_secondary_context();
	reload_tlbmiss_state(current, mm);
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */
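
/* A worked example of the context layout handled by the CTX_* macros
 * above, assuming the usual sparc64 8KB pages (PAGE_SHIFT == 13, so
 * CTX_VERSION_SHIFT == 10):
 *
 *	mm->context                == 0x0c05
 *	CTX_HWBITS(0x0c05)         == 0x005	(hw context number, low 10 bits)
 *	0x0c05 & CTX_VERSION_MASK  == 0x0c00	(allocation version)
 *
 * CTX_VALID() only compares the version bits against tlb_context_cache,
 * so when get_new_mmu_context() runs out of the 1024 hardware context
 * numbers it simply bumps the version, invalidating every previously
 * handed-out context at once without touching each mm_struct (the
 * Alpha/AXP ASN trick mentioned at the top of this file).
 */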