/*
 *  linux/arch/arm/mm/pgd.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}

void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}