/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/mm.h>
#include <linux/io-mapping.h>

#include "i915_drv.h"

struct remap_pfn {
	struct mm_struct *mm;
	unsigned long pfn;
	pgprot_t prot;

	struct sgt_iter sgt;
	resource_size_t iobase;
};

static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
	r->pfn++;

	return 0;
}

#define use_dma(io) ((io) != -1)

static inline unsigned long sgt_pfn(const struct remap_pfn *r)
{
	if (use_dma(r->iobase))
		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
	else
		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
}

static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
	struct remap_pfn *r = data;

	if (GEM_WARN_ON(!r->sgt.pfn))
		return -EINVAL;

	/* Special PTE are not associated with any struct page */
	set_pte_at(r->mm, addr, pte,
		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
	r->pfn++; /* track insertions in case we need to unwind later */

	r->sgt.curr += PAGE_SIZE;
	if (r->sgt.curr >= r->sgt.max)
		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));

	return 0;
}
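/*
 * Note on the two helpers above (added commentary): remap_pfn() and
 * remap_sg() are callbacks for apply_to_page_range(), which invokes
 * them once per PTE slot in the target range. Each call installs one
 * special PTE (pte_mkspecial(), i.e. no backing struct page and no
 * rmap accounting) and then steps to the next source page, either by
 * bumping the linear pfn or by advancing the sg iterator.
 */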
/**
 * remap_io_mapping - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @iomap: the source io_mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap)
{
	struct remap_pfn r;
	int err;

#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	r.mm = vma->vm_mm;
	r.pfn = pfn;
	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));

	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
		return err;
	}

	return 0;
}

/**
 * remap_io_sg - remap an IO mapping to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @size: size of map area
 * @sgl: Start sg entry
 * @iobase: Use stored dma address offset by this address or pfn if -1
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_io_sg(struct vm_area_struct *vma,
		unsigned long addr, unsigned long size,
		struct scatterlist *sgl, resource_size_t iobase)
{
	struct remap_pfn r = {
		.mm = vma->vm_mm,
		.prot = vma->vm_page_prot,
		.sgt = __sgt_iter(sgl, use_dma(iobase)),
		.iobase = iobase,
	};
	int err;

	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);

	if (!use_dma(iobase))
		flush_cache_range(vma, addr, size);

	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
	if (unlikely(err)) {
		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
		return err;
	}

	return 0;
}
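/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): how a caller might push a whole vma through remap_io_mapping().
 * The helper name and the ggtt_offset parameter are hypothetical; in the
 * real driver the call is made from the GTT fault path with the offset
 * computed from the bound vma.
 */
static int __maybe_unused example_remap_vma(struct vm_area_struct *vma,
					    struct io_mapping *iomap,
					    resource_size_t ggtt_offset)
{
	/* remap_io_mapping() asserts these flags, so set them up front */
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

	/* Map the aperture pages backing [vm_start, vm_end) in one walk */
	return remap_io_mapping(vma, vma->vm_start,
				(iomap->base + ggtt_offset) >> PAGE_SHIFT,
				vma->vm_end - vma->vm_start, iomap);
}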