/*
 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include "omap_drv.h"

static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t paddr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
	sg_dma_address(sg->sgl) = paddr;

	/* this should be after _get_paddr() to ensure we have pages attached */
	omap_gem_dma_sync(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}

static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	omap_gem_put_paddr(obj);
	sg_free_table(sg);
	kfree(sg);
}

static void omap_gem_dmabuf_release(struct dma_buf *buffer)
{
	struct drm_gem_object *obj = buffer->priv;
	/* release reference that was taken when dmabuf was exported
	 * in omap_gem_prime_set()..
	 */
	drm_gem_object_unreference_unlocked(obj);
}

static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		size_t start, size_t len, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
		size_t start, size_t len, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	omap_gem_put_pages(obj);
}

static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap_atomic(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	omap_gem_cpu_sync(obj, page_num);
	return kmap(pages[page_num]);
}

static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;
	omap_gem_get_pages(obj, &pages, false);
	kunmap(pages[page_num]);
}

static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	struct drm_device *dev = obj->dev;
	int ret = 0;

	if (WARN_ON(!obj->filp))
		return -EINVAL;

	/* do the generic GEM mmap setup first, then let omap_gem_mmap_obj()
	 * fix up the vma (cache attributes, etc) for this particular buffer:
	 */
	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

static struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = omap_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
	.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
	.kmap = omap_gem_dmabuf_kmap,
	.kunmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}

struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *buffer)
{
	struct drm_gem_object *obj;

	/* is this one of our own objects? */
	if (buffer->ops == &omap_dmabuf_ops) {
		obj = buffer->priv;
		/* is it from our device? */
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	/*
	 * TODO add support for importing buffers from other devices..
	 * for now we don't need this but would be nice to add eventually
	 */
	return ERR_PTR(-EINVAL);
}