// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);		/* buf */
	kfree(ip);
}

/*
 * open and close keep track of how many times the memory region is mapped,
 * to avoid releasing it.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}

/*
 * Allocate information for rxe_mmap
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;

	ip->context =
		container_of(udata, struct uverbs_attr_bundle, driver_udata)
			->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
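
/*
 * Illustrative sketch, not part of the original file: how the producer side
 * of the pending_mmaps protocol above is typically exercised.  A CQ/QP/SRQ
 * creation path allocates a buffer with vmalloc_user(), calls
 * rxe_create_mmap_info(), returns ip->info (offset/size) to userspace via
 * udata, and queues ip on rxe->pending_mmaps so a subsequent mmap() from the
 * same context can find it in rxe_mmap().  The function name below is
 * hypothetical; in the real driver this pairing is done by the queue setup
 * code.
 */
static int __maybe_unused example_queue_mmap_setup(struct rxe_dev *rxe,
						   struct ib_udata *udata,
						   void *buf, u32 buf_size,
						   struct rxe_mmap_info **ip_p)
{
	struct rxe_mmap_info *ip;
	int err;

	/* buf must come from vmalloc_user() so remap_vmalloc_range() works */
	ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
	if (IS_ERR(ip))
		return PTR_ERR(ip);

	/* tell userspace the mmap offset/size chosen in rxe_create_mmap_info */
	if (udata->outlen >= sizeof(ip->info)) {
		err = ib_copy_to_udata(udata, &ip->info, sizeof(ip->info));
		if (err) {
			kfree(ip);
			return err;
		}
	}

	/* make the object visible to a later rxe_mmap() call */
	spin_lock_bh(&rxe->pending_lock);
	list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	*ip_p = ip;
	return 0;
}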