// SPDX-License-Identifier: GPL-2.0
/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for space which is
 * pointed to by the arguments (for passing pointers to structures,
 * etc).  When the multicall is actually issued, all the space for the
 * commands and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
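/*
 * Illustrative usage sketch (not part of the original file): callers
 * typically bracket a batch with the xen_mc_batch()/xen_mc_issue()
 * helpers declared in multicalls.h, reserve a slot with xen_mc_entry(),
 * and fill it in with one of the MULTI_* helpers from hypercall.h.
 * The exact lazy-mode flag name passed to xen_mc_issue() varies between
 * kernel versions, so treat the one below as an assumption:
 *
 *	struct multicall_space mcs;
 *
 *	xen_mc_batch();				// start (or nest into) a batch
 *	mcs = xen_mc_entry(0);			// slot with no extra argument space
 *	MULTI_update_va_mapping(mcs.mc, va, pte, 0);
 *	xen_mc_issue(XEN_LAZY_MMU);		// flushes now unless in a lazy section
 */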
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

#define MC_BATCH	32

#define MC_DEBUG	0

#define MC_ARGS		(MC_BATCH * 16)

struct mc_buffer {
	unsigned mcidx, argidx, cbidx;
	struct multicall_entry entries[MC_BATCH];
#if MC_DEBUG
	struct multicall_entry debug[MC_BATCH];
	void *caller[MC_BATCH];
#endif
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

void xen_mc_flush(void)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_entry *mc;
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

#if MC_DEBUG
	memcpy(b->debug, b->entries,
	       b->mcidx * sizeof(struct multicall_entry));
#endif

	switch (b->mcidx) {
	case 0:
		/* no-op */
		BUG_ON(b->argidx != 0);
		break;

	case 1:
		/* Singleton multicall - bypass multicall machinery
		   and just do the call directly. */
		mc = &b->entries[0];

		mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
					     mc->args[2], mc->args[3],
					     mc->args[4]);
		ret = mc->result < 0;
		break;

	default:
		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;
	}

	if (WARN_ON(ret)) {
		pr_err("%d of %d multicall(s) failed: cpu %d\n",
		       ret, b->mcidx, smp_processor_id());
		for (i = 0; i < b->mcidx; i++) {
			if (b->entries[i].result < 0) {
#if MC_DEBUG
				pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
				       i + 1,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result,
				       b->caller[i]);
#else
				pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\n",
				       i + 1,
				       b->entries[i].op,
				       b->entries[i].args[0],
				       b->entries[i].result);
#endif
			}
		}
	}

	b->mcidx = 0;
	b->argidx = 0;

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	local_irq_restore(flags);
}

struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	trace_xen_mc_entry_alloc(args);

	BUG_ON(preemptible());
	BUG_ON(b->argidx >= MC_ARGS);

	if (unlikely(b->mcidx == MC_BATCH ||
		     (argidx + args) >= MC_ARGS)) {
		trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
					  XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
#if MC_DEBUG
	b->caller[b->mcidx] = __builtin_return_address(0);
#endif
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx >= MC_ARGS);
	return ret;
}

struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx >= MC_ARGS);

	if (unlikely(b->mcidx == 0 ||
		     b->entries[b->mcidx - 1].op != op)) {
		trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
		goto out;
	}

	if (unlikely((b->argidx + size) >= MC_ARGS)) {
		trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
		goto out;
	}

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx >= MC_ARGS);

	trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
out:
	return ret;
}

void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH) {
		trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK);
		xen_mc_flush();
	}

	trace_xen_mc_callback(fn, data);

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}