// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

/* Minimal userspace stand-ins for the kernel APIs that ptr_ring.h relies on. */
#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;

static void *kmalloc(unsigned size, gfp_t gfp)
{
	return memalign(64, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);

	if (!p)
		return p;
	memset(p, 0, size);
	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);

	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);

	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);

	assert(!ret);
}

/* The bh/irq lock variants are indistinguishable in this userspace harness. */
static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);

	assert(!ret);
	/* Hacky way to poke at ring internals. Useful for testing though. */
	if (param)
		array.batch = param;
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that a subsequent
 * add_inbuf will succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return !__ptr_ring_peek(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
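
/*
 * Illustrative sketch only, not part of the upstream file: the functions
 * above are callbacks for the ringtest harness, which normally drives them
 * from main.c with separate guest/host threads.  Assuming the harness has
 * set up ring_size and param, a minimal single-threaded pass over the
 * producer/consumer paths could look like the hypothetical helper below
 * (the guard macro, helper name, buffer length and iteration count are all
 * made up for illustration).  Guarded out so it is not built by default.
 */
#ifdef PTR_RING_SMOKE_TEST
static void ptr_ring_smoke_test(void)
{
	unsigned len;
	void *buf;
	int i;

	alloc_ring();
	for (i = 0; i < 16; i++) {
		/* guest side: post one buffer */
		assert(!add_inbuf(0x80, "Hello\n", NULL));
		/* host side: there is now something to consume */
		assert(!avail_empty());
		assert(use_buf(&len, &buf));
		/* guest side: reclaim the completed buffer */
		assert(get_buf(&len, &buf));
	}
	/* everything posted has been consumed and reclaimed */
	assert(used_empty());
}
#endif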