// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there, serialized by vbva_buffer_begin_update()
 * and vbva_buffer_end_update().
 *
 * free_offset is the write position, data_offset is the read position.
 * free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset when data
 * are in the buffer.
 * The guest only changes free_offset, the host only changes data_offset.
 */

/* Number of bytes that can currently be written into the ring buffer. */
static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}

/* Copy len bytes into the ring buffer at offset, wrapping at the end. */
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}
}

/* Ask the host to process the data already placed in the ring buffer. */
static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p)
		return;

	p->reserved = 0;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}

/*
 * Append len bytes of command data to the currently open (partial) record,
 * flushing to the host whenever the ring buffer runs out of space.
 */
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;

	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;

	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		if (chunk >= available) {
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}

		if (chunk >= available) {
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			chunk = available - vbva->partial_write_tresh;
		}

		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);

		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		p += chunk;
	}

	return true;
}

/* Send a VBVA_ENABLE command to enable or disable VBVA for the given screen. */
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

	if (enable)
		ret = p->base.result >= 0;
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}

/* Initialize the ring buffer and tell the host to start using it. */
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool ret = false;

	memset(vbva, 0, sizeof(*vbva));
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!ret)
		vbva_disable(vbva_ctx, ctx, screen);

	return ret;
}

void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}

/* Open a new (partial) record which subsequent vbva_write calls append to. */
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If even after flush there is no place then fail the request */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}

void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;

	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}

/* Remember where in video RAM the ring buffer lives and how large it is. */
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}
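
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * typically wraps each command submission in a record, as described in the
 * ring-buffer comment above. "guest_pool", "cmd" and "cmd_len" are assumed
 * placeholder names for the HGSMI guest pool and a VBVA command buffer; they
 * are not identifiers defined here.
 *
 *	if (vbva_buffer_begin_update(vbva_ctx, guest_pool)) {
 *		vbva_write(vbva_ctx, guest_pool, cmd, cmd_len);
 *		vbva_buffer_end_update(vbva_ctx);
 *	}
 */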