// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>

#include <rdma/ib_verbs.h>

#include "siw.h"

static int map_wc_opcode[SIW_NUM_OPCODES] = {
	[SIW_OP_WRITE] = IB_WC_RDMA_WRITE,
	[SIW_OP_SEND] = IB_WC_SEND,
	[SIW_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[SIW_OP_READ] = IB_WC_RDMA_READ,
	[SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ,
	[SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP,
	[SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV,
	[SIW_OP_REG_MR] = IB_WC_REG_MR,
	[SIW_OP_RECEIVE] = IB_WC_RECV,
	[SIW_OP_READ_RESPONSE] = -1 /* not used */
};

static struct {
	enum siw_wc_status siw;
	enum ib_wc_status ib;
} map_cqe_status[SIW_NUM_WC_STATUS] = {
	{ SIW_WC_SUCCESS, IB_WC_SUCCESS },
	{ SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR },
	{ SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR },
	{ SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
	{ SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
	{ SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
	{ SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
	{ SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
	{ SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
	{ SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR }
};

/*
 * Reap one CQE from the CQ. Only used by kernel clients
 * during CQ normal operation. Might be called during CQ
 * flush for user mapped CQE array as well.
 */
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
{
	struct siw_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	cqe = &cq->queue[cq->cq_get % cq->num_cqe];
	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
		memset(wc, 0, sizeof(*wc));
		wc->wr_id = cqe->id;
		wc->byte_len = cqe->bytes;

		/*
		 * During CQ flush, CQEs from user land may also get
		 * reaped here; these do not hold a QP reference
		 * and do not qualify for memory extension verbs.
		 */
		if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
			if (cqe->flags & SIW_WQE_REM_INVAL) {
				wc->ex.invalidate_rkey = cqe->inval_stag;
				wc->wc_flags = IB_WC_WITH_INVALIDATE;
			}
			wc->qp = cqe->base_qp;
			wc->opcode = map_wc_opcode[cqe->opcode];
			wc->status = map_cqe_status[cqe->status].ib;
			siw_dbg_cq(cq,
				   "idx %u, type %d, flags %2x, id 0x%pK\n",
				   cq->cq_get % cq->num_cqe, cqe->opcode,
				   cqe->flags, (void *)(uintptr_t)cqe->id);
		} else {
			/*
			 * A malicious user may set invalid opcode or
			 * status in the user mmapped CQE array.
			 * Sanity check and correct values in that case
			 * to avoid out-of-bounds access to global arrays
			 * for opcode and status mapping.
			 */
			u8 opcode = cqe->opcode;
			u16 status = cqe->status;

			if (opcode >= SIW_NUM_OPCODES) {
				opcode = 0;
				status = SIW_WC_GENERAL_ERR;
			} else if (status >= SIW_NUM_WC_STATUS) {
				status = SIW_WC_GENERAL_ERR;
			}
			wc->opcode = map_wc_opcode[opcode];
			wc->status = map_cqe_status[status].ib;
		}
		WRITE_ONCE(cqe->flags, 0);
		cq->cq_get++;

		spin_unlock_irqrestore(&cq->lock, flags);

		return 1;
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}

/*
 * siw_cq_flush()
 *
 * Flush all CQ elements.
 */
void siw_cq_flush(struct siw_cq *cq)
{
	struct ib_wc wc;

	while (siw_reap_cqe(cq, &wc))
		;
}
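
/*
 * Usage sketch (illustrative only, not part of the driver): a kernel
 * consumer holding a struct siw_cq could drain completions with
 * siw_reap_cqe() as below, which is essentially what siw_cq_flush()
 * above does while discarding the work completions. The drain_cq()
 * helper name and the pr_warn() reporting are assumptions for
 * illustration; real kernel ULPs poll through ib_poll_cq() rather
 * than calling into siw directly.
 */
#if 0	/* example only, never compiled */
static void drain_cq(struct siw_cq *cq)
{
	struct ib_wc wc;

	/* siw_reap_cqe() returns 1 while a valid CQE was reaped, 0 when empty */
	while (siw_reap_cqe(cq, &wc)) {
		if (wc.status != IB_WC_SUCCESS)
			pr_warn("siw: wr_id 0x%llx completed with status %d\n",
				(unsigned long long)wc.wr_id, wc.status);
	}
}
#endif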