/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_

#include "cq_desc.h"
#include "vnic_dev.h"

/*
 * These defines avoid symbol clash between fnic and enic (Cisco 10G Eth
 * Driver) when both are built with CONFIG options =y
 */
#define vnic_cq_service fnic_cq_service
#define vnic_cq_free fnic_cq_free
#define vnic_cq_alloc fnic_cq_alloc
#define vnic_cq_init fnic_cq_init
#define vnic_cq_clean fnic_cq_clean

/* Completion queue control */
struct vnic_cq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 flow_control_enable;	/* 0x10 */
	u32 pad1;
	u32 color_enable;		/* 0x18 */
	u32 pad2;
	u32 cq_head;			/* 0x20 */
	u32 pad3;
	u32 cq_tail;			/* 0x28 */
	u32 pad4;
	u32 cq_tail_color;		/* 0x30 */
	u32 pad5;
	u32 interrupt_enable;		/* 0x38 */
	u32 pad6;
	u32 cq_entry_enable;		/* 0x40 */
	u32 pad7;
	u32 cq_message_enable;		/* 0x48 */
	u32 pad8;
	u32 interrupt_offset;		/* 0x50 */
	u32 pad9;
	u64 cq_message_addr;		/* 0x58 */
	u32 pad10;
};

struct vnic_cq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_cq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned int to_clean;
	unsigned int last_color;
};

static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
	unsigned int work_to_do,
	int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque),
	void *opaque)
{
	struct cq_desc *cq_desc;
	unsigned int work_done = 0;
	u16 q_number, completed_index;
	u8 type, color;

	cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
		cq->ring.desc_size * cq->to_clean);
	cq_desc_dec(cq_desc, &type, &color,
		&q_number, &completed_index);

	while (color != cq->last_color) {

		if ((*q_service)(cq->vdev, cq_desc, type,
			q_number, completed_index, opaque))
			break;

		cq->to_clean++;
		if (cq->to_clean == cq->ring.desc_count) {
			cq->to_clean = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
			cq->ring.desc_size * cq->to_clean);
		cq_desc_dec(cq_desc, &type, &color,
			&q_number, &completed_index);

		work_done++;
		if (work_done >= work_to_do)
			break;
	}

	return work_done;
}

void vnic_cq_free(struct vnic_cq *cq);
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
	unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
	unsigned int cq_tail_color, unsigned int interrupt_enable,
	unsigned int cq_entry_enable, unsigned int message_enable,
	unsigned int interrupt_offset, u64 message_addr);
void vnic_cq_clean(struct vnic_cq *cq);

#endif /* _VNIC_CQ_H_ */
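
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * header): a minimal example of how a caller might drive vnic_cq_service()
 * from a completion-handling path. The names example_cq_handler and
 * example_poll, and the budget parameter, are hypothetical; the real
 * callbacks live elsewhere in the fnic driver.
 */

/* Per-descriptor callback; return nonzero to stop servicing early. */
static inline int example_cq_handler(struct vnic_dev *vdev,
	struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index,
	void *opaque)
{
	/* Process the completion identified by q_number/completed_index. */
	return 0;
}

static inline unsigned int example_poll(struct vnic_cq *cq,
	unsigned int budget)
{
	/*
	 * vnic_cq_service() walks descriptors whose color bit differs from
	 * cq->last_color, invokes the callback for each, and returns the
	 * number of completions handled (at most budget).
	 */
	return vnic_cq_service(cq, budget, example_cq_handler, NULL);
}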