// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
			       u32 *dmac_index)
{
	struct cgx_mac_addr_add_req *req;
	struct cgx_mac_addr_add_rsp *rsp;
	int err;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (!err) {
		rsp = (struct cgx_mac_addr_add_rsp *)
			otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
		/* Validate the response before dereferencing it; the
		 * mbox layer can return an error pointer.
		 */
		if (IS_ERR_OR_NULL(rsp))
			err = -EINVAL;
		else
			*dmac_index = rsp->index;
	}

	mutex_unlock(&pf->mbox.lock);
	return err;
}

static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
	struct cgx_mac_addr_set_or_get *req;
	struct cgx_mac_addr_set_or_get *rsp;
	int err;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	req->index = *dmac_index;

	ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	rsp = (struct cgx_mac_addr_set_or_get *)
		otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR_OR_NULL(rsp)) {
		err = -EINVAL;
		goto out;
	}

	*dmac_index = rsp->index;
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}

int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
	u32 *dmacindex;

	/* Store the dmacindex returned by the CGX/RPM driver; it is
	 * used later for macaddr update/remove.
	 */
	dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	if (ether_addr_equal(mac, pf->netdev->dev_addr))
		return otx2_dmacflt_add_pfmac(pf, dmacindex);
	else
		return otx2_dmacflt_do_add(pf, mac, dmacindex);
}

static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
				  u32 dmac_index)
{
	struct cgx_mac_addr_del_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->index = dmac_index;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	return err;
}

static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
	struct cgx_mac_addr_reset_req *req;
	int err;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	req->index = dmac_index;

	err = otx2_sync_mbox_msg(&pf->mbox);

	mutex_unlock(&pf->mbox.lock);
	return err;
}

int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
	u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	if (ether_addr_equal(mac, pf->netdev->dev_addr))
		return otx2_dmacflt_remove_pfmac(pf, dmacindex);
	else
		return otx2_dmacflt_do_remove(pf, mac, dmacindex);
}

/* CGX/RPM blocks support a maximum of 32 unicast DMAC entries.
 * In a typical configuration the MAC block is associated with
 * 4 LMACs, so each LMAC gets 8 DMAC entries.
 */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
{
	struct cgx_max_dmac_entries_get_rsp *rsp;
	struct msg_req *msg;
	int err;

	mutex_lock(&pf->mbox.lock);
	msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	rsp = (struct cgx_max_dmac_entries_get_rsp *)
		otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
	if (IS_ERR_OR_NULL(rsp)) {
		err = -EINVAL;
		goto out;
	}

	pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;

out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}

int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
	struct cgx_mac_addr_update_req *req;
	struct cgx_mac_addr_update_rsp *rsp;
	int rc;

	mutex_lock(&pf->mbox.lock);

	req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);
	req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];

	/* Check the response and update the stored index, since the
	 * filter may land in a different hardware slot.
	 */
	rc = otx2_sync_mbox_msg(&pf->mbox);
	if (rc)
		goto out;

	rsp = (struct cgx_mac_addr_update_rsp *)
		otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	/* Validate the response before dereferencing it. */
	if (IS_ERR_OR_NULL(rsp)) {
		rc = -EINVAL;
		goto out;
	}

	pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
out:
	mutex_unlock(&pf->mbox.lock);
	return rc;
}
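/* Illustrative sketch (compiled out, not part of the driver): one
 * possible way a caller could drive the helpers above when installing
 * a unicast MAC filter. The real driver wires this up from its
 * rx-mode/flow-management paths. The wrapper name and the
 * 'dmacflt_bmap' bitmap field below are hypothetical; only
 * otx2_dmacflt_get_max_cnt(), otx2_dmacflt_add() and
 * bmap_to_dmacindex[] come from this file.
 */
#if 0
static int example_dmacflt_install(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	u32 bit_pos;
	int err;

	/* Learn the per-LMAC DMAC filter budget from CGX/RPM; the
	 * helper caches it in flow_cfg->dmacflt_max_flows.
	 */
	err = otx2_dmacflt_get_max_cnt(pf);
	if (err)
		return err;

	/* Reserve a free slot in a driver-side bitmap (hypothetical
	 * 'dmacflt_bmap' field); the slot number doubles as the
	 * 'bit_pos' that indexes bmap_to_dmacindex[].
	 */
	bit_pos = find_first_zero_bit(flow_cfg->dmacflt_bmap,
				      flow_cfg->dmacflt_max_flows);
	if (bit_pos >= flow_cfg->dmacflt_max_flows)
		return -ENOSPC;

	/* Program the filter; on success the CGX/RPM index for this
	 * slot is recorded in bmap_to_dmacindex[bit_pos].
	 */
	err = otx2_dmacflt_add(pf, mac, bit_pos);
	if (err)
		return err;

	set_bit(bit_pos, flow_cfg->dmacflt_bmap);
	return 0;
}
#endif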