// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/mutex.h>
#include <linux/list.h>
#include "adf_cfg.h"
#include "adf_common_drv.h"

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static u32 num_devices;
static u8 id_map[ADF_MAX_DEVICES];

struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
}

static int adf_get_vf_num(struct adf_accel_dev *vf)
{
	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}

static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf == bdf)
			return ptr;
	}
	return NULL;
}

static int adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}
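/*
 * Note on the id scheme (illustrative summary, derived from the helpers
 * above): adf_get_vf_id() linearizes a VF's PCI slot/function into a
 * per-bus index, 8 * (slot - 1) + function, and adf_get_vf_num()
 * prefixes the bus number to build the BDF-style key used for
 * vfs_table lookups. For example, a hypothetical VF at bus 0x3f,
 * slot 1, function 2 yields id 8 * 0 + 2 = 2 and key
 * (0x3f << 8) | 2 = 0x3f02.
 */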
/**
 * adf_clean_vf_map() - Cleans VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *	for VFs only or for both VFs and PFs
 *
 * Function cleans internal ids for virtual functions.
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table) {
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data: Pointer to internal device data.
 *
 * Function updates internal dev index for VFs
 */
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
			list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);

static unsigned int adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}

/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev:  Pointer to acceleration device.
 * @pf:         Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
			ADF_MAX_DEVICES);
		return -EFAULT;
	}

	mutex_lock(&table_lock);
	atomic_set(&accel_dev->ref_count, 0);

	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table) {
			struct adf_accel_dev *ptr =
				list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = -EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		/* VF on host */
		struct vf_id_map *map;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (map) {
			struct vf_id_map *next;

			accel_dev->accel_id = map->id;
			list_add_tail(&accel_dev->list, &accel_table);
			map->fake_id++;
			map->attached = true;
			next = list_next_entry(map, list);
			while (next && &next->list != &vfs_table) {
				next->fake_id++;
				next = list_next_entry(next, list);
			}

			ret = 0;
			goto unlock;
		}

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto unlock;
		}
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			kfree(map);
			ret = -EFAULT;
			goto unlock;
		}
		num_devices++;
		list_add_tail(&accel_dev->list, &accel_table);
		map->bdf = adf_get_vf_num(accel_dev);
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	}
	mutex_init(&accel_dev->state_lock);
unlock:
	mutex_unlock(&table_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
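/*
 * Usage sketch (illustrative, not part of this file): a device-specific
 * probe routine is expected to register with the framework before
 * bringing the device up and to unwind on failure, roughly along these
 * lines:
 *
 *	ret = adf_devmgr_add_dev(accel_dev, pf);
 *	if (ret) {
 *		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
 *		return ret;
 *	}
 *	...
 *	adf_devmgr_rm_dev(accel_dev, pf);
 */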
struct list_head *adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev:  Pointer to acceleration device.
 * @pf:         Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
		       struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	/* PF on host or VF on guest - optimized to remove redundant is_vf */
	if (!accel_dev->is_vf || !pf) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	} else if (accel_dev->is_vf && pf) {
		struct vf_id_map *map, *next;

		map = adf_find_vf(adf_get_vf_num(accel_dev));
		if (!map) {
			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
			goto unlock;
		}
		map->fake_id--;
		map->attached = false;
		next = list_next_entry(map, list);
		while (next && &next->list != &vfs_table) {
			next->fake_id--;
			next = list_next_entry(next, list);
		}
	}
unlock:
	mutex_destroy(&accel_dev->state_lock);
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);

struct adf_accel_dev *adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev,
				       list);
	return dev;
}

/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev:  Pointer to PCI device.
 *
 * Function returns acceleration device associated with the given PCI device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
			list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);

struct adf_accel_dev *adf_devmgr_get_dev_by_id(u32 id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table) {
		struct adf_accel_dev *ptr =
			list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

int adf_devmgr_verify_id(u32 id)
{
	if (id == ADF_CFG_ALL_DEVICES)
		return 0;

	if (adf_devmgr_get_dev_by_id(id))
		return 0;

	return -ENODEV;
}

static int adf_get_num_detached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table) {
		struct vf_id_map *ptr =
			list_entry(itr, struct vf_id_map, list);

		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

void adf_devmgr_get_num_dev(u32 *num)
{
	*num = num_devices - adf_get_num_detached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);
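/*
 * Usage sketch (illustrative only, not from this file): callers
 * typically resolve a pci_dev to its accel_dev and check its state
 * before acting on it:
 *
 *	struct adf_accel_dev *accel_dev;
 *
 *	accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
 *	if (!accel_dev || !adf_dev_started(accel_dev))
 *		return -ENODEV;
 */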
/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount. If this is the first reference
 * taken while the device is in use, take a reference on the owning
 * module as well.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount could
 * not be taken.
 */
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		if (!try_module_get(accel_dev->owner))
			return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount. When the last reference is
 * dropped, release the reference on the owning module as well.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);

/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
EXPORT_SYMBOL_GPL(adf_dev_started);
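/*
 * Usage sketch (illustrative only): adf_dev_get()/adf_dev_put() are
 * meant to bracket any period during which a caller holds a reference
 * to the device, e.g. around submitting work to it:
 *
 *	if (adf_dev_get(accel_dev))
 *		return -EFAULT;
 *	...submit work to the device...
 *	adf_dev_put(accel_dev);
 */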