// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_main.c
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"

#include "nfpcore/nfp6000_pcie.h"

#include "nfp_abi.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"

static const char nfp_driver_name[] = "nfp";

static const struct pci_device_id nfp_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP3800,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP4000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP5000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP3800,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP4000,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP5000,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID, PCI_ANY_ID, 0,
	  NFP_DEV_NFP6000,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
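
/* Descriptive note (added): the helpers below access firmware run-time
 * symbols (rtsyms) whose names are templated on this PF's PCIe unit,
 * e.g. "_pf%u_net_app_id" further down in this file.
 */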
int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err == -ENOENT)
			return default_val;
		nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
		return err;
	}

	return val;
}

u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
		 unsigned int min_size, struct nfp_cpp_area **area)
{
	char pf_symbol[256];

	snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
		 nfp_cppcore_pcie_unit(pf->cpp));

	return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}

/* Callers should hold the devlink instance lock */
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
		 void *out_data, u64 out_length)
{
	unsigned long err_at;
	u64 max_data_sz;
	u32 val = 0;
	int n, err;

	if (!pf->mbox)
		return -EOPNOTSUPP;

	max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE;

	/* Check if cmd field is clear */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
	if (err || val) {
		nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n",
			 cmd, val, err);
		return err ?: -EBUSY;
	}

	in_length = min(in_length, max_data_sz);
	n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data,
			    in_length);
	if (n != in_length)
		return -EIO;
	/* Write data_len and wipe reserved */
	err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length);
	if (err)
		return err;
	/* Read back for ordering */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val);
	if (err)
		return err;

	/* Write cmd and wipe return value */
	err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd);
	if (err)
		return err;

	err_at = jiffies + 5 * HZ;
	while (true) {
		/* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */
		err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
		if (err)
			return err;
		if (!val)
			break;

		if (time_is_before_eq_jiffies(err_at))
			return -ETIMEDOUT;

		msleep(5);
	}

	/* Copy output if any (could be error info, do it before reading ret) */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val);
	if (err)
		return err;

	out_length = min_t(u32, val, min(out_length, max_data_sz));
	n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA,
			   out_data, out_length);
	if (n != out_length)
		return -EIO;

	/* Check if there is an error */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val);
	if (err)
		return err;
	if (val)
		return -val;

	return out_length;
}

static bool nfp_board_ready(struct nfp_pf *pf)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
	if (!cp)
		return false;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return false;

	return state == 15;
}

static int nfp_pf_board_state_wait(struct nfp_pf *pf)
{
	const unsigned long wait_until = jiffies + 10 * HZ;

	while (!nfp_board_ready(pf)) {
		if (time_is_before_eq_jiffies(wait_until)) {
			nfp_err(pf->cpp, "NFP board initialization timeout\n");
			return -EINVAL;
		}

		nfp_info(pf->cpp, "waiting for board initialization\n");
		if (msleep_interruptible(500))
			return -ERESTARTSYS;

		/* Refresh cached information */
		kfree(pf->hwinfo);
		pf->hwinfo = nfp_hwinfo_read(pf->cpp);
	}

	return 0;
}
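
/* Descriptive note (added): read the VF limit advertised by firmware via the
 * "nfd_vf_cfg_max_vfs" rtsym and reflect it in sysfs through
 * pci_sriov_set_totalvfs(). If the symbol is absent, all VFs stay allowed
 * for backwards compatibility.
 */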
static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
{
	int err;

	pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err);
	if (err) {
		/* For backwards compatibility if symbol not found allow all */
		pf->limit_vfs = ~0;
		if (err == -ENOENT)
			return 0;

		nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err);
		return err;
	}

	err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
	if (err)
		nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err);
	return 0;
}

static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);
	struct devlink *devlink;
	int err;

	if (num_vfs > pf->limit_vfs) {
		nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
			 pf->limit_vfs);
		return -EINVAL;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
		return err;
	}

	devlink = priv_to_devlink(pf);
	devl_lock(devlink);

	err = nfp_app_sriov_enable(pf->app, num_vfs);
	if (err) {
		dev_warn(&pdev->dev,
			 "App specific PCI SR-IOV configuration failed: %d\n",
			 err);
		goto err_sriov_disable;
	}

	pf->num_vfs = num_vfs;

	dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);

	devl_unlock(devlink);
	return num_vfs;

err_sriov_disable:
	devl_unlock(devlink);
	pci_disable_sriov(pdev);
	return err;
#endif
	return 0;
}

static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
	struct nfp_pf *pf = pci_get_drvdata(pdev);
	struct devlink *devlink;

	devlink = priv_to_devlink(pf);
	devl_lock(devlink);

	/* If the VFs are assigned we cannot shut down SR-IOV without
	 * causing issues, so just leave the hardware available but
	 * disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
		devl_unlock(devlink);
		return -EPERM;
	}

	nfp_app_sriov_disable(pf->app);

	pf->num_vfs = 0;

	devl_unlock(devlink);

	pci_disable_sriov(pdev);
	dev_dbg(&pdev->dev, "Removed VFs.\n");
#endif
	return 0;
}

static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (!pci_get_drvdata(pdev))
		return -ENOENT;

	if (num_vfs == 0)
		return nfp_pcie_sriov_disable(pdev);
	else
		return nfp_pcie_sriov_enable(pdev, num_vfs);
}
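
/* Descriptive note (added): open the NSP, write the firmware image to flash
 * and close the NSP again. Failures are reported via extack when one is
 * provided, otherwise via dev_err().
 */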
int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw,
			    struct netlink_ext_ack *extack)
{
	struct device *dev = &pf->pdev->dev;
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		if (extack)
			NL_SET_ERR_MSG_MOD(extack, "can't access NSP");
		else
			dev_err(dev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	err = nfp_nsp_write_flash(nsp, fw);
	if (err < 0)
		goto exit_close_nsp;
	dev_info(dev, "Finished writing flash image\n");
	err = 0;

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}

static const struct firmware *
nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name)
{
	const struct firmware *fw = NULL;
	int err;

	err = request_firmware_direct(&fw, name, &pdev->dev);
	nfp_info(pf->cpp, "  %s: %s\n",
		 name, err ? "not found" : "found");
	if (err)
		return NULL;

	return fw;
}

/**
 * nfp_net_fw_find() - Find the correct firmware image for netdev mode
 * @pdev:	PCI Device structure
 * @pf:		NFP PF Device structure
 *
 * Return: firmware if found and requested successfully.
 */
static const struct firmware *
nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
{
	struct nfp_eth_table_port *port;
	const struct firmware *fw;
	const char *fw_model;
	char fw_name[256];
	const u8 *serial;
	u16 interface;
	int spc, i, j;

	nfp_info(pf->cpp, "Looking for firmware file in order of priority:\n");

	/* First try to find a firmware image specific for this device */
	interface = nfp_cpp_interface(pf->cpp);
	nfp_cpp_serial(pf->cpp, &serial);
	sprintf(fw_name, "netronome/serial-%pMF-%02x-%02x.nffw",
		serial, interface >> 8, interface & 0xff);
	fw = nfp_net_fw_request(pdev, pf, fw_name);
	if (fw)
		return fw;

	/* Then try the PCI name */
	sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev));
	fw = nfp_net_fw_request(pdev, pf, fw_name);
	if (fw)
		return fw;

	/* Finally try the card type and media */
	if (!pf->eth_tbl) {
		dev_err(&pdev->dev, "Error: can't identify media config\n");
		return NULL;
	}

	fw_model = nfp_hwinfo_lookup(pf->hwinfo, "nffw.partno");
	if (!fw_model)
		fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
	if (!fw_model) {
		dev_err(&pdev->dev, "Error: can't read part number\n");
		return NULL;
	}

	spc = ARRAY_SIZE(fw_name);
	spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model);

	for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) {
		port = &pf->eth_tbl->ports[i];
		j = 1;
		while (i + j < pf->eth_tbl->count &&
		       port->speed == port[j].speed)
			j++;

		spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc,
				"_%dx%d", j, port->speed / 1000);
	}

	if (spc <= 0)
		return NULL;

	spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw");
	if (spc <= 0)
		return NULL;

	return nfp_net_fw_request(pdev, pf, fw_name);
}

static int
nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp,
			const char *key, const char *default_val, int max_val,
			int *value)
{
	char hwinfo[64];
	long hi_val;
	int err;

	snprintf(hwinfo, sizeof(hwinfo), key);
	err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
					     default_val);
	if (err)
		return err;

	err = kstrtol(hwinfo, 0, &hi_val);
	if (err || hi_val < 0 || hi_val > max_val) {
		dev_warn(&pdev->dev,
			 "Invalid value '%s' from '%s', ignoring\n",
			 hwinfo, key);
		err = kstrtol(default_val, 0, &hi_val);
	}

	*value = hi_val;
	return err;
}

/**
 * nfp_fw_load() - Load the firmware image
 * @pdev:	PCI Device structure
 * @pf:		NFP PF Device structure
 * @nsp:	NFP SP handle
 *
 * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded
 */
static int
nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
{
	bool do_reset, fw_loaded = false;
	const struct firmware *fw = NULL;
	int err, reset, policy, ifcs = 0;
	char *token, *ptr;
	char hwinfo[64];
	u16 interface;

	snprintf(hwinfo, sizeof(hwinfo), "abi_drv_load_ifc");
	err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
					     NFP_NSP_DRV_LOAD_IFC_DEFAULT);
	if (err)
		return err;

	interface = nfp_cpp_interface(pf->cpp);
	ptr = hwinfo;
	while ((token = strsep(&ptr, ","))) {
		unsigned long interface_hi;

		err = kstrtoul(token, 0, &interface_hi);
		if (err) {
			dev_err(&pdev->dev,
				"Failed to parse interface '%s': %d\n",
				token, err);
			return err;
		}

		ifcs++;
		if (interface == interface_hi)
			break;
	}

	if (!token) {
		dev_info(&pdev->dev, "Firmware will be loaded by partner\n");
		return 0;
	}

	err = nfp_get_fw_policy_value(pdev, nsp, "abi_drv_reset",
				      NFP_NSP_DRV_RESET_DEFAULT,
				      NFP_NSP_DRV_RESET_NEVER,
				      &reset);
	if (err)
		return err;

	err = nfp_get_fw_policy_value(pdev, nsp, "app_fw_from_flash",
				      NFP_NSP_APP_FW_LOAD_DEFAULT,
				      NFP_NSP_APP_FW_LOAD_PREF,
				      &policy);
	if (err)
		return err;

	fw = nfp_net_fw_find(pdev, pf);
	do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS ||
		   (fw && reset == NFP_NSP_DRV_RESET_DISK);

	if (do_reset) {
		dev_info(&pdev->dev, "Soft-resetting the NFP\n");
		err = nfp_nsp_device_soft_reset(nsp);
		if (err < 0) {
			dev_err(&pdev->dev,
				"Failed to soft reset the NFP: %d\n", err);
			goto exit_release_fw;
		}
	}

	if (fw && policy != NFP_NSP_APP_FW_LOAD_FLASH) {
		if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp))
			goto exit_release_fw;

		err = nfp_nsp_load_fw(nsp, fw);
		if (err < 0) {
			dev_err(&pdev->dev, "FW loading failed: %d\n",
				err);
			goto exit_release_fw;
		}
		dev_info(&pdev->dev, "Finished loading FW image\n");
		fw_loaded = true;
	} else if (policy != NFP_NSP_APP_FW_LOAD_DISK &&
		   nfp_nsp_has_stored_fw_load(nsp)) {

		/* Don't propagate this error to stick with legacy driver
		 * behavior, failure will be detected later during init.
		 */
		if (!nfp_nsp_load_stored_fw(nsp))
			dev_info(&pdev->dev, "Finished loading stored FW image\n");

		/* Don't flag the fw_loaded in this case since other devices
		 * may reuse the firmware when configured this way
		 */
	} else {
		dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n");
	}

exit_release_fw:
	release_firmware(fw);

	/* We don't want to unload firmware when other devices may still be
	 * dependent on it, which could be the case if there are multiple
	 * devices that could load firmware.
	 */
	if (fw_loaded && ifcs == 1)
		pf->unload_fw_on_remove = true;

	return err < 0 ? err : fw_loaded;
}
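
/* Descriptive note (added): read the port table from the NSP and, when the
 * BSP supports MAC reinit and any port's override changed, reinitialize the
 * MACs and re-read the table.
 */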
static void
nfp_nsp_init_ports(struct pci_dev *pdev, struct nfp_pf *pf,
		   struct nfp_nsp *nsp)
{
	bool needs_reinit = false;
	int i;

	pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
	if (!pf->eth_tbl)
		return;

	if (!nfp_nsp_has_mac_reinit(nsp))
		return;

	for (i = 0; i < pf->eth_tbl->count; i++)
		needs_reinit |= pf->eth_tbl->ports[i].override_changed;
	if (!needs_reinit)
		return;

	kfree(pf->eth_tbl);
	if (nfp_nsp_mac_reinit(nsp))
		dev_warn(&pdev->dev, "MAC reinit failed\n");

	pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
}

static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
	struct nfp_nsp *nsp;
	int err;

	err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 30);
	if (err)
		return err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		dev_err(&pdev->dev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	err = nfp_nsp_wait(nsp);
	if (err < 0)
		goto exit_close_nsp;

	nfp_nsp_init_ports(pdev, pf, nsp);

	pf->nspi = __nfp_nsp_identify(nsp);
	if (pf->nspi)
		dev_info(&pdev->dev, "BSP: %s\n", pf->nspi->version);

	err = nfp_fw_load(pdev, pf, nsp);
	if (err < 0) {
		kfree(pf->nspi);
		kfree(pf->eth_tbl);
		dev_err(&pdev->dev, "Failed to load FW\n");
		goto exit_close_nsp;
	}

	pf->fw_loaded = !!err;
	err = 0;

exit_close_nsp:
	nfp_nsp_close(nsp);

	return err;
}

static void nfp_fw_unload(struct nfp_pf *pf)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		nfp_err(pf->cpp, "Reset failed, can't open NSP\n");
		return;
	}

	err = nfp_nsp_device_soft_reset(nsp);
	if (err < 0)
		dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err);
	else
		dev_info(&pf->pdev->dev, "Firmware safely unloaded\n");

	nfp_nsp_close(nsp);
}
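
/* Descriptive note (added): look up optional per-PF run-time symbols.
 * Currently only the PF mailbox (NFP_MBOX_SYM_NAME) is cached; a mailbox
 * smaller than NFP_MBOX_SYM_MIN_SIZE is treated as a fatal probe error.
 */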
static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
{
	char pf_symbol[256];
	unsigned int pf_id;

	pf_id = nfp_cppcore_pcie_unit(pf->cpp);

	/* Optional per-PCI PF mailbox */
	snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id);
	pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
	if (pf->mbox && nfp_rtsym_size(pf->mbox) < NFP_MBOX_SYM_MIN_SIZE) {
		nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n",
			nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE);
		return -EINVAL;
	}

	return 0;
}

int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					  NFP_APP_CORE_NIC);
}

static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
{
	char name[32];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), "_pf%u_net_app_cap",
		 nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read symbol %s\n", name);

		return 0;
	}

	return val;
}
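
/* Descriptive note (added): advertise "sp_indiff" through NSP hwinfo when the
 * flower app is in use or the app capability NFP_NET_APP_CAP_SP_INDIFF is set.
 * Failure is non-fatal; on success the port table is re-read since it may
 * change after sp_indiff is configured.
 */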
static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf)
{
	struct nfp_nsp *nsp;
	char hwinfo[32];
	bool sp_indiff;
	int err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp))
		return;

	if (!nfp_nsp_has_hwinfo_set(nsp))
		goto end;

	sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
		    (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);

	/* No need to clean `sp_indiff` in driver, management firmware
	 * will do it when application firmware is unloaded.
	 */
	snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
	err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
	/* Not a fatal error, no need to return error to stop driver from loading */
	if (err) {
		nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n",
			 sp_indiff, err);
	} else {
		/* Need reinit eth_tbl since the eth table state may change
		 * after sp_indiff is configured.
		 */
		kfree(pf->eth_tbl);
		pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
	}

end:
	nfp_nsp_close(nsp);
}

static int nfp_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	const struct nfp_dev_info *dev_info;
	struct devlink *devlink;
	struct nfp_pf *pf;
	int err;

	if ((pdev->vendor == PCI_VENDOR_ID_NETRONOME ||
	     pdev->vendor == PCI_VENDOR_ID_CORIGINE) &&
	    (pdev->device == PCI_DEVICE_ID_NFP3800_VF ||
	     pdev->device == PCI_DEVICE_ID_NFP6000_VF))
		dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");

	dev_info = &nfp_dev_info[pci_id->driver_data];

	err = pci_enable_device(pdev);
	if (err < 0)
		return err;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
	if (err)
		goto err_pci_disable;

	err = pci_request_regions(pdev, nfp_driver_name);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to reserve pci resources.\n");
		goto err_pci_disable;
	}

	devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf), &pdev->dev);
	if (!devlink) {
		err = -ENOMEM;
		goto err_rel_regions;
	}
	pf = devlink_priv(devlink);
	INIT_LIST_HEAD(&pf->vnics);
	INIT_LIST_HEAD(&pf->ports);
	pci_set_drvdata(pdev, pf);
	pf->pdev = pdev;
	pf->dev_info = dev_info;

	pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
	if (!pf->wq) {
		err = -ENOMEM;
		goto err_pci_priv_unset;
	}

	pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info);
	if (IS_ERR(pf->cpp)) {
		err = PTR_ERR(pf->cpp);
		goto err_disable_msix;
	}

	err = nfp_resource_table_init(pf->cpp);
	if (err)
		goto err_cpp_free;

	pf->hwinfo = nfp_hwinfo_read(pf->cpp);

	dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"),
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"),
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"),
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"),
		 nfp_hwinfo_lookup(pf->hwinfo, "cpld.version"));

	err = nfp_pf_board_state_wait(pf);
	if (err)
		goto err_hwinfo_free;

	err = nfp_nsp_init(pdev, pf);
	if (err)
		goto err_hwinfo_free;

	pf->mip = nfp_mip_open(pf->cpp);
	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);

	err = nfp_pf_find_rtsyms(pf);
	if (err)
		goto err_fw_unload;

	pf->dump_flag = NFP_DUMP_NSP_DIAG;
	pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);

	err = nfp_pcie_sriov_read_nfd_limit(pf);
	if (err)
		goto err_fw_unload;

	pf->num_vfs = pci_num_vf(pdev);
	if (pf->num_vfs > pf->limit_vfs) {
		dev_err(&pdev->dev,
			"Error: %d VFs already enabled, but loaded FW can only support %d\n",
			pf->num_vfs, pf->limit_vfs);
		err = -EINVAL;
		goto err_fw_unload;
	}

	nfp_pf_cfg_hwinfo(pf);

	err = nfp_net_pci_probe(pf);
	if (err)
		goto err_fw_unload;

	err = nfp_hwmon_register(pf);
	if (err) {
		dev_err(&pdev->dev, "Failed to register hwmon info\n");
		goto err_net_remove;
	}

	return 0;

err_net_remove:
	nfp_net_pci_remove(pf);
err_fw_unload:
	kfree(pf->rtbl);
	nfp_mip_close(pf->mip);
	if (pf->unload_fw_on_remove)
		nfp_fw_unload(pf);
	kfree(pf->eth_tbl);
	kfree(pf->nspi);
	vfree(pf->dumpspec);
err_hwinfo_free:
	kfree(pf->hwinfo);
err_cpp_free:
	nfp_cpp_free(pf->cpp);
err_disable_msix:
	destroy_workqueue(pf->wq);
err_pci_priv_unset:
	pci_set_drvdata(pdev, NULL);
	devlink_free(devlink);
err_rel_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);

	return err;
}

static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
{
	struct nfp_pf *pf;

	pf = pci_get_drvdata(pdev);
	if (!pf)
		return;

	nfp_hwmon_unregister(pf);

	nfp_pcie_sriov_disable(pdev);

	nfp_net_pci_remove(pf);

	vfree(pf->dumpspec);
	kfree(pf->rtbl);
	nfp_mip_close(pf->mip);
	if (unload_fw && pf->unload_fw_on_remove)
		nfp_fw_unload(pf);

	destroy_workqueue(pf->wq);
	pci_set_drvdata(pdev, NULL);
	kfree(pf->hwinfo);
	nfp_cpp_free(pf->cpp);

	kfree(pf->eth_tbl);
	kfree(pf->nspi);
	devlink_free(priv_to_devlink(pf));
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nfp_pci_remove(struct pci_dev *pdev)
{
	__nfp_pci_shutdown(pdev, true);
}

static void nfp_pci_shutdown(struct pci_dev *pdev)
{
	__nfp_pci_shutdown(pdev, false);
}

static struct pci_driver nfp_pci_driver = {
	.name			= nfp_driver_name,
	.id_table		= nfp_pci_device_ids,
	.probe			= nfp_pci_probe,
	.remove			= nfp_pci_remove,
	.shutdown		= nfp_pci_shutdown,
	.sriov_configure	= nfp_pcie_sriov_configure,
};

static int __init nfp_main_init(void)
{
	int err;

	pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2020 Netronome Systems\n",
		nfp_driver_name);
	pr_info("%s: NFP PCIe Driver, Copyright (C) 2021-2022 Corigine Inc.\n",
		nfp_driver_name);

	nfp_net_debugfs_create();

	err = pci_register_driver(&nfp_pci_driver);
	if (err < 0)
		goto err_destroy_debugfs;

	err = pci_register_driver(&nfp_netvf_pci_driver);
	if (err)
		goto err_unreg_pf;

	return err;

err_unreg_pf:
	pci_unregister_driver(&nfp_pci_driver);
err_destroy_debugfs:
	nfp_net_debugfs_destroy();
	return err;
}

static void __exit nfp_main_exit(void)
{
	pci_unregister_driver(&nfp_netvf_pci_driver);
	pci_unregister_driver(&nfp_pci_driver);
	nfp_net_debugfs_destroy();
}

module_init(nfp_main_init);
module_exit(nfp_main_exit);

MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");

MODULE_AUTHOR("Corigine, Inc. <oss-drivers@corigine.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The Network Flow Processor (NFP) driver.");