// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2004 Intel Corp.
 */

/*
 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>

/* Assume systems with more busses have correct MCFG */
#define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))

/* The base address of the last MMCONFIG device accessed */
static u32 mmcfg_last_accessed_device;
static int mmcfg_last_accessed_cpu;

/*
 * Functions for accessing PCI configuration space with MMCONFIG accesses
 */
static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
{
	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);

	if (cfg)
		return cfg->address;
	return 0;
}

/*
 * This is always called under pci_config_lock
 */
static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
{
	u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
	int cpu = smp_processor_id();

	if (dev_base != mmcfg_last_accessed_device ||
	    cpu != mmcfg_last_accessed_cpu) {
		mmcfg_last_accessed_device = dev_base;
		mmcfg_last_accessed_cpu = cpu;
		set_fixmap_nocache(FIX_PCIE_MCFG, dev_base);
	}
}

static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
			  unsigned int devfn, int reg, int len, u32 *value)
{
	unsigned long flags;
	u32 base;

	if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
err:		*value = -1;
		return -EINVAL;
	}

	rcu_read_lock();
	base = get_base_addr(seg, bus, devfn);
	if (!base) {
		rcu_read_unlock();
		goto err;
	}

	raw_spin_lock_irqsave(&pci_config_lock, flags);

	pci_exp_set_dev_base(base, bus, devfn);

	switch (len) {
	case 1:
		*value = mmio_config_readb(mmcfg_virt_addr + reg);
		break;
	case 2:
		*value = mmio_config_readw(mmcfg_virt_addr + reg);
		break;
	case 4:
		*value = mmio_config_readl(mmcfg_virt_addr + reg);
		break;
	}
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	rcu_read_unlock();

	return 0;
}

static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
			   unsigned int devfn, int reg, int len, u32 value)
{
	unsigned long flags;
	u32 base;

	if ((bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	rcu_read_lock();
	base = get_base_addr(seg, bus, devfn);
	if (!base) {
		rcu_read_unlock();
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&pci_config_lock, flags);

	pci_exp_set_dev_base(base, bus, devfn);

	switch (len) {
	case 1:
		mmio_config_writeb(mmcfg_virt_addr + reg, value);
		break;
	case 2:
		mmio_config_writew(mmcfg_virt_addr + reg, value);
		break;
	case 4:
		mmio_config_writel(mmcfg_virt_addr + reg, value);
		break;
	}
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	rcu_read_unlock();

	return 0;
}

const struct pci_raw_ops pci_mmcfg = {
	.read =		pci_mmcfg_read,
	.write =	pci_mmcfg_write,
};

int __init pci_mmcfg_arch_init(void)
{
	printk(KERN_INFO "PCI: Using ECAM for extended config space\n");
	raw_pci_ext_ops = &pci_mmcfg;
	return 1;
}

void __init pci_mmcfg_arch_free(void)
{
}

int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
{
	return 0;
}
void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
{
	unsigned long flags;

	/* Invalidate the cached mmcfg map entry. */
	raw_spin_lock_irqsave(&pci_config_lock, flags);
	mmcfg_last_accessed_device = 0;
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
}
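
/*
 * Illustrative sketch only, not part of the upstream file: once
 * pci_mmcfg_arch_init() has installed &pci_mmcfg as raw_pci_ext_ops,
 * extended config space accesses reach pci_mmcfg_read()/pci_mmcfg_write()
 * through that ops table. The hypothetical helper below shows a direct
 * raw read of the vendor/device ID dword for segment 0, bus 0, device 0,
 * function 0.
 */
#if 0	/* example usage, for illustration only */
static void example_raw_ecam_read(void)
{
	u32 id;

	/* domain 0, bus 0, devfn 00.0, register 0 (vendor/device ID), 4 bytes */
	if (raw_pci_ext_ops &&
	    raw_pci_ext_ops->read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id) == 0)
		pr_info("example: 0000:00:00.0 vendor/device = %#010x\n", id);
}
#endif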