// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>

#include <asm/mips-cps.h>

void __iomem *mips_cpc_base;

static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);

static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);

phys_addr_t __weak mips_cpc_default_phys_base(void)
{
	struct device_node *cpc_node;
	struct resource res;
	int err;

	cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
	if (cpc_node) {
		err = of_address_to_resource(cpc_node, 0, &res);
		of_node_put(cpc_node);
		if (!err)
			return res.start;
	}

	return 0;
}

/**
 * mips_cpc_phys_base - retrieve the physical base address of the CPC
 *
 * This function returns the physical base address of the Cluster Power
 * Controller memory mapped registers, or 0 if no Cluster Power Controller
 * is present.
 */
static phys_addr_t mips_cpc_phys_base(void)
{
	unsigned long cpc_base;

	if (!mips_cm_present())
		return 0;

	if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
		return 0;

	/* If the CPC is already enabled, leave it so */
	cpc_base = read_gcr_cpc_base();
	if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
		return cpc_base & CM_GCR_CPC_BASE_CPCBASE;

	/* Otherwise, use the default address */
	cpc_base = mips_cpc_default_phys_base();
	if (!cpc_base)
		return cpc_base;

	/* Enable the CPC, mapped at the default address */
	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
	return cpc_base;
}

int mips_cpc_probe(void)
{
	phys_addr_t addr;
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpc_core_lock, cpu));

	addr = mips_cpc_phys_base();
	if (!addr)
		return -ENODEV;

	mips_cpc_base = ioremap(addr, 0x8000);
	if (!mips_cpc_base)
		return -ENXIO;

	return 0;
}

void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(FIELD_PREP(CPC_Cx_OTHER_CORENUM, core));

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}

void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	curr_core = cpu_core(&current_cpu_data);
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
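
/*
 * Illustrative usage sketch (an addition, not part of the upstream file):
 * once mips_cpc_probe() has succeeded, accesses to another core's CPC
 * registers are bracketed by mips_cpc_lock_other()/mips_cpc_unlock_other().
 * The hypothetical helper below mirrors how arch/mips/kernel/smp-cps.c
 * resets a remote core; write_cpc_co_cmd() and CPC_Cx_CMD_RESET are the
 * existing core-other accessors from <asm/mips-cpc.h>.
 */
static void __maybe_unused cpc_reset_core_example(unsigned int core)
{
	/* Point the core-other register region at the target core */
	mips_cpc_lock_other(core);

	/* Issue a reset command to that core via its CPC command register */
	write_cpc_co_cmd(CPC_Cx_CMD_RESET);

	mips_cpc_unlock_other();
}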