Loading...
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_SMP_H
#define __ASM_ARC_SMP_H

#ifdef CONFIG_SMP

#include <linux/types.h>
#include <linux/init.h>
#include <linux/threads.h>

#define raw_smp_processor_id() (current_thread_info()->cpu)

/* including cpumask.h leads to cyclic deps hence this Forward declaration */
struct cpumask;

/*
 * APIs provided by arch SMP code to generic code
 */
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
 * APIs provided by arch SMP code to rest of arch code
 */
extern void __init smp_init_cpus(void);
extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);

/*
 * API expected BY platform smp code (FROM arch smp code)
 *
 * smp_ipi_irq_setup:
 *	Takes @cpu and @hwirq to which the arch-common ISR is hooked up
 */
extern int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq);

/*
 * struct plat_smp_ops	- SMP callbacks provided by platform to ARC SMP
 *
 * @info:		SoC SMP specific info for /proc/cpuinfo etc
 * @init_early_smp:	A SMP specific h/w block can init itself
 *			Could be common across platforms so not covered by
 *			mach_desc->init_early()
 * @init_per_cpu:	Called for each core so SMP h/w block driver can do
 *			any needed setup per cpu (e.g. IPI request)
 * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
 * @ipi_send:		To send IPI to a @cpu
 * @ipi_clear:		To clear IPI received at @irq
 */
struct plat_smp_ops {
	const char	*info;
	void		(*init_early_smp)(void);
	void		(*init_per_cpu)(int cpu);
	void		(*cpu_kick)(int cpu, unsigned long pc);
	void		(*ipi_send)(int cpu);
	void		(*ipi_clear)(int irq);
};

/* TBD: stop exporting it for direct population by platform */
extern struct plat_smp_ops plat_smp_ops;

#else /* CONFIG_SMP */

static inline void smp_init_cpus(void) {}
static inline const char *arc_platform_smp_cpuinfo(void)
{
	return "";
}

#endif /* !CONFIG_SMP */

/*
 * ARC700 doesn't support atomic Read-Modify-Write ops.
 * Originally Interrupts had to be disabled around code to guarantee atomicity.
 * The LLOCK/SCOND insns allow writing interrupt-hassle-free based atomic ops
 * based on retry-if-irq-in-atomic (with hardware assist).
 * However despite these, we provide the IRQ disabling variant
 *
 * (1) These insns were introduced only in 4.10 release. So for older releases
 *	support needed.
 *
 * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
 *	guaranteed by the platform (not something which core handles).
 *	Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
 *	disabling for atomicity.
 *
 *	However exported spinlock API is not usable due to cyclic hdr deps
 *	(even after system.h disintegration upstream)
 *	asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
 *		-> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
 *
 *	So the workaround is to use the lowest level arch spinlock API.
 *	The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
 *	but same is not true for ARCH backend, hence the need for 2 variants
 */
#ifndef CONFIG_ARC_HAS_LLSC

#include <linux/irqflags.h>
#ifdef CONFIG_SMP

#include <asm/spinlock.h>

extern arch_spinlock_t smp_atomic_ops_lock;
extern arch_spinlock_t smp_bitops_lock;

/* IRQ-off + arch spinlock: safe against both local IRQs and other CPUs */
#define atomic_ops_lock(flags)	do {		\
	local_irq_save(flags);			\
	arch_spin_lock(&smp_atomic_ops_lock);	\
} while (0)

#define atomic_ops_unlock(flags) do {		\
	arch_spin_unlock(&smp_atomic_ops_lock);	\
	local_irq_restore(flags);		\
} while (0)

#define bitops_lock(flags)	do {		\
	local_irq_save(flags);			\
	arch_spin_lock(&smp_bitops_lock);	\
} while (0)

#define bitops_unlock(flags) do {		\
	arch_spin_unlock(&smp_bitops_lock);	\
	local_irq_restore(flags);		\
} while (0)

#else /* !CONFIG_SMP */

/* UP: disabling local IRQs alone is sufficient for atomicity */
#define atomic_ops_lock(flags)		local_irq_save(flags)
#define atomic_ops_unlock(flags)	local_irq_restore(flags)
#define bitops_lock(flags)		local_irq_save(flags)
#define bitops_unlock(flags)		local_irq_restore(flags)

#endif /* !CONFIG_SMP */

#endif	/* !CONFIG_ARC_HAS_LLSC */

#endif