// SPDX-License-Identifier: GPL-2.0
/*
 * H8S TPU Driver
 *
 * Copyright 2015 Yoshinori Sato <ysato@users.sourcefoge.jp>
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

/* Register offsets within each TPU channel */
#define TCR	0x0
#define TSR	0x5
#define TCNT	0x6

/* TSR: counter overflow flag */
#define TCFV	0x10

struct tpu_priv {
	struct clocksource cs;
	void __iomem *mapbase1;
	void __iomem *mapbase2;
	raw_spinlock_t lock;
	unsigned int cs_enabled;
};

/* Combine the two cascaded 16-bit TCNT registers into one 32-bit count. */
static inline unsigned long read_tcnt32(struct tpu_priv *p)
{
	unsigned long tcnt;

	tcnt = ioread16be(p->mapbase1 + TCNT) << 16;
	tcnt |= ioread16be(p->mapbase2 + TCNT);
	return tcnt;
}

/*
 * Read a stable counter value and return the TCFV overflow flag so the
 * caller can account for a pending wrap.
 */
static int tpu_get_counter(struct tpu_priv *p, unsigned long long *val)
{
	unsigned long v1, v2, v3;
	int o1, o2;

	o1 = ioread8(p->mapbase1 + TSR) & TCFV;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = read_tcnt32(p);
		v2 = read_tcnt32(p);
		v3 = read_tcnt32(p);
		o1 = ioread8(p->mapbase1 + TSR) & TCFV;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1)
			  || (v3 > v1 && v3 < v2)));

	*val = v2;
	return o1;
}

static inline struct tpu_priv *cs_to_priv(struct clocksource *cs)
{
	return container_of(cs, struct tpu_priv, cs);
}

static u64 tpu_clocksource_read(struct clocksource *cs)
{
	struct tpu_priv *p = cs_to_priv(cs);
	unsigned long flags;
	unsigned long long value;

	raw_spin_lock_irqsave(&p->lock, flags);
	if (tpu_get_counter(p, &value))
		value += 0x100000000;	/* fold in a pending overflow */
	raw_spin_unlock_irqrestore(&p->lock, flags);

	return value;
}

static int tpu_clocksource_enable(struct clocksource *cs)
{
	struct tpu_priv *p = cs_to_priv(cs);

	WARN_ON(p->cs_enabled);

	/* Zero both counters, then program TCR to start them counting. */
	iowrite16be(0, p->mapbase1 + TCNT);
	iowrite16be(0, p->mapbase2 + TCNT);
	iowrite8(0x0f, p->mapbase1 + TCR);
	iowrite8(0x03, p->mapbase2 + TCR);

	p->cs_enabled = true;
	return 0;
}

static void tpu_clocksource_disable(struct clocksource *cs)
{
	struct tpu_priv *p = cs_to_priv(cs);

	WARN_ON(!p->cs_enabled);

	iowrite8(0, p->mapbase1 + TCR);
	iowrite8(0, p->mapbase2 + TCR);
	p->cs_enabled = false;
}

static struct tpu_priv tpu_priv = {
	.cs = {
		.name = "H8S_TPU",
		.rating = 200,
		.read = tpu_clocksource_read,
		.enable = tpu_clocksource_enable,
		.disable = tpu_clocksource_disable,
		.mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8),
		.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	},
};

#define CH_L 0
#define CH_H 1

static int __init h8300_tpu_init(struct device_node *node)
{
	void __iomem *base[2];
	struct clk *clk;
	int ret = -ENXIO;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("failed to get clock for clocksource\n");
		return PTR_ERR(clk);
	}

	base[CH_L] = of_iomap(node, CH_L);
	if (!base[CH_L]) {
		pr_err("failed to map registers for clocksource\n");
		goto free_clk;
	}
	base[CH_H] = of_iomap(node, CH_H);
	if (!base[CH_H]) {
		pr_err("failed to map registers for clocksource\n");
		goto unmap_L;
	}

	tpu_priv.mapbase1 = base[CH_L];
	tpu_priv.mapbase2 = base[CH_H];

	/* Register the clocksource at the input clock rate divided by 64. */
	return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);

unmap_L:
	iounmap(base[CH_L]);	/* CH_H mapping failed: release the CH_L mapping */
free_clk:
	clk_put(clk);
	return ret;
}

TIMER_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);