// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP.
 * Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"

/*
 * A divider whose zero value gates the clock: a divider field of 0
 * stops the clock, while any non-zero value ungates it and selects a
 * one-based divide ratio. @cached_val preserves the divider value
 * across a disable/enable cycle.
 */
struct clk_divider_gate {
	struct clk_divider divider;
	u32 cached_val;
};

static inline struct clk_divider_gate *to_clk_divider_gate(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_divider_gate, divider);
}

static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
						     unsigned long parent_rate)
{
	struct clk_divider *div = to_clk_divider(hw);
	unsigned int val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(div->lock, flags);

	if (!clk_hw_is_enabled(hw)) {
		val = div_gate->cached_val;
	} else {
		val = readl(div->reg) >> div->shift;
		val &= clk_div_mask(div->width);
	}

	spin_unlock_irqrestore(div->lock, flags);

	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

static int clk_divider_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	return clk_divider_ops.determine_rate(hw, req);
}

static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	int value;
	u32 val;

	value = divider_get_val(rate, parent_rate, div->table,
				div->width, div->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(div->lock, flags);

	if (clk_hw_is_enabled(hw)) {
		val = readl(div->reg);
		val &= ~(clk_div_mask(div->width) << div->shift);
		val |= (u32)value << div->shift;
		writel(val, div->reg);
	} else {
		/* clock is gated; defer the write until it is enabled */
		div_gate->cached_val = value;
	}

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

static int clk_divider_enable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	if (!div_gate->cached_val) {
		pr_err("%s: no valid preset rate\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	spin_lock_irqsave(div->lock, flags);
	/* restore the cached div val, which also ungates the clock */
	val = readl(div->reg);
	val |= div_gate->cached_val << div->shift;
	writel(val, div->reg);

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

static void clk_divider_disable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(div->lock, flags);

	/* store the current div val, then gate by clearing the register */
	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	div_gate->cached_val = val;
	writel(0, div->reg);

	spin_unlock_irqrestore(div->lock, flags);
}

static int clk_divider_is_enabled(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);
	u32 val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);

	return val ? 1 : 0;
}

static const struct clk_ops clk_divider_gate_ro_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate_ro,
	.determine_rate = clk_divider_determine_rate,
};

static const struct clk_ops clk_divider_gate_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate,
	.determine_rate = clk_divider_determine_rate,
	.set_rate = clk_divider_gate_set_rate,
	.enable = clk_divider_enable,
	.disable = clk_divider_disable,
	.is_enabled = clk_divider_is_enabled,
};

/*
 * NOTE: To reuse as much code as possible from the common divider,
 * this divider is also designed to take an extra clk_divider_flags
 * argument. However, CLK_DIVIDER_ONE_BASED is always set to match the
 * hardware; beyond that, only the CLK_DIVIDER_READ_ONLY flag may be
 * specified by the user.
 */
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
				       unsigned long flags, void __iomem *reg,
				       u8 shift, u8 width, u8 clk_divider_flags,
				       const struct clk_div_table *table,
				       spinlock_t *lock)
{
	struct clk_init_data init;
	struct clk_divider_gate *div_gate;
	struct clk_hw *hw;
	u32 val;
	int ret;

	div_gate = kzalloc(sizeof(*div_gate), GFP_KERNEL);
	if (!div_gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_gate_ro_ops;
	else
		init.ops = &clk_divider_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	div_gate->divider.reg = reg;
	div_gate->divider.shift = shift;
	div_gate->divider.width = width;
	div_gate->divider.lock = lock;
	div_gate->divider.table = table;
	div_gate->divider.hw.init = &init;
	div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;

	/* cache gate status */
	val = readl(reg) >> shift;
	val &= clk_div_mask(width);
	div_gate->cached_val = val;

	hw = &div_gate->divider.hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div_gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
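
/*
 * Usage sketch (not part of the original file): how a clock provider
 * driver might register one of these divider-gates. The clock names,
 * register offset, and field layout below are hypothetical
 * placeholders; only imx_clk_hw_divider_gate() comes from this file.
 * <linux/spinlock.h> is included for DEFINE_SPINLOCK().
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_ccm_lock);

static void example_register_div_gate(void __iomem *base)
{
	struct clk_hw *hw;

	/*
	 * A 3-bit one-based divider at bits [2:0] of base + 0x10;
	 * a field value of 0 means the clock is gated off
	 * (CLK_DIVIDER_ONE_BASED is applied internally).
	 */
	hw = imx_clk_hw_divider_gate("example_div", "osc_24m", 0,
				     base + 0x10, 0, 3, 0, NULL,
				     &example_ccm_lock);
	if (IS_ERR(hw))
		pr_warn("failed to register example_div: %ld\n",
			PTR_ERR(hw));
}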