// SPDX-License-Identifier: GPL-2.0
/*
 * Floating proportions with flexible aging period
 *
 *  Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: given different types of events, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining a denominator
 * (let's call it 'd') and, for each event type, its numerator (let's call it
 * 'n_j'). When an event of type 'j' happens, we simply need to do:
 *
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
 *
 * (An illustrative usage sketch is appended at the end of this file.)
 */
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
        int err;

        p->period = 0;
        /* Use 1 to avoid dealing with periods with 0 events... */
        err = percpu_counter_init(&p->events, 1, gfp);
        if (err)
                return err;
        seqcount_init(&p->sequence);
        return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
        percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether
 * declaring further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
        s64 events;
        unsigned long flags;

        local_irq_save(flags);
        events = percpu_counter_sum(&p->events);
        /*
         * Don't do anything if there are no events.
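         * The global counter was seeded with 1 in fprop_global_init(), so a
         * sum of 1 here means no real events have been counted yet.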
         */
        if (events <= 1) {
                local_irq_restore(flags);
                return false;
        }
        write_seqcount_begin(&p->sequence);
        if (periods < 64)
                events -= events >> periods;
        /* Use addition to avoid losing events happening between sum and set */
        percpu_counter_add(&p->events, -events);
        p->period += periods;
        write_seqcount_end(&p->sequence);
        local_irq_restore(flags);

        return true;
}

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
        pl->events = 0;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
                                        struct fprop_local_single *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG)
                pl->events >>= period - pl->period;
        else
                pl->events = 0;
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
        fprop_reflect_period_single(p, pl);
        pl->events++;
        percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
                           struct fprop_local_single *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_single(p, pl);
                num = pl->events;
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
        int err;

        err = percpu_counter_init(&pl->events, 0, gfp);
        if (err)
                return err;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
        percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
                                        struct fprop_local_percpu *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /*
         * Aging zeroed our fraction?
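         *
         * Note: percpu_counter_read() below is only approximate (each CPU can
         * hold close to PROP_BATCH not-yet-folded events), so fall back to an
         * exact percpu_counter_sum() when the approximate value is small
         * enough for that error to matter. The aging is then applied by
         * adding a delta rather than setting the counter, so that events
         * added concurrently on other CPUs are not lost.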
         */
        if (period - pl->period < BITS_PER_LONG) {
                s64 val = percpu_counter_read(&pl->events);

                if (val < (nr_cpu_ids * PROP_BATCH))
                        val = percpu_counter_sum(&pl->events);

                percpu_counter_add_batch(&pl->events,
                        -val + (val >> (period - pl->period)), PROP_BATCH);
        } else
                percpu_counter_set(&pl->events, 0);
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
                        long nr)
{
        fprop_reflect_period_percpu(p, pl);
        percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
        percpu_counter_add(&p->events, nr);
}

/* Return fraction of events of type pl */
void fprop_fraction_percpu(struct fprop_global *p,
                           struct fprop_local_percpu *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_percpu(p, pl);
                num = percpu_counter_read_positive(&pl->events);
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE.
 */
void __fprop_add_percpu_max(struct fprop_global *p,
                            struct fprop_local_percpu *pl, int max_frac,
                            long nr)
{
        if (unlikely(max_frac < FPROP_FRAC_BASE)) {
                unsigned long numerator, denominator;
                s64 tmp;

                fprop_fraction_percpu(p, pl, &numerator, &denominator);
                /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
                tmp = (u64)denominator * max_frac -
                        ((u64)numerator << FPROP_FRAC_SHIFT);
                if (tmp < 0) {
                        /* Maximum fraction already exceeded? */
                        return;
                } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
                        /* Add just enough for the fraction to saturate */
                        nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
                                        FPROP_FRAC_BASE - max_frac);
                }
        }

        __fprop_add_percpu(p, pl, nr);
}
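
/*
 * Illustrative usage sketch (not part of the original file, compiled out via
 * "#if 0"): a hypothetical example_two_writers() showing the intended call
 * pattern for the SINGLE variant using only the functions defined above -
 * initialise the global state, count events per type, declare a new period,
 * and read the fractions back. The function name and event counts are made
 * up for illustration, and error handling is kept minimal.
 */
#if 0
static int example_two_writers(void)
{
        struct fprop_global g;
        struct fprop_local_single a, b;
        unsigned long num, den;
        int err, i;

        err = fprop_global_init(&g, GFP_KERNEL);
        if (err)
                return err;
        fprop_local_init_single(&a);
        fprop_local_init_single(&b);

        /* Count 30 events of type 'a' and 10 events of type 'b'. */
        for (i = 0; i < 30; i++)
                __fprop_inc_single(&g, &a);
        for (i = 0; i < 10; i++)
                __fprop_inc_single(&g, &b);

        /* Age the history; the events above now carry half their weight. */
        fprop_new_period(&g, 1);

        /*
         * The fractions should come out near 3/4 for 'a' and 1/4 for 'b'
         * (percpu counter reads are approximate).
         */
        fprop_fraction_single(&g, &a, &num, &den);
        pr_info("a: %lu/%lu\n", num, den);
        fprop_fraction_single(&g, &b, &num, &den);
        pr_info("b: %lu/%lu\n", num, den);

        fprop_local_destroy_single(&b);
        fprop_local_destroy_single(&a);
        fprop_global_destroy(&g);
        return 0;
}
#endif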