// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"

static LIST_HEAD(fscache_caches);
DECLARE_RWSEM(fscache_addremove_sem);
EXPORT_SYMBOL(fscache_addremove_sem);
DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
EXPORT_SYMBOL(fscache_clearance_waiters);

static atomic_t fscache_cache_debug_id;

/*
 * Allocate a cache cookie.
 */
static struct fscache_cache *fscache_alloc_cache(const char *name)
{
	struct fscache_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache) {
		if (name) {
			cache->name = kstrdup(name, GFP_KERNEL);
			if (!cache->name) {
				kfree(cache);
				return NULL;
			}
		}
		refcount_set(&cache->ref, 1);
		INIT_LIST_HEAD(&cache->cache_link);
		cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
	}
	return cache;
}

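/*
 * Try to get a reference on a cache, failing if its refcount has already
 * dropped to zero; the new count is noted through a tracepoint.
 */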
static bool fscache_get_cache_maybe(struct fscache_cache *cache,
				    enum fscache_cache_trace where)
{
	bool success;
	int ref;

	success = __refcount_inc_not_zero(&cache->ref, &ref);
	if (success)
		trace_fscache_cache(cache->debug_id, ref + 1, where);
	return success;
}

/*
 * Look up a cache cookie.
 */
struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
	struct fscache_cache *candidate, *cache, *unnamed = NULL;

	/* firstly check for the existence of the cache under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
		if (!cache->name && !name &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_r;
	}

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_r;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the cache does not exist - create a candidate */
	candidate = fscache_alloc_cache(name);
	if (!candidate)
		return ERR_PTR(-ENOMEM);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(cache, &fscache_caches, cache_link) {
		if (cache->name && name && strcmp(cache->name, name) == 0 &&
		    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
			goto got_cache_w;
		if (!cache->name) {
			unnamed = cache;
			if (!name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	if (unnamed && is_cache &&
	    fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
		goto use_unnamed_cache;

	if (!name) {
		list_for_each_entry(cache, &fscache_caches, cache_link) {
			if (cache->name &&
			    fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
				goto got_cache_w;
		}
	}

	list_add_tail(&candidate->cache_link, &fscache_caches);
	trace_fscache_cache(candidate->debug_id,
			    refcount_read(&candidate->ref),
			    fscache_cache_new_acquire);
	up_write(&fscache_addremove_sem);
	return candidate;

got_cache_r:
	up_read(&fscache_addremove_sem);
	return cache;
use_unnamed_cache:
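	/* A cache backend asking for a named cache adopts an existing
	 * nameless cache record (typically left behind by volumes that
	 * didn't specify a cache name) and gives it the requested name so
	 * that those volumes now use this cache.
	 */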
	cache = unnamed;
	cache->name = candidate->name;
	candidate->name = NULL;
got_cache_w:
	up_write(&fscache_addremove_sem);
	kfree(candidate->name);
	kfree(candidate);
	return cache;
}

/**
 * fscache_acquire_cache - Acquire a cache-level cookie.
 * @name: The name of the cache.
 *
 * Get a cookie to represent an actual cache.  If a name is given and there is
 * a nameless cache record available, this will acquire that and set its name,
 * directing all the volumes using it to this cache.
 *
 * The cache will be switched over to the preparing state if not currently in
 * use, otherwise -EBUSY will be returned.
 */
struct fscache_cache *fscache_acquire_cache(const char *name)
{
	struct fscache_cache *cache;

	ASSERT(name);
	cache = fscache_lookup_cache(name, true);
	if (IS_ERR(cache))
		return cache;

	if (!fscache_set_cache_state_maybe(cache,
					   FSCACHE_CACHE_IS_NOT_PRESENT,
					   FSCACHE_CACHE_IS_PREPARING)) {
		pr_warn("Cache tag %s in use\n", name);
		fscache_put_cache(cache, fscache_cache_put_cache);
		return ERR_PTR(-EBUSY);
	}

	return cache;
}
EXPORT_SYMBOL(fscache_acquire_cache);
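
/*
 * Illustrative sketch (not part of this file's logic): a cache backend
 * binding to a cache tag might do something like the following, where
 * "tag_name" is a placeholder for whatever name the backend was configured
 * with:
 *
 *	cache = fscache_acquire_cache(tag_name);
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *
 * On failure this yields -ENOMEM or -EBUSY; on success the cookie is in the
 * FSCACHE_CACHE_IS_PREPARING state and the backend can go on to call
 * fscache_add_cache() once it is ready to serve requests.
 */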

/**
 * fscache_put_cache - Release a cache-level cookie.
 * @cache: The cache cookie to be released
 * @where: An indication of where the release happened
 *
 * Release the caller's reference on a cache-level cookie.  The @where
 * indication should give information about the circumstances in which the call
 * occurs and will be logged through a tracepoint.
 */
void fscache_put_cache(struct fscache_cache *cache,
		       enum fscache_cache_trace where)
{
	unsigned int debug_id;
	bool zero;
	int ref;

	/* Check before dereferencing: callers may pass NULL or an ERR_PTR
	 * from a failed lookup.
	 */
	if (IS_ERR_OR_NULL(cache))
		return;

	debug_id = cache->debug_id;
	zero = __refcount_dec_and_test(&cache->ref, &ref);
	trace_fscache_cache(debug_id, ref - 1, where);

	if (zero) {
		down_write(&fscache_addremove_sem);
		list_del_init(&cache->cache_link);
		up_write(&fscache_addremove_sem);
		kfree(cache->name);
		kfree(cache);
	}
}

/**
 * fscache_relinquish_cache - Reset cache state and release cookie
 * @cache: The cache cookie to be released
 *
 * Reset the state of a cache and release the caller's reference on a cache
 * cookie.
 */
void fscache_relinquish_cache(struct fscache_cache *cache)
{
	enum fscache_cache_trace where =
		(cache->state == FSCACHE_CACHE_IS_PREPARING) ?
		fscache_cache_put_prep_failed :
		fscache_cache_put_relinquish;

	cache->ops = NULL;
	cache->cache_priv = NULL;
	smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
	fscache_put_cache(cache, where);
}
EXPORT_SYMBOL(fscache_relinquish_cache);

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The cache-level cookie representing the cache
 * @ops: Table of cache operations to use
 * @cache_priv: Private data for the cache record
 *
 * Add a cache to the system, making it available for network filesystems
 * (netfs) to use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      const struct fscache_cache_ops *ops,
		      void *cache_priv)
{
	int n_accesses;

	_enter("{%s,%s}", ops->name, cache->name);

	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

	/* Get a ref on the cache cookie and keep its n_accesses counter raised
	 * by 1 to prevent wakeups from transitioning it to 0 until we're
	 * withdrawing caching services from it.
	 */
	n_accesses = atomic_inc_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_pin);

	down_write(&fscache_addremove_sem);

	cache->ops = ops;
	cache->cache_priv = cache_priv;
	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);

	up_write(&fscache_addremove_sem);
	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
	_leave(" = 0 [%s]", cache->name);
	return 0;
}
EXPORT_SYMBOL(fscache_add_cache);
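
/*
 * Illustrative sketch (an assumption, not taken from a real backend): once
 * preparation is complete, a backend supplies its operation table and
 * private data to bring the cache online.  "my_cache_ops" and "my_priv" are
 * hypothetical names.
 *
 *	static const struct fscache_cache_ops my_cache_ops = {
 *		.name	= "mybackend",
 *		// ... backend methods ...
 *	};
 *
 *	ret = fscache_add_cache(cache, &my_cache_ops, my_priv);
 *
 * This switches the cache to FSCACHE_CACHE_IS_ACTIVE under
 * fscache_addremove_sem and keeps n_accesses raised so that it cannot drop
 * to zero until fscache_withdraw_cache() is called.
 */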

/**
 * fscache_begin_cache_access - Pin a cache so it can be accessed
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Attempt to pin the cache to prevent it from going away whilst we're
 * accessing it, returning true if successful.  This works as follows:
 *
 *  (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
 *      then we return false to indicate access was not permitted.
 *
 *  (2) If the cache tests as live, then we increment the n_accesses count and
 *      then recheck the liveness, ending the access if it ceased to be live.
 *
 *  (3) When we end the access, we decrement n_accesses and wake up any
 *      waiters if it reaches 0.
 *
 *  (4) Whilst the cache is caching, n_accesses is kept artificially
 *      incremented to prevent wakeups from happening.
 *
 *  (5) When the cache is taken offline, the state is changed to prevent new
 *      accesses, n_accesses is decremented and we wait for n_accesses to
 *      become 0.
 */
bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	if (!fscache_cache_is_live(cache))
		return false;

	n_accesses = atomic_inc_return(&cache->n_accesses);
	smp_mb__after_atomic(); /* Reread live flag after n_accesses */
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (!fscache_cache_is_live(cache)) {
		fscache_end_cache_access(cache, fscache_access_unlive);
		return false;
	}
	return true;
}
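
/*
 * Illustrative sketch of the pinning pattern described above ("why" is a
 * tracepoint reason; the surrounding operation is a placeholder):
 *
 *	if (!fscache_begin_cache_access(cache, why))
 *		goto no_cache;
 *	// ... perform the operation against the cache ...
 *	fscache_end_cache_access(cache, why);
 *
 * If the cache is withdrawn between the two calls, the in-flight access is
 * still counted and the withdrawal waits for it to end.
 */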

/**
 * fscache_end_cache_access - Unpin a cache at the end of an access.
 * @cache: The cache-level cookie
 * @why: An indication of the circumstances of the access for tracing
 *
 * Unpin a cache after we've accessed it.  The @why indicator is merely
 * provided for tracing purposes.
 */
void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
{
	int n_accesses;

	smp_mb__before_atomic();
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, why);
	if (n_accesses == 0)
		wake_up_var(&cache->n_accesses);
}

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything.  This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	if (fscache_set_cache_state_maybe(cache,
					  FSCACHE_CACHE_IS_ACTIVE,
					  FSCACHE_CACHE_GOT_IOERROR))
		pr_err("Cache '%s' stopped due to I/O error\n",
		       cache->name);
}
EXPORT_SYMBOL(fscache_io_error);

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The cache cookie
 *
 * Begin the process of withdrawing a cache from service.  This stops new
 * cache-level and volume-level accesses from taking place and waits for
 * currently ongoing cache-level accesses to end.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	int n_accesses;

	pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
		  cache->name, atomic_read(&cache->object_count));

	fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);

	/* Allow wakeups on dec-to-0 */
	n_accesses = atomic_dec_return(&cache->n_accesses);
	trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
				   n_accesses, fscache_access_cache_unpin);

	wait_var_event(&cache->n_accesses,
		       atomic_read(&cache->n_accesses) == 0);
}
EXPORT_SYMBOL(fscache_withdraw_cache);
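
/*
 * Illustrative teardown order (a sketch, not prescriptive): a backend
 * unbinding from a cache might do:
 *
 *	fscache_withdraw_cache(cache);
 *	// ... withdraw volumes and cookies, tear down backend state ...
 *	fscache_relinquish_cache(cache);
 *
 * fscache_withdraw_cache() stops new cache-level accesses and waits for
 * in-flight ones to finish; fscache_relinquish_cache() then resets the
 * state and drops the caller's reference.
 */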

#ifdef CONFIG_PROC_FS
static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";

/*
 * Generate a list of caches in /proc/fs/fscache/caches
 */
static int fscache_caches_seq_show(struct seq_file *m, void *v)
{
	struct fscache_cache *cache;

	if (v == &fscache_caches) {
		seq_puts(m,
			 "CACHE    REF   VOLS  OBJS  ACCES S NAME\n"
			 "======== ===== ===== ===== ===== = ===============\n"
			 );
		return 0;
	}

	cache = list_entry(v, struct fscache_cache, cache_link);
	seq_printf(m,
		   "%08x %5d %5d %5d %5d %c %s\n",
		   cache->debug_id,
		   refcount_read(&cache->ref),
		   atomic_read(&cache->n_volumes),
		   atomic_read(&cache->object_count),
		   atomic_read(&cache->n_accesses),
		   fscache_cache_states[cache->state],
		   cache->name ?: "-");
	return 0;
}
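
/*
 * With the format above, the /proc/fs/fscache/caches listing looks roughly
 * like the following (values are illustrative only; the state column shows
 * one character from fscache_cache_states, e.g. 'A' for active):
 *
 *	CACHE    REF   VOLS  OBJS  ACCES S NAME
 *	======== ===== ===== ===== ===== = ===============
 *	00000001     3     1    10     1 A mycache
 */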

static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(fscache_addremove_sem)
{
	down_read(&fscache_addremove_sem);
	return seq_list_start_head(&fscache_caches, *_pos);
}

static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &fscache_caches, _pos);
}

static void fscache_caches_seq_stop(struct seq_file *m, void *v)
	__releases(fscache_addremove_sem)
{
	up_read(&fscache_addremove_sem);
}

const struct seq_operations fscache_caches_seq_ops = {
	.start  = fscache_caches_seq_start,
	.next   = fscache_caches_seq_next,
	.stop   = fscache_caches_seq_stop,
	.show   = fscache_caches_seq_show,
};
#endif /* CONFIG_PROC_FS */