// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for MD5 and SHA1.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
#include <asm/unaligned.h>
#include <linux/scatterlist.h>

/* Maximum number of SS_CTL polls; this is a totally arbitrary value */
#define SS_TIMEOUT 100

int sun4i_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun4i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		return err;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun4i_req_ctx));
	return 0;
}

void sun4i_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	pm_runtime_put(op->ss->dev);
}

/* sun4i_hash_init: initialize request context */
int sun4i_hash_init(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_req_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
	op->mode = algt->mode;

	return 0;
}

int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct md5_state *octx = out;
	int i;

	octx->byte_count = op->byte_count + op->len;

	memcpy(octx->block, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 4; i++)
			octx->hash[i] = op->hash[i];
	} else {
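		/*
		 * nothing has been hashed yet: export the standard MD5
		 * initial values, which are numerically identical to
		 * SHA1_H0..SHA1_H3
		 */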
		octx->hash[0] = SHA1_H0;
		octx->hash[1] = SHA1_H1;
		octx->hash[2] = SHA1_H2;
		octx->hash[3] = SHA1_H3;
	}

	return 0;
}

int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct md5_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

	/*
	 * split the imported byte count into the full 64-byte blocks already
	 * fed to the engine and the leftover bytes kept in the wait buffer
	 */
	op->byte_count = ictx->byte_count & ~0x3F;
	op->len = ictx->byte_count & 0x3F;

	memcpy(op->buf, ictx->block, op->len);

	for (i = 0; i < 4; i++)
		op->hash[i] = ictx->hash[i];

	return 0;
}

int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct sha1_state *octx = out;
	int i;

	octx->count = op->byte_count + op->len;

	memcpy(octx->buffer, op->buf, op->len);

	if (op->byte_count) {
		for (i = 0; i < 5; i++)
			octx->state[i] = op->hash[i];
	} else {
		octx->state[0] = SHA1_H0;
		octx->state[1] = SHA1_H1;
		octx->state[2] = SHA1_H2;
		octx->state[3] = SHA1_H3;
		octx->state[4] = SHA1_H4;
	}

	return 0;
}

int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	const struct sha1_state *ictx = in;
	int i;

	sun4i_hash_init(areq);

	op->byte_count = ictx->count & ~0x3F;
	op->len = ictx->count & 0x3F;

	memcpy(op->buf, ictx->buffer, op->len);

	for (i = 0; i < 5; i++)
		op->hash[i] = ictx->state[i];

	return 0;
}

/*
 * op->flags values: SS_HASH_UPDATE means the request carries data to feed
 * to the engine, SS_HASH_FINAL means the request must be padded and the
 * digest produced.
 */
#define SS_HASH_UPDATE 1
#define SS_HASH_FINAL 2

/*
 * sun4i_hash: feed data to the hash engine and handle finalization
 *
 * Used for both SHA1 and MD5.
 * Data is written to the SS as 32-bit words.
 *
 * Since we cannot leave partial data and hash state in the engine,
 * we need to get the hash state at the end of this function.
 * The hash state can only be retrieved on a 64-byte boundary.
 *
 * So the first task is to compute how many bytes we can write to the SS
 * as a multiple of 64. The extra bytes go to a temporary buffer, op->buf,
 * which holds op->len bytes.
 *
 * At the beginning of update():
 * if op->len + areq->nbytes < 64, all the data goes to the wait buffer
 * (op->buf) and end = 0;
 * otherwise, write all the data from op->buf to the device and set end so
 * that the total number of bytes written is a multiple of 64.
 *
 * Example:
 * update1 with 60 bytes => op->len = 60
 * update2 with 60 bytes => one more word is needed to reach 64 bytes, so
 * end = 4: write all the data from op->buf plus one word from the SGs,
 * then store the remaining data in op->buf.
 * Final state: op->len = 56
 */
static int sun4i_hash(struct ahash_request *areq)
{
	/*
	 * i is the total number of bytes read from the SGs, to be compared
	 * to areq->nbytes. i matters because we cannot rely on the SG
	 * lengths: the sum of SG->length may be greater than areq->nbytes.
	 *
	 * end is the position at which we must stop writing to the device,
	 * to be compared to i.
	 *
	 * in_i: offset within the current SG
	 *
	 * rx_cnt: number of 32-bit words currently free in the SS RX FIFO
	 */
	unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
	unsigned int in_i = 0;
	u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun4i_ss_ctx *ss = tfmctx->ss;
	struct sun4i_ss_alg_template *algt;
	struct scatterlist *in_sg = areq->src;
	struct sg_mapping_iter mi;
	int in_r, err = 0;
	size_t copied = 0;
	u32 wb = 0;

	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
		__func__, crypto_tfm_alg_name(areq->base.tfm),
		op->byte_count, areq->nbytes, op->mode,
		op->len, op->hash[0]);

	/* nothing to do for an empty update that does not finalize the hash */
	if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL))
		return 0;

	/* protect against overflow */
	if (unlikely(areq->nbytes > UINT_MAX - op->len)) {
		dev_err(ss->dev, "Cannot process too large request\n");
		return -EINVAL;
	}

	if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) {
		/* linearize data to op->buf */
		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					    op->buf + op->len, areq->nbytes, 0);
		op->len += copied;
		return 0;
	}

	spin_lock_bh(&ss->slock);

	/*
	 * if some data have been processed before,
	 * we need to restore the partial hash state
	 */
	if (op->byte_count) {
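		/*
		 * load the saved digest into the IV registers; SS_IV_ARBITRARY
		 * makes the engine resume from this state instead of the
		 * standard initial values
		 */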
		ivmode = SS_IV_ARBITRARY;
		for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
	}
	/* Enable the device */
	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);

	if (!(op->flags & SS_HASH_UPDATE))
		goto hash_final;

	/* start of handling data */
	if (!(op->flags & SS_HASH_FINAL)) {
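		/*
		 * end is the number of bytes from this request to send to the
		 * device so that, together with the op->len bytes already
		 * buffered, only complete 64-byte blocks are written
		 */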
		end = ((areq->nbytes + op->len) / 64) * 64 - op->len;

		if (end > areq->nbytes || areq->nbytes - end > 63) {
			dev_err(ss->dev, "ERROR: Bound error %u %u\n",
				end, areq->nbytes);
			err = -EINVAL;
			goto release_ss;
		}
	} else {
		/*
		 * Since the final flag is set, we only need 4-byte alignment;
		 * the last few bytes are handled by the finalization code.
		 */
		if (areq->nbytes < 4)
			end = 0;
		else
			end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
	}

	/* TODO: use DMA if all SG lengths are multiples of 4 and op->len is 0 */
	i = 1;
	while (in_sg && i == 1) {
		if (in_sg->length % 4)
			i = 0;
		in_sg = sg_next(in_sg);
	}
	if (i == 1 && !op->len && areq->nbytes)
		dev_dbg(ss->dev, "We can DMA\n");

	i = 0;
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	in_i = 0;

	do {
		/*
		 * we need to linearize in two cases:
		 * - the buffer is already in use
		 * - the current SG has fewer than 4 bytes remaining
		 */
		if (op->len || (mi.length - in_i) < 4) {
			/*
			 * if we enter here, there are two reasons to stop:
			 * - the buffer is full
			 * - we have reached the end
			 */
			while (op->len < 64 && i < end) {
				/* how many bytes we can read from current SG */
				in_r = min(end - i, 64 - op->len);
				in_r = min_t(size_t, mi.length - in_i, in_r);
				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
				op->len += in_r;
				i += in_r;
				in_i += in_r;
				if (in_i == mi.length) {
					sg_miter_next(&mi);
					in_i = 0;
				}
			}
			if (op->len > 3 && !(op->len % 4)) {
				/* write buf to the device */
				writesl(ss->base + SS_RXFIFO, op->buf,
					op->len / 4);
				op->byte_count += op->len;
				op->len = 0;
			}
		}
		if (mi.length - in_i > 3 && i < end) {
			/* how many bytes we can read from current SG */
			in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i);
			in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r);
			/* how many 32-bit words we can write to the device */
			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
			op->byte_count += todo * 4;
			i += todo * 4;
			in_i += todo * 4;
			rx_cnt -= todo;
			if (!rx_cnt) {
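				/* out of FIFO budget: re-read the free space */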
				spaces = readl(ss->base + SS_FCSR);
				rx_cnt = SS_RXFIFO_SPACES(spaces);
			}
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	} while (i < end);

	/*
	 * Now we have written to the device all that we can,
	 * store the remaining bytes in op->buf
	 */
	if ((areq->nbytes - i) < 64) {
		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
			/* how many bytes we can read from current SG */
			in_r = min(areq->nbytes - i, 64 - op->len);
			in_r = min_t(size_t, mi.length - in_i, in_r);
			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
			op->len += in_r;
			i += in_r;
			in_i += in_r;
			if (in_i == mi.length) {
				sg_miter_next(&mi);
				in_i = 0;
			}
		}
	}

	sg_miter_stop(&mi);

	/*
	 * End of data processing.
	 * If the final flag is set, go to the finalization part;
	 * otherwise, retrieve and store the partial hash.
	 */
	if (op->flags & SS_HASH_FINAL)
		goto hash_final;

	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
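	/* wait for the engine to clear SS_DATA_END (data processed) */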
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);

	goto release_ss;

/*
 * hash_final: finalize the hashing operation
 *
 * If some bytes remain, write them, then ask the SS to finalize the
 * hashing operation.
 *
 * The RX FIFO size is not checked here since it is 32 words after each
 * enable and this function never writes more than 32 words. If we come
 * from the update part, at most 3 bytes remain to be written and the SS
 * is fast enough for this not to matter.
 */

hash_final:
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
		algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
		algt->stat_req++;
	}

	/* write the remaining words of the wait buffer */
	if (op->len) {
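		/*
		 * send the whole 32-bit words of the wait buffer to the FIFO;
		 * the last (up to 3) bytes are merged into the padding word
		 * below
		 */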
		nwait = op->len / 4;
		if (nwait) {
			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
			op->byte_count += 4 * nwait;
		}

		nbw = op->len - 4 * nwait;
		if (nbw) {
			wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4));
			wb &= GENMASK((nbw * 8) - 1, 0);

			op->byte_count += nbw;
		}
	}

	/* append the 0x80 padding byte after the last data byte */
	wb |= ((1 << 7) << (nbw * 8));
	((__le32 *)bf)[j++] = cpu_to_le32(wb);

	/*
	 * Compute how many zero words of padding are needed so that the
	 * message, its padding and the 8-byte bit length end on a 64-byte
	 * boundary. The computation follows other MD5/SHA1 implementations.
	 */

	/* free space remaining in the last 64-byte block */
	fill = 64 - (op->byte_count % 64);
	min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));

	/* if the length and padding do not fit, pad into the next 64-byte block */
	if (fill < min_fill)
		fill += 64;

	j += (fill - min_fill) / sizeof(u32);

	/*
	 * write the total message length in bits: big-endian for SHA1,
	 * little-endian for MD5
	 */
	if (op->mode == SS_OP_SHA1) {
		__be64 *bits = (__be64 *)&bf[j];
		*bits = cpu_to_be64(op->byte_count << 3);
		j += 2;
	} else {
		__le64 *bits = (__le64 *)&bf[j];
		*bits = cpu_to_le64(op->byte_count << 3);
		j += 2;
	}
	writesl(ss->base + SS_RXFIFO, bf, j);

	/* Tell the SS this is the end of the data so it can finalize the hash */
	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);

	/*
	 * Wait for SS to finish the hash.
	 * The timeout could happen only in case of bad overclocking
	 * or driver bug.
	 */
	i = 0;
	do {
		v = readl(ss->base + SS_CTL);
		i++;
	} while (i < SS_TIMEOUT && (v & SS_DATA_END));
	if (unlikely(i >= SS_TIMEOUT)) {
		dev_err_ratelimited(ss->dev,
				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
				    i, SS_TIMEOUT, v, areq->nbytes);
		err = -EIO;
		goto release_ss;
	}

	/*
	 * The datasheet isn't very clear about when to retrieve the digest. The
	 * bit SS_DATA_END is cleared when the engine has processed the data and
	 * when the digest is computed *but* it doesn't mean the digest is
	 * available in the digest registers. Hence the delay to be sure we can
	 * read it.
	 */
	ndelay(1);

	/* Get the hash from the device */
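	/*
	 * SHA1 digest words are stored big-endian unless the variant already
	 * provides them byte-swapped (sha1_in_be); MD5 digest words are
	 * little-endian
	 */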
	if (op->mode == SS_OP_SHA1) {
		for (i = 0; i < 5; i++) {
			v = readl(ss->base + SS_MD0 + i * 4);
			if (ss->variant->sha1_in_be)
				put_unaligned_le32(v, areq->result + i * 4);
			else
				put_unaligned_be32(v, areq->result + i * 4);
		}
	} else {
		for (i = 0; i < 4; i++) {
			v = readl(ss->base + SS_MD0 + i * 4);
			put_unaligned_le32(v, areq->result + i * 4);
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_bh(&ss->slock);
	return err;
}

int sun4i_hash_final(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_FINAL;
	return sun4i_hash(areq);
}

int sun4i_hash_update(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE;
	return sun4i_hash(areq);
}

/* sun4i_hash_finup: finalize hashing operation after an update */
int sun4i_hash_finup(struct ahash_request *areq)
{
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}

/* combo of init/update/final functions */
int sun4i_hash_digest(struct ahash_request *areq)
{
	int err;
	struct sun4i_req_ctx *op = ahash_request_ctx(areq);

	err = sun4i_hash_init(areq);
	if (err)
		return err;

	op->flags = SS_HASH_UPDATE | SS_HASH_FINAL;
	return sun4i_hash(areq);
}