// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>

#include <crypto/sha2.h>

#include "ocs-hcu.h"

/* Registers. */
#define OCS_HCU_MODE			0x00
#define OCS_HCU_CHAIN			0x04
#define OCS_HCU_OPERATION		0x08
#define OCS_HCU_KEY_0			0x0C
#define OCS_HCU_ISR			0x50
#define OCS_HCU_IER			0x54
#define OCS_HCU_STATUS			0x58
#define OCS_HCU_MSG_LEN_LO		0x60
#define OCS_HCU_MSG_LEN_HI		0x64
#define OCS_HCU_KEY_BYTE_ORDER_CFG	0x80
#define OCS_HCU_DMA_SRC_ADDR		0x400
#define OCS_HCU_DMA_SRC_SIZE		0x408
#define OCS_HCU_DMA_DST_SIZE		0x40C
#define OCS_HCU_DMA_DMA_MODE		0x410
#define OCS_HCU_DMA_NEXT_SRC_DESCR	0x418
#define OCS_HCU_DMA_MSI_ISR		0x480
#define OCS_HCU_DMA_MSI_IER		0x484
#define OCS_HCU_DMA_MSI_MASK		0x488

/* Register bit definitions. */
#define HCU_MODE_ALGO_SHIFT		16
#define HCU_MODE_HMAC_SHIFT		22

#define HCU_STATUS_BUSY			BIT(0)

#define HCU_BYTE_ORDER_SWAP		BIT(0)

#define HCU_IRQ_HASH_DONE		BIT(2)
#define HCU_IRQ_HASH_ERR_MASK		(BIT(3) | BIT(1) | BIT(0))

#define HCU_DMA_IRQ_SRC_DONE		BIT(0)
#define HCU_DMA_IRQ_SAI_ERR		BIT(2)
#define HCU_DMA_IRQ_BAD_COMP_ERR	BIT(3)
#define HCU_DMA_IRQ_INBUF_RD_ERR	BIT(4)
#define HCU_DMA_IRQ_INBUF_WD_ERR	BIT(5)
#define HCU_DMA_IRQ_OUTBUF_WR_ERR	BIT(6)
#define HCU_DMA_IRQ_OUTBUF_RD_ERR	BIT(7)
#define HCU_DMA_IRQ_CRD_ERR		BIT(8)
#define HCU_DMA_IRQ_ERR_MASK		(HCU_DMA_IRQ_SAI_ERR | \
					 HCU_DMA_IRQ_BAD_COMP_ERR | \
					 HCU_DMA_IRQ_INBUF_RD_ERR | \
					 HCU_DMA_IRQ_INBUF_WD_ERR | \
					 HCU_DMA_IRQ_OUTBUF_WR_ERR | \
					 HCU_DMA_IRQ_OUTBUF_RD_ERR | \
					 HCU_DMA_IRQ_CRD_ERR)

#define HCU_DMA_SNOOP_MASK		(0x7 << 28)
#define HCU_DMA_SRC_LL_EN		BIT(25)
#define HCU_DMA_EN			BIT(31)

#define OCS_HCU_ENDIANNESS_VALUE	0x2A

#define HCU_DMA_MSI_UNMASK		BIT(0)
#define HCU_DMA_MSI_DISABLE		0
#define HCU_IRQ_DISABLE			0

#define OCS_HCU_START			BIT(0)
#define OCS_HCU_TERMINATE		BIT(1)

#define OCS_LL_DMA_FLAG_TERMINATE	BIT(31)

#define OCS_HCU_HW_KEY_LEN_U32		(OCS_HCU_HW_KEY_LEN / sizeof(u32))

#define HCU_DATA_WRITE_ENDIANNESS_OFFSET	26

#define OCS_HCU_NUM_CHAINS_SHA256_224_SM3	(SHA256_DIGEST_SIZE / sizeof(u32))
#define OCS_HCU_NUM_CHAINS_SHA384_512		(SHA512_DIGEST_SIZE / sizeof(u32))

/*
 * While polling on a busy HCU, wait at most 200us between consecutive
 * checks.
 */
#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US	200
/* Wait on a busy HCU for at most 1 second. */
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US		1000000

/**
 * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
 * @src_addr:  Source address of the data.
 * @src_len:   Length of data to be fetched.
 * @nxt_desc:  Next descriptor to fetch.
 * @ll_flags:  Flags (Freeze & terminate) for the DMA engine.
 */
struct ocs_hcu_dma_entry {
	u32 src_addr;
	u32 src_len;
	u32 nxt_desc;
	u32 ll_flags;
};

/**
 * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
 * @head:	The head of the list (points to the array backing the list).
 * @tail:	The current tail of the list; NULL if the list is empty.
 * @dma_addr:	The DMA address of @head (i.e., the DMA address of the backing
 *		array).
 * @max_nents:	Maximum number of entries in the list (i.e., number of elements
 *		in the backing array).
 *
 * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
 * backing the list is allocated with dma_alloc_coherent() and pointed to by
 * @head.
 */
struct ocs_hcu_dma_list {
	struct ocs_hcu_dma_entry	*head;
	struct ocs_hcu_dma_entry	*tail;
	dma_addr_t			dma_addr;
	size_t				max_nents;
};

static inline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
{
	switch (algo) {
	case OCS_HCU_ALGO_SHA224:
	case OCS_HCU_ALGO_SHA256:
	case OCS_HCU_ALGO_SM3:
		return OCS_HCU_NUM_CHAINS_SHA256_224_SM3;
	case OCS_HCU_ALGO_SHA384:
	case OCS_HCU_ALGO_SHA512:
		return OCS_HCU_NUM_CHAINS_SHA384_512;
	default:
		return 0;
	}
}

static inline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
{
	switch (algo) {
	case OCS_HCU_ALGO_SHA224:
		return SHA224_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA256:
	case OCS_HCU_ALGO_SM3:
		/* SM3 has the same digest size as SHA-256. */
		return SHA256_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA384:
		return SHA384_DIGEST_SIZE;
	case OCS_HCU_ALGO_SHA512:
		return SHA512_DIGEST_SIZE;
	default:
		return 0;
	}
}

/**
 * ocs_hcu_wait_busy() - Wait for the OCS HCU hardware to become usable.
 * @hcu_dev:	OCS HCU device to wait for.
 *
 * Return: 0 if the device is free, -ETIMEDOUT if the device is busy and the
 *	   internal timeout has expired.
 */
static int ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{
	long val;

	return readl_poll_timeout(hcu_dev->io_base + OCS_HCU_STATUS, val,
				  !(val & HCU_STATUS_BUSY),
				  OCS_HCU_WAIT_BUSY_RETRY_DELAY_US,
				  OCS_HCU_WAIT_BUSY_TIMEOUT_US);
}

static void ocs_hcu_done_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_ISR);
	hcu_dev->irq_err = false;
	/* Enable error and HCU done interrupts. */
	writel(HCU_IRQ_HASH_DONE | HCU_IRQ_HASH_ERR_MASK,
	       hcu_dev->io_base + OCS_HCU_IER);
}

static void ocs_hcu_dma_irq_en(struct ocs_hcu_dev *hcu_dev)
{
	/* Clear any pending interrupts. */
	writel(0xFFFFFFFF, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	hcu_dev->irq_err = false;
	/* Enable only the DMA source-done and error interrupts. */
	writel(HCU_DMA_IRQ_ERR_MASK | HCU_DMA_IRQ_SRC_DONE,
	       hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
	/* Unmask */
	writel(HCU_DMA_MSI_UNMASK, hcu_dev->io_base + OCS_HCU_DMA_MSI_MASK);
}

static void ocs_hcu_irq_dis(struct ocs_hcu_dev *hcu_dev)
{
	writel(HCU_IRQ_DISABLE, hcu_dev->io_base + OCS_HCU_IER);
	writel(HCU_DMA_MSI_DISABLE, hcu_dev->io_base + OCS_HCU_DMA_MSI_IER);
}

static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
{
	int rc;

	rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
	if (rc)
		goto exit;

	if (hcu_dev->irq_err) {
		/* Unset flag and return error. */
		hcu_dev->irq_err = false;
		rc = -EIO;
		goto exit;
	}

exit:
	ocs_hcu_irq_dis(hcu_dev);

	return rc;
}

/**
 * ocs_hcu_get_intermediate_data() - Get intermediate data.
 * @hcu_dev:	The target HCU device.
 * @data:	Where to store the intermediate data.
 * @algo:	The algorithm being used.
 *
 * This function is used to save the current hashing process state in order to
 * continue it in the future.
 *
 * Note: once all data has been processed, the intermediate data actually
 * contains the hashing result. So this function is also used to retrieve the
 * final result of a hashing process.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					 struct ocs_hcu_idata *data,
					 enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain;
	int rc;
	int i;

	/* Data not requested. */
	if (!data)
		return -EINVAL;

	chain = (u32 *)data->digest;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
	 * to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	data->msg_len_lo = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	data->msg_len_hi = readl(hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);

	return 0;
}

/**
 * ocs_hcu_set_intermediate_data() - Set intermediate data.
 * @hcu_dev:	The target HCU device.
 * @data:	The intermediate data to be set.
 * @algo:	The algorithm being used.
 *
 * This function is used to continue a previous hashing process.
 */
static void ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					  const struct ocs_hcu_idata *data,
					  enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain = (u32 *)data->digest;
	int i;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
	 * to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);

	writel(data->msg_len_lo, hcu_dev->io_base + OCS_HCU_MSG_LEN_LO);
	writel(data->msg_len_hi, hcu_dev->io_base + OCS_HCU_MSG_LEN_HI);
}

static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
			      enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
{
	u32 *chain;
	int rc;
	int i;

	if (!dgst)
		return -EINVAL;

	/* Length of the output buffer must match the algo digest size. */
	if (dgst_len != ocs_hcu_digest_size(algo))
		return -EINVAL;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	chain = (u32 *)dgst;
	for (i = 0; i < dgst_len / sizeof(u32); i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	return 0;
}

/**
 * ocs_hcu_hw_cfg() - Configure the HCU hardware.
 * @hcu_dev:	The HCU device to configure.
 * @algo:	The algorithm to be used by the HCU device.
 * @use_hmac:	Whether or not HW HMAC should be used.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
			  bool use_hmac)
{
	u32 cfg;
	int rc;

	if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
	    algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
	    algo != OCS_HCU_ALGO_SM3)
		return -EINVAL;

	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/* Ensure interrupts are disabled. */
	ocs_hcu_irq_dis(hcu_dev);

	/* Configure endianness, hashing algorithm and HW HMAC (if needed) */
	cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
	cfg |= algo << HCU_MODE_ALGO_SHIFT;
	if (use_hmac)
		cfg |= BIT(HCU_MODE_HMAC_SHIFT);

	writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);

	return 0;
}

/**
 * ocs_hcu_clear_key() - Clear key stored in OCS HMAC KEY registers.
 * @hcu_dev:	The OCS HCU device whose key registers should be cleared.
 */
static void ocs_hcu_clear_key(struct ocs_hcu_dev *hcu_dev)
{
	int reg_off;

	/* Clear OCS_HCU_KEY_[0..15] */
	for (reg_off = 0; reg_off < OCS_HCU_HW_KEY_LEN; reg_off += sizeof(u32))
		writel(0, hcu_dev->io_base + OCS_HCU_KEY_0 + reg_off);
}

/**
 * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
 * @hcu_dev:	The OCS HCU device the key should be written to.
 * @key:	The key to be written.
 * @len:	The size of the key to write. It must not exceed
 *		OCS_HCU_HW_KEY_LEN; shorter keys are zero-padded.
 *
 * Return:	0 on success, negative error code otherwise.
 */
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key, size_t len)
{
	u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
	int i;

	if (len > OCS_HCU_HW_KEY_LEN)
		return -EINVAL;

	/* Copy key into temporary u32 array. */
	memcpy(key_u32, key, len);

	/*
	 * Hardware requires all the bytes of the HW Key vector to be
	 * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
	 */
	memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);

	/*
	 * OCS hardware expects the MSB of the key to be written at the highest
	 * address of the HCU Key vector; in other words, the key must be
	 * written in reverse order.
	 *
	 * Therefore, we first enable byte swapping for the HCU key vector;
	 * so that the bytes of each 32-bit word written to OCS_HCU_KEY_[0..15]
	 * will be swapped:
	 * 3 <---> 0, 2 <---> 1.
	 */
	writel(HCU_BYTE_ORDER_SWAP,
	       hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
	/*
	 * And then we write the 32-bit words composing the key starting from
	 * the end of the key.
	 */
	for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
		writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
		       hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));

	memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);

	return 0;
}

/**
 * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
 * @hcu_dev:	The OCS HCU device to use.
 * @dma_list:	The OCS DMA list mapping the data to hash.
 * @finalize:	Whether or not this is the last hashing operation and therefore
 *		the final hash should be computed even if the data is not
 *		block-aligned.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
				const struct ocs_hcu_dma_list *dma_list,
				bool finalize)
{
	u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
	int rc;

	if (!dma_list)
		return -EINVAL;

	/*
	 * For final requests we use HCU_DONE IRQ to be notified when all input
	 * data has been processed by the HCU; however, we cannot do so for
	 * non-final requests, because we don't get a HCU_DONE IRQ when we
	 * don't terminate the operation.
	 *
	 * Therefore, for non-final requests, we use the DMA IRQ, which
	 * triggers when the DMA has finished feeding all the input data to the
	 * HCU, but the HCU may still be processing it. This is fine, since we
	 * will wait for the HCU processing to be completed when we try to read
	 * intermediate results, in ocs_hcu_get_intermediate_data().
	 */
	if (finalize)
		ocs_hcu_done_irq_en(hcu_dev);
	else
		ocs_hcu_dma_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);
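	/*
	 * Program the DMA engine for linked-list mode: point it at the first
	 * descriptor and zero the direct source/destination sizes; the
	 * per-descriptor 'src_len' fields drive the transfer instead.
	 */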
	writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);

	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);

	writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	if (finalize)
		writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	return 0;
}

struct ocs_hcu_dma_list *ocs_hcu_dma_list_alloc(struct ocs_hcu_dev *hcu_dev,
						int max_nents)
{
	struct ocs_hcu_dma_list *dma_list;

	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	/* Allocate the DMA-coherent array backing the list. */
	dma_list->head = dma_alloc_coherent(hcu_dev->dev,
					    sizeof(*dma_list->head) * max_nents,
					    &dma_list->dma_addr, GFP_KERNEL);
	if (!dma_list->head) {
		kfree(dma_list);
		return NULL;
	}
	dma_list->max_nents = max_nents;
	dma_list->tail = NULL;

	return dma_list;
}
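
/*
 * A typical lifecycle for an OCS DMA list, as used by the upper layers of
 * this driver (an illustrative sketch only; 'dl', 'nents', 'addr' and 'len'
 * are hypothetical caller variables):
 *
 *	struct ocs_hcu_dma_list *dl;
 *
 *	dl = ocs_hcu_dma_list_alloc(hcu_dev, nents);
 *	if (!dl)
 *		return -ENOMEM;
 *	rc = ocs_hcu_dma_list_add_tail(hcu_dev, dl, addr, len);
 *	...
 *	ocs_hcu_dma_list_free(hcu_dev, dl);
 */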

void ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev,
			   struct ocs_hcu_dma_list *dma_list)
{
	if (!dma_list)
		return;

	dma_free_coherent(hcu_dev->dev,
			  sizeof(*dma_list->head) * dma_list->max_nents,
			  dma_list->head, dma_list->dma_addr);

	kfree(dma_list);
}

/* Add a new DMA entry at the end of the OCS DMA list. */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
			      struct ocs_hcu_dma_list *dma_list,
			      dma_addr_t addr, u32 len)
{
	struct device *dev = hcu_dev->dev;
	struct ocs_hcu_dma_entry *old_tail;
	struct ocs_hcu_dma_entry *new_tail;

	if (!len)
		return 0;

	if (!dma_list)
		return -EINVAL;

	if (addr & ~OCS_HCU_DMA_BIT_MASK) {
		dev_err(dev,
			"Unexpected error: Invalid DMA address for OCS HCU\n");
		return -EINVAL;
	}

	old_tail = dma_list->tail;
	new_tail = old_tail ? old_tail + 1 : dma_list->head;

	/* Check if list is full. */
	if (new_tail - dma_list->head >= dma_list->max_nents)
		return -ENOMEM;

	/*
	 * If there was an old tail (i.e., this is not the first element we are
	 * adding), un-terminate the old tail and make it point to the new one.
	 */
	if (old_tail) {
		old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
		/*
		 * The old tail 'nxt_desc' must point to the DMA address of the
		 * new tail.
		 */
		old_tail->nxt_desc = dma_list->dma_addr +
				     sizeof(*dma_list->tail) * (new_tail -
								dma_list->head);
	}

	new_tail->src_addr = (u32)addr;
	new_tail->src_len = (u32)len;
	new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
	new_tail->nxt_desc = 0;

	/* Update list tail with new tail. */
	dma_list->tail = new_tail;

	return 0;
}
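
/*
 * Callers usually append one entry per DMA-mapped scatterlist segment; a
 * minimal sketch, assuming 'sgl', 'sg', 'nents' and 'i' are caller state:
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		rc = ocs_hcu_dma_list_add_tail(hcu_dev, dma_list,
 *					       sg_dma_address(sg),
 *					       sg_dma_len(sg));
 *		if (rc)
 *			return rc;
 *	}
 */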

/**
 * ocs_hcu_hash_init() - Initialize hash operation context.
 * @ctx:	The context to initialize.
 * @algo:	The hashing algorithm to use.
 *
 * Return:	0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
{
	if (!ctx)
		return -EINVAL;

	ctx->algo = algo;
	ctx->idata.msg_len_lo = 0;
	ctx->idata.msg_len_hi = 0;
	/* No need to set idata.digest to 0. */

	return 0;
}
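
/*
 * The expected call sequence follows the usual init/update/final hashing
 * pattern; a sketch, with DMA-list preparation omitted:
 *
 *	struct ocs_hcu_hash_ctx ctx;
 *
 *	ocs_hcu_hash_init(&ctx, OCS_HCU_ALGO_SHA256);
 *	ocs_hcu_hash_update(hcu_dev, &ctx, dma_list);	(zero or more times)
 *	ocs_hcu_hash_final(hcu_dev, &ctx, dgst, dgst_len);
 */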

/**
 * ocs_hcu_hash_update() - Perform a hashing iteration.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev,
			struct ocs_hcu_hash_ctx *ctx,
			const struct ocs_hcu_dma_list *dma_list)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Start linked-list DMA hashing. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, false);
	if (rc)
		return rc;

	/* Update idata and return. */
	return ocs_hcu_get_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
}

/**
 * ocs_hcu_hash_finup() - Update and finalize hash computation.
 * @hcu_dev:	The OCS HCU device to use.
 * @ctx:	The OCS HCU hashing context.
 * @dma_list:	The OCS DMA list mapping the input data to process.
 * @dgst:	The buffer where to save the computed digest.
 * @dgst_len:	The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx,
		       const struct ocs_hcu_dma_list *dma_list,
		       u8 *dgst, size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/* Start linked-list DMA hashing. */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}

/**
 * ocs_hcu_hash_final() - Finalize hash computation.
 * @hcu_dev:		The OCS HCU device to use.
 * @ctx:		The OCS HCU hashing context.
 * @dgst:		The buffer where to save the computed digest.
 * @dgst_len:		The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
		       size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/*
	 * Enable HCU interrupts, so that HCU_DONE will be triggered once the
	 * final hash is computed.
	 */
	ocs_hcu_done_irq_en(hcu_dev);
	reinit_completion(&hcu_dev->irq_done);
	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}

/**
 * ocs_hcu_digest() - Compute hash digest.
 * @hcu_dev:		The OCS HCU device to use.
 * @algo:		The hash algorithm to use.
 * @data:		The input data to process.
 * @data_len:		The length of @data.
 * @dgst:		The buffer where to save the computed digest.
 * @dgst_len:		The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		   void *data, size_t data_len, u8 *dgst, size_t dgst_len)
{
	struct device *dev = hcu_dev->dev;
	dma_addr_t dma_handle;
	u32 reg;
	int rc;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
	if (rc)
		return rc;

	dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -EIO;

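	/*
	 * Direct (non-linked-list) DMA mode: the source address and size are
	 * programmed straight into the DMA registers below.
	 */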
	reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;

	ocs_hcu_done_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);

	writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
	writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
	writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);

	/* Unmap the input buffer even on failure, so the mapping cannot leak. */
	dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);

	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
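
/*
 * A one-shot digest needs no hashing context or DMA list; a hypothetical
 * caller ('buf' and 'buf_len' are assumed to be a DMA-able buffer and its
 * length):
 *
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	rc = ocs_hcu_digest(hcu_dev, OCS_HCU_ALGO_SHA256, buf, buf_len,
 *			    out, sizeof(out));
 */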

/**
 * ocs_hcu_hmac() - Compute HMAC.
 * @hcu_dev:		The OCS HCU device to use.
 * @algo:		The hash algorithm to use with HMAC.
 * @key:		The key to use.
 * @key_len:		The length of @key.
 * @dma_list:		The OCS DMA list mapping the input data to process.
 * @dgst:		The buffer where to save the computed HMAC.
 * @dgst_len:		The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		 const u8 *key, size_t key_len,
		 const struct ocs_hcu_dma_list *dma_list,
		 u8 *dgst, size_t dgst_len)
{
	int rc;

	/* Ensure the key is non-NULL and its length is non-zero. */
	if (!key || key_len == 0)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
	if (rc)
		return rc;

	rc = ocs_hcu_write_key(hcu_dev, key, key_len);
	if (rc)
		return rc;

	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);

	/* Clear HW key before processing return code. */
	ocs_hcu_clear_key(hcu_dev);

	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}

irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
{
	struct ocs_hcu_dev *hcu_dev = dev_id;
	u32 hcu_irq;
	u32 dma_irq;

	/* Read and clear the HCU interrupt. */
	hcu_irq = readl(hcu_dev->io_base + OCS_HCU_ISR);
	writel(hcu_irq, hcu_dev->io_base + OCS_HCU_ISR);

	/* Read and clear the HCU DMA interrupt. */
	dma_irq = readl(hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);
	writel(dma_irq, hcu_dev->io_base + OCS_HCU_DMA_MSI_ISR);

	/* Check for errors. */
	if (hcu_irq & HCU_IRQ_HASH_ERR_MASK || dma_irq & HCU_DMA_IRQ_ERR_MASK) {
		hcu_dev->irq_err = true;
		goto complete;
	}

	/* Check for DONE IRQs. */
	if (hcu_irq & HCU_IRQ_HASH_DONE || dma_irq & HCU_DMA_IRQ_SRC_DONE)
		goto complete;

	return IRQ_NONE;

complete:
	complete(&hcu_dev->irq_done);

	return IRQ_HANDLED;
}
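
/*
 * The handler above is meant to be registered by the platform glue code; a
 * minimal sketch, assuming a probe() with 'dev', 'irq' and 'hcu_dev' already
 * in hand:
 *
 *	rc = devm_request_irq(dev, irq, ocs_hcu_irq_handler, 0,
 *			      "keembay-ocs-hcu", hcu_dev);
 */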

MODULE_LICENSE("GPL");