1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This file is part of STM32 Crypto driver for Linux.
4 *
5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
7 */
8
9 #include <linux/clk.h>
10 #include <linux/crypto.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmaengine.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/of_device.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/reset.h>
23
24 #include <crypto/engine.h>
25 #include <crypto/hash.h>
26 #include <crypto/md5.h>
27 #include <crypto/scatterwalk.h>
28 #include <crypto/sha1.h>
29 #include <crypto/sha2.h>
30 #include <crypto/internal/hash.h>
31
32 #define HASH_CR 0x00
33 #define HASH_DIN 0x04
34 #define HASH_STR 0x08
35 #define HASH_UX500_HREG(x) (0x0c + ((x) * 0x04))
36 #define HASH_IMR 0x20
37 #define HASH_SR 0x24
38 #define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
39 #define HASH_HREG(x) (0x310 + ((x) * 0x04))
40 #define HASH_HWCFGR 0x3F0
41 #define HASH_VER 0x3F4
42 #define HASH_ID 0x3F8
43
44 /* Control Register */
45 #define HASH_CR_INIT BIT(2)
46 #define HASH_CR_DMAE BIT(3)
47 #define HASH_CR_DATATYPE_POS 4
48 #define HASH_CR_MODE BIT(6)
49 #define HASH_CR_MDMAT BIT(13)
50 #define HASH_CR_DMAA BIT(14)
51 #define HASH_CR_LKEY BIT(16)
52
53 #define HASH_CR_ALGO_SHA1 0x0
54 #define HASH_CR_ALGO_MD5 0x80
55 #define HASH_CR_ALGO_SHA224 0x40000
56 #define HASH_CR_ALGO_SHA256 0x40080
57
58 #define HASH_CR_UX500_EMPTYMSG BIT(20)
59 #define HASH_CR_UX500_ALGO_SHA1 BIT(7)
60 #define HASH_CR_UX500_ALGO_SHA256 0x0
61
62 /* Interrupt */
63 #define HASH_DINIE BIT(0)
64 #define HASH_DCIE BIT(1)
65
66 /* Interrupt Mask */
67 #define HASH_MASK_CALC_COMPLETION BIT(0)
68 #define HASH_MASK_DATA_INPUT BIT(1)
69
70 /* Context swap register */
71 #define HASH_CSR_REGISTER_NUMBER 53
72
73 /* Status Flags */
74 #define HASH_SR_DATA_INPUT_READY BIT(0)
75 #define HASH_SR_OUTPUT_READY BIT(1)
76 #define HASH_SR_DMA_ACTIVE BIT(2)
77 #define HASH_SR_BUSY BIT(3)
78
79 /* STR Register */
80 #define HASH_STR_NBLW_MASK GENMASK(4, 0)
81 #define HASH_STR_DCAL BIT(8)
82
83 #define HASH_FLAGS_INIT BIT(0)
84 #define HASH_FLAGS_OUTPUT_READY BIT(1)
85 #define HASH_FLAGS_CPU BIT(2)
86 #define HASH_FLAGS_DMA_READY BIT(3)
87 #define HASH_FLAGS_DMA_ACTIVE BIT(4)
88 #define HASH_FLAGS_HMAC_INIT BIT(5)
89 #define HASH_FLAGS_HMAC_FINAL BIT(6)
90 #define HASH_FLAGS_HMAC_KEY BIT(7)
91
92 #define HASH_FLAGS_FINAL BIT(15)
93 #define HASH_FLAGS_FINUP BIT(16)
94 #define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
95 #define HASH_FLAGS_MD5 BIT(18)
96 #define HASH_FLAGS_SHA1 BIT(19)
97 #define HASH_FLAGS_SHA224 BIT(20)
98 #define HASH_FLAGS_SHA256 BIT(21)
99 #define HASH_FLAGS_ERRORS BIT(22)
100 #define HASH_FLAGS_HMAC BIT(23)
101
102 #define HASH_OP_UPDATE 1
103 #define HASH_OP_FINAL 2
104
/* Values of the HASH_CR DATATYPE field: input data swapping granularity */
enum stm32_hash_data_format {
	HASH_DATA_32_BITS	= 0x0,
	HASH_DATA_16_BITS	= 0x1,
	HASH_DATA_8_BITS	= 0x2,	/* used by this driver (see stm32_hash_init) */
	HASH_DATA_1_BIT		= 0x3
};
111
112 #define HASH_BUFLEN 256
113 #define HASH_LONG_KEY 64
114 #define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
115 #define HASH_QUEUE_LENGTH 16
116 #define HASH_DMA_THRESHOLD 50
117
118 #define HASH_AUTOSUSPEND_DELAY 50
119
/* Per-transform (tfm) context */
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;	/* crypto engine callbacks */
	struct stm32_hash_dev	*hdev;		/* bound device, set lazily */
	struct crypto_shash	*xtfm;		/* Ux500 empty-message sw fallback */
	unsigned long		flags;		/* HASH_FLAGS_HMAC for hmac(...) algs */

	u8			key[HASH_MAX_KEY_SIZE];	/* HMAC key copy */
	int			keylen;			/* HMAC key length in bytes */
};
129
/* Per-request state, embedded in the ahash request context */
struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;	/* HASH_FLAGS_* for this request */
	unsigned long		op;	/* HASH_OP_UPDATE or HASH_OP_FINAL */

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;	/* digest size in bytes */
	size_t			bufcnt;	/* bytes currently staged in buffer[] */
	size_t			buflen;	/* staging capacity (HASH_BUFLEN) */

	/* DMA */
	struct scatterlist	*sg;	/* current source scatterlist entry */
	unsigned int		offset;	/* offset within the current sg entry */
	unsigned int		total;	/* bytes still to be processed */
	struct scatterlist	sg_key;	/* wraps ctx->key for DMA key transfer */

	dma_addr_t		dma_addr;
	size_t			dma_ct;	/* dma_map_sg() return value */
	int			nents;	/* sg_nents() of the source list */

	u8			data_type;	/* HASH_CR DATATYPE field value */

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));	/* CPU staging buffer */

	/* Export Context */
	u32			*hw_context;	/* saved IMR/STR/CR + CSR registers */
};
157
/* One array of ahash algorithms registered for a given IP variant */
struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;	/* number of entries in algs_list */
};
162
/* Compile-time description of one HASH IP variant */
struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
	bool				has_sr;		/* has a status register (poll SR.BUSY) */
	bool				has_mdmat;	/* implements HASH_CR_MDMAT */
	bool				broken_emptymsg; /* hw mishandles 0-byte msgs; use sw fallback */
	bool				ux500;		/* Ux500 variant: different regs/encodings */
};
171
/* Per-device (platform instance) state */
struct stm32_hash_dev {
	struct list_head	list;		/* entry in stm32_hash.dev_list */
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;	/* base for the DMA destination address */
	u32			dma_mode;	/* mode 1 needs CPU-fed residue, see stm32_hash_dma_send() */
	u32			dma_maxburst;
	bool			polled;		/* no IRQ: poll for completion */

	struct ahash_request	*req;		/* request currently on the hardware */
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;		/* HASH_FLAGS_* device state */

	struct dma_chan		*dma_lch;	/* "in" channel feeding HASH_DIN */
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};
194
/* Driver-global list of probed devices, protected by .lock */
struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};
204
205 static void stm32_hash_dma_callback(void *param);
206
/* Read the 32-bit HASH peripheral register at @offset. */
static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	void __iomem *addr = hdev->io_base + offset;

	return readl_relaxed(addr);
}
211
/* Write @value to the 32-bit HASH peripheral register at @offset. */
static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	void __iomem *addr = hdev->io_base + offset;

	writel_relaxed(value, addr);
}
217
stm32_hash_wait_busy(struct stm32_hash_dev * hdev)218 static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
219 {
220 u32 status;
221
222 /* The Ux500 lacks the special status register, we poll the DCAL bit instead */
223 if (!hdev->pdata->has_sr)
224 return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
225 !(status & HASH_STR_DCAL), 10, 10000);
226
227 return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
228 !(status & HASH_SR_BUSY), 10, 10000);
229 }
230
stm32_hash_set_nblw(struct stm32_hash_dev * hdev,int length)231 static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
232 {
233 u32 reg;
234
235 reg = stm32_hash_read(hdev, HASH_STR);
236 reg &= ~(HASH_STR_NBLW_MASK);
237 reg |= (8U * ((length) % 4U));
238 stm32_hash_write(hdev, HASH_STR, reg);
239 }
240
stm32_hash_write_key(struct stm32_hash_dev * hdev)241 static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
242 {
243 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
244 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
245 u32 reg;
246 int keylen = ctx->keylen;
247 void *key = ctx->key;
248
249 if (keylen) {
250 stm32_hash_set_nblw(hdev, keylen);
251
252 while (keylen > 0) {
253 stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
254 keylen -= 4;
255 key += 4;
256 }
257
258 reg = stm32_hash_read(hdev, HASH_STR);
259 reg |= HASH_STR_DCAL;
260 stm32_hash_write(hdev, HASH_STR, reg);
261
262 return -EINPROGRESS;
263 }
264
265 return 0;
266 }
267
/*
 * Program the HASH control register for a new request: select the
 * algorithm, data swapping type and HMAC mode, then set CR.INIT to start
 * a fresh digest. Does nothing if the core is already initialised for
 * this request (HASH_FLAGS_INIT set).
 */
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			/* Ux500 uses a different ALGO bit encoding */
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA1;
			else
				reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA256;
			else
				reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			/* Unknown algo flag: fall back to the MD5 encoding */
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			/* Keys longer than the block size need LKEY mode */
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		/*
		 * On the Ux500 we need to set a special flag to indicate that
		 * the message is zero length.
		 */
		if (hdev->pdata->ux500 && bufcnt == 0)
			reg |= HASH_CR_UX500_EMPTYMSG;

		/* Enable the digest-complete interrupt unless we poll */
		if (!hdev->polled)
			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}
326
/*
 * Copy pending request data from the scatterlist into the linear staging
 * buffer until the buffer is full or the request data is exhausted.
 * Advances rctx->sg/offset/total/bufcnt accordingly.
 */
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		/* count is unsigned, so this really tests "nothing to copy" */
		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				/* Skip zero-length entries in mid-list */
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		/* Entry fully consumed: step to the next one (if any) */
		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}
360
/*
 * Feed @length bytes of @buf to the hash core via CPU writes to DIN.
 *
 * For HMAC, the key is written before the first message data. On @final,
 * NBLW is programmed, the digest computation is started (STR.DCAL) and,
 * for HMAC, the key is written again for the outer hash.
 *
 * Returns 0, -EINPROGRESS when the final digest is pending, or
 * -ETIMEDOUT if the core stayed busy.
 */
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	/* Number of 32-bit words to push, rounding a partial tail up */
	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev, length);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	/* HMAC: the inner key must be hashed before any message data */
	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			/* Outer key for HMAC finalisation */
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}
412
/*
 * CPU-mode update: drain the scatterlist through the staging buffer in
 * buflen-sized chunks; when finishing up (HASH_FLAGS_FINUP), transmit
 * the residue as the final block. In polled mode, wait here for the
 * digest to complete.
 */
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	/* Send full staging buffers while enough data is pending */
	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	/* Stage whatever is left for a later update/final */
	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1);

		/* If we have an IRQ, wait for that, else poll for completion */
		if (hdev->polled) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
			err = 0;
		}
	}

	return err;
}
448
stm32_hash_xmit_dma(struct stm32_hash_dev * hdev,struct scatterlist * sg,int length,int mdma)449 static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
450 struct scatterlist *sg, int length, int mdma)
451 {
452 struct dma_async_tx_descriptor *in_desc;
453 dma_cookie_t cookie;
454 u32 reg;
455 int err;
456
457 in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
458 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
459 DMA_CTRL_ACK);
460 if (!in_desc) {
461 dev_err(hdev->dev, "dmaengine_prep_slave error\n");
462 return -ENOMEM;
463 }
464
465 reinit_completion(&hdev->dma_completion);
466 in_desc->callback = stm32_hash_dma_callback;
467 in_desc->callback_param = hdev;
468
469 hdev->flags |= HASH_FLAGS_FINAL;
470 hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
471
472 reg = stm32_hash_read(hdev, HASH_CR);
473
474 if (!hdev->pdata->has_mdmat) {
475 if (mdma)
476 reg |= HASH_CR_MDMAT;
477 else
478 reg &= ~HASH_CR_MDMAT;
479 }
480 reg |= HASH_CR_DMAE;
481
482 stm32_hash_write(hdev, HASH_CR, reg);
483
484 stm32_hash_set_nblw(hdev, length);
485
486 cookie = dmaengine_submit(in_desc);
487 err = dma_submit_error(cookie);
488 if (err)
489 return -ENOMEM;
490
491 dma_async_issue_pending(hdev->dma_lch);
492
493 if (!wait_for_completion_timeout(&hdev->dma_completion,
494 msecs_to_jiffies(100)))
495 err = -ETIMEDOUT;
496
497 if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
498 NULL, NULL) != DMA_COMPLETE)
499 err = -ETIMEDOUT;
500
501 if (err) {
502 dev_err(hdev->dev, "DMA Error %i\n", err);
503 dmaengine_terminate_all(hdev->dma_lch);
504 return err;
505 }
506
507 return -EINPROGRESS;
508 }
509
/*
 * DMA completion callback: wake up the waiter in stm32_hash_xmit_dma().
 *
 * NOTE(review): HASH_FLAGS_DMA_READY is set after complete(), so the
 * woken waiter may run before the flag is visible - confirm no reader
 * depends on that ordering.
 */
static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}
518
/*
 * Write the HMAC key to the core: via CPU writes for short keys (or on
 * dma_mode == 1 hardware), otherwise via a dedicated DMA transfer of
 * ctx->key. Returns -EINPROGRESS, -ETIMEDOUT, -ENOMEM or the xmit
 * result.
 */
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		/* Only (re)build the key sg on the first key pass */
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}
549
stm32_hash_dma_init(struct stm32_hash_dev * hdev)550 static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
551 {
552 struct dma_slave_config dma_conf;
553 struct dma_chan *chan;
554 int err;
555
556 memset(&dma_conf, 0, sizeof(dma_conf));
557
558 dma_conf.direction = DMA_MEM_TO_DEV;
559 dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
560 dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
561 dma_conf.src_maxburst = hdev->dma_maxburst;
562 dma_conf.dst_maxburst = hdev->dma_maxburst;
563 dma_conf.device_fc = false;
564
565 chan = dma_request_chan(hdev->dev, "in");
566 if (IS_ERR(chan))
567 return PTR_ERR(chan);
568
569 hdev->dma_lch = chan;
570
571 err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
572 if (err) {
573 dma_release_channel(hdev->dma_lch);
574 hdev->dma_lch = NULL;
575 dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
576 return err;
577 }
578
579 init_completion(&hdev->dma_completion);
580
581 return 0;
582 }
583
stm32_hash_dma_send(struct stm32_hash_dev * hdev)584 static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
585 {
586 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
587 struct scatterlist sg[1], *tsg;
588 int err = 0, len = 0, reg, ncp = 0;
589 unsigned int i;
590 u32 *buffer = (void *)rctx->buffer;
591
592 rctx->sg = hdev->req->src;
593 rctx->total = hdev->req->nbytes;
594
595 rctx->nents = sg_nents(rctx->sg);
596
597 if (rctx->nents < 0)
598 return -EINVAL;
599
600 stm32_hash_write_ctrl(hdev, rctx->total);
601
602 if (hdev->flags & HASH_FLAGS_HMAC) {
603 err = stm32_hash_hmac_dma_send(hdev);
604 if (err != -EINPROGRESS)
605 return err;
606 }
607
608 for_each_sg(rctx->sg, tsg, rctx->nents, i) {
609 len = sg->length;
610
611 sg[0] = *tsg;
612 if (sg_is_last(sg)) {
613 if (hdev->dma_mode == 1) {
614 len = (ALIGN(sg->length, 16) - 16);
615
616 ncp = sg_pcopy_to_buffer(
617 rctx->sg, rctx->nents,
618 rctx->buffer, sg->length - len,
619 rctx->total - sg->length + len);
620
621 sg->length = len;
622 } else {
623 if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
624 len = sg->length;
625 sg->length = ALIGN(sg->length,
626 sizeof(u32));
627 }
628 }
629 }
630
631 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
632 DMA_TO_DEVICE);
633 if (rctx->dma_ct == 0) {
634 dev_err(hdev->dev, "dma_map_sg error\n");
635 return -ENOMEM;
636 }
637
638 err = stm32_hash_xmit_dma(hdev, sg, len,
639 !sg_is_last(sg));
640
641 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
642
643 if (err == -ENOMEM)
644 return err;
645 }
646
647 if (hdev->dma_mode == 1) {
648 if (stm32_hash_wait_busy(hdev))
649 return -ETIMEDOUT;
650 reg = stm32_hash_read(hdev, HASH_CR);
651 reg &= ~HASH_CR_DMAE;
652 reg |= HASH_CR_DMAA;
653 stm32_hash_write(hdev, HASH_CR, reg);
654
655 if (ncp) {
656 memset(buffer + ncp, 0,
657 DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
658 writesl(hdev->io_base + HASH_DIN, buffer,
659 DIV_ROUND_UP(ncp, sizeof(u32)));
660 }
661 stm32_hash_set_nblw(hdev, ncp);
662 reg = stm32_hash_read(hdev, HASH_STR);
663 reg |= HASH_STR_DCAL;
664 stm32_hash_write(hdev, HASH_STR, reg);
665 err = -EINPROGRESS;
666 }
667
668 if (hdev->flags & HASH_FLAGS_HMAC) {
669 if (stm32_hash_wait_busy(hdev))
670 return -ETIMEDOUT;
671 err = stm32_hash_hmac_dma_send(hdev);
672 }
673
674 return err;
675 }
676
stm32_hash_find_dev(struct stm32_hash_ctx * ctx)677 static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
678 {
679 struct stm32_hash_dev *hdev = NULL, *tmp;
680
681 spin_lock_bh(&stm32_hash.lock);
682 if (!ctx->hdev) {
683 list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
684 hdev = tmp;
685 break;
686 }
687 ctx->hdev = hdev;
688 } else {
689 hdev = ctx->hdev;
690 }
691
692 spin_unlock_bh(&stm32_hash.lock);
693
694 return hdev;
695 }
696
stm32_hash_dma_aligned_data(struct ahash_request * req)697 static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
698 {
699 struct scatterlist *sg;
700 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
701 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
702 int i;
703
704 if (req->nbytes <= HASH_DMA_THRESHOLD)
705 return false;
706
707 if (sg_nents(req->src) > 1) {
708 if (hdev->dma_mode == 1)
709 return false;
710 for_each_sg(req->src, sg, sg_nents(req->src), i) {
711 if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
712 (!sg_is_last(sg)))
713 return false;
714 }
715 }
716
717 if (req->src->offset % 4)
718 return false;
719
720 return true;
721 }
722
/*
 * ahash .init: reset the request context and derive the algorithm flags
 * from the tfm's digest size. Defaults to the CPU (buffered) path; the
 * DMA path may be selected later in stm32_hash_finup().
 */
static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	/* Digest size uniquely identifies the algorithm here */
	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	/* Propagate the HMAC marker from the transform to the request */
	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}
767
/* Engine callback for the UPDATE step (only reached on the CPU path) */
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}
772
/*
 * Engine callback for the FINAL step: transmit the buffered residue
 * (CPU path) or launch the DMA path; in polled mode also wait here for
 * the digest to complete.
 */
static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	/* If we have an IRQ, wait for that, else poll for completion */
	if (hdev->polled) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Caller will call stm32_hash_finish_req() */
		err = 0;
	}

	return err;
}
798
/*
 * Compute the digest of the empty message in software, for hardware
 * that mishandles zero-length input (broken_emptymsg). Writes the
 * result into rctx->digest; errors are logged but not propagated.
 */
static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	/* Mirror the hardware HMAC key on the fallback transform */
	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}
827
/*
 * Read the computed digest out of the hardware result registers into
 * rctx->digest, converting each word to big-endian. On broken hardware
 * with a zero-length message, defer to the software fallback instead.
 */
static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	__be32 *hash = (void *)rctx->digest;
	unsigned int i, hashsize;

	if (hdev->pdata->broken_emptymsg && !req->nbytes)
		return stm32_hash_emptymsg_fallback(req);

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++) {
		/* Ux500 keeps the result registers at a different offset */
		if (hdev->pdata->ux500)
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_UX500_HREG(i)));
		else
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_HREG(i)));
	}
}
864
stm32_hash_finish(struct ahash_request * req)865 static int stm32_hash_finish(struct ahash_request *req)
866 {
867 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
868
869 if (!req->result)
870 return -EINVAL;
871
872 memcpy(req->result, rctx->digest, rctx->digcnt);
873
874 return 0;
875 }
876
/*
 * Complete a request: on success of a FINAL operation, read out the
 * digest and clear the per-request device flags; on failure, mark the
 * request context with HASH_FLAGS_ERRORS. Then drop the runtime-PM
 * reference and hand the result back to the crypto engine.
 */
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}
899
/*
 * Power up the hash core (runtime PM) and, unless a request is already
 * in progress (HASH_FLAGS_INIT), reset CR/STR/DIN/IMR to a clean state.
 * @rctx is currently unused. Always returns 0.
 */
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}
915
916 static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
917 static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
918
/* Hand @req to the crypto engine queue for serialised processing */
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
924
/*
 * Crypto engine .prepare_request callback: bind the request to the
 * device and make sure the hardware is powered and reset.
 */
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}
945
/*
 * Crypto engine .do_one_request callback: run the queued UPDATE or
 * FINAL operation. Unless completion will be signalled later
 * (-EINPROGRESS), finalise the request here.
 */
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	if (err != -EINPROGRESS)
	/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}
973
stm32_hash_enqueue(struct ahash_request * req,unsigned int op)974 static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
975 {
976 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
977 struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
978 struct stm32_hash_dev *hdev = ctx->hdev;
979
980 rctx->op = op;
981
982 return stm32_hash_handle_queue(hdev, req);
983 }
984
/*
 * ahash .update: on the CPU path, stage small updates in the local
 * buffer and only queue a real UPDATE to the engine when the staging
 * buffer would overflow. Non-CPU (DMA) requests are handled entirely at
 * finup time, so they return immediately here.
 */
static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	/* Everything still fits in the staging buffer: just copy it */
	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}
1003
/*
 * ahash .final: no new data; mark the request as finishing up and queue
 * the FINAL operation to the engine.
 */
static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}
1012
/*
 * ahash .finup: process remaining data and finalise. Switches to the
 * DMA path when a channel exists and the source data is suitably
 * aligned; otherwise stays on the buffered CPU path.
 */
static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}
1038
/* ahash .digest: init + finup in a single call */
static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}
1043
stm32_hash_export(struct ahash_request * req,void * out)1044 static int stm32_hash_export(struct ahash_request *req, void *out)
1045 {
1046 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1047 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1048 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1049 u32 *preg;
1050 unsigned int i;
1051 int ret;
1052
1053 pm_runtime_get_sync(hdev->dev);
1054
1055 ret = stm32_hash_wait_busy(hdev);
1056 if (ret)
1057 return ret;
1058
1059 rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
1060 sizeof(u32),
1061 GFP_KERNEL);
1062
1063 preg = rctx->hw_context;
1064
1065 if (!hdev->pdata->ux500)
1066 *preg++ = stm32_hash_read(hdev, HASH_IMR);
1067 *preg++ = stm32_hash_read(hdev, HASH_STR);
1068 *preg++ = stm32_hash_read(hdev, HASH_CR);
1069 for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
1070 *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1071
1072 pm_runtime_mark_last_busy(hdev->dev);
1073 pm_runtime_put_autosuspend(hdev->dev);
1074
1075 memcpy(out, rctx, sizeof(*rctx));
1076
1077 return 0;
1078 }
1079
/*
 * ahash .import: restore a context saved by stm32_hash_export() and
 * replay the IMR/STR/CR and CSR register snapshot into the core. Frees
 * the snapshot buffer afterwards.
 */
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	/* The register snapshot lives in the buffer saved by export */
	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	if (!hdev->pdata->ux500)
		stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	/* CR is written twice: first as saved, then again with INIT set */
	stm32_hash_write(hdev, HASH_CR, *preg);
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}
1112
stm32_hash_setkey(struct crypto_ahash * tfm,const u8 * key,unsigned int keylen)1113 static int stm32_hash_setkey(struct crypto_ahash *tfm,
1114 const u8 *key, unsigned int keylen)
1115 {
1116 struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1117
1118 if (keylen <= HASH_MAX_KEY_SIZE) {
1119 memcpy(ctx->key, key, keylen);
1120 ctx->keylen = keylen;
1121 } else {
1122 return -ENOMEM;
1123 }
1124
1125 return 0;
1126 }
1127
stm32_hash_init_fallback(struct crypto_tfm * tfm)1128 static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1129 {
1130 struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1131 struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1132 const char *name = crypto_tfm_alg_name(tfm);
1133 struct crypto_shash *xtfm;
1134
1135 /* The fallback is only needed on Ux500 */
1136 if (!hdev->pdata->ux500)
1137 return 0;
1138
1139 xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1140 if (IS_ERR(xtfm)) {
1141 dev_err(hdev->dev, "failed to allocate %s fallback\n",
1142 name);
1143 return PTR_ERR(xtfm);
1144 }
1145 dev_info(hdev->dev, "allocated %s fallback\n", name);
1146 ctx->xtfm = xtfm;
1147
1148 return 0;
1149 }
1150
stm32_hash_cra_init_algs(struct crypto_tfm * tfm,const char * algs_hmac_name)1151 static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
1152 const char *algs_hmac_name)
1153 {
1154 struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1155
1156 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1157 sizeof(struct stm32_hash_request_ctx));
1158
1159 ctx->keylen = 0;
1160
1161 if (algs_hmac_name)
1162 ctx->flags |= HASH_FLAGS_HMAC;
1163
1164 ctx->enginectx.op.do_one_request = stm32_hash_one_request;
1165 ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
1166 ctx->enginectx.op.unprepare_request = NULL;
1167
1168 return stm32_hash_init_fallback(tfm);
1169 }
1170
/* cra_init for the plain (non-HMAC) hash algorithms. */
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}
1175
/* cra_init for hmac(md5). */
static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}
1180
/* cra_init for hmac(sha1). */
static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}
1185
/* cra_init for hmac(sha224). */
static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}
1190
/* cra_init for hmac(sha256). */
static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}
1195
stm32_hash_cra_exit(struct crypto_tfm * tfm)1196 static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
1197 {
1198 struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1199
1200 if (ctx->xtfm)
1201 crypto_free_shash(ctx->xtfm);
1202 }
1203
stm32_hash_irq_thread(int irq,void * dev_id)1204 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1205 {
1206 struct stm32_hash_dev *hdev = dev_id;
1207
1208 if (HASH_FLAGS_CPU & hdev->flags) {
1209 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1210 hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1211 goto finish;
1212 }
1213 } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
1214 if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1215 hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1216 goto finish;
1217 }
1218 }
1219
1220 return IRQ_HANDLED;
1221
1222 finish:
1223 /* Finish current request */
1224 stm32_hash_finish_req(hdev->req, 0);
1225
1226 return IRQ_HANDLED;
1227 }
1228
stm32_hash_irq_handler(int irq,void * dev_id)1229 static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1230 {
1231 struct stm32_hash_dev *hdev = dev_id;
1232 u32 reg;
1233
1234 reg = stm32_hash_read(hdev, HASH_SR);
1235 if (reg & HASH_SR_OUTPUT_READY) {
1236 reg &= ~HASH_SR_OUTPUT_READY;
1237 stm32_hash_write(hdev, HASH_SR, reg);
1238 hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1239 /* Disable IT*/
1240 stm32_hash_write(hdev, HASH_IMR, 0);
1241 return IRQ_WAKE_THREAD;
1242 }
1243
1244 return IRQ_NONE;
1245 }
1246
/* MD5 algorithms: plain md5 followed by hmac(md5). */
static struct ahash_alg algs_md5[] = {
	/* md5 */
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* hmac(md5) */
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1302
/* SHA-1 algorithms: plain sha1 followed by hmac(sha1). */
static struct ahash_alg algs_sha1[] = {
	/* sha1 */
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* hmac(sha1) */
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1358
1359 static struct ahash_alg algs_sha224[] = {
1360 {
1361 .init = stm32_hash_init,
1362 .update = stm32_hash_update,
1363 .final = stm32_hash_final,
1364 .finup = stm32_hash_finup,
1365 .digest = stm32_hash_digest,
1366 .export = stm32_hash_export,
1367 .import = stm32_hash_import,
1368 .halg = {
1369 .digestsize = SHA224_DIGEST_SIZE,
1370 .statesize = sizeof(struct stm32_hash_request_ctx),
1371 .base = {
1372 .cra_name = "sha224",
1373 .cra_driver_name = "stm32-sha224",
1374 .cra_priority = 200,
1375 .cra_flags = CRYPTO_ALG_ASYNC |
1376 CRYPTO_ALG_KERN_DRIVER_ONLY,
1377 .cra_blocksize = SHA224_BLOCK_SIZE,
1378 .cra_ctxsize = sizeof(struct stm32_hash_ctx),
1379 .cra_alignmask = 3,
1380 .cra_init = stm32_hash_cra_init,
1381 .cra_exit = stm32_hash_cra_exit,
1382 .cra_module = THIS_MODULE,
1383 }
1384 }
1385 },
1386 {
1387 .init = stm32_hash_init,
1388 .update = stm32_hash_update,
1389 .final = stm32_hash_final,
1390 .finup = stm32_hash_finup,
1391 .digest = stm32_hash_digest,
1392 .setkey = stm32_hash_setkey,
1393 .export = stm32_hash_export,
1394 .import = stm32_hash_import,
1395 .halg = {
1396 .digestsize = SHA224_DIGEST_SIZE,
1397 .statesize = sizeof(struct stm32_hash_request_ctx),
1398 .base = {
1399 .cra_name = "hmac(sha224)",
1400 .cra_driver_name = "stm32-hmac-sha224",
1401 .cra_priority = 200,
1402 .cra_flags = CRYPTO_ALG_ASYNC |
1403 CRYPTO_ALG_KERN_DRIVER_ONLY,
1404 .cra_blocksize = SHA224_BLOCK_SIZE,
1405 .cra_ctxsize = sizeof(struct stm32_hash_ctx),
1406 .cra_alignmask = 3,
1407 .cra_init = stm32_hash_cra_sha224_init,
1408 .cra_exit = stm32_hash_cra_exit,
1409 .cra_module = THIS_MODULE,
1410 }
1411 }
1412 },
1413 };
1414
/* SHA-256 algorithms: plain sha256 followed by hmac(sha256). */
static struct ahash_alg algs_sha256[] = {
	/* sha256 */
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	/* hmac(sha256) */
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};
1470
stm32_hash_register_algs(struct stm32_hash_dev * hdev)1471 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1472 {
1473 unsigned int i, j;
1474 int err;
1475
1476 for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1477 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1478 err = crypto_register_ahash(
1479 &hdev->pdata->algs_info[i].algs_list[j]);
1480 if (err)
1481 goto err_algs;
1482 }
1483 }
1484
1485 return 0;
1486 err_algs:
1487 dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1488 for (; i--; ) {
1489 for (; j--;)
1490 crypto_unregister_ahash(
1491 &hdev->pdata->algs_info[i].algs_list[j]);
1492 }
1493
1494 return err;
1495 }
1496
stm32_hash_unregister_algs(struct stm32_hash_dev * hdev)1497 static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1498 {
1499 unsigned int i, j;
1500
1501 for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1502 for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1503 crypto_unregister_ahash(
1504 &hdev->pdata->algs_info[i].algs_list[j]);
1505 }
1506
1507 return 0;
1508 }
1509
/* Ux500 variant: SHA-1 and SHA-256 only (no MD5/SHA-224 support). */
static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list = algs_sha1,
		.size = ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list = algs_sha256,
		.size = ARRAY_SIZE(algs_sha256),
	},
};

/* Ux500 quirks: empty messages are broken in hardware, and several
 * code paths take the ux500-specific branch. */
static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.algs_info = stm32_hash_algs_info_ux500,
	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500 = true,
};
1527
/* STM32F4 variant: MD5 and SHA-1 only. */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list = algs_md5,
		.size = ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list = algs_sha1,
		.size = ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info = stm32_hash_algs_info_stm32f4,
	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr = true,
	.has_mdmat = true,
};
1545
/* STM32F7 variant: full MD5/SHA-1/SHA-224/SHA-256 support. */
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list = algs_md5,
		.size = ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list = algs_sha1,
		.size = ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list = algs_sha224,
		.size = ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list = algs_sha256,
		.size = ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info = stm32_hash_algs_info_stm32f7,
	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr = true,
	.has_mdmat = true,
};
1571
/* Device-tree match table: maps each compatible to its variant data. */
static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "stericsson,ux500-hash",
		.data = &stm32_hash_pdata_ux500,
	},
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
1589
stm32_hash_get_of_match(struct stm32_hash_dev * hdev,struct device * dev)1590 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1591 struct device *dev)
1592 {
1593 hdev->pdata = of_device_get_match_data(dev);
1594 if (!hdev->pdata) {
1595 dev_err(dev, "no compatible OF match\n");
1596 return -EINVAL;
1597 }
1598
1599 if (of_property_read_u32(dev->of_node, "dma-maxburst",
1600 &hdev->dma_maxburst)) {
1601 dev_info(dev, "dma-maxburst not specified, using 0\n");
1602 hdev->dma_maxburst = 0;
1603 }
1604
1605 return 0;
1606 }
1607
/*
 * Probe: map the HASH IP, wire up IRQ/clock/reset, initialise DMA and
 * the crypto engine, then register the algorithms for this variant.
 * Teardown on failure mirrors acquisition order via the goto ladder.
 */
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	/* Physical base address is kept for later DMA configuration */
	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	/* The IRQ is optional: without one the driver runs in polling mode */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, use polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	/* Hold a PM reference for the duration of probe */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* Reset control is optional; pulse it when present */
	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	/* DMA is best effort: fall back to CPU mode when unavailable */
	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	/* Drop the probe-time PM reference; autosuspend may now kick in */
	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}
1747
stm32_hash_remove(struct platform_device * pdev)1748 static int stm32_hash_remove(struct platform_device *pdev)
1749 {
1750 struct stm32_hash_dev *hdev;
1751 int ret;
1752
1753 hdev = platform_get_drvdata(pdev);
1754 if (!hdev)
1755 return -ENODEV;
1756
1757 ret = pm_runtime_resume_and_get(hdev->dev);
1758 if (ret < 0)
1759 return ret;
1760
1761 stm32_hash_unregister_algs(hdev);
1762
1763 crypto_engine_exit(hdev->engine);
1764
1765 spin_lock(&stm32_hash.lock);
1766 list_del(&hdev->list);
1767 spin_unlock(&stm32_hash.lock);
1768
1769 if (hdev->dma_lch)
1770 dma_release_channel(hdev->dma_lch);
1771
1772 pm_runtime_disable(hdev->dev);
1773 pm_runtime_put_noidle(hdev->dev);
1774
1775 clk_disable_unprepare(hdev->clk);
1776
1777 return 0;
1778 }
1779
1780 #ifdef CONFIG_PM
/* Runtime PM suspend: gate the hash clock while the IP is idle. */
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}
1789
stm32_hash_runtime_resume(struct device * dev)1790 static int stm32_hash_runtime_resume(struct device *dev)
1791 {
1792 struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1793 int ret;
1794
1795 ret = clk_prepare_enable(hdev->clk);
1796 if (ret) {
1797 dev_err(hdev->dev, "Failed to prepare_enable clock\n");
1798 return ret;
1799 }
1800
1801 return 0;
1802 }
1803 #endif
1804
/*
 * System sleep reuses the runtime PM callbacks via force_suspend /
 * force_resume; runtime PM itself only gates the hash clock.
 */
static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};
1811
/* Platform driver glue: probe/remove plus PM ops and OF matching. */
static struct platform_driver stm32_hash_driver = {
	.probe = stm32_hash_probe,
	.remove = stm32_hash_remove,
	.driver = {
		.name = "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};
1821
/* Standard module registration boilerplate for the platform driver. */
module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");
1827