// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |         |--->| (operation) |
 *       .              |         |    | (load ctx2) |
 *       .              |         |    ---------------
 * ---------------      |         |
 * | JobDesc #3  |------|         |
 * | *(packet 3) |                |
 * ---------------                |
 *       .                        |
 *       .                        |
 * ---------------                |
 * | JobDesc #4  |----------------|
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	struct crypto_engine_ctx enginectx;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}
/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

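/*
 * Build the update, update-first, final and digest shared descriptors for
 * the (H)MAC hashes from the current split key, and sync them for device
 * access.
 */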
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc) {
		dev_err(jrdev, "unable to allocate key input memory\n");
		return -ENOMEM;
	}

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

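/*
 * (H)MAC setkey: keys longer than the block size are first digested down to
 * digestsize, then either kept for DKP (Era >= 6) or expanded into a split
 * key, before the shared descriptors are rebuilt.
 */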
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);

}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;
	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

	edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
	if (!edesc) {
		dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
		return NULL;
	}

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

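/*
 * Describe the request source to the job descriptor: a single mapped segment
 * is referenced directly, otherwise a sec4 S/G table is built (starting at
 * entry first_sg, after any buffered-data entry) and referenced via LDST_SGF.
 */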
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

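/*
 * crypto-engine callback: submit the previously prepared job descriptor to
 * the job ring. A full ring (-ENOSPC) is reported back when the engine
 * supports retries; any other failure frees the extended descriptor.
 */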
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

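/*
 * Data is accumulated in state->buf until at least a full block is
 * available; only whole blocks are sent to CAAM and the remainder is kept
 * in next_buflen for the next update. For XCBC/CMAC a trailing full block
 * is additionally held back, since the last block gets special treatment
 * at finalization.
 */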
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

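/* submit final job descriptor: running context + buffered bytes in, digest out */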
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

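/* submit finup job descriptor: running context + buffered bytes + req->src in, digest out */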
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

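/* submit a one-shot INITFINAL job descriptor: hash req->src straight to the digest */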
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;

}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	     (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

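/*
 * export/import serialize only the software-visible state (buffered bytes,
 * running context copy and the current update/final/finup stage pointers),
 * so a request can be suspended and resumed later.
 */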
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

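/*
 * Per-tfm init: grab a job ring, pick DMA directions and running-context
 * length for the algorithm class, then DMA-map the key buffer (if needed)
 * and the block of shared descriptors.
 */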
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 keep the full 32- and 64-byte internal state
	 * of SHA-256 and SHA-512, respectively.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

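	/*
	 * Pick DMA directions, the algorithm class (class 1 for the AES-based
	 * MACs, class 2 for MDHA hashes) and the running context length.
	 */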
	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}

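	/*
	 * Map the key buffer up front when the shared descriptors reference
	 * it by DMA address (adata.key_dma); XCBC maps it bidirectionally
	 * so the device can also write into it.
	 */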
	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

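	/*
	 * Map the four shared descriptor buffers with a single DMA mapping:
	 * they are laid out contiguously in struct caam_hash_ctx, from
	 * sh_desc_update up to (but not including) the key member.
	 */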
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

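	/*
	 * Derive the DMA address of each shared descriptor as an offset
	 * from the single mapping created above.
	 */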
	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					  sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					     sh_desc_update_offset;

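	/* crypto engine callback used to run one queued hash request */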
	ctx->enginectx.op.do_one_request = ahash_do_one_req;

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

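	/*
	 * hash_list is only initialized if caam_algapi_hash_init() got far
	 * enough to register algorithms; skip teardown otherwise.
	 */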
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
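		/* unkeyed variants must not advertise a setkey callback */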
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* Limit digest size based on LP256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

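		/* AES-based MACs (xcbc, cmac) have no unkeyed variant */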
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	return err;
}