1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/module.h>
4 #include <linux/slab.h>
5 #include <linux/crypto.h>
6 #include <crypto/internal/aead.h>
7 #include <crypto/internal/cipher.h>
8 #include <crypto/internal/skcipher.h>
9 #include <crypto/aes.h>
10 #include <crypto/sha1.h>
11 #include <crypto/sha2.h>
12 #include <crypto/hash.h>
13 #include <crypto/hmac.h>
14 #include <crypto/algapi.h>
15 #include <crypto/authenc.h>
16 #include <crypto/scatterwalk.h>
17 #include <crypto/xts.h>
18 #include <linux/dma-mapping.h>
19 #include "adf_accel_devices.h"
20 #include "adf_transport.h"
21 #include "adf_common_drv.h"
22 #include "qat_crypto.h"
23 #include "icp_qat_hw.h"
24 #include "icp_qat_fw.h"
25 #include "icp_qat_fw_la.h"
26
27 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
28 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
29 ICP_QAT_HW_CIPHER_NO_CONVERT, \
30 ICP_QAT_HW_CIPHER_ENCRYPT)
31
32 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
33 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
34 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
35 ICP_QAT_HW_CIPHER_DECRYPT)
36
37 #define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
38 ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
39 ICP_QAT_HW_CIPHER_NO_CONVERT, \
40 ICP_QAT_HW_CIPHER_DECRYPT)
41
42 #define HW_CAP_AES_V2(accel_dev) \
43 (GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
44 ICP_ACCEL_CAPABILITIES_AES_V2)
45
46 static DEFINE_MUTEX(algs_lock);
47 static unsigned int active_devs;
48
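/*
 * Flat buffer descriptor list handed to the firmware: each entry holds the
 * DMA address and length of one scatterlist segment.  The list is built by
 * qat_alg_sgl_to_bufl() and is itself DMA mapped, hence the packed,
 * 64-byte aligned layout.
 */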
49 struct qat_alg_buf {
50 u32 len;
51 u32 resrvd;
52 u64 addr;
53 } __packed;
54
55 struct qat_alg_buf_list {
56 u64 resrvd;
57 u32 num_bufs;
58 u32 num_mapped_bufs;
59 struct qat_alg_buf bufers[];
60 } __packed __aligned(64);
61
62 /* Common content descriptor */
63 struct qat_alg_cd {
64 union {
65 struct qat_enc { /* Encrypt content desc */
66 struct icp_qat_hw_cipher_algo_blk cipher;
67 struct icp_qat_hw_auth_algo_blk hash;
68 } qat_enc_cd;
69 struct qat_dec { /* Decrypt content desc */
70 struct icp_qat_hw_auth_algo_blk hash;
71 struct icp_qat_hw_cipher_algo_blk cipher;
72 } qat_dec_cd;
73 };
74 } __aligned(64);
75
76 struct qat_alg_aead_ctx {
77 struct qat_alg_cd *enc_cd;
78 struct qat_alg_cd *dec_cd;
79 dma_addr_t enc_cd_paddr;
80 dma_addr_t dec_cd_paddr;
81 struct icp_qat_fw_la_bulk_req enc_fw_req;
82 struct icp_qat_fw_la_bulk_req dec_fw_req;
83 struct crypto_shash *hash_tfm;
84 enum icp_qat_hw_auth_algo qat_hash_alg;
85 struct qat_crypto_instance *inst;
86 union {
87 struct sha1_state sha1;
88 struct sha256_state sha256;
89 struct sha512_state sha512;
90 };
91 char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
92 char opad[SHA512_BLOCK_SIZE];
93 };
94
95 struct qat_alg_skcipher_ctx {
96 struct icp_qat_hw_cipher_algo_blk *enc_cd;
97 struct icp_qat_hw_cipher_algo_blk *dec_cd;
98 dma_addr_t enc_cd_paddr;
99 dma_addr_t dec_cd_paddr;
100 struct icp_qat_fw_la_bulk_req enc_fw_req;
101 struct icp_qat_fw_la_bulk_req dec_fw_req;
102 struct qat_crypto_instance *inst;
103 struct crypto_skcipher *ftfm;
104 struct crypto_cipher *tweak;
105 bool fallback;
106 int mode;
107 };
108
109 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
110 {
111 switch (qat_hash_alg) {
112 case ICP_QAT_HW_AUTH_ALGO_SHA1:
113 return ICP_QAT_HW_SHA1_STATE1_SZ;
114 case ICP_QAT_HW_AUTH_ALGO_SHA256:
115 return ICP_QAT_HW_SHA256_STATE1_SZ;
116 case ICP_QAT_HW_AUTH_ALGO_SHA512:
117 return ICP_QAT_HW_SHA512_STATE1_SZ;
118 default:
119 return -EFAULT;
120 }
121 return -EFAULT;
122 }
123
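/*
 * Precompute the inner and outer HMAC partial hashes, H(K ^ ipad) and
 * H(K ^ opad), in software and store their internal states (big endian)
 * in the content descriptor, so the hardware can resume the hash for each
 * request instead of re-deriving the key pads.  Keys longer than the block
 * size are first reduced by hashing them, as HMAC requires.
 */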
124 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
125 struct qat_alg_aead_ctx *ctx,
126 const u8 *auth_key,
127 unsigned int auth_keylen)
128 {
129 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
130 int block_size = crypto_shash_blocksize(ctx->hash_tfm);
131 int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
132 __be32 *hash_state_out;
133 __be64 *hash512_state_out;
134 int i, offset;
135
136 memset(ctx->ipad, 0, block_size);
137 memset(ctx->opad, 0, block_size);
138 shash->tfm = ctx->hash_tfm;
139
140 if (auth_keylen > block_size) {
141 int ret = crypto_shash_digest(shash, auth_key,
142 auth_keylen, ctx->ipad);
143 if (ret)
144 return ret;
145
146 memcpy(ctx->opad, ctx->ipad, digest_size);
147 } else {
148 memcpy(ctx->ipad, auth_key, auth_keylen);
149 memcpy(ctx->opad, auth_key, auth_keylen);
150 }
151
152 for (i = 0; i < block_size; i++) {
153 char *ipad_ptr = ctx->ipad + i;
154 char *opad_ptr = ctx->opad + i;
155 *ipad_ptr ^= HMAC_IPAD_VALUE;
156 *opad_ptr ^= HMAC_OPAD_VALUE;
157 }
158
159 if (crypto_shash_init(shash))
160 return -EFAULT;
161
162 if (crypto_shash_update(shash, ctx->ipad, block_size))
163 return -EFAULT;
164
165 hash_state_out = (__be32 *)hash->sha.state1;
166 hash512_state_out = (__be64 *)hash_state_out;
167
168 switch (ctx->qat_hash_alg) {
169 case ICP_QAT_HW_AUTH_ALGO_SHA1:
170 if (crypto_shash_export(shash, &ctx->sha1))
171 return -EFAULT;
172 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
173 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
174 break;
175 case ICP_QAT_HW_AUTH_ALGO_SHA256:
176 if (crypto_shash_export(shash, &ctx->sha256))
177 return -EFAULT;
178 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
179 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
180 break;
181 case ICP_QAT_HW_AUTH_ALGO_SHA512:
182 if (crypto_shash_export(shash, &ctx->sha512))
183 return -EFAULT;
184 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
185 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
186 break;
187 default:
188 return -EFAULT;
189 }
190
191 if (crypto_shash_init(shash))
192 return -EFAULT;
193
194 if (crypto_shash_update(shash, ctx->opad, block_size))
195 return -EFAULT;
196
197 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
198 if (offset < 0)
199 return -EFAULT;
200
201 hash_state_out = (__be32 *)(hash->sha.state1 + offset);
202 hash512_state_out = (__be64 *)hash_state_out;
203
204 switch (ctx->qat_hash_alg) {
205 case ICP_QAT_HW_AUTH_ALGO_SHA1:
206 if (crypto_shash_export(shash, &ctx->sha1))
207 return -EFAULT;
208 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
209 *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
210 break;
211 case ICP_QAT_HW_AUTH_ALGO_SHA256:
212 if (crypto_shash_export(shash, &ctx->sha256))
213 return -EFAULT;
214 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
215 *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
216 break;
217 case ICP_QAT_HW_AUTH_ALGO_SHA512:
218 if (crypto_shash_export(shash, &ctx->sha512))
219 return -EFAULT;
220 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
221 *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
222 break;
223 default:
224 return -EFAULT;
225 }
226 memzero_explicit(ctx->ipad, block_size);
227 memzero_explicit(ctx->opad, block_size);
228 return 0;
229 }
230
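/* Fill the header fields shared by all lookaside (LA) requests: SGL
 * pointers, 16-byte IV field, no partial processing, no protocol and no
 * state update.
 */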
231 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
232 {
233 header->hdr_flags =
234 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
235 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
236 header->comn_req_flags =
237 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
238 QAT_COMN_PTR_TYPE_SGL);
239 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
240 ICP_QAT_FW_LA_PARTIAL_NONE);
241 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
242 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
243 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
244 ICP_QAT_FW_LA_NO_PROTO);
245 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
246 ICP_QAT_FW_LA_NO_UPDATE_STATE);
247 }
248
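/*
 * Build the encrypt session: the content descriptor places the cipher
 * config and key first, followed by the auth setup and the precomputed
 * HMAC states; the firmware slices are chained cipher -> auth -> DRAM
 * write, i.e. encrypt-then-MAC for the authenc() template.
 */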
249 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
250 int alg,
251 struct crypto_authenc_keys *keys,
252 int mode)
253 {
254 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
255 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
256 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
257 struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
258 struct icp_qat_hw_auth_algo_blk *hash =
259 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
260 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
261 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
262 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
263 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
264 void *ptr = &req_tmpl->cd_ctrl;
265 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
266 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
267
268 /* CD setup */
269 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
270 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
271 hash->sha.inner_setup.auth_config.config =
272 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
273 ctx->qat_hash_alg, digestsize);
274 hash->sha.inner_setup.auth_counter.counter =
275 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
276
277 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
278 return -EFAULT;
279
280 /* Request setup */
281 qat_alg_init_common_hdr(header);
282 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
283 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
284 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
285 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
286 ICP_QAT_FW_LA_RET_AUTH_RES);
287 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
288 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
289 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
290 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
291
292 /* Cipher CD config setup */
293 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
294 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
295 cipher_cd_ctrl->cipher_cfg_offset = 0;
296 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
297 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
298 /* Auth CD config setup */
299 hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
300 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
301 hash_cd_ctrl->inner_res_sz = digestsize;
302 hash_cd_ctrl->final_sz = digestsize;
303
304 switch (ctx->qat_hash_alg) {
305 case ICP_QAT_HW_AUTH_ALGO_SHA1:
306 hash_cd_ctrl->inner_state1_sz =
307 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
308 hash_cd_ctrl->inner_state2_sz =
309 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
310 break;
311 case ICP_QAT_HW_AUTH_ALGO_SHA256:
312 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
313 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
314 break;
315 case ICP_QAT_HW_AUTH_ALGO_SHA512:
316 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
317 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
318 break;
319 default:
320 break;
321 }
322 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
323 ((sizeof(struct icp_qat_hw_auth_setup) +
324 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
325 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
326 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
327 return 0;
328 }
329
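/*
 * Build the decrypt session: here the hash block comes first and the
 * cipher config follows the (8-byte rounded, doubled) digest-sized state
 * area.  The slices are chained auth -> cipher -> DRAM write, the hardware
 * compares the ICV itself (CMP_AUTH_RES) and the cipher key is marked for
 * conversion to a decryption key schedule.
 */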
330 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
331 int alg,
332 struct crypto_authenc_keys *keys,
333 int mode)
334 {
335 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
336 unsigned int digestsize = crypto_aead_authsize(aead_tfm);
337 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
338 struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
339 struct icp_qat_hw_cipher_algo_blk *cipher =
340 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
341 sizeof(struct icp_qat_hw_auth_setup) +
342 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
343 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
344 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
345 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
346 void *ptr = &req_tmpl->cd_ctrl;
347 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
348 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
349 struct icp_qat_fw_la_auth_req_params *auth_param =
350 (struct icp_qat_fw_la_auth_req_params *)
351 ((char *)&req_tmpl->serv_specif_rqpars +
352 sizeof(struct icp_qat_fw_la_cipher_req_params));
353
354 /* CD setup */
355 cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
356 memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
357 hash->sha.inner_setup.auth_config.config =
358 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
359 ctx->qat_hash_alg,
360 digestsize);
361 hash->sha.inner_setup.auth_counter.counter =
362 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
363
364 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
365 return -EFAULT;
366
367 /* Request setup */
368 qat_alg_init_common_hdr(header);
369 header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
370 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
371 ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
372 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
373 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
374 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
375 ICP_QAT_FW_LA_CMP_AUTH_RES);
376 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
377 cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
378
379 /* Cipher CD config setup */
380 cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
381 cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
382 cipher_cd_ctrl->cipher_cfg_offset =
383 (sizeof(struct icp_qat_hw_auth_setup) +
384 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
385 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
386 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
387
388 /* Auth CD config setup */
389 hash_cd_ctrl->hash_cfg_offset = 0;
390 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
391 hash_cd_ctrl->inner_res_sz = digestsize;
392 hash_cd_ctrl->final_sz = digestsize;
393
394 switch (ctx->qat_hash_alg) {
395 case ICP_QAT_HW_AUTH_ALGO_SHA1:
396 hash_cd_ctrl->inner_state1_sz =
397 round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
398 hash_cd_ctrl->inner_state2_sz =
399 round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
400 break;
401 case ICP_QAT_HW_AUTH_ALGO_SHA256:
402 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
403 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
404 break;
405 case ICP_QAT_HW_AUTH_ALGO_SHA512:
406 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
407 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
408 break;
409 default:
410 break;
411 }
412
413 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
414 ((sizeof(struct icp_qat_hw_auth_setup) +
415 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
416 auth_param->auth_res_sz = digestsize;
417 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
418 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
419 return 0;
420 }
421
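/*
 * Common cipher-only request/CD setup.  On AES-v2 capable devices the UCS
 * cipher slice is selected for XTS and CTR: for XTS both key halves are
 * stored in the CD but only the first half is counted in cipher_key_sz
 * (the second half is used for the tweak), while CTR keys are padded up
 * to a 16-byte multiple.
 */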
422 static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
423 struct icp_qat_fw_la_bulk_req *req,
424 struct icp_qat_hw_cipher_algo_blk *cd,
425 const u8 *key, unsigned int keylen)
426 {
427 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
428 struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
429 struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
430 bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
431 int mode = ctx->mode;
432
433 qat_alg_init_common_hdr(header);
434 header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
435 cd_pars->u.s.content_desc_params_sz =
436 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
437
438 if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
439 ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
440 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
441
442 /* Store both XTS keys in CD, only the first key is sent
443 * to the HW, the second key is used for tweak calculation
444 */
445 memcpy(cd->ucs_aes.key, key, keylen);
446 keylen = keylen / 2;
447 } else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
448 ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
449 ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
450 keylen = round_up(keylen, 16);
451 memcpy(cd->ucs_aes.key, key, keylen);
452 } else {
453 memcpy(cd->aes.key, key, keylen);
454 }
455
456 /* Cipher CD config setup */
457 cd_ctrl->cipher_key_sz = keylen >> 3;
458 cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
459 cd_ctrl->cipher_cfg_offset = 0;
460 ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
461 ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
462 }
463
464 static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
465 int alg, const u8 *key,
466 unsigned int keylen, int mode)
467 {
468 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
469 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
470 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
471
472 qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
473 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
474 enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
475 }
476
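/*
 * Derive the starting state of the AES decryption key schedule in
 * software, since the UCS slice does not perform key reversal (see the
 * no-convert decrypt config below).  E.g. for AES-128 (10 rounds) the
 * reversed key is round key 10, i.e. bytes 160..175 of the expanded
 * encryption key; for AES-256 it is round keys 14 and 13.
 */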
477 static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
478 u8 *key_reverse)
479 {
480 struct crypto_aes_ctx aes_expanded;
481 int nrounds;
482 u8 *key;
483
484 aes_expandkey(&aes_expanded, key_forward, keylen);
485 if (keylen == AES_KEYSIZE_128) {
486 nrounds = 10;
487 key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
488 memcpy(key_reverse, key, AES_BLOCK_SIZE);
489 } else {
490 /* AES_KEYSIZE_256 */
491 nrounds = 14;
492 key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
493 memcpy(key_reverse, key, AES_BLOCK_SIZE);
494 memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
495 AES_BLOCK_SIZE);
496 }
497 }
498
499 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
500 int alg, const u8 *key,
501 unsigned int keylen, int mode)
502 {
503 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
504 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
505 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
506 bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
507
508 qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
509 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
510
511 if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
512 /* Key reversing not supported, set no convert */
513 dec_cd->aes.cipher_config.val =
514 QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
515
516 /* In-place key reversal */
517 qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
518 dec_cd->ucs_aes.key);
519 } else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
520 dec_cd->aes.cipher_config.val =
521 QAT_AES_HW_CONFIG_DEC(alg, mode);
522 } else {
523 dec_cd->aes.cipher_config.val =
524 QAT_AES_HW_CONFIG_ENC(alg, mode);
525 }
526 }
527
528 static int qat_alg_validate_key(int key_len, int *alg, int mode)
529 {
530 if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
531 switch (key_len) {
532 case AES_KEYSIZE_128:
533 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
534 break;
535 case AES_KEYSIZE_192:
536 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
537 break;
538 case AES_KEYSIZE_256:
539 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
540 break;
541 default:
542 return -EINVAL;
543 }
544 } else {
545 switch (key_len) {
546 case AES_KEYSIZE_128 << 1:
547 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
548 break;
549 case AES_KEYSIZE_256 << 1:
550 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
551 break;
552 default:
553 return -EINVAL;
554 }
555 }
556 return 0;
557 }
558
559 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
560 unsigned int keylen, int mode)
561 {
562 struct crypto_authenc_keys keys;
563 int alg;
564
565 if (crypto_authenc_extractkeys(&keys, key, keylen))
566 goto bad_key;
567
568 if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
569 goto bad_key;
570
571 if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
572 goto error;
573
574 if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
575 goto error;
576
577 memzero_explicit(&keys, sizeof(keys));
578 return 0;
579 bad_key:
580 memzero_explicit(&keys, sizeof(keys));
581 return -EINVAL;
582 error:
583 memzero_explicit(&keys, sizeof(keys));
584 return -EFAULT;
585 }
586
587 static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
588 const u8 *key,
589 unsigned int keylen,
590 int mode)
591 {
592 int alg;
593
594 if (qat_alg_validate_key(keylen, &alg, mode))
595 return -EINVAL;
596
597 qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
598 qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
599 return 0;
600 }
601
602 static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
603 unsigned int keylen)
604 {
605 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
606
607 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
608 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
609 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
610 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
611
612 return qat_alg_aead_init_sessions(tfm, key, keylen,
613 ICP_QAT_HW_CIPHER_CBC_MODE);
614 }
615
616 static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
617 unsigned int keylen)
618 {
619 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
620 struct qat_crypto_instance *inst = NULL;
621 int node = get_current_node();
622 struct device *dev;
623 int ret;
624
625 inst = qat_crypto_get_instance_node(node);
626 if (!inst)
627 return -EINVAL;
628 dev = &GET_DEV(inst->accel_dev);
629 ctx->inst = inst;
630 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
631 &ctx->enc_cd_paddr,
632 GFP_ATOMIC);
633 if (!ctx->enc_cd) {
634 ret = -ENOMEM;
635 goto out_free_inst;
636 }
637 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
638 &ctx->dec_cd_paddr,
639 GFP_ATOMIC);
640 if (!ctx->dec_cd) {
641 ret = -ENOMEM;
642 goto out_free_enc;
643 }
644
645 ret = qat_alg_aead_init_sessions(tfm, key, keylen,
646 ICP_QAT_HW_CIPHER_CBC_MODE);
647 if (ret)
648 goto out_free_all;
649
650 return 0;
651
652 out_free_all:
653 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
654 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
655 ctx->dec_cd, ctx->dec_cd_paddr);
656 ctx->dec_cd = NULL;
657 out_free_enc:
658 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
659 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
660 ctx->enc_cd, ctx->enc_cd_paddr);
661 ctx->enc_cd = NULL;
662 out_free_inst:
663 ctx->inst = NULL;
664 qat_crypto_put_instance(inst);
665 return ret;
666 }
667
668 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
669 unsigned int keylen)
670 {
671 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
672
673 if (ctx->enc_cd)
674 return qat_alg_aead_rekey(tfm, key, keylen);
675 else
676 return qat_alg_aead_newkey(tfm, key, keylen);
677 }
678
679 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
680 struct qat_crypto_request *qat_req)
681 {
682 struct device *dev = &GET_DEV(inst->accel_dev);
683 struct qat_alg_buf_list *bl = qat_req->buf.bl;
684 struct qat_alg_buf_list *blout = qat_req->buf.blout;
685 dma_addr_t blp = qat_req->buf.blp;
686 dma_addr_t blpout = qat_req->buf.bloutp;
687 size_t sz = qat_req->buf.sz;
688 size_t sz_out = qat_req->buf.sz_out;
689 int i;
690
691 for (i = 0; i < bl->num_bufs; i++)
692 dma_unmap_single(dev, bl->bufers[i].addr,
693 bl->bufers[i].len, DMA_BIDIRECTIONAL);
694
695 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
696 kfree(bl);
697 if (blp != blpout) {
698 /* For out-of-place operations, DMA unmap only the mapped data buffers */
699 int bufless = blout->num_bufs - blout->num_mapped_bufs;
700
701 for (i = bufless; i < blout->num_bufs; i++) {
702 dma_unmap_single(dev, blout->bufers[i].addr,
703 blout->bufers[i].len,
704 DMA_BIDIRECTIONAL);
705 }
706 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
707 kfree(blout);
708 }
709 }
710
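/*
 * Flatten the source (and, for out-of-place operations, destination)
 * scatterlists into qat_alg_buf_list descriptors: every non-empty segment
 * is DMA mapped individually and the descriptor list itself is then DMA
 * mapped so the firmware can walk it.  On any mapping failure all
 * previously created mappings are torn down.
 */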
711 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
712 struct scatterlist *sgl,
713 struct scatterlist *sglout,
714 struct qat_crypto_request *qat_req)
715 {
716 struct device *dev = &GET_DEV(inst->accel_dev);
717 int i, sg_nctr = 0;
718 int n = sg_nents(sgl);
719 struct qat_alg_buf_list *bufl;
720 struct qat_alg_buf_list *buflout = NULL;
721 dma_addr_t blp = DMA_MAPPING_ERROR;
722 dma_addr_t bloutp = DMA_MAPPING_ERROR;
723 struct scatterlist *sg;
724 size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
725
726 if (unlikely(!n))
727 return -EINVAL;
728
729 bufl = kzalloc_node(sz, GFP_ATOMIC,
730 dev_to_node(&GET_DEV(inst->accel_dev)));
731 if (unlikely(!bufl))
732 return -ENOMEM;
733
734 for_each_sg(sgl, sg, n, i)
735 bufl->bufers[i].addr = DMA_MAPPING_ERROR;
736
737 for_each_sg(sgl, sg, n, i) {
738 int y = sg_nctr;
739
740 if (!sg->length)
741 continue;
742
743 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
744 sg->length,
745 DMA_BIDIRECTIONAL);
746 bufl->bufers[y].len = sg->length;
747 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
748 goto err_in;
749 sg_nctr++;
750 }
751 bufl->num_bufs = sg_nctr;
752 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
753 if (unlikely(dma_mapping_error(dev, blp)))
754 goto err_in;
755 qat_req->buf.bl = bufl;
756 qat_req->buf.blp = blp;
757 qat_req->buf.sz = sz;
758 /* Handle out of place operation */
759 if (sgl != sglout) {
760 struct qat_alg_buf *bufers;
761
762 n = sg_nents(sglout);
763 sz_out = struct_size(buflout, bufers, n + 1);
764 sg_nctr = 0;
765 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
766 dev_to_node(&GET_DEV(inst->accel_dev)));
767 if (unlikely(!buflout))
768 goto err_in;
769
770 bufers = buflout->bufers;
771 for_each_sg(sglout, sg, n, i)
772 bufers[i].addr = DMA_MAPPING_ERROR;
773
774 for_each_sg(sglout, sg, n, i) {
775 int y = sg_nctr;
776
777 if (!sg->length)
778 continue;
779
780 bufers[y].addr = dma_map_single(dev, sg_virt(sg),
781 sg->length,
782 DMA_BIDIRECTIONAL);
783 if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
784 goto err_out;
785 bufers[y].len = sg->length;
786 sg_nctr++;
787 }
788 buflout->num_bufs = sg_nctr;
789 buflout->num_mapped_bufs = sg_nctr;
790 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
791 if (unlikely(dma_mapping_error(dev, bloutp)))
792 goto err_out;
793 qat_req->buf.blout = buflout;
794 qat_req->buf.bloutp = bloutp;
795 qat_req->buf.sz_out = sz_out;
796 } else {
797 /* Otherwise set the src and dst to the same address */
798 qat_req->buf.bloutp = qat_req->buf.blp;
799 qat_req->buf.sz_out = 0;
800 }
801 return 0;
802
803 err_out:
804 if (!dma_mapping_error(dev, bloutp))
805 dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
806
807 n = sg_nents(sglout);
808 for (i = 0; i < n; i++)
809 if (!dma_mapping_error(dev, buflout->bufers[i].addr))
810 dma_unmap_single(dev, buflout->bufers[i].addr,
811 buflout->bufers[i].len,
812 DMA_BIDIRECTIONAL);
813 kfree(buflout);
814
815 err_in:
816 if (!dma_mapping_error(dev, blp))
817 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
818
819 n = sg_nents(sgl);
820 for (i = 0; i < n; i++)
821 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
822 dma_unmap_single(dev, bufl->bufers[i].addr,
823 bufl->bufers[i].len,
824 DMA_BIDIRECTIONAL);
825
826 kfree(bufl);
827
828 dev_err(dev, "Failed to map buf for dma\n");
829 return -ENOMEM;
830 }
831
832 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
833 struct qat_crypto_request *qat_req)
834 {
835 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
836 struct qat_crypto_instance *inst = ctx->inst;
837 struct aead_request *areq = qat_req->aead_req;
838 u8 stat_field = qat_resp->comn_resp.comn_status;
839 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
840
841 qat_alg_free_bufl(inst, qat_req);
842 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
843 res = -EBADMSG;
844 areq->base.complete(&areq->base, res);
845 }
846
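/*
 * CTR mode: treat the 16-byte IV as a big-endian 128-bit counter
 * (iv_hi:iv_lo) and advance it by the number of AES blocks processed,
 * propagating the carry.  E.g. a 4096-byte request advances the counter
 * by 256.
 */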
847 static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
848 {
849 struct skcipher_request *sreq = qat_req->skcipher_req;
850 u64 iv_lo_prev;
851 u64 iv_lo;
852 u64 iv_hi;
853
854 memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);
855
856 iv_lo = be64_to_cpu(qat_req->iv_lo);
857 iv_hi = be64_to_cpu(qat_req->iv_hi);
858
859 iv_lo_prev = iv_lo;
860 iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
861 if (iv_lo < iv_lo_prev)
862 iv_hi++;
863
864 qat_req->iv_lo = cpu_to_be64(iv_lo);
865 qat_req->iv_hi = cpu_to_be64(iv_hi);
866 }
867
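/*
 * CBC mode: the chaining value for the next request is the last ciphertext
 * block, so copy the final AES block of the destination (encrypt) or the
 * source (decrypt) buffer into the request IV.
 */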
868 static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
869 {
870 struct skcipher_request *sreq = qat_req->skcipher_req;
871 int offset = sreq->cryptlen - AES_BLOCK_SIZE;
872 struct scatterlist *sgl;
873
874 if (qat_req->encryption)
875 sgl = sreq->dst;
876 else
877 sgl = sreq->src;
878
879 scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
880 }
881
882 static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
883 {
884 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
885 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
886
887 switch (ctx->mode) {
888 case ICP_QAT_HW_CIPHER_CTR_MODE:
889 qat_alg_update_iv_ctr_mode(qat_req);
890 break;
891 case ICP_QAT_HW_CIPHER_CBC_MODE:
892 qat_alg_update_iv_cbc_mode(qat_req);
893 break;
894 case ICP_QAT_HW_CIPHER_XTS_MODE:
895 break;
896 default:
897 dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
898 ctx->mode);
899 }
900 }
901
902 static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
903 struct qat_crypto_request *qat_req)
904 {
905 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
906 struct qat_crypto_instance *inst = ctx->inst;
907 struct skcipher_request *sreq = qat_req->skcipher_req;
908 u8 stat_field = qat_resp->comn_resp.comn_status;
909 int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
910
911 qat_alg_free_bufl(inst, qat_req);
912 if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
913 res = -EINVAL;
914
915 if (qat_req->encryption)
916 qat_alg_update_iv(qat_req);
917
918 memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
919
920 sreq->base.complete(&sreq->base, res);
921 }
922
923 void qat_alg_callback(void *resp)
924 {
925 struct icp_qat_fw_la_resp *qat_resp = resp;
926 struct qat_crypto_request *qat_req =
927 (void *)(__force long)qat_resp->opaque_data;
928
929 qat_req->cb(qat_resp, qat_req);
930 }
931
932 static int qat_alg_aead_dec(struct aead_request *areq)
933 {
934 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
935 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
936 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
937 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
938 struct icp_qat_fw_la_cipher_req_params *cipher_param;
939 struct icp_qat_fw_la_auth_req_params *auth_param;
940 struct icp_qat_fw_la_bulk_req *msg;
941 int digst_size = crypto_aead_authsize(aead_tfm);
942 int ret, ctr = 0;
943 u32 cipher_len;
944
945 cipher_len = areq->cryptlen - digst_size;
946 if (cipher_len % AES_BLOCK_SIZE != 0)
947 return -EINVAL;
948
949 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
950 if (unlikely(ret))
951 return ret;
952
953 msg = &qat_req->req;
954 *msg = ctx->dec_fw_req;
955 qat_req->aead_ctx = ctx;
956 qat_req->aead_req = areq;
957 qat_req->cb = qat_aead_alg_callback;
958 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
959 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
960 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
961 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
962 cipher_param->cipher_length = cipher_len;
963 cipher_param->cipher_offset = areq->assoclen;
964 memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
965 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
966 auth_param->auth_off = 0;
967 auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
968 do {
969 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
970 } while (ret == -EAGAIN && ctr++ < 10);
971
972 if (ret == -EAGAIN) {
973 qat_alg_free_bufl(ctx->inst, qat_req);
974 return -EBUSY;
975 }
976 return -EINPROGRESS;
977 }
978
979 static int qat_alg_aead_enc(struct aead_request *areq)
980 {
981 struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
982 struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
983 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
984 struct qat_crypto_request *qat_req = aead_request_ctx(areq);
985 struct icp_qat_fw_la_cipher_req_params *cipher_param;
986 struct icp_qat_fw_la_auth_req_params *auth_param;
987 struct icp_qat_fw_la_bulk_req *msg;
988 u8 *iv = areq->iv;
989 int ret, ctr = 0;
990
991 if (areq->cryptlen % AES_BLOCK_SIZE != 0)
992 return -EINVAL;
993
994 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
995 if (unlikely(ret))
996 return ret;
997
998 msg = &qat_req->req;
999 *msg = ctx->enc_fw_req;
1000 qat_req->aead_ctx = ctx;
1001 qat_req->aead_req = areq;
1002 qat_req->cb = qat_aead_alg_callback;
1003 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1004 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1005 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1006 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1007 auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
1008
1009 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
1010 cipher_param->cipher_length = areq->cryptlen;
1011 cipher_param->cipher_offset = areq->assoclen;
1012
1013 auth_param->auth_off = 0;
1014 auth_param->auth_len = areq->assoclen + areq->cryptlen;
1015
1016 do {
1017 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1018 } while (ret == -EAGAIN && ctr++ < 10);
1019
1020 if (ret == -EAGAIN) {
1021 qat_alg_free_bufl(ctx->inst, qat_req);
1022 return -EBUSY;
1023 }
1024 return -EINPROGRESS;
1025 }
1026
1027 static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
1028 const u8 *key, unsigned int keylen,
1029 int mode)
1030 {
1031 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1032 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1033 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
1034 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
1035
1036 return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
1037 }
1038
1039 static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
1040 const u8 *key, unsigned int keylen,
1041 int mode)
1042 {
1043 struct qat_crypto_instance *inst = NULL;
1044 struct device *dev;
1045 int node = get_current_node();
1046 int ret;
1047
1048 inst = qat_crypto_get_instance_node(node);
1049 if (!inst)
1050 return -EINVAL;
1051 dev = &GET_DEV(inst->accel_dev);
1052 ctx->inst = inst;
1053 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
1054 &ctx->enc_cd_paddr,
1055 GFP_ATOMIC);
1056 if (!ctx->enc_cd) {
1057 ret = -ENOMEM;
1058 goto out_free_instance;
1059 }
1060 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
1061 &ctx->dec_cd_paddr,
1062 GFP_ATOMIC);
1063 if (!ctx->dec_cd) {
1064 ret = -ENOMEM;
1065 goto out_free_enc;
1066 }
1067
1068 ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
1069 if (ret)
1070 goto out_free_all;
1071
1072 return 0;
1073
1074 out_free_all:
1075 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
1076 dma_free_coherent(dev, sizeof(*ctx->dec_cd),
1077 ctx->dec_cd, ctx->dec_cd_paddr);
1078 ctx->dec_cd = NULL;
1079 out_free_enc:
1080 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
1081 dma_free_coherent(dev, sizeof(*ctx->enc_cd),
1082 ctx->enc_cd, ctx->enc_cd_paddr);
1083 ctx->enc_cd = NULL;
1084 out_free_instance:
1085 ctx->inst = NULL;
1086 qat_crypto_put_instance(inst);
1087 return ret;
1088 }
1089
1090 static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
1091 const u8 *key, unsigned int keylen,
1092 int mode)
1093 {
1094 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1095
1096 ctx->mode = mode;
1097
1098 if (ctx->enc_cd)
1099 return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
1100 else
1101 return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
1102 }
1103
1104 static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
1105 const u8 *key, unsigned int keylen)
1106 {
1107 return qat_alg_skcipher_setkey(tfm, key, keylen,
1108 ICP_QAT_HW_CIPHER_CBC_MODE);
1109 }
1110
1111 static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
1112 const u8 *key, unsigned int keylen)
1113 {
1114 return qat_alg_skcipher_setkey(tfm, key, keylen,
1115 ICP_QAT_HW_CIPHER_CTR_MODE);
1116 }
1117
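/*
 * XTS keys whose halves are AES-192 are not handled by this data path and
 * are routed to the fallback xts(aes) tfm instead.  Otherwise the QAT
 * session is programmed and, on AES-v2 capable devices, the second key
 * half also keys the tweak cipher used to pre-encrypt the IV in
 * qat_alg_set_req_iv().
 */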
1118 static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
1119 const u8 *key, unsigned int keylen)
1120 {
1121 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1122 int ret;
1123
1124 ret = xts_verify_key(tfm, key, keylen);
1125 if (ret)
1126 return ret;
1127
1128 if (keylen >> 1 == AES_KEYSIZE_192) {
1129 ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
1130 if (ret)
1131 return ret;
1132
1133 ctx->fallback = true;
1134
1135 return 0;
1136 }
1137
1138 ctx->fallback = false;
1139
1140 ret = qat_alg_skcipher_setkey(tfm, key, keylen,
1141 ICP_QAT_HW_CIPHER_XTS_MODE);
1142 if (ret)
1143 return ret;
1144
1145 if (HW_CAP_AES_V2(ctx->inst->accel_dev))
1146 ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
1147 keylen / 2);
1148
1149 return ret;
1150 }
1151
1152 static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
1153 {
1154 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1155 struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
1156 bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
1157 u8 *iv = qat_req->skcipher_req->iv;
1158
1159 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1160
1161 if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
1162 crypto_cipher_encrypt_one(ctx->tweak,
1163 (u8 *)cipher_param->u.cipher_IV_array,
1164 iv);
1165 else
1166 memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
1167 }
1168
1169 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
1170 {
1171 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1172 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1173 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1174 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1175 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1176 struct icp_qat_fw_la_bulk_req *msg;
1177 int ret, ctr = 0;
1178
1179 if (req->cryptlen == 0)
1180 return 0;
1181
1182 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1183 if (unlikely(ret))
1184 return ret;
1185
1186 msg = &qat_req->req;
1187 *msg = ctx->enc_fw_req;
1188 qat_req->skcipher_ctx = ctx;
1189 qat_req->skcipher_req = req;
1190 qat_req->cb = qat_skcipher_alg_callback;
1191 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1192 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1193 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1194 qat_req->encryption = true;
1195 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1196 cipher_param->cipher_length = req->cryptlen;
1197 cipher_param->cipher_offset = 0;
1198
1199 qat_alg_set_req_iv(qat_req);
1200
1201 do {
1202 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1203 } while (ret == -EAGAIN && ctr++ < 10);
1204
1205 if (ret == -EAGAIN) {
1206 qat_alg_free_bufl(ctx->inst, qat_req);
1207 return -EBUSY;
1208 }
1209 return -EINPROGRESS;
1210 }
1211
1212 static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
1213 {
1214 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1215 return -EINVAL;
1216
1217 return qat_alg_skcipher_encrypt(req);
1218 }
1219
1220 static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
1221 {
1222 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1223 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1224 struct skcipher_request *nreq = skcipher_request_ctx(req);
1225
1226 if (req->cryptlen < XTS_BLOCK_SIZE)
1227 return -EINVAL;
1228
1229 if (ctx->fallback) {
1230 memcpy(nreq, req, sizeof(*req));
1231 skcipher_request_set_tfm(nreq, ctx->ftfm);
1232 return crypto_skcipher_encrypt(nreq);
1233 }
1234
1235 return qat_alg_skcipher_encrypt(req);
1236 }
1237
1238 static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
1239 {
1240 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1241 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
1242 struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1243 struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
1244 struct icp_qat_fw_la_cipher_req_params *cipher_param;
1245 struct icp_qat_fw_la_bulk_req *msg;
1246 int ret, ctr = 0;
1247
1248 if (req->cryptlen == 0)
1249 return 0;
1250
1251 ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1252 if (unlikely(ret))
1253 return ret;
1254
1255 msg = &qat_req->req;
1256 *msg = ctx->dec_fw_req;
1257 qat_req->skcipher_ctx = ctx;
1258 qat_req->skcipher_req = req;
1259 qat_req->cb = qat_skcipher_alg_callback;
1260 qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
1261 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1262 qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1263 qat_req->encryption = false;
1264 cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1265 cipher_param->cipher_length = req->cryptlen;
1266 cipher_param->cipher_offset = 0;
1267
1268 qat_alg_set_req_iv(qat_req);
1269 qat_alg_update_iv(qat_req);
1270
1271 do {
1272 ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
1273 } while (ret == -EAGAIN && ctr++ < 10);
1274
1275 if (ret == -EAGAIN) {
1276 qat_alg_free_bufl(ctx->inst, qat_req);
1277 return -EBUSY;
1278 }
1279 return -EINPROGRESS;
1280 }
1281
1282 static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
1283 {
1284 if (req->cryptlen % AES_BLOCK_SIZE != 0)
1285 return -EINVAL;
1286
1287 return qat_alg_skcipher_decrypt(req);
1288 }
1289
1290 static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
1291 {
1292 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
1293 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
1294 struct skcipher_request *nreq = skcipher_request_ctx(req);
1295
1296 if (req->cryptlen < XTS_BLOCK_SIZE)
1297 return -EINVAL;
1298
1299 if (ctx->fallback) {
1300 memcpy(nreq, req, sizeof(*req));
1301 skcipher_request_set_tfm(nreq, ctx->ftfm);
1302 return crypto_skcipher_decrypt(nreq);
1303 }
1304
1305 return qat_alg_skcipher_decrypt(req);
1306 }
1307
1308 static int qat_alg_aead_init(struct crypto_aead *tfm,
1309 enum icp_qat_hw_auth_algo hash,
1310 const char *hash_name)
1311 {
1312 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1313
1314 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1315 if (IS_ERR(ctx->hash_tfm))
1316 return PTR_ERR(ctx->hash_tfm);
1317 ctx->qat_hash_alg = hash;
1318 crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1319 return 0;
1320 }
1321
1322 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1323 {
1324 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1325 }
1326
1327 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1328 {
1329 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1330 }
1331
1332 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1333 {
1334 return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1335 }
1336
1337 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1338 {
1339 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1340 struct qat_crypto_instance *inst = ctx->inst;
1341 struct device *dev;
1342
1343 crypto_free_shash(ctx->hash_tfm);
1344
1345 if (!inst)
1346 return;
1347
1348 dev = &GET_DEV(inst->accel_dev);
1349 if (ctx->enc_cd) {
1350 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1351 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1352 ctx->enc_cd, ctx->enc_cd_paddr);
1353 }
1354 if (ctx->dec_cd) {
1355 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1356 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1357 ctx->dec_cd, ctx->dec_cd_paddr);
1358 }
1359 qat_crypto_put_instance(inst);
1360 }
1361
1362 static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
1363 {
1364 crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
1365 return 0;
1366 }
1367
1368 static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
1369 {
1370 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1371 int reqsize;
1372
1373 ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
1374 CRYPTO_ALG_NEED_FALLBACK);
1375 if (IS_ERR(ctx->ftfm))
1376 return PTR_ERR(ctx->ftfm);
1377
1378 ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
1379 if (IS_ERR(ctx->tweak)) {
1380 crypto_free_skcipher(ctx->ftfm);
1381 return PTR_ERR(ctx->tweak);
1382 }
1383
1384 reqsize = max(sizeof(struct qat_crypto_request),
1385 sizeof(struct skcipher_request) +
1386 crypto_skcipher_reqsize(ctx->ftfm));
1387 crypto_skcipher_set_reqsize(tfm, reqsize);
1388
1389 return 0;
1390 }
1391
1392 static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
1393 {
1394 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1395 struct qat_crypto_instance *inst = ctx->inst;
1396 struct device *dev;
1397
1398 if (!inst)
1399 return;
1400
1401 dev = &GET_DEV(inst->accel_dev);
1402 if (ctx->enc_cd) {
1403 memset(ctx->enc_cd, 0,
1404 sizeof(struct icp_qat_hw_cipher_algo_blk));
1405 dma_free_coherent(dev,
1406 sizeof(struct icp_qat_hw_cipher_algo_blk),
1407 ctx->enc_cd, ctx->enc_cd_paddr);
1408 }
1409 if (ctx->dec_cd) {
1410 memset(ctx->dec_cd, 0,
1411 sizeof(struct icp_qat_hw_cipher_algo_blk));
1412 dma_free_coherent(dev,
1413 sizeof(struct icp_qat_hw_cipher_algo_blk),
1414 ctx->dec_cd, ctx->dec_cd_paddr);
1415 }
1416 qat_crypto_put_instance(inst);
1417 }
1418
1419 static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
1420 {
1421 struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
1422
1423 if (ctx->ftfm)
1424 crypto_free_skcipher(ctx->ftfm);
1425
1426 if (ctx->tweak)
1427 crypto_free_cipher(ctx->tweak);
1428
1429 qat_alg_skcipher_exit_tfm(tfm);
1430 }
1431
1432 static struct aead_alg qat_aeads[] = { {
1433 .base = {
1434 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1435 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1436 .cra_priority = 4001,
1437 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1438 .cra_blocksize = AES_BLOCK_SIZE,
1439 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1440 .cra_module = THIS_MODULE,
1441 },
1442 .init = qat_alg_aead_sha1_init,
1443 .exit = qat_alg_aead_exit,
1444 .setkey = qat_alg_aead_setkey,
1445 .decrypt = qat_alg_aead_dec,
1446 .encrypt = qat_alg_aead_enc,
1447 .ivsize = AES_BLOCK_SIZE,
1448 .maxauthsize = SHA1_DIGEST_SIZE,
1449 }, {
1450 .base = {
1451 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1452 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1453 .cra_priority = 4001,
1454 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1455 .cra_blocksize = AES_BLOCK_SIZE,
1456 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1457 .cra_module = THIS_MODULE,
1458 },
1459 .init = qat_alg_aead_sha256_init,
1460 .exit = qat_alg_aead_exit,
1461 .setkey = qat_alg_aead_setkey,
1462 .decrypt = qat_alg_aead_dec,
1463 .encrypt = qat_alg_aead_enc,
1464 .ivsize = AES_BLOCK_SIZE,
1465 .maxauthsize = SHA256_DIGEST_SIZE,
1466 }, {
1467 .base = {
1468 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1469 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1470 .cra_priority = 4001,
1471 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1472 .cra_blocksize = AES_BLOCK_SIZE,
1473 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1474 .cra_module = THIS_MODULE,
1475 },
1476 .init = qat_alg_aead_sha512_init,
1477 .exit = qat_alg_aead_exit,
1478 .setkey = qat_alg_aead_setkey,
1479 .decrypt = qat_alg_aead_dec,
1480 .encrypt = qat_alg_aead_enc,
1481 .ivsize = AES_BLOCK_SIZE,
1482 .maxauthsize = SHA512_DIGEST_SIZE,
1483 } };
1484
1485 static struct skcipher_alg qat_skciphers[] = { {
1486 .base.cra_name = "cbc(aes)",
1487 .base.cra_driver_name = "qat_aes_cbc",
1488 .base.cra_priority = 4001,
1489 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1490 .base.cra_blocksize = AES_BLOCK_SIZE,
1491 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1492 .base.cra_alignmask = 0,
1493 .base.cra_module = THIS_MODULE,
1494
1495 .init = qat_alg_skcipher_init_tfm,
1496 .exit = qat_alg_skcipher_exit_tfm,
1497 .setkey = qat_alg_skcipher_cbc_setkey,
1498 .decrypt = qat_alg_skcipher_blk_decrypt,
1499 .encrypt = qat_alg_skcipher_blk_encrypt,
1500 .min_keysize = AES_MIN_KEY_SIZE,
1501 .max_keysize = AES_MAX_KEY_SIZE,
1502 .ivsize = AES_BLOCK_SIZE,
1503 }, {
1504 .base.cra_name = "ctr(aes)",
1505 .base.cra_driver_name = "qat_aes_ctr",
1506 .base.cra_priority = 4001,
1507 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1508 .base.cra_blocksize = 1,
1509 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1510 .base.cra_alignmask = 0,
1511 .base.cra_module = THIS_MODULE,
1512
1513 .init = qat_alg_skcipher_init_tfm,
1514 .exit = qat_alg_skcipher_exit_tfm,
1515 .setkey = qat_alg_skcipher_ctr_setkey,
1516 .decrypt = qat_alg_skcipher_decrypt,
1517 .encrypt = qat_alg_skcipher_encrypt,
1518 .min_keysize = AES_MIN_KEY_SIZE,
1519 .max_keysize = AES_MAX_KEY_SIZE,
1520 .ivsize = AES_BLOCK_SIZE,
1521 }, {
1522 .base.cra_name = "xts(aes)",
1523 .base.cra_driver_name = "qat_aes_xts",
1524 .base.cra_priority = 4001,
1525 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
1526 CRYPTO_ALG_ALLOCATES_MEMORY,
1527 .base.cra_blocksize = AES_BLOCK_SIZE,
1528 .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
1529 .base.cra_alignmask = 0,
1530 .base.cra_module = THIS_MODULE,
1531
1532 .init = qat_alg_skcipher_init_xts_tfm,
1533 .exit = qat_alg_skcipher_exit_xts_tfm,
1534 .setkey = qat_alg_skcipher_xts_setkey,
1535 .decrypt = qat_alg_skcipher_xts_decrypt,
1536 .encrypt = qat_alg_skcipher_xts_encrypt,
1537 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1538 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1539 .ivsize = AES_BLOCK_SIZE,
1540 } };
1541
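/*
 * The algorithms are registered once, when the first accelerator comes up;
 * additional devices only bump active_devs under algs_lock.  The matching
 * unregister drops them when the last device goes away.
 */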
1542 int qat_algs_register(void)
1543 {
1544 int ret = 0;
1545
1546 mutex_lock(&algs_lock);
1547 if (++active_devs != 1)
1548 goto unlock;
1549
1550 ret = crypto_register_skciphers(qat_skciphers,
1551 ARRAY_SIZE(qat_skciphers));
1552 if (ret)
1553 goto unlock;
1554
1555 ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1556 if (ret)
1557 goto unreg_algs;
1558
1559 unlock:
1560 mutex_unlock(&algs_lock);
1561 return ret;
1562
1563 unreg_algs:
1564 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1565 goto unlock;
1566 }
1567
1568 void qat_algs_unregister(void)
1569 {
1570 mutex_lock(&algs_lock);
1571 if (--active_devs != 0)
1572 goto unlock;
1573
1574 crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1575 crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
1576
1577 unlock:
1578 mutex_unlock(&algs_lock);
1579 }
1580