/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
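
/*
 * Both configs are built from the same key material; the difference is
 * the key-convert flag: encryption uses the AES key as supplied
 * (NO_CONVERT), while decryption asks the cipher slice to derive the
 * decryption key schedule from it (KEY_CONVERT), since AES decrypt
 * rounds consume the expanded key in reverse.
 */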
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
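
/*
 * This is the flattened scatter-gather format handed to the firmware:
 * a small header followed by bufers[] entries carrying the DMA address
 * and length of each fragment. num_mapped_bufs records how many
 * entries the driver itself mapped, so that qat_alg_free_bufl() can
 * skip any leading unmapped entries in an out-of-place destination
 * list.
 */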
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;
	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}
	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
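
/*
 * The precomputation above is the standard HMAC key-folding trick:
 * instead of handing the raw key to the hardware, the driver computes
 * the partial digests H(key XOR ipad) and H(key XOR opad) once at
 * setkey time and stores them in state1 of the auth block (inner state
 * first, outer state at the 8-byte-aligned offset after it). Per
 * request the auth slice then only continues those two hashes over the
 * data. crypto_shash_export() exposes the mid-compression state; the
 * cpu_to_be32()/cpu_to_be64() swaps store it in the big-endian layout
 * the firmware expects.
 */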
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
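
/*
 * All request templates share this header: content descriptors are
 * referenced by 64-bit pointer, payload data arrives as an SGL (the
 * flat buffer lists built by qat_alg_sgl_to_bufl()), the IV is carried
 * in the 16-byte IV field of the request, and no partial/stateful
 * processing is requested.
 */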
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;
	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
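
/*
 * Encrypt sessions use ICP_QAT_FW_LA_CMD_CIPHER_HASH and chain the
 * slices CIPHER -> AUTH -> DRAM_WR: data is encrypted first and the
 * HMAC is then computed over the ciphertext (encrypt-then-MAC), with
 * the resulting digest written back (RET_AUTH_RES). The encrypt
 * content descriptor lays out the cipher block first and the hash
 * block after the key, which is why hash_cfg_offset is the byte
 * distance between the two, expressed in 8-byte words.
 */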
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;
	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
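
/*
 * Decrypt sessions are the mirror image: ICP_QAT_FW_LA_CMD_HASH_CIPHER
 * chains AUTH -> CIPHER, so the HMAC over the received ciphertext is
 * verified in hardware (CMP_AUTH_RES) before decryption, and no digest
 * is written back (NO_RET_AUTH_RES). Accordingly the decrypt content
 * descriptor puts the hash block first, with the cipher config at the
 * offset computed from the doubled, 8-byte-rounded digest size.
 */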
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}
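
/*
 * CTR is the exception above because both directions only ever
 * AES-encrypt the counter stream; the "decrypt" descriptor therefore
 * keeps the encrypt configuration and needs no key conversion.
 */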
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
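
/*
 * XTS keys are twice the nominal AES key length because they carry two
 * independent AES keys, one for the data and one for the tweak; the
 * shifted key sizes above select the per-half variant. XTS defines no
 * AES-192 flavour, so there is no 384-bit case.
 */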
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const u8 *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
						 &ctx->enc_cd_paddr,
						 GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;

		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
						 &ctx->dec_cd_paddr,
						 GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
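
/*
 * setkey distinguishes rekeying from first use: on rekey the already
 * DMA-allocated content descriptors and request templates are simply
 * cleared and rebuilt, while on first use a crypto instance is picked
 * on the current NUMA node and the descriptors are allocated from
 * DMA-coherent memory. The AEAD algorithms registered below are all
 * CBC-based, hence the hard-coded ICP_QAT_HW_CIPHER_CBC_MODE.
 */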
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;
	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
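
/*
 * Design note: both the data pages and the qat_alg_buf_list structures
 * themselves are streaming-DMA mapped per request (DMA_BIDIRECTIONAL
 * for the data so one mapping serves either direction, DMA_TO_DEVICE
 * for the lists the firmware only reads). Zero-length scatterlist
 * entries are skipped, which is why num_bufs is counted with sg_nctr
 * rather than taken from sg_nents().
 */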
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
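
/*
 * qat_alg_callback() is the single response entry point: the request
 * pointer stashed in the 64-bit opaque_data field when the message was
 * built comes back untouched in the firmware response, and is used
 * here to dispatch to the AEAD or ablkcipher completion handler.
 */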
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
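
/*
 * On decrypt the trailing authentication tag must not be run through
 * the cipher: cipher_length is cryptlen minus the digest size, while
 * auth_len spans the associated data plus that same ciphertext range,
 * so the hardware verifies the MAC over exactly what the encrypt side
 * authenticated.
 */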
static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
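
/*
 * Both AEAD paths (and the ablkcipher paths below) submit with a
 * bounded busy-retry: adf_send_message() returns -EAGAIN while the
 * transport ring is full, so the request is retried up to ten times
 * before the DMA mappings are torn down and -EBUSY is returned. On
 * success the call returns -EINPROGRESS and the request completes
 * asynchronously via qat_alg_callback().
 */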
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
						 &ctx->enc_cd_paddr,
						 GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
						 &ctx->dec_cd_paddr,
						 GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}
static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };
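
/*
 * Usage sketch (hypothetical caller, not part of this driver): once a
 * QAT device is up, these algorithms are reachable through the normal
 * kernel crypto API, e.g.
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 * The priority of 4001 makes the hardware implementation win over the
 * generic software authenc() template when both are registered.
 */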
static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}
void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}
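
/*
 * active_devs reference-counts accelerator devices under algs_lock:
 * the algorithm tables are registered with the crypto API only when
 * the first device comes up and unregistered only when the last one
 * goes away, so multiple QAT devices share a single registration.
 */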