1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 
4 #include <crypto/aes.h>
5 #include <crypto/aead.h>
6 #include <crypto/algapi.h>
7 #include <crypto/authenc.h>
8 #include <crypto/des.h>
9 #include <crypto/hash.h>
10 #include <crypto/internal/aead.h>
11 #include <crypto/internal/des.h>
12 #include <crypto/sha1.h>
13 #include <crypto/sha2.h>
14 #include <crypto/skcipher.h>
15 #include <crypto/xts.h>
16 #include <linux/crypto.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/idr.h>
19 
20 #include "sec.h"
21 #include "sec_crypto.h"
22 
23 #define SEC_PRIORITY		4001
24 #define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
25 #define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
26 #define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
27 #define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
28 #define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)
29 
30 /* SEC sqe(bd) bit-field offsets and masks */
31 #define SEC_DE_OFFSET		1
32 #define SEC_CIPHER_OFFSET	4
33 #define SEC_SCENE_OFFSET	3
34 #define SEC_DST_SGL_OFFSET	2
35 #define SEC_SRC_SGL_OFFSET	7
36 #define SEC_CKEY_OFFSET		9
37 #define SEC_CMODE_OFFSET	12
38 #define SEC_AKEY_OFFSET         5
39 #define SEC_AEAD_ALG_OFFSET     11
40 #define SEC_AUTH_OFFSET		6
41 
42 #define SEC_DE_OFFSET_V3		9
43 #define SEC_SCENE_OFFSET_V3	5
44 #define SEC_CKEY_OFFSET_V3	13
45 #define SEC_CTR_CNT_OFFSET	25
46 #define SEC_CTR_CNT_ROLLOVER	2
47 #define SEC_SRC_SGL_OFFSET_V3	11
48 #define SEC_DST_SGL_OFFSET_V3	14
49 #define SEC_CALG_OFFSET_V3	4
50 #define SEC_AKEY_OFFSET_V3	9
51 #define SEC_MAC_OFFSET_V3	4
52 #define SEC_AUTH_ALG_OFFSET_V3	15
53 #define SEC_CIPHER_AUTH_V3	0xbf
54 #define SEC_AUTH_CIPHER_V3	0x40
55 #define SEC_FLAG_OFFSET		7
56 #define SEC_FLAG_MASK		0x0780
57 #define SEC_TYPE_MASK		0x0F
58 #define SEC_DONE_MASK		0x0001
59 #define SEC_ICV_MASK		0x000E
60 
61 #define SEC_TOTAL_IV_SZ(depth)	(SEC_IV_SIZE * (depth))
62 #define SEC_SGL_SGE_NR		128
63 #define SEC_CIPHER_AUTH		0xfe
64 #define SEC_AUTH_CIPHER		0x1
65 #define SEC_MAX_MAC_LEN		64
66 #define SEC_MAX_AAD_LEN		65535
67 #define SEC_MAX_CCM_AAD_LEN	65279
68 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
69 
70 #define SEC_PBUF_SZ			512
71 #define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
72 #define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
73 #define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
74 			SEC_MAX_MAC_LEN * 2)
75 #define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
76 #define SEC_PBUF_PAGE_NUM(depth)	((depth) / SEC_PBUF_NUM)
77 #define SEC_PBUF_LEFT_SZ(depth)		(SEC_PBUF_PKG * ((depth) -	\
78 				SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
79 #define SEC_TOTAL_PBUF_SZ(depth)	(PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) +	\
80 				SEC_PBUF_LEFT_SZ(depth))
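/*
 * A worked example of the pbuf sizing above (a sketch, assuming 4K pages and
 * SEC_IV_SIZE == 24 as defined in sec.h): SEC_PBUF_PKG = 512 + 24 + 2 * 64 =
 * 664 bytes, so SEC_PBUF_NUM = 4096 / 664 = 6 packages fit in one page. With
 * a queue depth of 256 that gives SEC_PBUF_PAGE_NUM = 256 / 6 = 42 full pages
 * plus SEC_PBUF_LEFT_SZ for the remaining 256 - 42 * 6 = 4 packages.
 */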
81 
82 #define SEC_SQE_CFLAG		2
83 #define SEC_SQE_AEAD_FLAG	3
84 #define SEC_SQE_DONE		0x1
85 #define SEC_ICV_ERR		0x2
86 #define MAC_LEN_MASK		0x1U
87 #define MAX_INPUT_DATA_LEN	0xFFFE00
88 #define BITS_MASK		0xFF
89 #define WORD_MASK		0x3
90 #define BYTE_BITS		0x8
91 #define BYTES_TO_WORDS(bcount)	((bcount) >> 2)
92 #define SEC_XTS_NAME_SZ		0x3
93 #define IV_CM_CAL_NUM		2
94 #define IV_CL_MASK		0x7
95 #define IV_CL_MIN		2
96 #define IV_CL_MID		4
97 #define IV_CL_MAX		8
98 #define IV_FLAGS_OFFSET	0x6
99 #define IV_CM_OFFSET		0x3
100 #define IV_LAST_BYTE1		1
101 #define IV_LAST_BYTE2		2
102 #define IV_LAST_BYTE_MASK	0xFF
103 #define IV_CTR_INIT		0x1
104 #define IV_BYTE_OFFSET		0x8
105 
106 static DEFINE_MUTEX(sec_algs_lock);
107 static unsigned int sec_available_devs;
108 
109 struct sec_skcipher {
110 	u64 alg_msk;
111 	struct skcipher_alg alg;
112 };
113 
114 struct sec_aead {
115 	u64 alg_msk;
116 	struct aead_alg alg;
117 };
118 
119 /* Pick an encrypt/decrypt queue cyclically to balance load across the TFM's queues */
120 static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
121 {
122 	if (req->c_req.encrypt)
123 		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
124 				 ctx->hlf_q_num;
125 
126 	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
127 				 ctx->hlf_q_num;
128 }
129 
130 static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
131 {
132 	if (req->c_req.encrypt)
133 		atomic_dec(&ctx->enc_qcyclic);
134 	else
135 		atomic_dec(&ctx->dec_qcyclic);
136 }
137 
138 static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
139 {
140 	int req_id;
141 
142 	spin_lock_bh(&qp_ctx->req_lock);
143 	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
144 	spin_unlock_bh(&qp_ctx->req_lock);
145 	if (unlikely(req_id < 0)) {
146 		dev_err(req->ctx->dev, "alloc req id fail!\n");
147 		return req_id;
148 	}
149 
150 	req->qp_ctx = qp_ctx;
151 	qp_ctx->req_list[req_id] = req;
152 
153 	return req_id;
154 }
155 
156 static void sec_free_req_id(struct sec_req *req)
157 {
158 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
159 	int req_id = req->req_id;
160 
161 	if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
162 		dev_err(req->ctx->dev, "free request id invalid!\n");
163 		return;
164 	}
165 
166 	qp_ctx->req_list[req_id] = NULL;
167 	req->qp_ctx = NULL;
168 
169 	spin_lock_bh(&qp_ctx->req_lock);
170 	idr_remove(&qp_ctx->req_idr, req_id);
171 	spin_unlock_bh(&qp_ctx->req_lock);
172 }
173 
174 static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
175 {
176 	struct sec_sqe *bd = resp;
177 
178 	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
179 	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
180 	status->flag = (le16_to_cpu(bd->type2.done_flag) &
181 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
182 	status->tag = le16_to_cpu(bd->type2.tag);
183 	status->err_type = bd->type2.error_type;
184 
185 	return bd->type_cipher_auth & SEC_TYPE_MASK;
186 }
187 
188 static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
189 {
190 	struct sec_sqe3 *bd3 = resp;
191 
192 	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
193 	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
194 	status->flag = (le16_to_cpu(bd3->done_flag) &
195 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
196 	status->tag = le64_to_cpu(bd3->tag);
197 	status->err_type = bd3->error_type;
198 
199 	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
200 }
201 
202 static int sec_cb_status_check(struct sec_req *req,
203 			       struct bd_status *status)
204 {
205 	struct sec_ctx *ctx = req->ctx;
206 
207 	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
208 		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
209 				    req->err_type, status->done);
210 		return -EIO;
211 	}
212 
213 	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
214 		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
215 			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
216 					    status->flag);
217 			return -EIO;
218 		}
219 	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
220 		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
221 			     status->icv == SEC_ICV_ERR)) {
222 			dev_err_ratelimited(ctx->dev,
223 					    "flag[%u], icv[%u]\n",
224 					    status->flag, status->icv);
225 			return -EBADMSG;
226 		}
227 	}
228 
229 	return 0;
230 }
231 
232 static void sec_req_cb(struct hisi_qp *qp, void *resp)
233 {
234 	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
235 	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
236 	u8 type_supported = qp_ctx->ctx->type_supported;
237 	struct bd_status status;
238 	struct sec_ctx *ctx;
239 	struct sec_req *req;
240 	int err;
241 	u8 type;
242 
243 	if (type_supported == SEC_BD_TYPE2) {
244 		type = pre_parse_finished_bd(&status, resp);
245 		req = qp_ctx->req_list[status.tag];
246 	} else {
247 		type = pre_parse_finished_bd3(&status, resp);
248 		req = (void *)(uintptr_t)status.tag;
249 	}
250 
251 	if (unlikely(type != type_supported)) {
252 		atomic64_inc(&dfx->err_bd_cnt);
253 		pr_err("err bd type [%u]\n", type);
254 		return;
255 	}
256 
257 	if (unlikely(!req)) {
258 		atomic64_inc(&dfx->invalid_req_cnt);
259 		atomic_inc(&qp->qp_status.used);
260 		return;
261 	}
262 
263 	req->err_type = status.err_type;
264 	ctx = req->ctx;
265 	err = sec_cb_status_check(req, &status);
266 	if (err)
267 		atomic64_inc(&dfx->done_flag_cnt);
268 
269 	atomic64_inc(&dfx->recv_cnt);
270 
271 	ctx->req_op->buf_unmap(ctx, req);
272 
273 	ctx->req_op->callback(ctx, req, err);
274 }
275 
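/*
 * Post one BD to the hardware queue. When more than ctx->fake_req_limit
 * requests are already outstanding, the request is rejected with -EBUSY
 * unless the caller set CRYPTO_TFM_REQ_MAY_BACKLOG, in which case it is put
 * on qp_ctx->backlog and completed later from the response callback path.
 */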
276 static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
277 {
278 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
279 	int ret;
280 
281 	if (ctx->fake_req_limit <=
282 	    atomic_read(&qp_ctx->qp->qp_status.used) &&
283 	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
284 		return -EBUSY;
285 
286 	spin_lock_bh(&qp_ctx->req_lock);
287 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
288 	if (ctx->fake_req_limit <=
289 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
290 		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
291 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
292 		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
293 		spin_unlock_bh(&qp_ctx->req_lock);
294 		return -EBUSY;
295 	}
296 	spin_unlock_bh(&qp_ctx->req_lock);
297 
298 	if (unlikely(ret == -EBUSY))
299 		return -ENOBUFS;
300 
301 	if (likely(!ret)) {
302 		ret = -EINPROGRESS;
303 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
304 	}
305 
306 	return ret;
307 }
308 
309 /* Get DMA memory resources */
310 static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
311 {
312 	u16 q_depth = res->depth;
313 	int i;
314 
315 	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
316 					 &res->c_ivin_dma, GFP_KERNEL);
317 	if (!res->c_ivin)
318 		return -ENOMEM;
319 
320 	for (i = 1; i < q_depth; i++) {
321 		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
322 		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
323 	}
324 
325 	return 0;
326 }
327 
328 static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
329 {
330 	if (res->c_ivin)
331 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
332 				  res->c_ivin, res->c_ivin_dma);
333 }
334 
335 static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
336 {
337 	u16 q_depth = res->depth;
338 	int i;
339 
340 	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
341 					 &res->a_ivin_dma, GFP_KERNEL);
342 	if (!res->a_ivin)
343 		return -ENOMEM;
344 
345 	for (i = 1; i < q_depth; i++) {
346 		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
347 		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
348 	}
349 
350 	return 0;
351 }
352 
353 static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
354 {
355 	if (res->a_ivin)
356 		dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
357 				  res->a_ivin, res->a_ivin_dma);
358 }
359 
360 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
361 {
362 	u16 q_depth = res->depth;
363 	int i;
364 
365 	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
366 					  &res->out_mac_dma, GFP_KERNEL);
367 	if (!res->out_mac)
368 		return -ENOMEM;
369 
370 	for (i = 1; i < q_depth; i++) {
371 		res[i].out_mac_dma = res->out_mac_dma +
372 				     i * (SEC_MAX_MAC_LEN << 1);
373 		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
374 	}
375 
376 	return 0;
377 }
378 
379 static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
380 {
381 	if (res->out_mac)
382 		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
383 				  res->out_mac, res->out_mac_dma);
384 }
385 
386 static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
387 {
388 	if (res->pbuf)
389 		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
390 				  res->pbuf, res->pbuf_dma);
391 }
392 
393 /*
394  * To improve performance, a pre-allocated pbuffer is used for
395  * small packets (< 512 bytes) when IOMMU translation is in use.
396  */
397 static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
398 {
399 	u16 q_depth = res->depth;
400 	int size = SEC_PBUF_PAGE_NUM(q_depth);
401 	int pbuf_page_offset;
402 	int i, j, k;
403 
404 	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
405 				&res->pbuf_dma, GFP_KERNEL);
406 	if (!res->pbuf)
407 		return -ENOMEM;
408 
409 	/*
410 	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
411 	 * out_mac, laid out as <SEC_PBUF|SEC_IV|SEC_MAC>.
412 	 * Every page holds SEC_PBUF_NUM packages and the sec_qp_ctx
413 	 * needs one package per queue entry (q_depth in total), so
414 	 * SEC_PBUF_PAGE_NUM full pages plus the remainder make up
415 	 * SEC_TOTAL_PBUF_SZ.
416 	 */
417 	for (i = 0; i <= size; i++) {
418 		pbuf_page_offset = PAGE_SIZE * i;
419 		for (j = 0; j < SEC_PBUF_NUM; j++) {
420 			k = i * SEC_PBUF_NUM + j;
421 			if (k == q_depth)
422 				break;
423 			res[k].pbuf = res->pbuf +
424 				j * SEC_PBUF_PKG + pbuf_page_offset;
425 			res[k].pbuf_dma = res->pbuf_dma +
426 				j * SEC_PBUF_PKG + pbuf_page_offset;
427 		}
428 	}
429 
430 	return 0;
431 }
432 
433 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
434 				  struct sec_qp_ctx *qp_ctx)
435 {
436 	struct sec_alg_res *res = qp_ctx->res;
437 	struct device *dev = ctx->dev;
438 	int ret;
439 
440 	ret = sec_alloc_civ_resource(dev, res);
441 	if (ret)
442 		return ret;
443 
444 	if (ctx->alg_type == SEC_AEAD) {
445 		ret = sec_alloc_aiv_resource(dev, res);
446 		if (ret)
447 			goto alloc_aiv_fail;
448 
449 		ret = sec_alloc_mac_resource(dev, res);
450 		if (ret)
451 			goto alloc_mac_fail;
452 	}
453 	if (ctx->pbuf_supported) {
454 		ret = sec_alloc_pbuf_resource(dev, res);
455 		if (ret) {
456 			dev_err(dev, "fail to alloc pbuf dma resource!\n");
457 			goto alloc_pbuf_fail;
458 		}
459 	}
460 
461 	return 0;
462 
463 alloc_pbuf_fail:
464 	if (ctx->alg_type == SEC_AEAD)
465 		sec_free_mac_resource(dev, qp_ctx->res);
466 alloc_mac_fail:
467 	if (ctx->alg_type == SEC_AEAD)
468 		sec_free_aiv_resource(dev, res);
469 alloc_aiv_fail:
470 	sec_free_civ_resource(dev, res);
471 	return ret;
472 }
473 
474 static void sec_alg_resource_free(struct sec_ctx *ctx,
475 				  struct sec_qp_ctx *qp_ctx)
476 {
477 	struct device *dev = ctx->dev;
478 
479 	sec_free_civ_resource(dev, qp_ctx->res);
480 
481 	if (ctx->pbuf_supported)
482 		sec_free_pbuf_resource(dev, qp_ctx->res);
483 	if (ctx->alg_type == SEC_AEAD) {
484 		sec_free_mac_resource(dev, qp_ctx->res);
485 		sec_free_aiv_resource(dev, qp_ctx->res);
486 	}
487 }
488 
489 static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
490 {
491 	u16 q_depth = qp_ctx->qp->sq_depth;
492 	struct device *dev = ctx->dev;
493 	int ret = -ENOMEM;
494 
495 	qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
496 	if (!qp_ctx->req_list)
497 		return ret;
498 
499 	qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
500 	if (!qp_ctx->res)
501 		goto err_free_req_list;
502 	qp_ctx->res->depth = q_depth;
503 
504 	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
505 	if (IS_ERR(qp_ctx->c_in_pool)) {
506 		dev_err(dev, "fail to create sgl pool for input!\n");
507 		goto err_free_res;
508 	}
509 
510 	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
511 	if (IS_ERR(qp_ctx->c_out_pool)) {
512 		dev_err(dev, "fail to create sgl pool for output!\n");
513 		goto err_free_c_in_pool;
514 	}
515 
516 	ret = sec_alg_resource_alloc(ctx, qp_ctx);
517 	if (ret)
518 		goto err_free_c_out_pool;
519 
520 	return 0;
521 
522 err_free_c_out_pool:
523 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
524 err_free_c_in_pool:
525 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
526 err_free_res:
527 	kfree(qp_ctx->res);
528 err_free_req_list:
529 	kfree(qp_ctx->req_list);
530 	return ret;
531 }
532 
533 static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
534 {
535 	struct device *dev = ctx->dev;
536 
537 	sec_alg_resource_free(ctx, qp_ctx);
538 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
539 	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
540 	kfree(qp_ctx->res);
541 	kfree(qp_ctx->req_list);
542 }
543 
544 static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
545 {
546 	struct sec_qp_ctx *qp_ctx;
547 	struct hisi_qp *qp;
548 	int ret;
549 
550 	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
551 	qp = ctx->qps[qp_ctx_id];
552 	qp->req_type = 0;
553 	qp->qp_ctx = qp_ctx;
554 	qp_ctx->qp = qp;
555 	qp_ctx->ctx = ctx;
556 
557 	qp->req_cb = sec_req_cb;
558 
559 	spin_lock_init(&qp_ctx->req_lock);
560 	idr_init(&qp_ctx->req_idr);
561 	INIT_LIST_HEAD(&qp_ctx->backlog);
562 
563 	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
564 	if (ret)
565 		goto err_destroy_idr;
566 
567 	ret = hisi_qm_start_qp(qp, 0);
568 	if (ret < 0)
569 		goto err_resource_free;
570 
571 	return 0;
572 
573 err_resource_free:
574 	sec_free_qp_ctx_resource(ctx, qp_ctx);
575 err_destroy_idr:
576 	idr_destroy(&qp_ctx->req_idr);
577 	return ret;
578 }
579 
580 static void sec_release_qp_ctx(struct sec_ctx *ctx,
581 			       struct sec_qp_ctx *qp_ctx)
582 {
583 	hisi_qm_stop_qp(qp_ctx->qp);
584 	sec_free_qp_ctx_resource(ctx, qp_ctx);
585 	idr_destroy(&qp_ctx->req_idr);
586 }
587 
588 static int sec_ctx_base_init(struct sec_ctx *ctx)
589 {
590 	struct sec_dev *sec;
591 	int i, ret;
592 
593 	ctx->qps = sec_create_qps();
594 	if (!ctx->qps) {
595 		pr_err("Can not create sec qps!\n");
596 		return -ENODEV;
597 	}
598 
599 	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
600 	ctx->sec = sec;
601 	ctx->dev = &sec->qm.pdev->dev;
602 	ctx->hlf_q_num = sec->ctx_q_num >> 1;
603 
604 	ctx->pbuf_supported = ctx->sec->iommu_used;
605 
606 	/* Half of the queue depth is used as the limit for backlogged (fake) requests. */
607 	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
608 	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
609 			      GFP_KERNEL);
610 	if (!ctx->qp_ctx) {
611 		ret = -ENOMEM;
612 		goto err_destroy_qps;
613 	}
614 
615 	for (i = 0; i < sec->ctx_q_num; i++) {
616 		ret = sec_create_qp_ctx(ctx, i);
617 		if (ret)
618 			goto err_sec_release_qp_ctx;
619 	}
620 
621 	return 0;
622 
623 err_sec_release_qp_ctx:
624 	for (i = i - 1; i >= 0; i--)
625 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
626 	kfree(ctx->qp_ctx);
627 err_destroy_qps:
628 	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
629 	return ret;
630 }
631 
632 static void sec_ctx_base_uninit(struct sec_ctx *ctx)
633 {
634 	int i;
635 
636 	for (i = 0; i < ctx->sec->ctx_q_num; i++)
637 		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
638 
639 	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
640 	kfree(ctx->qp_ctx);
641 }
642 
643 static int sec_cipher_init(struct sec_ctx *ctx)
644 {
645 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
646 
647 	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
648 					  &c_ctx->c_key_dma, GFP_KERNEL);
649 	if (!c_ctx->c_key)
650 		return -ENOMEM;
651 
652 	return 0;
653 }
654 
655 static void sec_cipher_uninit(struct sec_ctx *ctx)
656 {
657 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
658 
659 	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
660 	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
661 			  c_ctx->c_key, c_ctx->c_key_dma);
662 }
663 
664 static int sec_auth_init(struct sec_ctx *ctx)
665 {
666 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
667 
668 	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
669 					  &a_ctx->a_key_dma, GFP_KERNEL);
670 	if (!a_ctx->a_key)
671 		return -ENOMEM;
672 
673 	return 0;
674 }
675 
676 static void sec_auth_uninit(struct sec_ctx *ctx)
677 {
678 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
679 
680 	memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
681 	dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
682 			  a_ctx->a_key, a_ctx->a_key_dma);
683 }
684 
685 static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
686 {
687 	const char *alg = crypto_tfm_alg_name(&tfm->base);
688 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
689 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
690 
691 	c_ctx->fallback = false;
692 
693 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
694 						  CRYPTO_ALG_NEED_FALLBACK);
695 	if (IS_ERR(c_ctx->fbtfm)) {
696 		pr_err("failed to alloc fallback tfm for %s!\n", alg);
697 		return PTR_ERR(c_ctx->fbtfm);
698 	}
699 
700 	return 0;
701 }
702 
703 static int sec_skcipher_init(struct crypto_skcipher *tfm)
704 {
705 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
706 	int ret;
707 
708 	ctx->alg_type = SEC_SKCIPHER;
709 	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
710 	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
711 	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
712 		pr_err("get error skcipher iv size!\n");
713 		return -EINVAL;
714 	}
715 
716 	ret = sec_ctx_base_init(ctx);
717 	if (ret)
718 		return ret;
719 
720 	ret = sec_cipher_init(ctx);
721 	if (ret)
722 		goto err_cipher_init;
723 
724 	ret = sec_skcipher_fbtfm_init(tfm);
725 	if (ret)
726 		goto err_fbtfm_init;
727 
728 	return 0;
729 
730 err_fbtfm_init:
731 	sec_cipher_uninit(ctx);
732 err_cipher_init:
733 	sec_ctx_base_uninit(ctx);
734 	return ret;
735 }
736 
737 static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
738 {
739 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
740 
741 	if (ctx->c_ctx.fbtfm)
742 		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
743 
744 	sec_cipher_uninit(ctx);
745 	sec_ctx_base_uninit(ctx);
746 }
747 
748 static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
749 {
750 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
751 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
752 	int ret;
753 
754 	ret = verify_skcipher_des3_key(tfm, key);
755 	if (ret)
756 		return ret;
757 
758 	switch (keylen) {
759 	case SEC_DES3_2KEY_SIZE:
760 		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
761 		break;
762 	case SEC_DES3_3KEY_SIZE:
763 		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
764 		break;
765 	default:
766 		return -EINVAL;
767 	}
768 
769 	return 0;
770 }
771 
772 static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
773 				       const u32 keylen,
774 				       const enum sec_cmode c_mode)
775 {
776 	if (c_mode == SEC_CMODE_XTS) {
777 		switch (keylen) {
778 		case SEC_XTS_MIN_KEY_SIZE:
779 			c_ctx->c_key_len = SEC_CKEY_128BIT;
780 			break;
781 		case SEC_XTS_MID_KEY_SIZE:
782 			c_ctx->fallback = true;
783 			break;
784 		case SEC_XTS_MAX_KEY_SIZE:
785 			c_ctx->c_key_len = SEC_CKEY_256BIT;
786 			break;
787 		default:
788 			pr_err("hisi_sec2: xts mode key error!\n");
789 			return -EINVAL;
790 		}
791 	} else {
792 		if (c_ctx->c_alg == SEC_CALG_SM4 &&
793 		    keylen != AES_KEYSIZE_128) {
794 			pr_err("hisi_sec2: sm4 key error!\n");
795 			return -EINVAL;
796 		} else {
797 			switch (keylen) {
798 			case AES_KEYSIZE_128:
799 				c_ctx->c_key_len = SEC_CKEY_128BIT;
800 				break;
801 			case AES_KEYSIZE_192:
802 				c_ctx->c_key_len = SEC_CKEY_192BIT;
803 				break;
804 			case AES_KEYSIZE_256:
805 				c_ctx->c_key_len = SEC_CKEY_256BIT;
806 				break;
807 			default:
808 				pr_err("hisi_sec2: aes key error!\n");
809 				return -EINVAL;
810 			}
811 		}
812 	}
813 
814 	return 0;
815 }
816 
817 static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
818 			       const u32 keylen, const enum sec_calg c_alg,
819 			       const enum sec_cmode c_mode)
820 {
821 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
822 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
823 	struct device *dev = ctx->dev;
824 	int ret;
825 
826 	if (c_mode == SEC_CMODE_XTS) {
827 		ret = xts_verify_key(tfm, key, keylen);
828 		if (ret) {
829 			dev_err(dev, "xts mode key err!\n");
830 			return ret;
831 		}
832 	}
833 
834 	c_ctx->c_alg  = c_alg;
835 	c_ctx->c_mode = c_mode;
836 
837 	switch (c_alg) {
838 	case SEC_CALG_3DES:
839 		ret = sec_skcipher_3des_setkey(tfm, key, keylen);
840 		break;
841 	case SEC_CALG_AES:
842 	case SEC_CALG_SM4:
843 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
844 		break;
845 	default:
846 		dev_err(dev, "sec c_alg err!\n");
847 		return -EINVAL;
848 	}
849 
850 	if (ret) {
851 		dev_err(dev, "set sec key err!\n");
852 		return ret;
853 	}
854 
855 	memcpy(c_ctx->c_key, key, keylen);
856 	if (c_ctx->fbtfm) {
857 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
858 		if (ret) {
859 			dev_err(dev, "failed to set fallback skcipher key!\n");
860 			return ret;
861 		}
862 	}
863 	return 0;
864 }
865 
866 #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
867 static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
868 	u32 keylen)							\
869 {									\
870 	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
871 }
872 
873 GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
874 GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
875 GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
876 GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
877 GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
878 GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
879 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
880 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
881 GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
882 
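/*
 * pbuf fast path: instead of mapping the scatterlist for DMA, the whole
 * (small) request is copied into the per-request pre-mapped pbuf. For AEAD
 * decryption the received MAC at the tail of the copied data is also saved
 * to out_mac for ICV checking.
 */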
883 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
884 			struct scatterlist *src)
885 {
886 	struct sec_aead_req *a_req = &req->aead_req;
887 	struct aead_request *aead_req = a_req->aead_req;
888 	struct sec_cipher_req *c_req = &req->c_req;
889 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
890 	struct device *dev = ctx->dev;
891 	int copy_size, pbuf_length;
892 	int req_id = req->req_id;
893 	struct crypto_aead *tfm;
894 	size_t authsize;
895 	u8 *mac_offset;
896 
897 	if (ctx->alg_type == SEC_AEAD)
898 		copy_size = aead_req->cryptlen + aead_req->assoclen;
899 	else
900 		copy_size = c_req->c_len;
901 
902 	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
903 			qp_ctx->res[req_id].pbuf, copy_size);
904 	if (unlikely(pbuf_length != copy_size)) {
905 		dev_err(dev, "copy src data to pbuf error!\n");
906 		return -EINVAL;
907 	}
908 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
909 		tfm = crypto_aead_reqtfm(aead_req);
910 		authsize = crypto_aead_authsize(tfm);
911 		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
912 		memcpy(a_req->out_mac, mac_offset, authsize);
913 	}
914 
915 	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
916 	c_req->c_out_dma = req->in_dma;
917 
918 	return 0;
919 }
920 
921 static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
922 			struct scatterlist *dst)
923 {
924 	struct aead_request *aead_req = req->aead_req.aead_req;
925 	struct sec_cipher_req *c_req = &req->c_req;
926 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
927 	int copy_size, pbuf_length;
928 	int req_id = req->req_id;
929 
930 	if (ctx->alg_type == SEC_AEAD)
931 		copy_size = c_req->c_len + aead_req->assoclen;
932 	else
933 		copy_size = c_req->c_len;
934 
935 	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
936 			qp_ctx->res[req_id].pbuf, copy_size);
937 	if (unlikely(pbuf_length != copy_size))
938 		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
939 }
940 
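/*
 * For AEAD decryption, copy the expected MAC from the tail of the source
 * scatterlist into out_mac so the ICV can be verified against it.
 */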
941 static int sec_aead_mac_init(struct sec_aead_req *req)
942 {
943 	struct aead_request *aead_req = req->aead_req;
944 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
945 	size_t authsize = crypto_aead_authsize(tfm);
946 	struct scatterlist *sgl = aead_req->src;
947 	u8 *mac_out = req->out_mac;
948 	size_t copy_size;
949 	off_t skip_size;
950 
951 	/* Copy input mac */
952 	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
953 	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
954 	if (unlikely(copy_size != authsize))
955 		return -EINVAL;
956 
957 	return 0;
958 }
959 
960 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
961 			  struct scatterlist *src, struct scatterlist *dst)
962 {
963 	struct sec_cipher_req *c_req = &req->c_req;
964 	struct sec_aead_req *a_req = &req->aead_req;
965 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
966 	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
967 	struct device *dev = ctx->dev;
968 	int ret;
969 
970 	if (req->use_pbuf) {
971 		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
972 		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
973 		if (ctx->alg_type == SEC_AEAD) {
974 			a_req->a_ivin = res->a_ivin;
975 			a_req->a_ivin_dma = res->a_ivin_dma;
976 			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
977 			a_req->out_mac_dma = res->pbuf_dma +
978 					SEC_PBUF_MAC_OFFSET;
979 		}
980 		ret = sec_cipher_pbuf_map(ctx, req, src);
981 
982 		return ret;
983 	}
984 	c_req->c_ivin = res->c_ivin;
985 	c_req->c_ivin_dma = res->c_ivin_dma;
986 	if (ctx->alg_type == SEC_AEAD) {
987 		a_req->a_ivin = res->a_ivin;
988 		a_req->a_ivin_dma = res->a_ivin_dma;
989 		a_req->out_mac = res->out_mac;
990 		a_req->out_mac_dma = res->out_mac_dma;
991 	}
992 
993 	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
994 						qp_ctx->c_in_pool,
995 						req->req_id,
996 						&req->in_dma);
997 	if (IS_ERR(req->in)) {
998 		dev_err(dev, "fail to dma map input sgl buffers!\n");
999 		return PTR_ERR(req->in);
1000 	}
1001 
1002 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
1003 		ret = sec_aead_mac_init(a_req);
1004 		if (unlikely(ret)) {
1005 			dev_err(dev, "fail to init mac data for ICV!\n");
1006 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1007 			return ret;
1008 		}
1009 	}
1010 
1011 	if (dst == src) {
1012 		c_req->c_out = req->in;
1013 		c_req->c_out_dma = req->in_dma;
1014 	} else {
1015 		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
1016 							     qp_ctx->c_out_pool,
1017 							     req->req_id,
1018 							     &c_req->c_out_dma);
1019 
1020 		if (IS_ERR(c_req->c_out)) {
1021 			dev_err(dev, "fail to dma map output sgl buffers!\n");
1022 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1023 			return PTR_ERR(c_req->c_out);
1024 		}
1025 	}
1026 
1027 	return 0;
1028 }
1029 
1030 static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
1031 			     struct scatterlist *src, struct scatterlist *dst)
1032 {
1033 	struct sec_cipher_req *c_req = &req->c_req;
1034 	struct device *dev = ctx->dev;
1035 
1036 	if (req->use_pbuf) {
1037 		sec_cipher_pbuf_unmap(ctx, req, dst);
1038 	} else {
1039 		if (dst != src)
1040 			hisi_acc_sg_buf_unmap(dev, src, req->in);
1041 
1042 		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
1043 	}
1044 }
1045 
1046 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1047 {
1048 	struct skcipher_request *sq = req->c_req.sk_req;
1049 
1050 	return sec_cipher_map(ctx, req, sq->src, sq->dst);
1051 }
1052 
1053 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1054 {
1055 	struct skcipher_request *sq = req->c_req.sk_req;
1056 
1057 	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
1058 }
1059 
1060 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
1061 				struct crypto_authenc_keys *keys)
1062 {
1063 	switch (keys->enckeylen) {
1064 	case AES_KEYSIZE_128:
1065 		c_ctx->c_key_len = SEC_CKEY_128BIT;
1066 		break;
1067 	case AES_KEYSIZE_192:
1068 		c_ctx->c_key_len = SEC_CKEY_192BIT;
1069 		break;
1070 	case AES_KEYSIZE_256:
1071 		c_ctx->c_key_len = SEC_CKEY_256BIT;
1072 		break;
1073 	default:
1074 		pr_err("hisi_sec2: aead aes key error!\n");
1075 		return -EINVAL;
1076 	}
1077 	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
1078 
1079 	return 0;
1080 }
1081 
1082 static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
1083 				 struct crypto_authenc_keys *keys)
1084 {
1085 	struct crypto_shash *hash_tfm = ctx->hash_tfm;
1086 	int blocksize, digestsize, ret;
1087 
1088 	blocksize = crypto_shash_blocksize(hash_tfm);
1089 	digestsize = crypto_shash_digestsize(hash_tfm);
1090 	if (keys->authkeylen > blocksize) {
1091 		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
1092 					      keys->authkeylen, ctx->a_key);
1093 		if (ret) {
1094 			pr_err("hisi_sec2: aead auth digest error!\n");
1095 			return -EINVAL;
1096 		}
1097 		ctx->a_key_len = digestsize;
1098 	} else {
1099 		if (keys->authkeylen)
1100 			memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
1101 		ctx->a_key_len = keys->authkeylen;
1102 	}
1103 
1104 	return 0;
1105 }
1106 
1107 static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
1108 {
1109 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
1110 	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
1111 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1112 
1113 	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
1114 }
1115 
1116 static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
1117 				    struct crypto_aead *tfm, const u8 *key,
1118 				    unsigned int keylen)
1119 {
1120 	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
1121 	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
1122 			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
1123 	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
1124 }
1125 
1126 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1127 			   const u32 keylen, const enum sec_hash_alg a_alg,
1128 			   const enum sec_calg c_alg,
1129 			   const enum sec_cmode c_mode)
1130 {
1131 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1132 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1133 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1134 	struct device *dev = ctx->dev;
1135 	struct crypto_authenc_keys keys;
1136 	int ret;
1137 
1138 	ctx->a_ctx.a_alg = a_alg;
1139 	ctx->c_ctx.c_alg = c_alg;
1140 	c_ctx->c_mode = c_mode;
1141 
1142 	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
1143 		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
1144 		if (ret) {
1145 			dev_err(dev, "set sec aes ccm cipher key err!\n");
1146 			return ret;
1147 		}
1148 		memcpy(c_ctx->c_key, key, keylen);
1149 
1150 		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1151 	}
1152 
1153 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
1154 	if (ret) {
1155 		dev_err(dev, "sec extract aead keys err!\n");
1156 		goto bad_key;
1157 	}
1158 
1159 	ret = sec_aead_aes_set_key(c_ctx, &keys);
1160 	if (ret) {
1161 		dev_err(dev, "set sec cipher key err!\n");
1162 		goto bad_key;
1163 	}
1164 
1165 	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
1166 	if (ret) {
1167 		dev_err(dev, "set sec auth key err!\n");
1168 		goto bad_key;
1169 	}
1170 
1171 	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
1172 	if (ret) {
1173 		dev_err(dev, "set sec fallback key err!\n");
1174 		goto bad_key;
1175 	}
1176 
1177 	return 0;
1178 
1179 bad_key:
1180 	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
1181 	return ret;
1182 }
1183 
1184 
1185 #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)				\
1186 static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen)	\
1187 {											\
1188 	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);			\
1189 }
1190 
1191 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
1192 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
1193 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
1194 GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
1195 GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
1196 GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
1197 GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
1198 
1199 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
1200 {
1201 	struct aead_request *aq = req->aead_req.aead_req;
1202 
1203 	return sec_cipher_map(ctx, req, aq->src, aq->dst);
1204 }
1205 
1206 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
1207 {
1208 	struct aead_request *aq = req->aead_req.aead_req;
1209 
1210 	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
1211 }
1212 
1213 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
1214 {
1215 	int ret;
1216 
1217 	ret = ctx->req_op->buf_map(ctx, req);
1218 	if (unlikely(ret))
1219 		return ret;
1220 
1221 	ctx->req_op->do_transfer(ctx, req);
1222 
1223 	ret = ctx->req_op->bd_fill(ctx, req);
1224 	if (unlikely(ret))
1225 		goto unmap_req_buf;
1226 
1227 	return ret;
1228 
1229 unmap_req_buf:
1230 	ctx->req_op->buf_unmap(ctx, req);
1231 	return ret;
1232 }
1233 
1234 static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
1235 {
1236 	ctx->req_op->buf_unmap(ctx, req);
1237 }
1238 
1239 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
1240 {
1241 	struct skcipher_request *sk_req = req->c_req.sk_req;
1242 	struct sec_cipher_req *c_req = &req->c_req;
1243 
1244 	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
1245 }
1246 
1247 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1248 {
1249 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1250 	struct sec_cipher_req *c_req = &req->c_req;
1251 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1252 	u8 scene, sa_type, da_type;
1253 	u8 bd_type, cipher;
1254 	u8 de = 0;
1255 
1256 	memset(sec_sqe, 0, sizeof(struct sec_sqe));
1257 
1258 	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1259 	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1260 	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
1261 	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1262 
1263 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
1264 						SEC_CMODE_OFFSET);
1265 	sec_sqe->type2.c_alg = c_ctx->c_alg;
1266 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1267 						SEC_CKEY_OFFSET);
1268 
1269 	bd_type = SEC_BD_TYPE2;
1270 	if (c_req->encrypt)
1271 		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
1272 	else
1273 		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
1274 	sec_sqe->type_cipher_auth = bd_type | cipher;
1275 
1276 	/* Set destination and source address type */
1277 	if (req->use_pbuf) {
1278 		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
1279 		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
1280 	} else {
1281 		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
1282 		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
1283 	}
1284 
1285 	sec_sqe->sdm_addr_type |= da_type;
1286 	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
1287 	if (req->in_dma != c_req->c_out_dma)
1288 		de = 0x1 << SEC_DE_OFFSET;
1289 
1290 	sec_sqe->sds_sa_type = (de | scene | sa_type);
1291 
1292 	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
1293 	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
1294 
1295 	return 0;
1296 }
1297 
1298 static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1299 {
1300 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1301 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
1302 	struct sec_cipher_req *c_req = &req->c_req;
1303 	u32 bd_param = 0;
1304 	u16 cipher;
1305 
1306 	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
1307 
1308 	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
1309 	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
1310 	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
1311 	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
1312 
1313 	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
1314 						c_ctx->c_mode;
1315 	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
1316 						SEC_CKEY_OFFSET_V3);
1317 
1318 	if (c_req->encrypt)
1319 		cipher = SEC_CIPHER_ENC;
1320 	else
1321 		cipher = SEC_CIPHER_DEC;
1322 	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
1323 
1324 	/* Set the CTR counter mode to 128-bit rollover */
1325 	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
1326 					SEC_CTR_CNT_OFFSET);
1327 
1328 	if (req->use_pbuf) {
1329 		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
1330 		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
1331 	} else {
1332 		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
1333 		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
1334 	}
1335 
1336 	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
1337 	if (req->in_dma != c_req->c_out_dma)
1338 		bd_param |= 0x1 << SEC_DE_OFFSET_V3;
1339 
1340 	bd_param |= SEC_BD_TYPE3;
1341 	sec_sqe3->bd_param = cpu_to_le32(bd_param);
1342 
1343 	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
1344 	sec_sqe3->tag = cpu_to_le64((unsigned long)req);
1345 
1346 	return 0;
1347 }
1348 
1349 /* increment counter (128-bit int) */
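/*
 * The counter is treated as a big-endian integer of 'bits' bytes: e.g. adding
 * nums == 2 to a counter ending in 0xff 0xff leaves 0x00 0x01 in those bytes
 * and carries into the next higher byte.
 */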
1350 static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
1351 {
1352 	do {
1353 		--bits;
1354 		nums += counter[bits];
1355 		counter[bits] = nums & BITS_MASK;
1356 		nums >>= BYTE_BITS;
1357 	} while (bits && nums);
1358 }
1359 
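/*
 * Compute the IV the caller expects back after the operation: for CBC this
 * is the last ciphertext block of the processed data, for CTR the counter is
 * advanced by the number of blocks processed (rounded up).
 */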
1360 static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
1361 {
1362 	struct aead_request *aead_req = req->aead_req.aead_req;
1363 	struct skcipher_request *sk_req = req->c_req.sk_req;
1364 	u32 iv_size = req->ctx->c_ctx.ivsize;
1365 	struct scatterlist *sgl;
1366 	unsigned int cryptlen;
1367 	size_t sz;
1368 	u8 *iv;
1369 
1370 	if (req->c_req.encrypt)
1371 		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
1372 	else
1373 		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
1374 
1375 	if (alg_type == SEC_SKCIPHER) {
1376 		iv = sk_req->iv;
1377 		cryptlen = sk_req->cryptlen;
1378 	} else {
1379 		iv = aead_req->iv;
1380 		cryptlen = aead_req->cryptlen;
1381 	}
1382 
1383 	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
1384 		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
1385 					cryptlen - iv_size);
1386 		if (unlikely(sz != iv_size))
1387 			dev_err(req->ctx->dev, "copy output iv error!\n");
1388 	} else {
1389 		sz = cryptlen / iv_size;
1390 		if (cryptlen % iv_size)
1391 			sz += 1;
1392 		ctr_iv_inc(iv, iv_size, sz);
1393 	}
1394 }
1395 
1396 static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
1397 				struct sec_qp_ctx *qp_ctx)
1398 {
1399 	struct sec_req *backlog_req = NULL;
1400 
1401 	spin_lock_bh(&qp_ctx->req_lock);
1402 	if (ctx->fake_req_limit >=
1403 	    atomic_read(&qp_ctx->qp->qp_status.used) &&
1404 	    !list_empty(&qp_ctx->backlog)) {
1405 		backlog_req = list_first_entry(&qp_ctx->backlog,
1406 				typeof(*backlog_req), backlog_head);
1407 		list_del(&backlog_req->backlog_head);
1408 	}
1409 	spin_unlock_bh(&qp_ctx->req_lock);
1410 
1411 	return backlog_req;
1412 }
1413 
1414 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
1415 				  int err)
1416 {
1417 	struct skcipher_request *sk_req = req->c_req.sk_req;
1418 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1419 	struct skcipher_request *backlog_sk_req;
1420 	struct sec_req *backlog_req;
1421 
1422 	sec_free_req_id(req);
1423 
1424 	/* Copy out the updated IV after CBC/CTR mode encryption */
1425 	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1426 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
1427 		sec_update_iv(req, SEC_SKCIPHER);
1428 
1429 	while (1) {
1430 		backlog_req = sec_back_req_clear(ctx, qp_ctx);
1431 		if (!backlog_req)
1432 			break;
1433 
1434 		backlog_sk_req = backlog_req->c_req.sk_req;
1435 		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
1436 		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
1437 	}
1438 
1439 	skcipher_request_complete(sk_req, err);
1440 }
1441 
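/*
 * Build the CCM counter block (cipher IV) and B0 block (auth IV): the flags
 * byte carries L' in its low 3 bits, M' in bits 3~5 and the Adata flag in
 * bit 6, and the trailing bytes hold the plaintext length, as in the CCM
 * formatting of RFC 3610.
 */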
1442 static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
1443 {
1444 	struct aead_request *aead_req = req->aead_req.aead_req;
1445 	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
1446 	size_t authsize = crypto_aead_authsize(tfm);
1447 	struct sec_aead_req *a_req = &req->aead_req;
1448 	struct sec_cipher_req *c_req = &req->c_req;
1449 	u32 data_size = aead_req->cryptlen;
1450 	u8 flage = 0;
1451 	u8 cm, cl;
1452 
1453 	/* the specification has been checked in aead_iv_demension_check() */
1454 	cl = c_req->c_ivin[0] + 1;
1455 	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
1456 	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
1457 	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
1458 
1459 	/* the last 3 bits are L' */
1460 	flage |= c_req->c_ivin[0] & IV_CL_MASK;
1461 
1462 	/* M' occupies bits 3~5, the Flags bit is bit 6 */
1463 	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
1464 	flage |= cm << IV_CM_OFFSET;
1465 	if (aead_req->assoclen)
1466 		flage |= 0x01 << IV_FLAGS_OFFSET;
1467 
1468 	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
1469 	a_req->a_ivin[0] = flage;
1470 
1471 	/*
1472 	 * The last 32 bits hold the counter's initial value, but the
1473 	 * nonce uses the first 16 bits of them; the trailing 16 bits
1474 	 * are filled with the cipher text length.
1475 	 */
1476 	if (!c_req->encrypt)
1477 		data_size = aead_req->cryptlen - authsize;
1478 
1479 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
1480 			data_size & IV_LAST_BYTE_MASK;
1481 	data_size >>= IV_BYTE_OFFSET;
1482 	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
1483 			data_size & IV_LAST_BYTE_MASK;
1484 }
1485 
1486 static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
1487 {
1488 	struct aead_request *aead_req = req->aead_req.aead_req;
1489 	struct sec_aead_req *a_req = &req->aead_req;
1490 	struct sec_cipher_req *c_req = &req->c_req;
1491 
1492 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
1493 
1494 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
1495 		/*
1496 		 * CCM 16-byte Cipher_IV: {1B_Flag, 13B_IV, 2B_counter};
1497 		 * the counter must be set to 0x01.
1498 		 * CCM 16-byte Auth_IV: {1B_AFlag, 13B_IV, 2B_Ptext_length}
1499 		 */
1500 		set_aead_auth_iv(ctx, req);
1501 	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
1502 		/* GCM 12Byte Cipher_IV == Auth_IV */
1503 		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
1504 	}
1505 }
1506 
1507 static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
1508 				 struct sec_req *req, struct sec_sqe *sec_sqe)
1509 {
1510 	struct sec_aead_req *a_req = &req->aead_req;
1511 	struct aead_request *aq = a_req->aead_req;
1512 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1513 	size_t authsize = crypto_aead_authsize(tfm);
1514 
1515 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1516 	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);
1517 
1518 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
1519 	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
1520 	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1521 	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
1522 
1523 	if (dir)
1524 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1525 	else
1526 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1527 
1528 	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
1529 	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
1530 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1531 
1532 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1533 }
1534 
1535 static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
1536 				    struct sec_req *req, struct sec_sqe3 *sqe3)
1537 {
1538 	struct sec_aead_req *a_req = &req->aead_req;
1539 	struct aead_request *aq = a_req->aead_req;
1540 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1541 	size_t authsize = crypto_aead_authsize(tfm);
1542 
1543 	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
1544 	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);
1545 
1546 	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
1547 	sqe3->a_key_addr = sqe3->c_key_addr;
1548 	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
1549 	sqe3->auth_mac_key |= SEC_NO_AUTH;
1550 
1551 	if (dir)
1552 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1553 	else
1554 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1555 
1556 	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
1557 	sqe3->auth_src_offset = cpu_to_le16(0x0);
1558 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1559 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1560 }
1561 
1562 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
1563 			       struct sec_req *req, struct sec_sqe *sec_sqe)
1564 {
1565 	struct sec_aead_req *a_req = &req->aead_req;
1566 	struct sec_cipher_req *c_req = &req->c_req;
1567 	struct aead_request *aq = a_req->aead_req;
1568 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1569 	size_t authsize = crypto_aead_authsize(tfm);
1570 
1571 	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
1572 
1573 	sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize));
1574 
1575 	sec_sqe->type2.mac_key_alg |=
1576 			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET);
1577 
1578 	sec_sqe->type2.mac_key_alg |=
1579 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
1580 
1581 	if (dir) {
1582 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
1583 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
1584 	} else {
1585 		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
1586 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
1587 	}
1588 	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
1589 
1590 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1591 
1592 	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
1593 }
1594 
1595 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
1596 {
1597 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1598 	struct sec_sqe *sec_sqe = &req->sec_sqe;
1599 	int ret;
1600 
1601 	ret = sec_skcipher_bd_fill(ctx, req);
1602 	if (unlikely(ret)) {
1603 		dev_err(ctx->dev, "skcipher bd fill is error!\n");
1604 		return ret;
1605 	}
1606 
1607 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1608 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1609 		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1610 	else
1611 		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
1612 
1613 	return 0;
1614 }
1615 
1616 static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
1617 				   struct sec_req *req, struct sec_sqe3 *sqe3)
1618 {
1619 	struct sec_aead_req *a_req = &req->aead_req;
1620 	struct sec_cipher_req *c_req = &req->c_req;
1621 	struct aead_request *aq = a_req->aead_req;
1622 	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
1623 	size_t authsize = crypto_aead_authsize(tfm);
1624 
1625 	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
1626 
1627 	sqe3->auth_mac_key |=
1628 			cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3);
1629 
1630 	sqe3->auth_mac_key |=
1631 			cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3);
1632 
1633 	sqe3->auth_mac_key |=
1634 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
1635 
1636 	if (dir) {
1637 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
1638 		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
1639 	} else {
1640 		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
1641 		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
1642 	}
1643 	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
1644 
1645 	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
1646 
1647 	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
1648 }
1649 
1650 static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
1651 {
1652 	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
1653 	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
1654 	int ret;
1655 
1656 	ret = sec_skcipher_bd_fill_v3(ctx, req);
1657 	if (unlikely(ret)) {
1658 		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
1659 		return ret;
1660 	}
1661 
1662 	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
1663 	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
1664 		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
1665 					req, sec_sqe3);
1666 	else
1667 		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
1668 				       req, sec_sqe3);
1669 
1670 	return 0;
1671 }
1672 
1673 static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
1674 {
1675 	struct aead_request *a_req = req->aead_req.aead_req;
1676 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
1677 	size_t authsize = crypto_aead_authsize(tfm);
1678 	struct sec_aead_req *aead_req = &req->aead_req;
1679 	struct sec_cipher_req *c_req = &req->c_req;
1680 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
1681 	struct aead_request *backlog_aead_req;
1682 	struct sec_req *backlog_req;
1683 	size_t sz;
1684 
1685 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
1686 		sec_update_iv(req, SEC_AEAD);
1687 
1688 	/* Copy output mac */
1689 	if (!err && c_req->encrypt) {
1690 		struct scatterlist *sgl = a_req->dst;
1691 
1692 		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
1693 					  authsize, a_req->cryptlen + a_req->assoclen);
1694 		if (unlikely(sz != authsize)) {
1695 			dev_err(c->dev, "copy out mac err!\n");
1696 			err = -EINVAL;
1697 		}
1698 	}
1699 
1700 	sec_free_req_id(req);
1701 
1702 	while (1) {
1703 		backlog_req = sec_back_req_clear(c, qp_ctx);
1704 		if (!backlog_req)
1705 			break;
1706 
1707 		backlog_aead_req = backlog_req->aead_req.aead_req;
1708 		aead_request_complete(backlog_aead_req, -EINPROGRESS);
1709 		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
1710 	}
1711 
1712 	aead_request_complete(a_req, err);
1713 }
1714 
1715 static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
1716 {
1717 	sec_free_req_id(req);
1718 	sec_free_queue_id(ctx, req);
1719 }
1720 
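/* Bind the request to a queue pair and allocate a per-queue request id. */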
1721 static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
1722 {
1723 	struct sec_qp_ctx *qp_ctx;
1724 	int queue_id;
1725 
1726 	/* Pick a queue for load balancing */
1727 	queue_id = sec_alloc_queue_id(ctx, req);
1728 	qp_ctx = &ctx->qp_ctx[queue_id];
1729 
1730 	req->req_id = sec_alloc_req_id(req, qp_ctx);
1731 	if (unlikely(req->req_id < 0)) {
1732 		sec_free_queue_id(ctx, req);
1733 		return req->req_id;
1734 	}
1735 
1736 	return 0;
1737 }
1738 
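/*
 * Common request path: initialise and transfer the request, save the output
 * IV for CBC/CTR decryption, then send the BD to the hardware. On a send
 * failure the original IV is restored and all resources are released.
 */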
1739 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
1740 {
1741 	struct sec_cipher_req *c_req = &req->c_req;
1742 	int ret;
1743 
1744 	ret = sec_request_init(ctx, req);
1745 	if (unlikely(ret))
1746 		return ret;
1747 
1748 	ret = sec_request_transfer(ctx, req);
1749 	if (unlikely(ret))
1750 		goto err_uninit_req;
1751 
1752 	/* When decrypting, save the output IV before the request is processed */
1753 	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
1754 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
1755 		sec_update_iv(req, ctx->alg_type);
1756 
1757 	ret = ctx->req_op->bd_send(ctx, req);
1758 	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
1759 		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1760 		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
1761 		goto err_send_req;
1762 	}
1763 
1764 	return ret;
1765 
1766 err_send_req:
1767 	/* On failure, restore the original IV from the user request */
1768 	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
1769 		if (ctx->alg_type == SEC_SKCIPHER)
1770 			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
1771 			       ctx->c_ctx.ivsize);
1772 		else
1773 			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
1774 			       ctx->c_ctx.ivsize);
1775 	}
1776 
1777 	sec_request_untransfer(ctx, req);
1778 err_uninit_req:
1779 	sec_request_uninit(ctx, req);
1780 	return ret;
1781 }
1782 
1783 static const struct sec_req_op sec_skcipher_req_ops = {
1784 	.buf_map	= sec_skcipher_sgl_map,
1785 	.buf_unmap	= sec_skcipher_sgl_unmap,
1786 	.do_transfer	= sec_skcipher_copy_iv,
1787 	.bd_fill	= sec_skcipher_bd_fill,
1788 	.bd_send	= sec_bd_send,
1789 	.callback	= sec_skcipher_callback,
1790 	.process	= sec_process,
1791 };
1792 
1793 static const struct sec_req_op sec_aead_req_ops = {
1794 	.buf_map	= sec_aead_sgl_map,
1795 	.buf_unmap	= sec_aead_sgl_unmap,
1796 	.do_transfer	= sec_aead_set_iv,
1797 	.bd_fill	= sec_aead_bd_fill,
1798 	.bd_send	= sec_bd_send,
1799 	.callback	= sec_aead_callback,
1800 	.process	= sec_process,
1801 };
1802 
1803 static const struct sec_req_op sec_skcipher_req_ops_v3 = {
1804 	.buf_map	= sec_skcipher_sgl_map,
1805 	.buf_unmap	= sec_skcipher_sgl_unmap,
1806 	.do_transfer	= sec_skcipher_copy_iv,
1807 	.bd_fill	= sec_skcipher_bd_fill_v3,
1808 	.bd_send	= sec_bd_send,
1809 	.callback	= sec_skcipher_callback,
1810 	.process	= sec_process,
1811 };
1812 
1813 static const struct sec_req_op sec_aead_req_ops_v3 = {
1814 	.buf_map	= sec_aead_sgl_map,
1815 	.buf_unmap	= sec_aead_sgl_unmap,
1816 	.do_transfer	= sec_aead_set_iv,
1817 	.bd_fill	= sec_aead_bd_fill_v3,
1818 	.bd_send	= sec_bd_send,
1819 	.callback	= sec_aead_callback,
1820 	.process	= sec_process,
1821 };
1822 
1823 static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
1824 {
1825 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
1826 	int ret;
1827 
1828 	ret = sec_skcipher_init(tfm);
1829 	if (ret)
1830 		return ret;
1831 
1832 	if (ctx->sec->qm.ver < QM_HW_V3) {
1833 		ctx->type_supported = SEC_BD_TYPE2;
1834 		ctx->req_op = &sec_skcipher_req_ops;
1835 	} else {
1836 		ctx->type_supported = SEC_BD_TYPE3;
1837 		ctx->req_op = &sec_skcipher_req_ops_v3;
1838 	}
1839 
1840 	return ret;
1841 }
1842 
1843 static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
1844 {
1845 	sec_skcipher_uninit(tfm);
1846 }
1847 
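/*
 * Common AEAD transform init: set the request size, validate the IV size,
 * select the v2 or v3 request ops from the hardware version and set up the
 * auth and cipher resources.
 */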
1848 static int sec_aead_init(struct crypto_aead *tfm)
1849 {
1850 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1851 	int ret;
1852 
1853 	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
1854 	ctx->alg_type = SEC_AEAD;
1855 	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
1856 	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
1857 	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
1858 		pr_err("invalid aead iv size!\n");
1859 		return -EINVAL;
1860 	}
1861 
1862 	ret = sec_ctx_base_init(ctx);
1863 	if (ret)
1864 		return ret;
1865 	if (ctx->sec->qm.ver < QM_HW_V3) {
1866 		ctx->type_supported = SEC_BD_TYPE2;
1867 		ctx->req_op = &sec_aead_req_ops;
1868 	} else {
1869 		ctx->type_supported = SEC_BD_TYPE3;
1870 		ctx->req_op = &sec_aead_req_ops_v3;
1871 	}
1872 
1873 	ret = sec_auth_init(ctx);
1874 	if (ret)
1875 		goto err_auth_init;
1876 
1877 	ret = sec_cipher_init(ctx);
1878 	if (ret)
1879 		goto err_cipher_init;
1880 
1881 	return ret;
1882 
1883 err_cipher_init:
1884 	sec_auth_uninit(ctx);
1885 err_auth_init:
1886 	sec_ctx_base_uninit(ctx);
1887 	return ret;
1888 }
1889 
1890 static void sec_aead_exit(struct crypto_aead *tfm)
1891 {
1892 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1893 
1894 	sec_cipher_uninit(ctx);
1895 	sec_auth_uninit(ctx);
1896 	sec_ctx_base_uninit(ctx);
1897 }
1898 
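/*
 * Init for the authenc(hmac(shaX),cbc(aes)) transforms: allocate a software
 * hash tfm and an async fallback AEAD tfm for requests the hardware path
 * cannot handle.
 */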
1899 static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
1900 {
1901 	struct aead_alg *alg = crypto_aead_alg(tfm);
1902 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1903 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1904 	const char *aead_name = alg->base.cra_name;
1905 	int ret;
1906 
1907 	ret = sec_aead_init(tfm);
1908 	if (ret) {
1909 		pr_err("hisi_sec2: aead init error!\n");
1910 		return ret;
1911 	}
1912 
1913 	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1914 	if (IS_ERR(a_ctx->hash_tfm)) {
1915 		dev_err(ctx->dev, "aead alloc shash error!\n");
1916 		sec_aead_exit(tfm);
1917 		return PTR_ERR(a_ctx->hash_tfm);
1918 	}
1919 
1920 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
1921 						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
1922 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
1923 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
1924 		crypto_free_shash(ctx->a_ctx.hash_tfm);
1925 		sec_aead_exit(tfm);
1926 		return PTR_ERR(a_ctx->fallback_aead_tfm);
1927 	}
1928 
1929 	return 0;
1930 }
1931 
1932 static void sec_aead_ctx_exit(struct crypto_aead *tfm)
1933 {
1934 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1935 
1936 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
1937 	crypto_free_shash(ctx->a_ctx.hash_tfm);
1938 	sec_aead_exit(tfm);
1939 }
1940 
1941 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
1942 {
1943 	struct aead_alg *alg = crypto_aead_alg(tfm);
1944 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1945 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
1946 	const char *aead_name = alg->base.cra_name;
1947 	int ret;
1948 
1949 	ret = sec_aead_init(tfm);
1950 	if (ret) {
1951 		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
1952 		return ret;
1953 	}
1954 
1955 	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
1956 						     CRYPTO_ALG_NEED_FALLBACK |
1957 						     CRYPTO_ALG_ASYNC);
1958 	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
1959 		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
1960 		sec_aead_exit(tfm);
1961 		return PTR_ERR(a_ctx->fallback_aead_tfm);
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
1968 {
1969 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
1970 
1971 	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
1972 	sec_aead_exit(tfm);
1973 }
1974 
1975 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
1976 {
1977 	return sec_aead_ctx_init(tfm, "sha1");
1978 }
1979 
1980 static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
1981 {
1982 	return sec_aead_ctx_init(tfm, "sha256");
1983 }
1984 
1985 static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
1986 {
1987 	return sec_aead_ctx_init(tfm, "sha512");
1988 }
1989 
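/*
 * Validate the request length against the per-mode constraints
 * (XTS, ECB/CBC, CTR).
 */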
1990 static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
1991 {
1992 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
1993 	struct device *dev = ctx->dev;
1994 	u8 c_mode = ctx->c_ctx.c_mode;
1995 	int ret = 0;
1996 
1997 	switch (c_mode) {
1998 	case SEC_CMODE_XTS:
1999 		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
2000 			dev_err(dev, "skcipher XTS mode input length error!\n");
2001 			ret = -EINVAL;
2002 		}
2003 		break;
2004 	case SEC_CMODE_ECB:
2005 	case SEC_CMODE_CBC:
2006 		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
2007 			dev_err(dev, "skcipher AES input length error!\n");
2008 			ret = -EINVAL;
2009 		}
2010 		break;
2011 	case SEC_CMODE_CTR:
2012 		break;
2013 	default:
2014 		ret = -EINVAL;
2015 	}
2016 
2017 	return ret;
2018 }
2019 
2020 static int sec_skcipher_param_check(struct sec_ctx *ctx,
2021 				    struct sec_req *sreq, bool *need_fallback)
2022 {
2023 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
2024 	struct device *dev = ctx->dev;
2025 	u8 c_alg = ctx->c_ctx.c_alg;
2026 
2027 	if (unlikely(!sk_req->src || !sk_req->dst)) {
2028 		dev_err(dev, "skcipher input param error!\n");
2029 		return -EINVAL;
2030 	}
2031 
2032 	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
2033 		*need_fallback = true;
2034 
2035 	sreq->c_req.c_len = sk_req->cryptlen;
2036 
2037 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
2038 		sreq->use_pbuf = true;
2039 	else
2040 		sreq->use_pbuf = false;
2041 
2042 	if (c_alg == SEC_CALG_3DES) {
2043 		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
2044 			dev_err(dev, "skcipher 3des input length error!\n");
2045 			return -EINVAL;
2046 		}
2047 		return 0;
2048 	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
2049 		return sec_skcipher_cryptlen_check(ctx, sreq);
2050 	}
2051 
2052 	dev_err(dev, "skcipher algorithm error!\n");
2053 
2054 	return -EINVAL;
2055 }
2056 
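/* Run the request through the synchronous software fallback cipher. */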
2057 static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
2058 				    struct skcipher_request *sreq, bool encrypt)
2059 {
2060 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
2061 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
2062 	struct device *dev = ctx->dev;
2063 	int ret;
2064 
2065 	if (!c_ctx->fbtfm) {
2066 		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
2067 		return -EINVAL;
2068 	}
2069 
2070 	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
2071 
2072 	/* the software fallback performs the crypto synchronously */
2073 	skcipher_request_set_callback(subreq, sreq->base.flags,
2074 				      NULL, NULL);
2075 	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
2076 				   sreq->cryptlen, sreq->iv);
2077 	if (encrypt)
2078 		ret = crypto_skcipher_encrypt(subreq);
2079 	else
2080 		ret = crypto_skcipher_decrypt(subreq);
2081 
2082 	skcipher_request_zero(subreq);
2083 
2084 	return ret;
2085 }
2086 
2087 static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
2088 {
2089 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
2090 	struct sec_req *req = skcipher_request_ctx(sk_req);
2091 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
2092 	bool need_fallback = false;
2093 	int ret;
2094 
2095 	if (!sk_req->cryptlen) {
2096 		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
2097 			return -EINVAL;
2098 		return 0;
2099 	}
2100 
2101 	req->flag = sk_req->base.flags;
2102 	req->c_req.sk_req = sk_req;
2103 	req->c_req.encrypt = encrypt;
2104 	req->ctx = ctx;
2105 
2106 	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
2107 	if (unlikely(ret))
2108 		return -EINVAL;
2109 
2110 	if (unlikely(ctx->c_ctx.fallback || need_fallback))
2111 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
2112 
2113 	return ctx->req_op->process(ctx, req);
2114 }
2115 
2116 static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
2117 {
2118 	return sec_skcipher_crypto(sk_req, true);
2119 }
2120 
2121 static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
2122 {
2123 	return sec_skcipher_crypto(sk_req, false);
2124 }
2125 
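/* Template for a hardware skcipher algorithm entry registered at SEC_PRIORITY. */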
2126 #define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
2127 	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
2128 {\
2129 	.base = {\
2130 		.cra_name = sec_cra_name,\
2131 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2132 		.cra_priority = SEC_PRIORITY,\
2133 		.cra_flags = CRYPTO_ALG_ASYNC |\
2134 		 CRYPTO_ALG_NEED_FALLBACK,\
2135 		.cra_blocksize = blk_size,\
2136 		.cra_ctxsize = sizeof(struct sec_ctx),\
2137 		.cra_module = THIS_MODULE,\
2138 	},\
2139 	.init = sec_skcipher_ctx_init,\
2140 	.exit = sec_skcipher_ctx_exit,\
2141 	.setkey = sec_set_key,\
2142 	.decrypt = sec_skcipher_decrypt,\
2143 	.encrypt = sec_skcipher_encrypt,\
2144 	.min_keysize = sec_min_key_size,\
2145 	.max_keysize = sec_max_key_size,\
2146 	.ivsize = iv_size,\
2147 }
2148 
2149 static struct sec_skcipher sec_skciphers[] = {
2150 	{
2151 		.alg_msk = BIT(0),
2152 		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
2153 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
2154 	},
2155 	{
2156 		.alg_msk = BIT(1),
2157 		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
2158 					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2159 	},
2160 	{
2161 		.alg_msk = BIT(2),
2162 		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,	AES_MIN_KEY_SIZE,
2163 					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2164 	},
2165 	{
2166 		.alg_msk = BIT(3),
2167 		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,	SEC_XTS_MIN_KEY_SIZE,
2168 					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2169 	},
2170 	{
2171 		.alg_msk = BIT(12),
2172 		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,	AES_MIN_KEY_SIZE,
2173 					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2174 	},
2175 	{
2176 		.alg_msk = BIT(13),
2177 		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
2178 					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
2179 	},
2180 	{
2181 		.alg_msk = BIT(14),
2182 		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,	SEC_XTS_MIN_KEY_SIZE,
2183 					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
2184 	},
2185 	{
2186 		.alg_msk = BIT(23),
2187 		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
2188 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
2189 	},
2190 	{
2191 		.alg_msk = BIT(24),
2192 		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
2193 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
2194 					DES3_EDE_BLOCK_SIZE),
2195 	},
2196 };
2197 
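/*
 * CCM IV sanity check: the length field L (iv[0] + 1) must be 2..8 bytes
 * and large enough to encode cryptlen.
 */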
2198 static int aead_iv_demension_check(struct aead_request *aead_req)
2199 {
2200 	u8 cl;
2201 
2202 	cl = aead_req->iv[0] + 1;
2203 	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
2204 		return -EINVAL;
2205 
2206 	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
2207 		return -EOVERFLOW;
2208 
2209 	return 0;
2210 }
2211 
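/*
 * Hardware-specific AEAD constraints: overall length limits, CCM AAD and
 * IV checks, and word alignment of the MAC and auth key sizes for the
 * CBC (authenc) case.
 */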
2212 static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
2213 {
2214 	struct aead_request *req = sreq->aead_req.aead_req;
2215 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2216 	size_t sz = crypto_aead_authsize(tfm);
2217 	u8 c_mode = ctx->c_ctx.c_mode;
2218 	int ret;
2219 
2220 	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
2221 		return -EINVAL;
2222 
2223 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
2224 		     req->assoclen > SEC_MAX_AAD_LEN))
2225 		return -EINVAL;
2226 
2227 	if (c_mode == SEC_CMODE_CCM) {
2228 		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
2229 			return -EINVAL;
2230 
2231 		ret = aead_iv_demension_check(req);
2232 		if (unlikely(ret))
2233 			return -EINVAL;
2234 	} else if (c_mode == SEC_CMODE_CBC) {
2235 		if (unlikely(sz & WORD_MASK))
2236 			return -EINVAL;
2237 		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
2238 			return -EINVAL;
2239 	}
2240 
2241 	return 0;
2242 }
2243 
2244 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
2245 {
2246 	struct aead_request *req = sreq->aead_req.aead_req;
2247 	struct device *dev = ctx->dev;
2248 	u8 c_alg = ctx->c_ctx.c_alg;
2249 
2250 	if (unlikely(!req->src || !req->dst)) {
2251 		dev_err(dev, "aead input param error!\n");
2252 		return -EINVAL;
2253 	}
2254 
2255 	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
2256 		     sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
2257 		dev_err(dev, "aead cbc mode input data length error!\n");
2258 		return -EINVAL;
2259 	}
2260 
2261 	/* Support AES or SM4 */
2262 	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
2263 		dev_err(dev, "aead crypto alg error!\n");
2264 		return -EINVAL;
2265 	}
2266 
2267 	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
2268 		*need_fallback = true;
2269 		return -EINVAL;
2270 	}
2271 
2272 	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
2273 		SEC_PBUF_SZ)
2274 		sreq->use_pbuf = true;
2275 	else
2276 		sreq->use_pbuf = false;
2277 
2278 	return 0;
2279 }
2280 
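/* Process the request with the asynchronous fallback AEAD implementation. */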
2281 static int sec_aead_soft_crypto(struct sec_ctx *ctx,
2282 				struct aead_request *aead_req,
2283 				bool encrypt)
2284 {
2285 	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
2286 	struct aead_request *subreq;
2287 	int ret;
2288 
2289 	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
2290 	if (!subreq)
2291 		return -ENOMEM;
2292 
2293 	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
2294 	aead_request_set_callback(subreq, aead_req->base.flags,
2295 				  aead_req->base.complete, aead_req->base.data);
2296 	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
2297 			       aead_req->cryptlen, aead_req->iv);
2298 	aead_request_set_ad(subreq, aead_req->assoclen);
2299 
2300 	if (encrypt)
2301 		ret = crypto_aead_encrypt(subreq);
2302 	else
2303 		ret = crypto_aead_decrypt(subreq);
2304 	aead_request_free(subreq);
2305 
2306 	return ret;
2307 }
2308 
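/*
 * Common AEAD entry point: stash the request parameters, validate them and
 * either dispatch to the hardware or fall back to software.
 */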
2309 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
2310 {
2311 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
2312 	struct sec_req *req = aead_request_ctx(a_req);
2313 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
2314 	size_t sz = crypto_aead_authsize(tfm);
2315 	bool need_fallback = false;
2316 	int ret;
2317 
2318 	req->flag = a_req->base.flags;
2319 	req->aead_req.aead_req = a_req;
2320 	req->c_req.encrypt = encrypt;
2321 	req->ctx = ctx;
2322 	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
2323 
2324 	ret = sec_aead_param_check(ctx, req, &need_fallback);
2325 	if (unlikely(ret)) {
2326 		if (need_fallback)
2327 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
2328 		return -EINVAL;
2329 	}
2330 
2331 	return ctx->req_op->process(ctx, req);
2332 }
2333 
2334 static int sec_aead_encrypt(struct aead_request *a_req)
2335 {
2336 	return sec_aead_crypto(a_req, true);
2337 }
2338 
2339 static int sec_aead_decrypt(struct aead_request *a_req)
2340 {
2341 	return sec_aead_crypto(a_req, false);
2342 }
2343 
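/* Template for a hardware AEAD algorithm entry registered at SEC_PRIORITY. */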
2344 #define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
2345 			 ctx_exit, blk_size, iv_size, max_authsize)\
2346 {\
2347 	.base = {\
2348 		.cra_name = sec_cra_name,\
2349 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
2350 		.cra_priority = SEC_PRIORITY,\
2351 		.cra_flags = CRYPTO_ALG_ASYNC |\
2352 		 CRYPTO_ALG_NEED_FALLBACK,\
2353 		.cra_blocksize = blk_size,\
2354 		.cra_ctxsize = sizeof(struct sec_ctx),\
2355 		.cra_module = THIS_MODULE,\
2356 	},\
2357 	.init = ctx_init,\
2358 	.exit = ctx_exit,\
2359 	.setkey = sec_set_key,\
2360 	.setauthsize = sec_aead_setauthsize,\
2361 	.decrypt = sec_aead_decrypt,\
2362 	.encrypt = sec_aead_encrypt,\
2363 	.ivsize = iv_size,\
2364 	.maxauthsize = max_authsize,\
2365 }
2366 
2367 static struct sec_aead sec_aeads[] = {
2368 	{
2369 		.alg_msk = BIT(6),
2370 		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
2371 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2372 				    AES_BLOCK_SIZE),
2373 	},
2374 	{
2375 		.alg_msk = BIT(7),
2376 		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
2377 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2378 				    AES_BLOCK_SIZE),
2379 	},
2380 	{
2381 		.alg_msk = BIT(17),
2382 		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
2383 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
2384 				    AES_BLOCK_SIZE),
2385 	},
2386 	{
2387 		.alg_msk = BIT(18),
2388 		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
2389 				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
2390 				    AES_BLOCK_SIZE),
2391 	},
2392 	{
2393 		.alg_msk = BIT(43),
2394 		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
2395 				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2396 				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
2397 	},
2398 	{
2399 		.alg_msk = BIT(44),
2400 		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
2401 				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2402 				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
2403 	},
2404 	{
2405 		.alg_msk = BIT(45),
2406 		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
2407 				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
2408 				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
2409 	},
2410 };
2411 
2412 static void sec_unregister_skcipher(u64 alg_mask, int end)
2413 {
2414 	int i;
2415 
2416 	for (i = 0; i < end; i++)
2417 		if (sec_skciphers[i].alg_msk & alg_mask)
2418 			crypto_unregister_skcipher(&sec_skciphers[i].alg);
2419 }
2420 
2421 static int sec_register_skcipher(u64 alg_mask)
2422 {
2423 	int i, ret, count;
2424 
2425 	count = ARRAY_SIZE(sec_skciphers);
2426 
2427 	for (i = 0; i < count; i++) {
2428 		if (!(sec_skciphers[i].alg_msk & alg_mask))
2429 			continue;
2430 
2431 		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
2432 		if (ret)
2433 			goto err;
2434 	}
2435 
2436 	return 0;
2437 
2438 err:
2439 	sec_unregister_skcipher(alg_mask, i);
2440 
2441 	return ret;
2442 }
2443 
2444 static void sec_unregister_aead(u64 alg_mask, int end)
2445 {
2446 	int i;
2447 
2448 	for (i = 0; i < end; i++)
2449 		if (sec_aeads[i].alg_msk & alg_mask)
2450 			crypto_unregister_aead(&sec_aeads[i].alg);
2451 }
2452 
2453 static int sec_register_aead(u64 alg_mask)
2454 {
2455 	int i, ret, count;
2456 
2457 	count = ARRAY_SIZE(sec_aeads);
2458 
2459 	for (i = 0; i < count; i++) {
2460 		if (!(sec_aeads[i].alg_msk & alg_mask))
2461 			continue;
2462 
2463 		ret = crypto_register_aead(&sec_aeads[i].alg);
2464 		if (ret)
2465 			goto err;
2466 	}
2467 
2468 	return 0;
2469 
2470 err:
2471 	sec_unregister_aead(alg_mask, i);
2472 
2473 	return ret;
2474 }
2475 
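/*
 * Register the supported algorithms with the crypto API when the first SEC
 * device is probed; later devices only increase the reference count.
 */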
2476 int sec_register_to_crypto(struct hisi_qm *qm)
2477 {
2478 	u64 alg_mask;
2479 	int ret = 0;
2480 
2481 	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
2482 				      SEC_DRV_ALG_BITMAP_LOW_TB);
2483 
2484 	mutex_lock(&sec_algs_lock);
2485 	if (sec_available_devs) {
2486 		sec_available_devs++;
2487 		goto unlock;
2488 	}
2489 
2490 	ret = sec_register_skcipher(alg_mask);
2491 	if (ret)
2492 		goto unlock;
2493 
2494 	ret = sec_register_aead(alg_mask);
2495 	if (ret)
2496 		goto unreg_skcipher;
2497 
2498 	sec_available_devs++;
2499 	mutex_unlock(&sec_algs_lock);
2500 
2501 	return 0;
2502 
2503 unreg_skcipher:
2504 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2505 unlock:
2506 	mutex_unlock(&sec_algs_lock);
2507 	return ret;
2508 }
2509 
2510 void sec_unregister_from_crypto(struct hisi_qm *qm)
2511 {
2512 	u64 alg_mask;
2513 
2514 	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB,
2515 				      SEC_DRV_ALG_BITMAP_LOW_TB);
2516 
2517 	mutex_lock(&sec_algs_lock);
2518 	if (--sec_available_devs)
2519 		goto unlock;
2520 
2521 	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
2522 	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
2523 
2524 unlock:
2525 	mutex_unlock(&sec_algs_lock);
2526 }
2527