// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/engine.h>
#include <crypto/kpp.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req)
			engine->cur_req = NULL;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By now we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_1;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, complete the request with the error code.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute the request, enqueue it
		 * back in front of the crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);

retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

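/*
 * Example (illustrative sketch, not part of this file): a driver-side
 * ->do_one_request() callback as dispatched above.  When the engine was
 * created with retry support, returning -ENOSPC makes crypto_pump_requests()
 * requeue the request at the head of the queue instead of failing it.
 * struct my_hw and the my_hw_* helpers are hypothetical driver code.
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *		struct my_hw *hw = engine->priv_data;
 *
 *		if (!my_hw_can_enqueue(hw))
 *			return -ENOSPC;	// hardware queue full: requeue
 *
 *		// Program the descriptor and return 0; the result is reported
 *		// later via crypto_finalize_skcipher_request().
 *		return my_hw_submit(hw, req);
 *	}
 */
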
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the request pump to kthread_work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

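/*
 * Example (illustrative sketch, not part of this file): an algorithm's
 * .encrypt handler typically just hands the request to the engine and
 * returns the transfer status (usually -EINPROGRESS, or -EBUSY when the
 * request was backlogged).  struct my_tfm_ctx and its engine pointer are
 * hypothetical; the aead/akcipher/hash/kpp helpers above are used the
 * same way for their request types.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *								  req);
 *	}
 */
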
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

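/*
 * Example (illustrative sketch, not part of this file): once the hardware
 * signals completion, the driver hands the result back so the engine can
 * complete the request and pump the next one.  crypto_finalize_request()
 * above asserts softirq context, so a tasklet (or similar BH context) is
 * a natural place for this.  struct my_hw and the my_hw_* helpers are
 * hypothetical; the aead/akcipher/hash/kpp variants are used the same way.
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_hw *hw = (struct my_hw *)data;
 *		struct skcipher_request *req = my_hw_pop_done(hw);
 *
 *		crypto_finalize_skcipher_request(hw->engine, req,
 *						 my_hw_status(hw));
 *	}
 */
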
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while until the queued requests have been pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device to which the hardware engine is attached
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware has support for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

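/*
 * Example (illustrative sketch, not part of this file): probe-time setup for
 * hardware that can accept several requests at once, using retry support and
 * a batch callback.  struct my_hw, my_hw_kick_batch and MY_HW_QLEN are
 * hypothetical driver code.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_hw *hw;
 *
 *		hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
 *		if (!hw)
 *			return -ENOMEM;
 *		platform_set_drvdata(pdev, hw);
 *
 *		hw->engine = crypto_engine_alloc_init_and_set(&pdev->dev,
 *							      true,
 *							      my_hw_kick_batch,
 *							      false,
 *							      MY_HW_QLEN);
 *		if (!hw->engine)
 *			return -ENOMEM;
 *
 *		return crypto_engine_start(hw->engine);
 *	}
 */
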
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device to which the hardware engine is attached
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

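/*
 * Example (illustrative sketch, not part of this file): most drivers only
 * need the default queue length and no batching, which is what this wrapper
 * provides.  hw is the same hypothetical driver structure as above.
 *
 *	hw->engine = crypto_engine_alloc_init(dev, false);
 *	if (!hw->engine)
 *		return -ENOMEM;
 *
 *	return crypto_engine_start(hw->engine);
 */
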
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

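/*
 * Example (illustrative sketch, not part of this file): the matching
 * teardown in a driver's .remove() path.  Unregister the algorithms first so
 * no new requests arrive, then let crypto_engine_exit() stop the engine and
 * destroy the pump worker.  struct my_hw and my_algs are hypothetical.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_hw *hw = platform_get_drvdata(pdev);
 *
 *		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *		crypto_engine_exit(hw->engine);
 *		return 0;
 *	}
 */
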
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");