// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: completion error code (0 on success)
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point the request has been dequeued successfully */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
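
/*
 * Illustrative sketch (not part of the upstream file): how a driver wires
 * up the per-transform callbacks that crypto_pump_requests() invokes. The
 * framework fetches them via crypto_tfm_ctx(), so struct crypto_engine_ctx
 * is assumed to be the first member of the driver's transform context.
 * All "my_*" names below are hypothetical:
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must come first
 *		// driver-private state follows
 *	};
 *
 *	static int my_skcipher_init(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = my_prepare_req;
 *		ctx->enginectx.op.unprepare_request = my_unprepare_req;
 *		ctx->enginectx.op.do_one_request = my_do_one_req;
 *		return 0;
 *	}
 */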

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to kick the request pump after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue and kick the request pump
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
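
/*
 * Illustrative sketch (not part of the upstream file): a driver's
 * ->encrypt() handler typically just queues the request on the engine and
 * propagates the helper's return value (-EINPROGRESS or -EBUSY on success)
 * back to the caller. "my_dev" and "my_skcipher_encrypt" are hypothetical:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_req(req);	// hypothetical lookup
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 */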

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: completion error code (0 on success)
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: completion error code (0 on success)
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: completion error code (0 on success)
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: completion error code (0 on success)
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
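
/*
 * Illustrative sketch (not part of the upstream file): once the hardware
 * signals completion (e.g. from the driver's IRQ handler or a tasklet),
 * the driver finalizes the in-flight request; this completes the request
 * and queues the pump to fetch the next one. Assumes a hypothetical driver
 * that stashed the current request in "dd->req":
 *
 *	static void my_crypt_done(struct my_dev *dd, int err)
 *	{
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *	}
 */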

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need
	 * to wait a while for the pending requests to be pumped out.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure
 * and initialize it
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
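
/*
 * Illustrative sketch (not part of the upstream file): the typical engine
 * lifecycle in a driver's probe/remove paths. "my_*" names are
 * hypothetical and error handling is abbreviated:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = ...;	// driver allocation elided
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(dd->engine);
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		crypto_engine_exit(dd->engine);
 *		return 0;
 *	}
 */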

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");