author		Baolin Wang <baolin.wang@linaro.org>	2016-01-26 20:25:39 +0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2016-02-01 22:27:02 +0800
commit		735d37b5424b27aa685276b8b90b7e57c4705ac1 (patch)
tree		93fa6b3f48a144f4209d02c4b1c0a69677093a6b /include/crypto
parent		9f93a8a0ba91fa3babe76a428e6c24f4c39f125e (diff)
crypto: engine - Introduce the block request crypto engine framework
Block cipher drivers currently have to implement and maintain their own queue/thread
for processing requests. The existing helpers cover only the queue itself
(crypto_enqueue_request() and crypto_dequeue_request()); they don't help with the
mechanics of driving the hardware (running a request immediately, DMA-mapping it, or
providing a thread to process the queue), even though most of that code shouldn't
vary much from device to device.
This patch therefore provides a mechanism, which drivers can use, for pushing requests
to the hardware as it becomes free. The framework is patterned on the SPI core's
message pump, where the same approach has worked out well:
(https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/drivers/spi/spi.c?id=ffbbdd21329f3e15eeca6df2d4bc11c04d9d91c0)
Signed-off-by: Baolin Wang <baolin.wang@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
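
For orientation (not part of the commit), here is a minimal sketch of the driver-side
hooks this framework expects, as declared in struct crypto_engine in the diff below.
The device context struct my_dev and the helper my_hw_start() are hypothetical names
used only for illustration:

#include <linux/device.h>
#include <crypto/algapi.h>

struct my_dev {				/* hypothetical device context */
	struct device *dev;
	struct crypto_engine *engine;
};

static int my_prepare_request(struct crypto_engine *engine,
			      struct ablkcipher_request *req)
{
	/* e.g. DMA-map req->src/req->dst and load the IV here */
	return 0;
}

static int my_crypt_one_request(struct crypto_engine *engine,
				struct ablkcipher_request *req)
{
	struct my_dev *dd = engine->priv_data;

	/* kick the hardware; the completion IRQ later finalizes the request */
	return my_hw_start(dd, req);	/* my_hw_start() is hypothetical */
}

The engine framework owns the request queue and the pump kthread; the driver only
supplies hooks like these and reports completion through crypto_finalize_request().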
Diffstat (limited to 'include/crypto')
-rw-r--r-- | include/crypto/algapi.h | 70
1 file changed, 70 insertions, 0 deletions
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 4f861c44d066..b09d43f612e1 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -15,6 +15,7 @@
 #include <linux/crypto.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/skbuff.h>
 
 struct crypto_aead;
@@ -128,6 +129,75 @@ struct ablkcipher_walk {
 	unsigned int blocksize;
 };
 
+#define ENGINE_NAME_LEN	30
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is running
+ * @cur_req_prepared: current request is prepared
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @prepare_request: do any preparation, if needed, before handling the current request
+ * @unprepare_request: undo any work done by prepare_request()
+ * @crypt_one_request: do encryption for current request
+ * @kworker: thread struct for request pump
+ * @kworker_task: pointer to task for request pump kworker thread
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is being processed
+ */
+struct crypto_engine {
+	char			name[ENGINE_NAME_LEN];
+	bool			idling;
+	bool			busy;
+	bool			running;
+	bool			cur_req_prepared;
+
+	struct list_head	list;
+	spinlock_t		queue_lock;
+	struct crypto_queue	queue;
+
+	bool			rt;
+
+	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+
+	int (*prepare_request)(struct crypto_engine *engine,
+			       struct ablkcipher_request *req);
+	int (*unprepare_request)(struct crypto_engine *engine,
+				 struct ablkcipher_request *req);
+	int (*crypt_one_request)(struct crypto_engine *engine,
+				 struct ablkcipher_request *req);
+
+	struct kthread_worker		kworker;
+	struct task_struct		*kworker_task;
+	struct kthread_work		pump_requests;
+
+	void				*priv_data;
+	struct ablkcipher_request	*cur_req;
+};
+
+int crypto_transfer_request(struct crypto_engine *engine,
+			    struct ablkcipher_request *req, bool need_pump);
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+				      struct ablkcipher_request *req);
+void crypto_finalize_request(struct crypto_engine *engine,
+			     struct ablkcipher_request *req, int err);
+int crypto_engine_start(struct crypto_engine *engine);
+int crypto_engine_stop(struct crypto_engine *engine);
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+int crypto_engine_exit(struct crypto_engine *engine);
+
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_blkcipher_type;
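
Continuing the hypothetical my_dev sketch above, this is roughly how a driver might
wire the exported helpers together at probe time, in its request path, and on
completion. It is a sketch under those assumptions, not code from this patch;
my_ablkcipher_encrypt(), my_dev_from_req() and the IRQ handler are illustrative names:

#include <linux/interrupt.h>

static int my_probe(struct my_dev *dd)
{
	/* allocate the engine and its pump thread (non-realtime) */
	dd->engine = crypto_engine_alloc_init(dd->dev, false);
	if (!dd->engine)
		return -ENOMEM;

	dd->engine->priv_data = dd;
	dd->engine->prepare_request = my_prepare_request;
	dd->engine->crypt_one_request = my_crypt_one_request;

	/* mark the engine running so queued requests get pumped */
	return crypto_engine_start(dd->engine);
}

/* .encrypt/.decrypt entry point: hand the request to the engine's queue */
static int my_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct my_dev *dd = my_dev_from_req(req);	/* hypothetical lookup */

	return crypto_transfer_request_to_engine(dd->engine, req);
}

/* completion IRQ: finish the current request so the pump can run the next one */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dd = data;

	crypto_finalize_request(dd->engine, dd->engine->cur_req, 0);
	return IRQ_HANDLED;
}

static void my_remove(struct my_dev *dd)
{
	crypto_engine_stop(dd->engine);		/* stop the request pump */
	crypto_engine_exit(dd->engine);		/* tear down the kworker thread */
}

The point of the design, as with the SPI message pump it is modelled on, is that the
queueing, the pump thread and the prepare/unprepare sequencing live in common code,
leaving the driver with only the hardware-specific hooks.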