| author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-04 18:01:17 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-04 18:01:17 -0700 |
| commit | 6adae5d9e69743aede91b274224751811f7174f1 | |
| tree | 5ad15959313fbf4b7a37a36e40b04ca718b1e423 /crypto | |
| parent | 253f04e78ded827c30f9582489773ebe2adc8924 | |
| parent | f6259deacfd55607ae57cff422d3bc7694ea14e7 | |
Merge master.kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6:
[CRYPTO] padlock: Remove pointless padlock module
[CRYPTO] api: Add ablkcipher_request_set_tfm
[CRYPTO] cryptd: Add software async crypto daemon
[CRYPTO] api: Do not remove users unless new algorithm matches
[CRYPTO] cryptomgr: Fix parsing of nested templates
[CRYPTO] api: Add async blkcipher type
[CRYPTO] templates: Pass type/mask when creating instances
[CRYPTO] tcrypt: Use async blkcipher interface
[CRYPTO] api: Add async block cipher interface
[CRYPTO] api: Proc functions should be marked as unused
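The centrepiece of this merge is the asynchronous block cipher (ablkcipher) interface: a caller submits an ablkcipher_request and learns of completion through a callback, as the reworked tcrypt tests in the diff below show. The following sketch is modeled on those tcrypt changes; it is not code from the merge, and the "cbc(aes)" algorithm name and the demo_* helpers are illustrative assumptions.

```c
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* Completion callback: record the final status and wake the submitter. */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)	/* request left the backlog, keep waiting */
		return;

	res->err = err;
	complete(&res->completion);
}

static int demo_encrypt(u8 *buf, unsigned int len, const u8 *key,
			unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct demo_result res;
	int ret;

	/* Type/mask of 0 accepts either a synchronous or an async provider. */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ablkcipher(tfm);
		return -ENOMEM;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_complete, &res);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Asynchronous path: demo_complete() delivers the result. */
		wait_for_completion(&res.completion);
		ret = res.err;
	}
out:
	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);
	return ret;
}
```

A return value of -EINPROGRESS or -EBUSY from crypto_ablkcipher_encrypt() means the request was queued (or backlogged) and the callback reports the final status; 0 means it completed synchronously.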
Diffstat (limited to 'crypto')
| mode | path | lines changed |
| --- | --- | --- |
| -rw-r--r-- | crypto/Kconfig | 13 |
| -rw-r--r-- | crypto/Makefile | 2 |
| -rw-r--r-- | crypto/ablkcipher.c | 83 |
| -rw-r--r-- | crypto/algapi.c | 169 |
| -rw-r--r-- | crypto/blkcipher.c | 72 |
| -rw-r--r-- | crypto/cbc.c | 11 |
| -rw-r--r-- | crypto/cryptd.c | 375 |
| -rw-r--r-- | crypto/cryptomgr.c | 66 |
| -rw-r--r-- | crypto/ecb.c | 11 |
| -rw-r--r-- | crypto/hash.c | 2 |
| -rw-r--r-- | crypto/hmac.c | 11 |
| -rw-r--r-- | crypto/lrw.c | 11 |
| -rw-r--r-- | crypto/pcbc.c | 11 |
| -rw-r--r-- | crypto/tcrypt.c | 121 |
| -rw-r--r-- | crypto/xcbc.c | 12 |
15 files changed, 851 insertions, 119 deletions
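Much of the growth in algapi.c comes from a generic request queue (crypto_init_queue, crypto_enqueue_request, crypto_dequeue_request) that cryptd uses to hand requests from submitters to its kernel thread. The sketch below illustrates that producer/consumer pattern under a spinlock; the demo_* names, the fixed queue depth, and the locking choice are assumptions modeled on how cryptd drives the queue, not code from the patch.

```c
#include <crypto/algapi.h>
#include <linux/spinlock.h>

static struct crypto_queue demo_queue;
static DEFINE_SPINLOCK(demo_lock);

static void demo_queue_setup(void)
{
	/* Accept up to 100 outstanding requests before callers see -EBUSY. */
	crypto_init_queue(&demo_queue, 100);
}

/* Producer: returns -EINPROGRESS when queued, -EBUSY when backlogged. */
static int demo_submit(struct crypto_async_request *req)
{
	int err;

	spin_lock_bh(&demo_lock);
	err = crypto_enqueue_request(&demo_queue, req);
	spin_unlock_bh(&demo_lock);

	return err;
}

/* Consumer: pop one request, notify any backlogged one, then complete it. */
static void demo_drain_one(void)
{
	struct crypto_async_request *req, *backlog;

	spin_lock_bh(&demo_lock);
	backlog = crypto_get_backlog(&demo_queue);
	req = crypto_dequeue_request(&demo_queue);
	spin_unlock_bh(&demo_lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);
}
```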
diff --git a/crypto/Kconfig b/crypto/Kconfig index 086fcec4472..620e14cabdc 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -16,6 +16,10 @@ config CRYPTO_ALGAPI help This option provides the API for cryptographic algorithms. +config CRYPTO_ABLKCIPHER + tristate + select CRYPTO_BLKCIPHER + config CRYPTO_BLKCIPHER tristate select CRYPTO_ALGAPI @@ -171,6 +175,15 @@ config CRYPTO_LRW The first 128, 192 or 256 bits in the key are used for AES and the rest is used to tie each cipher block to its logical position. +config CRYPTO_CRYPTD + tristate "Software async crypto daemon" + select CRYPTO_ABLKCIPHER + select CRYPTO_MANAGER + help + This is a generic software asynchronous crypto daemon that + converts an arbitrary synchronous software crypto algorithm + into an asynchronous algorithm that executes in a kernel thread. + config CRYPTO_DES tristate "DES and Triple DES EDE cipher algorithms" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 12f93f57817..cce46a1c9dc 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -8,6 +8,7 @@ crypto_algapi-$(CONFIG_PROC_FS) += proc.o crypto_algapi-objs := algapi.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o +obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o crypto_hash-objs := hash.o @@ -29,6 +30,7 @@ obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o +obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o obj-$(CONFIG_CRYPTO_DES) += des.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c new file mode 100644 index 00000000000..9348ddd84a5 --- /dev/null +++ b/crypto/ablkcipher.c @@ -0,0 +1,83 @@ +/* + * Asynchronous block chaining cipher operations. + * + * This is the asynchronous version of blkcipher.c indicating completion + * via a callback. + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include <crypto/algapi.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/seq_file.h> + +static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); + + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return cipher->setkey(tfm, key, keylen); +} + +static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return alg->cra_ctxsize; +} + +static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, + u32 mask) +{ + struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; + + seq_printf(m, "type : ablkcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); + seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); + seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); + seq_printf(m, "qlen : %u\n", ablkcipher->queue->qlen); + seq_printf(m, "max qlen : %u\n", ablkcipher->queue->max_qlen); +} + +const struct crypto_type crypto_ablkcipher_type = { + .ctxsize = crypto_ablkcipher_ctxsize, + .init = crypto_init_ablkcipher_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_ablkcipher_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_ablkcipher_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Asynchronous block chaining cipher type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index f7d2185b2c8..f137a432061 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -84,36 +84,47 @@ static void crypto_destroy_instance(struct crypto_alg *alg) crypto_tmpl_put(tmpl); } -static void crypto_remove_spawns(struct list_head *spawns, - struct list_head *list) +static void crypto_remove_spawn(struct crypto_spawn *spawn, + struct list_head *list, + struct list_head *secondary_spawns) { - struct crypto_spawn *spawn, *n; + struct crypto_instance *inst = spawn->inst; + struct crypto_template *tmpl = inst->tmpl; - list_for_each_entry_safe(spawn, n, spawns, list) { - struct crypto_instance *inst = spawn->inst; - struct crypto_template *tmpl = inst->tmpl; + list_del_init(&spawn->list); + spawn->alg = NULL; - list_del_init(&spawn->list); - spawn->alg = NULL; + if (crypto_is_dead(&inst->alg)) + return; - if (crypto_is_dead(&inst->alg)) - continue; + inst->alg.cra_flags |= CRYPTO_ALG_DEAD; + if (!tmpl || !crypto_tmpl_get(tmpl)) + return; - inst->alg.cra_flags |= CRYPTO_ALG_DEAD; - if (!tmpl || !crypto_tmpl_get(tmpl)) + crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); + list_move(&inst->alg.cra_list, list); + hlist_del(&inst->list); + inst->alg.cra_destroy = crypto_destroy_instance; + + list_splice(&inst->alg.cra_users, secondary_spawns); +} + +static void crypto_remove_spawns(struct list_head *spawns, + struct list_head *list, u32 new_type) +{ + struct crypto_spawn *spawn, *n; + LIST_HEAD(secondary_spawns); + + list_for_each_entry_safe(spawn, n, 
spawns, list) { + if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) continue; - crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); - list_move(&inst->alg.cra_list, list); - hlist_del(&inst->list); - inst->alg.cra_destroy = crypto_destroy_instance; + crypto_remove_spawn(spawn, list, &secondary_spawns); + } - if (!list_empty(&inst->alg.cra_users)) { - if (&n->list == spawns) - n = list_entry(inst->alg.cra_users.next, - typeof(*n), list); - __list_splice(&inst->alg.cra_users, spawns->prev); - } + while (!list_empty(&secondary_spawns)) { + list_for_each_entry_safe(spawn, n, &secondary_spawns, list) + crypto_remove_spawn(spawn, list, &secondary_spawns); } } @@ -164,7 +175,7 @@ static int __crypto_register_alg(struct crypto_alg *alg, q->cra_priority > alg->cra_priority) continue; - crypto_remove_spawns(&q->cra_users, list); + crypto_remove_spawns(&q->cra_users, list, alg->cra_flags); } list_add(&alg->cra_list, &crypto_alg_list); @@ -214,7 +225,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); list_del_init(&alg->cra_list); - crypto_remove_spawns(&alg->cra_users, list); + crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags); return 0; } @@ -351,11 +362,12 @@ err: EXPORT_SYMBOL_GPL(crypto_register_instance); int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, - struct crypto_instance *inst) + struct crypto_instance *inst, u32 mask) { int err = -EAGAIN; spawn->inst = inst; + spawn->mask = mask; down_write(&crypto_alg_sem); if (!crypto_is_moribund(alg)) { @@ -425,15 +437,45 @@ int crypto_unregister_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); -struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, - u32 type, u32 mask) +struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) +{ + struct rtattr *rta = tb[CRYPTOA_TYPE - 1]; + struct crypto_attr_type *algt; + + if (!rta) + return ERR_PTR(-ENOENT); + if (RTA_PAYLOAD(rta) < sizeof(*algt)) + return ERR_PTR(-EINVAL); + + algt = RTA_DATA(rta); + + return algt; +} +EXPORT_SYMBOL_GPL(crypto_get_attr_type); + +int crypto_check_attr_type(struct rtattr **tb, u32 type) { - struct rtattr *rta = param; + struct crypto_attr_type *algt; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return PTR_ERR(algt); + + if ((algt->type ^ type) & algt->mask) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_check_attr_type); + +struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask) +{ + struct rtattr *rta = tb[CRYPTOA_ALG - 1]; struct crypto_attr_alg *alga; - if (!RTA_OK(rta, len)) - return ERR_PTR(-EBADR); - if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga)) + if (!rta) + return ERR_PTR(-ENOENT); + if (RTA_PAYLOAD(rta) < sizeof(*alga)) return ERR_PTR(-EINVAL); alga = RTA_DATA(rta); @@ -464,7 +506,8 @@ struct crypto_instance *crypto_alloc_instance(const char *name, goto err_free_inst; spawn = crypto_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, inst); + err = crypto_init_spawn(spawn, alg, inst, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); if (err) goto err_free_inst; @@ -477,6 +520,68 @@ err_free_inst: } EXPORT_SYMBOL_GPL(crypto_alloc_instance); +void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) +{ + INIT_LIST_HEAD(&queue->list); + queue->backlog = &queue->list; + queue->qlen = 0; + queue->max_qlen = max_qlen; +} +EXPORT_SYMBOL_GPL(crypto_init_queue); + +int crypto_enqueue_request(struct crypto_queue 
*queue, + struct crypto_async_request *request) +{ + int err = -EINPROGRESS; + + if (unlikely(queue->qlen >= queue->max_qlen)) { + err = -EBUSY; + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + goto out; + if (queue->backlog == &queue->list) + queue->backlog = &request->list; + } + + queue->qlen++; + list_add_tail(&request->list, &queue->list); + +out: + return err; +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request); + +struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) +{ + struct list_head *request; + + if (unlikely(!queue->qlen)) + return NULL; + + queue->qlen--; + + if (queue->backlog != &queue->list) + queue->backlog = queue->backlog->next; + + request = queue->list.next; + list_del(request); + + return list_entry(request, struct crypto_async_request, list); +} +EXPORT_SYMBOL_GPL(crypto_dequeue_request); + +int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) +{ + struct crypto_async_request *req; + + list_for_each_entry(req, &queue->list, list) { + if (req->tfm == tfm) + return 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_tfm_in_queue); + static int __init crypto_algapi_init(void) { crypto_init_proc(); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index b5befe8c3a9..8edf40c835a 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -349,13 +349,48 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, return cipher->setkey(tfm, key, keylen); } +static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + return setkey(crypto_ablkcipher_tfm(tfm), key, keylen); +} + +static int async_encrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + struct blkcipher_desc desc = { + .tfm = __crypto_blkcipher_cast(tfm), + .info = req->info, + .flags = req->base.flags, + }; + + + return alg->encrypt(&desc, req->dst, req->src, req->nbytes); +} + +static int async_decrypt(struct ablkcipher_request *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + struct blkcipher_desc desc = { + .tfm = __crypto_blkcipher_cast(tfm), + .info = req->info, + .flags = req->base.flags, + }; + + return alg->decrypt(&desc, req->dst, req->src, req->nbytes); +} + static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { struct blkcipher_alg *cipher = &alg->cra_blkcipher; unsigned int len = alg->cra_ctxsize; - if (cipher->ivsize) { + type ^= CRYPTO_ALG_ASYNC; + mask &= CRYPTO_ALG_ASYNC; + if ((type & mask) && cipher->ivsize) { len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); len += cipher->ivsize; } @@ -363,16 +398,26 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type, return len; } -static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm) +{ + struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + crt->setkey = async_setkey; + crt->encrypt = async_encrypt; + crt->decrypt = async_decrypt; + crt->ivsize = alg->ivsize; + + return 0; +} + +static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm) { struct blkcipher_tfm *crt = &tfm->crt_blkcipher; struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1; unsigned long addr; - if (alg->ivsize > PAGE_SIZE / 8) - return -EINVAL; - 
crt->setkey = setkey; crt->encrypt = alg->encrypt; crt->decrypt = alg->decrypt; @@ -385,8 +430,23 @@ static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) return 0; } +static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + type ^= CRYPTO_ALG_ASYNC; + mask &= CRYPTO_ALG_ASYNC; + if (type & mask) + return crypto_init_blkcipher_ops_sync(tfm); + else + return crypto_init_blkcipher_ops_async(tfm); +} + static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) - __attribute_used__; + __attribute__ ((unused)); static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : blkcipher\n"); diff --git a/crypto/cbc.c b/crypto/cbc.c index 136fea7e700..1f2649e13b4 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -275,13 +275,18 @@ static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/cryptd.c b/crypto/cryptd.c new file mode 100644 index 00000000000..3ff4e1f0f03 --- /dev/null +++ b/crypto/cryptd.c @@ -0,0 +1,375 @@ +/* + * Software async crypto daemon. + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#define CRYPTD_MAX_QLEN 100 + +struct cryptd_state { + spinlock_t lock; + struct mutex mutex; + struct crypto_queue queue; + struct task_struct *task; +}; + +struct cryptd_instance_ctx { + struct crypto_spawn spawn; + struct cryptd_state *state; +}; + +struct cryptd_blkcipher_ctx { + struct crypto_blkcipher *child; +}; + +struct cryptd_blkcipher_request_ctx { + crypto_completion_t complete; +}; + + +static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); + return ictx->state; +} + +static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, + const u8 *key, unsigned int keylen) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); + struct crypto_blkcipher *child = ctx->child; + int err; + + crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_blkcipher_setkey(child, key, keylen); + crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, + struct crypto_blkcipher *child, + int err, + int (*crypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int len)) +{ + struct cryptd_blkcipher_request_ctx *rctx; + struct blkcipher_desc desc; + + rctx = ablkcipher_request_ctx(req); + + if (unlikely(err == -EINPROGRESS)) { + rctx->complete(&req->base, err); + return; + } + + desc.tfm = child; + desc.info = req->info; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypt(&desc, req->dst, req->src, req->nbytes); + + req->base.complete = rctx->complete; + + local_bh_disable(); + req->base.complete(&req->base, err); + local_bh_enable(); +} + +static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); + struct crypto_blkcipher *child = ctx->child; + + cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, + crypto_blkcipher_crt(child)->encrypt); +} + +static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); + struct crypto_blkcipher *child = ctx->child; + + cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, + crypto_blkcipher_crt(child)->decrypt); +} + +static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, + crypto_completion_t complete) +{ + struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct cryptd_state *state = + cryptd_get_state(crypto_ablkcipher_tfm(tfm)); + int err; + + rctx->complete = req->base.complete; + req->base.complete = complete; + + spin_lock_bh(&state->lock); + err = ablkcipher_enqueue_request(crypto_ablkcipher_alg(tfm), req); + spin_unlock_bh(&state->lock); + + wake_up_process(state->task); + return err; +} + +static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) +{ + return 
cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); +} + +static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) +{ + return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); +} + +static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); + struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_spawn *spawn = &ictx->spawn; + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_blkcipher *cipher; + + cipher = crypto_spawn_blkcipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->child = cipher; + tfm->crt_ablkcipher.reqsize = + sizeof(struct cryptd_blkcipher_request_ctx); + return 0; +} + +static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) +{ + struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct cryptd_state *state = cryptd_get_state(tfm); + int active; + + mutex_lock(&state->mutex); + active = ablkcipher_tfm_in_queue(__crypto_ablkcipher_cast(tfm)); + mutex_unlock(&state->mutex); + + BUG_ON(active); + + crypto_free_blkcipher(ctx->child); +} + +static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, + struct cryptd_state *state) +{ + struct crypto_instance *inst; + struct cryptd_instance_ctx *ctx; + int err; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + if (IS_ERR(inst)) + goto out; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto out_free_inst; + + ctx = crypto_instance_ctx(inst); + err = crypto_init_spawn(&ctx->spawn, alg, inst, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (err) + goto out_free_inst; + + ctx->state = state; + + memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + + inst->alg.cra_priority = alg->cra_priority + 50; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + +out: + return inst; + +out_free_inst: + kfree(inst); + inst = ERR_PTR(err); + goto out; +} + +static struct crypto_instance *cryptd_alloc_blkcipher( + struct rtattr **tb, struct cryptd_state *state) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = cryptd_alloc_instance(alg, state); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC; + inst->alg.cra_type = &crypto_ablkcipher_type; + + inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; + inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; + inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; + + inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); + + inst->alg.cra_init = cryptd_blkcipher_init_tfm; + inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; + + inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; + inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; + inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; + + inst->alg.cra_ablkcipher.queue = &state->queue; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + +static struct cryptd_state state; + +static struct crypto_instance *cryptd_alloc(struct rtattr **tb) +{ + struct crypto_attr_type *algt; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return ERR_PTR(PTR_ERR(algt)); + + 
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_BLKCIPHER: + return cryptd_alloc_blkcipher(tb, &state); + } + + return ERR_PTR(-EINVAL); +} + +static void cryptd_free(struct crypto_instance *inst) +{ + struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ctx->spawn); + kfree(inst); +} + +static struct crypto_template cryptd_tmpl = { + .name = "cryptd", + .alloc = cryptd_alloc, + .free = cryptd_free, + .module = THIS_MODULE, +}; + +static inline int cryptd_create_thread(struct cryptd_state *state, + int (*fn)(void *data), const char *name) +{ + spin_lock_init(&state->lock); + mutex_init(&state->mutex); + crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN); + + state->task = kthread_create(fn, state, name); + if (IS_ERR(state->task)) + return PTR_ERR(state->task); + + return 0; +} + +static inline void cryptd_stop_thread(struct cryptd_state *state) +{ + BUG_ON(state->queue.qlen); + kthread_stop(state->task); +} + +static int cryptd_thread(void *data) +{ + struct cryptd_state *state = data; + int stop; + + do { + struct crypto_async_request *req, *backlog; + + mutex_lock(&state->mutex); + __set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_bh(&state->lock); + backlog = crypto_get_backlog(&state->queue); + req = crypto_dequeue_request(&state->queue); + spin_unlock_bh(&state->lock); + + stop = kthread_should_stop(); + + if (stop || req) { + __set_current_state(TASK_RUNNING); + if (req) { + if (backlog) + backlog->complete(backlog, + -EINPROGRESS); + req->complete(req, 0); + } + } + + mutex_unlock(&state->mutex); + + schedule(); + } while (!stop); + + return 0; +} + +static int __init cryptd_init(void) +{ + int err; + + err = cryptd_create_thread(&state, cryptd_thread, "cryptd"); + if (err) + return err; + + err = crypto_register_template(&cryptd_tmpl); + if (err) + kthread_stop(state.task); + + return err; +} + +static void __exit cryptd_exit(void) +{ + cryptd_stop_thread(&state); + crypto_unregister_template(&cryptd_tmpl); +} + +module_init(cryptd_init); +module_exit(cryptd_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software async crypto daemon"); diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c index 2ebffb84f1d..6958ea83ee4 100644 --- a/crypto/cryptomgr.c +++ b/crypto/cryptomgr.c @@ -14,17 +14,24 @@ #include <linux/ctype.h> #include <linux/err.h> #include <linux/init.h> +#include <linux/kthread.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> #include <linux/sched.h> #include <linux/string.h> -#include <linux/workqueue.h> #include "internal.h" struct cryptomgr_param { - struct work_struct work; + struct task_struct *thread; + + struct rtattr *tb[CRYPTOA_MAX]; + + struct { + struct rtattr attr; + struct crypto_attr_type data; + } type; struct { struct rtattr attr; @@ -32,18 +39,15 @@ struct cryptomgr_param { } alg; struct { - u32 type; - u32 mask; char name[CRYPTO_MAX_ALG_NAME]; } larval; char template[CRYPTO_MAX_ALG_NAME]; }; -static void cryptomgr_probe(struct work_struct *work) +static int cryptomgr_probe(void *data) { - struct cryptomgr_param *param = - container_of(work, struct cryptomgr_param, work); + struct cryptomgr_param *param = data; struct crypto_template *tmpl; struct crypto_instance *inst; int err; @@ -53,7 +57,7 @@ static void cryptomgr_probe(struct work_struct *work) goto err; do { - inst = tmpl->alloc(¶m->alg, sizeof(param->alg)); + inst = tmpl->alloc(param->tb); if (IS_ERR(inst)) err = PTR_ERR(inst); else if ((err = crypto_register_instance(tmpl, inst))) @@ 
-67,11 +71,11 @@ static void cryptomgr_probe(struct work_struct *work) out: kfree(param); - return; + module_put_and_exit(0); err: - crypto_larval_error(param->larval.name, param->larval.type, - param->larval.mask); + crypto_larval_error(param->larval.name, param->type.data.type, + param->type.data.mask); goto out; } @@ -82,10 +86,13 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) const char *p; unsigned int len; - param = kmalloc(sizeof(*param), GFP_KERNEL); - if (!param) + if (!try_module_get(THIS_MODULE)) goto err; + param = kzalloc(sizeof(*param), GFP_KERNEL); + if (!param) + goto err_put_module; + for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) ; @@ -94,32 +101,45 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) goto err_free_param; memcpy(param->template, name, len); - param->template[len] = 0; name = p + 1; - for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) - ; + len = 0; + for (p = name; *p; p++) { + for (; isalnum(*p) || *p == '-' || *p == '_' || *p == '('; p++) + ; - len = p - name; - if (!len || *p != ')' || p[1]) + if (*p != ')') + goto err_free_param; + + len = p - name; + } + + if (!len || name[len + 1]) goto err_free_param; + param->type.attr.rta_len = sizeof(param->type); + param->type.attr.rta_type = CRYPTOA_TYPE; + param->type.data.type = larval->alg.cra_flags; + param->type.data.mask = larval->mask; + param->tb[CRYPTOA_TYPE - 1] = ¶m->type.attr; + param->alg.attr.rta_len = sizeof(param->alg); param->alg.attr.rta_type = CRYPTOA_ALG; memcpy(param->alg.data.name, name, len); - param->alg.data.name[len] = 0; + param->tb[CRYPTOA_ALG - 1] = ¶m->alg.attr; memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); - param->larval.type = larval->alg.cra_flags; - param->larval.mask = larval->mask; - INIT_WORK(¶m->work, cryptomgr_probe); - schedule_work(¶m->work); + param->thread = kthread_run(cryptomgr_probe, param, "cryptomgr"); + if (IS_ERR(param->thread)) + goto err_free_param; return NOTIFY_STOP; err_free_param: kfree(param); +err_put_module: + module_put(THIS_MODULE); err: return NOTIFY_OK; } diff --git a/crypto/ecb.c b/crypto/ecb.c index 839a0aed8c2..6310387a872 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -115,13 +115,18 @@ static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/hash.c b/crypto/hash.c index 12c4514f347..4ccd22deef3 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -41,7 +41,7 @@ static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) } static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) - __attribute_used__; + __attribute__ ((unused)); static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : hash\n"); diff --git a/crypto/hmac.c b/crypto/hmac.c index 44187c5ee59..8802fb6dd5a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -197,13 +197,18 @@ static void hmac_free(struct crypto_instance *inst) kfree(inst); } 
-static struct crypto_instance *hmac_alloc(void *param, unsigned int len) +static struct crypto_instance *hmac_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_HASH_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/lrw.c b/crypto/lrw.c index b4105080ac7..621095db28b 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -228,13 +228,18 @@ static void exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *alloc(void *param, unsigned int len) +static struct crypto_instance *alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 5174d7fdad6..c3ed8a1c9f4 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -279,13 +279,18 @@ static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len) +static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 8eaa5aa210b..f0aed0106ad 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -57,6 +57,11 @@ #define ENCRYPT 1 #define DECRYPT 0 +struct tcrypt_result { + struct completion completion; + int err; +}; + static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; /* @@ -84,6 +89,17 @@ static void hexdump(unsigned char *buf, unsigned int len) printk("\n"); } +static void tcrypt_complete(struct crypto_async_request *req, int err) +{ + struct tcrypt_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + static void test_hash(char *algo, struct hash_testvec *template, unsigned int tcount) { @@ -203,15 +219,14 @@ static void test_cipher(char *algo, int enc, { unsigned int ret, i, j, k, temp; unsigned int tsize; - unsigned int iv_len; - unsigned int len; char *q; - struct crypto_blkcipher *tfm; + struct crypto_ablkcipher *tfm; char *key; struct cipher_testvec *cipher_tv; - struct blkcipher_desc desc; + struct ablkcipher_request *req; struct scatterlist sg[8]; const char *e; + struct tcrypt_result result; if (enc == ENCRYPT) e = "encryption"; @@ -232,15 +247,24 @@ static void test_cipher(char *algo, int enc, memcpy(tvmem, template, tsize); cipher_tv = (void *)tvmem; - tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); + init_completion(&result.completion); + + tfm = crypto_alloc_ablkcipher(algo, 0, 0); if (IS_ERR(tfm)) { 
printk("failed to load transform for %s: %ld\n", algo, PTR_ERR(tfm)); return; } - desc.tfm = tfm; - desc.flags = 0; + + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + printk("failed to allocate request for %s\n", algo); + goto out; + } + + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + tcrypt_complete, &result); j = 0; for (i = 0; i < tcount; i++) { @@ -249,17 +273,17 @@ static void test_cipher(char *algo, int enc, printk("test %u (%d bit key):\n", j, cipher_tv[i].klen * 8); - crypto_blkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_clear_flags(tfm, ~0); if (cipher_tv[i].wk) - crypto_blkcipher_set_flags( + crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = cipher_tv[i].key; - ret = crypto_blkcipher_setkey(tfm, key, - cipher_tv[i].klen); + ret = crypto_ablkcipher_setkey(tfm, key, + cipher_tv[i].klen); if (ret) { printk("setkey() failed flags=%x\n", - crypto_blkcipher_get_flags(tfm)); + crypto_ablkcipher_get_flags(tfm)); if (!cipher_tv[i].fail) goto out; @@ -268,19 +292,28 @@ static void test_cipher(char *algo, int enc, sg_set_buf(&sg[0], cipher_tv[i].input, cipher_tv[i].ilen); - iv_len = crypto_blkcipher_ivsize(tfm); - if (iv_len) - crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, - iv_len); + ablkcipher_request_set_crypt(req, sg, sg, + cipher_tv[i].ilen, + cipher_tv[i].iv); - len = cipher_tv[i].ilen; ret = enc ? - crypto_blkcipher_encrypt(&desc, sg, sg, len) : - crypto_blkcipher_decrypt(&desc, sg, sg, len); + crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); - if (ret) { - printk("%s () failed flags=%x\n", e, - desc.flags); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &result.completion); + if (!ret && !((ret = result.err))) { + INIT_COMPLETION(result.completion); + break; + } + /* fall through */ + default: + printk("%s () failed err=%d\n", e, -ret); goto out; } @@ -303,17 +336,17 @@ static void test_cipher(char *algo, int enc, printk("test %u (%d bit key):\n", j, cipher_tv[i].klen * 8); - crypto_blkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_clear_flags(tfm, ~0); if (cipher_tv[i].wk) - crypto_blkcipher_set_flags( + crypto_ablkcipher_set_flags( tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = cipher_tv[i].key; - ret = crypto_blkcipher_setkey(tfm, key, - cipher_tv[i].klen); + ret = crypto_ablkcipher_setkey(tfm, key, + cipher_tv[i].klen); if (ret) { printk("setkey() failed flags=%x\n", - crypto_blkcipher_get_flags(tfm)); + crypto_ablkcipher_get_flags(tfm)); if (!cipher_tv[i].fail) goto out; @@ -329,19 +362,28 @@ static void test_cipher(char *algo, int enc, cipher_tv[i].tap[k]); } - iv_len = crypto_blkcipher_ivsize(tfm); - if (iv_len) - crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, - iv_len); + ablkcipher_request_set_crypt(req, sg, sg, + cipher_tv[i].ilen, + cipher_tv[i].iv); - len = cipher_tv[i].ilen; ret = enc ? 
- crypto_blkcipher_encrypt(&desc, sg, sg, len) : - crypto_blkcipher_decrypt(&desc, sg, sg, len); + crypto_ablkcipher_encrypt(req) : + crypto_ablkcipher_decrypt(req); - if (ret) { - printk("%s () failed flags=%x\n", e, - desc.flags); + switch (ret) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + ret = wait_for_completion_interruptible( + &result.completion); + if (!ret && !((ret = result.err))) { + INIT_COMPLETION(result.completion); + break; + } + /* fall through */ + default: + printk("%s () failed err=%d\n", e, -ret); goto out; } @@ -360,7 +402,8 @@ static void test_cipher(char *algo, int enc, } out: - crypto_free_blkcipher(tfm); + crypto_free_ablkcipher(tfm); + ablkcipher_request_free(req); } static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p, @@ -832,7 +875,7 @@ static void test_available(void) while (*name) { printk("alg %s ", *name); - printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ? + printk(crypto_has_alg(*name, 0, 0) ? "found\n" : "not found\n"); name++; } diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 53e8ccbf0f5..9f502b86e0e 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c @@ -288,12 +288,18 @@ static void xcbc_exit_tfm(struct crypto_tfm *tfm) crypto_free_cipher(ctx->child); } -static struct crypto_instance *xcbc_alloc(void *param, unsigned int len) +static struct crypto_instance *xcbc_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; - alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); + if (err) + return ERR_PTR(err); + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_PTR(PTR_ERR(alg)); |
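The new cryptd template is instantiated by name: wrapping a synchronous blkcipher in "cryptd(...)" yields an asynchronous ablkcipher whose work runs in the cryptd kernel thread, and the nested-template parsing fix in cryptomgr is what lets names like this resolve. A minimal sketch, assuming CONFIG_CRYPTO_CRYPTD is enabled and a software cbc(aes) implementation is available; the helper name is illustrative:

```c
#include <linux/crypto.h>
#include <linux/err.h>

/*
 * Ask for the cryptd-wrapped instance by name; cryptomgr builds it on
 * demand from the "cryptd" template around the synchronous cbc(aes).
 * The caller checks the result with IS_ERR().
 */
static struct crypto_ablkcipher *demo_get_async_cbc_aes(void)
{
	return crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
}
```

Requests submitted to the returned tfm are placed on the cryptd queue, so encrypt and decrypt normally return -EINPROGRESS (or -EBUSY when the queue is full) and the callback set with ablkcipher_request_set_callback() runs from the cryptd thread.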