From d12d6b6d37cad766b390467e770eb0ab81345d78 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Sun, 12 Oct 2008 20:36:51 +0800 Subject: crypto: testmgr - Trigger a panic when self test fails in FIPS mode The FIPS specification requires that should self test for any supported crypto algorithm fail during operation in fips mode, we need to prevent the use of any crypto functionality until such time as the system can be re-initialized. Seems like the best way to handle that would be to panic the system if we were in fips mode and failed a self test. This patch implements that functionality. I've built and run it successfully. Signed-off-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/testmgr.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index b828c6cf1b1..308d9cffdc4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1801,6 +1801,7 @@ static int alg_find_test(const char *alg) int alg_test(const char *driver, const char *alg, u32 type, u32 mask) { int i; + int rc; if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { char nalg[CRYPTO_MAX_ALG_NAME]; @@ -1820,8 +1821,12 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) if (i < 0) goto notest; - return alg_test_descs[i].test(alg_test_descs + i, driver, + rc = alg_test_descs[i].test(alg_test_descs + i, driver, type, mask); + if (fips_enabled && rc) + panic("%s: %s alg self test failed in fips mode!\n", driver, alg); + + return rc; notest: printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver); -- cgit v1.2.3 From 32bd78e0a5d34cd8e34046502bddcf31aeb38e64 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Sun, 12 Oct 2008 20:40:12 +0800 Subject: crypto: camellia - use kernel-provided bitops, unaligned access Remove the private implementation of 32-bit rotation and unaligned access with byteswapping. As a bonus, fixes sparse warnings: crypto/camellia.c:602:2: warning: cast to restricted __be32 crypto/camellia.c:603:2: warning: cast to restricted __be32 crypto/camellia.c:604:2: warning: cast to restricted __be32 crypto/camellia.c:605:2: warning: cast to restricted __be32 crypto/camellia.c:710:2: warning: cast to restricted __be32 crypto/camellia.c:711:2: warning: cast to restricted __be32 crypto/camellia.c:712:2: warning: cast to restricted __be32 crypto/camellia.c:713:2: warning: cast to restricted __be32 crypto/camellia.c:714:2: warning: cast to restricted __be32 crypto/camellia.c:715:2: warning: cast to restricted __be32 crypto/camellia.c:716:2: warning: cast to restricted __be32 crypto/camellia.c:717:2: warning: cast to restricted __be32 [Thanks to Tomoyuki Okazaki for spotting the typo] Tested-by: Carlo E. 
Prelz Signed-off-by: Harvey Harrison Signed-off-by: Herbert Xu --- crypto/camellia.c | 84 ++++++++++++++++++++++++------------------------------- 1 file changed, 36 insertions(+), 48 deletions(-) (limited to 'crypto') diff --git a/crypto/camellia.c b/crypto/camellia.c index 493fee7e0a8..964635d163f 100644 --- a/crypto/camellia.c +++ b/crypto/camellia.c @@ -35,6 +35,8 @@ #include #include #include +#include +#include static const u32 camellia_sp1110[256] = { 0x70707000,0x82828200,0x2c2c2c00,0xececec00, @@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] = { /* * macros */ -#define GETU32(v, pt) \ - do { \ - /* latest breed of gcc is clever enough to use move */ \ - memcpy(&(v), (pt), 4); \ - (v) = be32_to_cpu(v); \ - } while(0) - -/* rotation right shift 1byte */ -#define ROR8(x) (((x) >> 8) + ((x) << 24)) -/* rotation left shift 1bit */ -#define ROL1(x) (((x) << 1) + ((x) >> 31)) -/* rotation left shift 1byte */ -#define ROL8(x) (((x) << 8) + ((x) >> 24)) - #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \ do { \ w0 = ll; \ @@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] = { ^ camellia_sp3033[(u8)(il >> 8)] \ ^ camellia_sp4404[(u8)(il )]; \ yl ^= yr; \ - yr = ROR8(yr); \ + yr = ror32(yr, 8); \ yr ^= yl; \ } while(0) @@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[7] ^= subL[1]; subR[7] ^= subR[1]; subL[1] ^= subR[1] & ~subR[9]; dw = subL[1] & subL[9], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */ + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */ /* round 8 */ subL[11] ^= subL[1]; subR[11] ^= subR[1]; /* round 10 */ @@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[15] ^= subL[1]; subR[15] ^= subR[1]; subL[1] ^= subR[1] & ~subR[17]; dw = subL[1] & subL[17], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */ + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */ /* round 14 */ subL[19] ^= subL[1]; subR[19] ^= subR[1]; /* round 16 */ @@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) } else { subL[1] ^= subR[1] & ~subR[25]; dw = subL[1] & subL[25], - subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */ + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */ /* round 20 */ subL[27] ^= subL[1]; subR[27] ^= subR[1]; /* round 22 */ @@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[26] ^= kw4l; subR[26] ^= kw4r; kw4l ^= kw4r & ~subR[24]; dw = kw4l & subL[24], - kw4r ^= ROL1(dw); /* modified for FL(kl5) */ + kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */ } /* round 17 */ subL[22] ^= kw4l; subR[22] ^= kw4r; @@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[18] ^= kw4l; subR[18] ^= kw4r; kw4l ^= kw4r & ~subR[16]; dw = kw4l & subL[16], - kw4r ^= ROL1(dw); /* modified for FL(kl3) */ + kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */ /* round 11 */ subL[14] ^= kw4l; subR[14] ^= kw4r; /* round 9 */ @@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) subL[10] ^= kw4l; subR[10] ^= kw4r; kw4l ^= kw4r & ~subR[8]; dw = kw4l & subL[8], - kw4r ^= ROL1(dw); /* modified for FL(kl1) */ + kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */ /* round 5 */ subL[6] ^= kw4l; subR[6] ^= kw4r; /* round 3 */ @@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(6) = subR[5] ^ subR[7]; tl = subL[10] ^ (subR[10] & ~subR[8]); dw = tl & subL[8], /* FL(kl1) */ 
- tr = subR[10] ^ ROL1(dw); + tr = subR[10] ^ rol32(dw, 1); SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */ SUBKEY_R(7) = subR[6] ^ tr; SUBKEY_L(8) = subL[8]; /* FL(kl1) */ @@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(9) = subR[9]; tl = subL[7] ^ (subR[7] & ~subR[9]); dw = tl & subL[9], /* FLinv(kl2) */ - tr = subR[7] ^ ROL1(dw); + tr = subR[7] ^ rol32(dw, 1); SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */ SUBKEY_R(10) = tr ^ subR[11]; SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */ @@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(14) = subR[13] ^ subR[15]; tl = subL[18] ^ (subR[18] & ~subR[16]); dw = tl & subL[16], /* FL(kl3) */ - tr = subR[18] ^ ROL1(dw); + tr = subR[18] ^ rol32(dw, 1); SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */ SUBKEY_R(15) = subR[14] ^ tr; SUBKEY_L(16) = subL[16]; /* FL(kl3) */ @@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(17) = subR[17]; tl = subL[15] ^ (subR[15] & ~subR[17]); dw = tl & subL[17], /* FLinv(kl4) */ - tr = subR[15] ^ ROL1(dw); + tr = subR[15] ^ rol32(dw, 1); SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */ SUBKEY_R(18) = tr ^ subR[19]; SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */ @@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) } else { tl = subL[26] ^ (subR[26] & ~subR[24]); dw = tl & subL[24], /* FL(kl5) */ - tr = subR[26] ^ ROL1(dw); + tr = subR[26] ^ rol32(dw, 1); SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */ SUBKEY_R(23) = subR[22] ^ tr; SUBKEY_L(24) = subL[24]; /* FL(kl5) */ @@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) SUBKEY_R(25) = subR[25]; tl = subL[23] ^ (subR[23] & ~subR[25]); dw = tl & subL[25], /* FLinv(kl6) */ - tr = subR[23] ^ ROL1(dw); + tr = subR[23] ^ rol32(dw, 1); SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */ SUBKEY_R(26) = tr ^ subR[27]; SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */ @@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max) /* apply the inverse of the last half of P-function */ i = 2; do { - dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */ + dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */ SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw; - dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */ + dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */ SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw; - dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */ + dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */ SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw; - dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */ + dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */ SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw; - dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */ + dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */ SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw; - dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */ + dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */ SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw; i += 8; } while (i < max); @@ -599,10 +587,10 @@ static void camellia_setup128(const unsigned char *key, u32 
*subkey) /** * k == kll || klr || krl || krr (|| is concatenation) */ - GETU32(kll, key ); - GETU32(klr, key + 4); - GETU32(krl, key + 8); - GETU32(krr, key + 12); + kll = get_unaligned_be32(key); + klr = get_unaligned_be32(key + 4); + krl = get_unaligned_be32(key + 8); + krr = get_unaligned_be32(key + 12); /* generate KL dependent subkeys */ /* kw1 */ @@ -707,14 +695,14 @@ static void camellia_setup256(const unsigned char *key, u32 *subkey) * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr) * (|| is concatenation) */ - GETU32(kll, key ); - GETU32(klr, key + 4); - GETU32(krl, key + 8); - GETU32(krr, key + 12); - GETU32(krll, key + 16); - GETU32(krlr, key + 20); - GETU32(krrl, key + 24); - GETU32(krrr, key + 28); + kll = get_unaligned_be32(key); + klr = get_unaligned_be32(key + 4); + krl = get_unaligned_be32(key + 8); + krr = get_unaligned_be32(key + 12); + krll = get_unaligned_be32(key + 16); + krlr = get_unaligned_be32(key + 20); + krrl = get_unaligned_be32(key + 24); + krrr = get_unaligned_be32(key + 28); /* generate KL dependent subkeys */ /* kw1 */ @@ -870,13 +858,13 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) t0 &= ll; \ t2 |= rr; \ rl ^= t2; \ - lr ^= ROL1(t0); \ + lr ^= rol32(t0, 1); \ t3 = krl; \ t1 = klr; \ t3 &= rl; \ t1 |= lr; \ ll ^= t1; \ - rr ^= ROL1(t3); \ + rr ^= rol32(t3, 1); \ } while(0) #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \ @@ -892,7 +880,7 @@ static void camellia_setup192(const unsigned char *key, u32 *subkey) il ^= kl; \ ir ^= il ^ kr; \ yl ^= ir; \ - yr ^= ROR8(il) ^ ir; \ + yr ^= ror32(il, 8) ^ ir; \ } while(0) /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */ -- cgit v1.2.3 From 2566578a6feb9d9e39da41326afe8ed6022db3c5 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Wed, 5 Nov 2008 12:13:14 +0800 Subject: crypto: ansi_cprng - Allow resetting of DT value This is a patch that was sent to me by Jarod Wilson, marking off my outstanding todo to allow the ansi cprng to set/reset the DT counter value in a cprng instance. Currently crytpo_rng_reset accepts a seed byte array which is interpreted by the ansi_cprng as a {V key} tuple. This patch extends that tuple to now be {V key DT}, with DT an optional value during reset. This patch also fixes a bug we noticed in which the offset of the key area of the seed is started at DEFAULT_PRNG_KSZ rather than DEFAULT_BLK_SZ as it should be. 
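For illustration only, not part of the patch: a minimal sketch of how a caller might lay out the extended { V, key, DT } seed and hand it to crypto_rng_reset(), assuming the AES-128 based defaults of a 16-byte V, key and DT. The helper name below is invented for the example.

#include <linux/err.h>
#include <linux/string.h>
#include <crypto/rng.h>

/* Sketch only: seed ansi_cprng with V || key || DT (16 bytes each). */
static int example_cprng_seed(const u8 *v, const u8 *key, const u8 *dt)
{
	struct crypto_rng *rng;
	u8 seed[48];
	int err;

	rng = crypto_alloc_rng("ansi_cprng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	memcpy(seed, v, 16);		/* V comes first ... */
	memcpy(seed + 16, key, 16);	/* ... then the AES key ... */
	memcpy(seed + 32, dt, 16);	/* ... then the optional DT value */

	/* Passing only 32 bytes here keeps the previous { V, key } behaviour. */
	err = crypto_rng_reset(rng, seed, sizeof(seed));

	crypto_free_rng(rng);
	return err;
}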
Signed-off-by: Neil Horman Signed-off-by: Jarod Wilson Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 72db0fd763c..486aa93646f 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -349,15 +349,25 @@ static int cprng_get_random(struct crypto_rng *tfm, u8 *rdata, return get_prng_bytes(rdata, dlen, prng); } +/* + * This is the cprng_registered reset method the seed value is + * interpreted as the tuple { V KEY DT} + * V and KEY are required during reset, and DT is optional, detected + * as being present by testing the length of the seed + */ static int cprng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { struct prng_context *prng = crypto_rng_ctx(tfm); - u8 *key = seed + DEFAULT_PRNG_KSZ; + u8 *key = seed + DEFAULT_BLK_SZ; + u8 *dt = NULL; if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) return -EINVAL; - reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, NULL); + if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ)) + dt = key + DEFAULT_PRNG_KSZ; + + reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt); if (prng->flags & PRNG_NEED_RESET) return -EINVAL; @@ -379,7 +389,7 @@ static struct crypto_alg rng_alg = { .rng = { .rng_make_random = cprng_get_random, .rng_reset = cprng_reset, - .seedsize = DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ, + .seedsize = DEFAULT_PRNG_KSZ + 2*DEFAULT_BLK_SZ, } } }; -- cgit v1.2.3 From 4a7794860ba2b56693b1d89fd485fd08cdc763e3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 13 Sep 2008 18:19:03 -0700 Subject: crypto: api - Move type exit function into crypto_tfm The type exit function needs to undo any allocations done by the type init function. However, the type init function may differ depending on the upper-level type of the transform (e.g., a crypto_blkcipher instantiated as a crypto_ablkcipher). So we need to move the exit function out of the lower-level structure and into crypto_tfm itself. As it stands this is a no-op since nobody uses exit functions at all. However, all cases where a lower-level type is instantiated as a different upper-level type (such as blkcipher as ablkcipher) will be converted such that they allocate the underlying transform and use that instead of casting (e.g., crypto_ablkcipher casted into crypto_blkcipher). That will need to use a different exit function depending on the upper-level type. This patch also allows the type init/exit functions to call (or not) cra_init/cra_exit instead of always calling them from the top level. 
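As a rough, hypothetical sketch of the intended use (every name below except tfm->exit and the generic crypto helpers is invented for illustration): a type init hook that allocates an underlying transform can now record a matching cleanup handler on the tfm itself, so freeing the tfm undoes exactly what that particular instantiation set up.

#include <linux/crypto.h>
#include <linux/err.h>

/* Hypothetical cleanup handler: frees what the init hook allocated. */
static void example_exit_ops(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

/* Hypothetical crypto_type init hook using the new per-tfm exit pointer. */
static int example_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blk;

	blk = crypto_alloc_blkcipher(crypto_tfm_alg_name(tfm), type, mask);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	*ctx = blk;
	tfm->exit = example_exit_ops;	/* undoes the allocation above on free */

	return 0;
}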
Signed-off-by: Herbert Xu --- crypto/api.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index 0444d242e98..cbaaf346ad1 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -300,8 +300,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) const struct crypto_type *type = tfm->__crt_alg->cra_type; if (type) { - if (type->exit) - type->exit(tfm); + if (tfm->exit) + tfm->exit(tfm); return; } @@ -379,17 +379,16 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, if (err) goto out_free_tfm; - if (alg->cra_init && (err = alg->cra_init(tfm))) { - if (err == -EAGAIN) - crypto_shoot_alg(alg); + if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) goto cra_init_failed; - } goto out; cra_init_failed: crypto_exit_ops(tfm); out_free_tfm: + if (err == -EAGAIN) + crypto_shoot_alg(alg); kfree(tfm); out_err: tfm = ERR_PTR(err); @@ -469,7 +468,7 @@ void crypto_free_tfm(struct crypto_tfm *tfm) alg = tfm->__crt_alg; size = sizeof(*tfm) + alg->cra_ctxsize; - if (alg->cra_exit) + if (!tfm->exit && alg->cra_exit) alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_mod_put(alg); -- cgit v1.2.3 From 7b0bac64cd5b74d6f1147524c26216de13a501fd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 21 Sep 2008 06:52:53 +0900 Subject: crypto: api - Rebirth of crypto_alloc_tfm This patch reintroduces a completely revamped crypto_alloc_tfm. The biggest change is that we now take two crypto_type objects when allocating a tfm, a frontend and a backend. In fact this simply formalises what we've been doing behind the API's back. For example, as it stands crypto_alloc_ahash may use an actual ahash algorithm or a crypto_hash algorithm. Putting this in the API allows us to do this much more cleanly. The existing types will be converted across gradually. Signed-off-by: Herbert Xu --- crypto/api.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ crypto/internal.h | 2 + 2 files changed, 110 insertions(+) (limited to 'crypto') diff --git a/crypto/api.c b/crypto/api.c index cbaaf346ad1..9975a7bd246 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -403,6 +403,9 @@ EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); * @type: Type of algorithm * @mask: Mask for type comparison * + * This function should not be used by new algorithm types. + * Plesae use crypto_alloc_tfm instead. + * * crypto_alloc_base() will first attempt to locate an already loaded * algorithm. 
If that fails and the kernel supports dynamically loadable * modules, it will then attempt to load a module of the same name or @@ -449,6 +452,111 @@ err: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(crypto_alloc_base); + +struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + char *mem; + struct crypto_tfm *tfm = NULL; + unsigned int tfmsize; + unsigned int total; + int err = -ENOMEM; + + tfmsize = frontend->tfmsize; + total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); + + mem = kzalloc(total, GFP_KERNEL); + if (mem == NULL) + goto out_err; + + tfm = (struct crypto_tfm *)(mem + tfmsize); + tfm->__crt_alg = alg; + + err = frontend->init_tfm(tfm, frontend); + if (err) + goto out_free_tfm; + + if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm))) + goto cra_init_failed; + + goto out; + +cra_init_failed: + crypto_exit_ops(tfm); +out_free_tfm: + if (err == -EAGAIN) + crypto_shoot_alg(alg); + kfree(mem); +out_err: + tfm = ERR_PTR(err); +out: + return tfm; +} +EXPORT_SYMBOL_GPL(crypto_create_tfm); + +/* + * crypto_alloc_tfm - Locate algorithm and allocate transform + * @alg_name: Name of algorithm + * @frontend: Frontend algorithm type + * @type: Type of algorithm + * @mask: Mask for type comparison + * + * crypto_alloc_tfm() will first attempt to locate an already loaded + * algorithm. If that fails and the kernel supports dynamically loadable + * modules, it will then attempt to load a module of the same name or + * alias. If that fails it will send a query to any loaded crypto manager + * to construct an algorithm on the fly. A refcount is grabbed on the + * algorithm which is then associated with the new transform. + * + * The returned transform is of a non-determinate type. Most people + * should use one of the more specific allocation functions such as + * crypto_alloc_blkcipher. + * + * In case of error the return value is an error pointer. 
+ */ +struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, + const struct crypto_type *frontend, + u32 type, u32 mask) +{ + struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); + struct crypto_tfm *tfm; + int err; + + type &= frontend->maskclear; + mask &= frontend->maskclear; + type |= frontend->type; + mask |= frontend->maskset; + + lookup = frontend->lookup ?: crypto_alg_mod_lookup; + + for (;;) { + struct crypto_alg *alg; + + alg = lookup(alg_name, type, mask); + if (IS_ERR(alg)) { + err = PTR_ERR(alg); + goto err; + } + + tfm = crypto_create_tfm(alg, frontend); + if (!IS_ERR(tfm)) + return tfm; + + crypto_mod_put(alg); + err = PTR_ERR(tfm); + +err: + if (err != -EAGAIN) + break; + if (signal_pending(current)) { + err = -EINTR; + break; + } + } + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(crypto_alloc_tfm); /* * crypto_free_tfm - Free crypto transform diff --git a/crypto/internal.h b/crypto/internal.h index 8ef72d76092..3c19a27a756 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -109,6 +109,8 @@ void crypto_alg_tested(const char *name, int err); void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); +struct crypto_tfm *crypto_create_tfm(struct crypto_alg *alg, + const struct crypto_type *frontend); int crypto_register_instance(struct crypto_template *tmpl, struct crypto_instance *inst); -- cgit v1.2.3 From 7b5a080b3c46f0cac71c0d0262634c6517d4ee4f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 31 Aug 2008 15:47:27 +1000 Subject: crypto: hash - Add shash interface The shash interface replaces the current synchronous hash interface. It improves over hash in two ways. Firstly shash is reentrant, meaning that the same tfm may be used by two threads simultaneously as all hashing state is stored in a local descriptor. The other enhancement is that shash no longer takes scatter list entries. This is because shash is specifically designed for synchronous algorithms and as such scatter lists are unnecessary. All existing hash users will be converted to shash once the algorithms have been completely converted. There is also a new finup function that combines update with final. This will be extended to ahash once the algorithm conversion is done. This is also the first time that an algorithm type has their own registration function. Existing algorithm types will be converted to this way in due course. Signed-off-by: Herbert Xu --- crypto/Makefile | 1 + crypto/shash.c | 239 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 240 insertions(+) create mode 100644 crypto/shash.c (limited to 'crypto') diff --git a/crypto/Makefile b/crypto/Makefile index cd4a4ed078f..46b08bf2035 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o crypto_hash-objs := hash.o crypto_hash-objs += ahash.o +crypto_hash-objs += shash.o obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o cryptomgr-objs := algboss.o testmgr.o diff --git a/crypto/shash.c b/crypto/shash.c new file mode 100644 index 00000000000..82ec4bd8d2f --- /dev/null +++ b/crypto/shash.c @@ -0,0 +1,239 @@ +/* + * Synchronous Cryptographic Hash operations. + * + * Copyright (c) 2008 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_shash, base); +} + +static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + unsigned long absize; + u8 *buffer, *alignbuffer; + int err; + + absize = keylen + (alignmask & ~(CRYPTO_MINALIGN - 1)); + buffer = kmalloc(absize, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + memcpy(alignbuffer, key, keylen); + err = shash->setkey(tfm, alignbuffer, keylen); + memset(alignbuffer, 0, keylen); + kfree(buffer); + return err; +} + +int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + + if ((unsigned long)key & alignmask) + return shash_setkey_unaligned(tfm, key, keylen); + + return shash->setkey(tfm, key, keylen); +} +EXPORT_SYMBOL_GPL(crypto_shash_setkey); + +static inline unsigned int shash_align_buffer_size(unsigned len, + unsigned long mask) +{ + return len + (mask & ~(__alignof__(u8 __attribute__ ((aligned))) - 1)); +} + +static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + unsigned int unaligned_len = alignmask + 1 - + ((unsigned long)data & alignmask); + u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] + __attribute__ ((aligned)); + + memcpy(buf, data, unaligned_len); + + return shash->update(desc, buf, unaligned_len) ?: + shash->update(desc, data + unaligned_len, len - unaligned_len); +} + +int crypto_shash_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + + if ((unsigned long)data & alignmask) + return shash_update_unaligned(desc, data, len); + + return shash->update(desc, data, len); +} +EXPORT_SYMBOL_GPL(crypto_shash_update); + +static int shash_final_unaligned(struct shash_desc *desc, u8 *out) +{ + struct crypto_shash *tfm = desc->tfm; + unsigned long alignmask = crypto_shash_alignmask(tfm); + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned int ds = crypto_shash_digestsize(tfm); + u8 buf[shash_align_buffer_size(ds, alignmask)] + __attribute__ ((aligned)); + int err; + + err = shash->final(desc, buf); + memcpy(out, buf, ds); + return err; +} + +int crypto_shash_final(struct shash_desc *desc, u8 *out) +{ + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + + if ((unsigned long)out & alignmask) + return shash_final_unaligned(desc, out); + + return shash->final(desc, out); +} +EXPORT_SYMBOL_GPL(crypto_shash_final); + +static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return crypto_shash_update(desc, data, len) ?: + crypto_shash_final(desc, out); +} + +int crypto_shash_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct crypto_shash *tfm = desc->tfm; + struct 
shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + + if (((unsigned long)data | (unsigned long)out) & alignmask || + !shash->finup) + return shash_finup_unaligned(desc, data, len, out); + + return shash->finup(desc, data, len, out); +} +EXPORT_SYMBOL_GPL(crypto_shash_finup); + +static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return crypto_shash_init(desc) ?: + crypto_shash_update(desc, data, len) ?: + crypto_shash_final(desc, out); +} + +int crypto_shash_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *shash = crypto_shash_alg(tfm); + unsigned long alignmask = crypto_shash_alignmask(tfm); + + if (((unsigned long)data | (unsigned long)out) & alignmask || + !shash->digest) + return shash_digest_unaligned(desc, data, len, out); + + return shash->digest(desc, data, len, out); +} +EXPORT_SYMBOL_GPL(crypto_shash_digest); + +static int crypto_shash_init_tfm(struct crypto_tfm *tfm, + const struct crypto_type *frontend) +{ + if (frontend->type != CRYPTO_ALG_TYPE_SHASH) + return -EINVAL; + return 0; +} + +static unsigned int crypto_shash_extsize(struct crypto_alg *alg, + const struct crypto_type *frontend) +{ + return alg->cra_ctxsize; +} + +static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct shash_alg *salg = __crypto_shash_alg(alg); + + seq_printf(m, "type : shash\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "digestsize : %u\n", salg->digestsize); + seq_printf(m, "descsize : %u\n", salg->descsize); +} + +static const struct crypto_type crypto_shash_type = { + .extsize = crypto_shash_extsize, + .init_tfm = crypto_shash_init_tfm, +#ifdef CONFIG_PROC_FS + .show = crypto_shash_show, +#endif + .maskclear = ~CRYPTO_ALG_TYPE_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, + .type = CRYPTO_ALG_TYPE_SHASH, + .tfmsize = offsetof(struct crypto_shash, base), +}; + +struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, + u32 mask) +{ + return __crypto_shash_cast( + crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask)); +} +EXPORT_SYMBOL_GPL(crypto_alloc_shash); + +int crypto_register_shash(struct shash_alg *alg) +{ + struct crypto_alg *base = &alg->base; + + if (alg->digestsize > PAGE_SIZE / 8 || + alg->descsize > PAGE_SIZE / 8) + return -EINVAL; + + base->cra_type = &crypto_shash_type; + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; + base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; + + return crypto_register_alg(base); +} +EXPORT_SYMBOL_GPL(crypto_register_shash); + +int crypto_unregister_shash(struct shash_alg *alg) +{ + return crypto_unregister_alg(&alg->base); +} +EXPORT_SYMBOL_GPL(crypto_unregister_shash); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synchronous cryptographic hash type"); -- cgit v1.2.3 From 3b2f6df08258e2875f42bd630eece7e7241a053b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 31 Aug 2008 18:52:18 +1000 Subject: crypto: hash - Export shash through ahash This patch allows shash algorithms to be used through the ahash interface. This is required before we can convert digest algorithms over to shash. 
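From the caller's side nothing needs to change; the following is a minimal usage sketch (not part of the patch, and the algorithm name is only an example) of an ahash user that can now be serviced transparently by a shash implementation through the async wrapper ops added below.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>

/* Sketch only: assumes a synchronous (for example shash-backed) driver. */
static int example_ahash_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}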
Signed-off-by: Herbert Xu --- crypto/shash.c | 143 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) (limited to 'crypto') diff --git a/crypto/shash.c b/crypto/shash.c index 82ec4bd8d2f..3f4c713a21e 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -10,6 +10,7 @@ * */ +#include #include #include #include @@ -17,11 +18,15 @@ #include #include +static const struct crypto_type crypto_shash_type; + static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_shash, base); } +#include "internal.h" + static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -167,6 +172,142 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_digest); +static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(tfm); + + return crypto_shash_setkey(*ctx, key, keylen); +} + +static int shash_async_init(struct ahash_request *req) +{ + struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + + desc->tfm = *ctx; + desc->flags = req->base.flags; + + return crypto_shash_init(desc); +} + +static int shash_async_update(struct ahash_request *req) +{ + struct shash_desc *desc = ahash_request_ctx(req); + struct crypto_hash_walk walk; + int nbytes; + + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; + nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_shash_update(desc, walk.data, nbytes); + + return nbytes; +} + +static int shash_async_final(struct ahash_request *req) +{ + return crypto_shash_final(ahash_request_ctx(req), req->result); +} + +static int shash_async_digest(struct ahash_request *req) +{ + struct scatterlist *sg = req->src; + unsigned int offset = sg->offset; + unsigned int nbytes = req->nbytes; + int err; + + if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { + struct crypto_shash **ctx = + crypto_ahash_ctx(crypto_ahash_reqtfm(req)); + struct shash_desc *desc = ahash_request_ctx(req); + void *data; + + desc->tfm = *ctx; + desc->flags = req->base.flags; + + data = crypto_kmap(sg_page(sg), 0); + err = crypto_shash_digest(desc, data + offset, nbytes, + req->result); + crypto_kunmap(data, 0); + crypto_yield(desc->flags); + goto out; + } + + err = shash_async_init(req); + if (err) + goto out; + + err = shash_async_update(req); + if (err) + goto out; + + err = shash_async_final(req); + +out: + return err; +} + +static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + + crypto_free_shash(*ctx); +} + +static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) +{ + struct crypto_alg *calg = tfm->__crt_alg; + struct shash_alg *alg = __crypto_shash_alg(calg); + struct ahash_tfm *crt = &tfm->crt_ahash; + struct crypto_shash **ctx = crypto_tfm_ctx(tfm); + struct crypto_shash *shash; + + if (!crypto_mod_get(calg)) + return -EAGAIN; + + shash = __crypto_shash_cast(crypto_create_tfm( + calg, &crypto_shash_type)); + if (IS_ERR(shash)) { + crypto_mod_put(calg); + return PTR_ERR(shash); + } + + *ctx = shash; + tfm->exit = crypto_exit_shash_ops_async; + + crt->init = shash_async_init; + crt->update = shash_async_update; + crt->final = shash_async_final; + crt->digest = shash_async_digest; + crt->setkey = shash_async_setkey; + + crt->digestsize = alg->digestsize; + crt->reqsize = 
sizeof(struct shash_desc) + crypto_shash_descsize(shash); + + return 0; +} + +static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + switch (mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_AHASH_MASK: + return crypto_init_shash_ops_async(tfm); + } + + return -EINVAL; +} + +static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, + u32 mask) +{ + switch (mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_AHASH_MASK: + return sizeof(struct crypto_shash *); + } + + return 0; +} + static int crypto_shash_init_tfm(struct crypto_tfm *tfm, const struct crypto_type *frontend) { @@ -194,7 +335,9 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg) } static const struct crypto_type crypto_shash_type = { + .ctxsize = crypto_shash_ctxsize, .extsize = crypto_shash_extsize, + .init = crypto_init_shash_ops, .init_tfm = crypto_shash_init_tfm, #ifdef CONFIG_PROC_FS .show = crypto_shash_show, -- cgit v1.2.3 From dec8b78606ebd5f309c38f2fb10196ce996dd18d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 2 Nov 2008 21:38:11 +0800 Subject: crypto: hash - Add import/export interface It is often useful to save the partial state of a hash function so that it can be used as a base for two or more computations. The most prominent example is HMAC where all hashes start from a base determined by the key. Having an import/export interface means that we only have to compute that base once rather than for each message. Signed-off-by: Herbert Xu --- crypto/ahash.c | 14 ++++++++++++++ crypto/shash.c | 14 ++++++++++++++ 2 files changed, 28 insertions(+) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 27128f2c687..7d4e33dfe21 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -146,6 +146,20 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, return ahash->setkey(tfm, key, keylen); } +int crypto_ahash_import(struct ahash_request *req, const u8 *in) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ahash_alg *alg = crypto_ahash_alg(tfm); + + memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm)); + + if (alg->reinit) + alg->reinit(req); + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_ahash_import); + static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { diff --git a/crypto/shash.c b/crypto/shash.c index 3f4c713a21e..26aff3feefc 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -172,6 +172,20 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_digest); +int crypto_shash_import(struct shash_desc *desc, const u8 *in) +{ + struct crypto_shash *tfm = desc->tfm; + struct shash_alg *alg = crypto_shash_alg(tfm); + + memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm)); + + if (alg->reinit) + alg->reinit(desc); + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_shash_import); + static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { -- cgit v1.2.3 From 67cd080c5070b4f17520c1385f7684206f4987b3 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 6 Nov 2008 14:39:16 +0800 Subject: crypto: api - Call type show function before legacy for proc This patch makes /proc/crypto call the type-specific show function if one is present before calling the legacy show functions for cipher/digest/compress. This allows us to reuse the type values for those legacy types. In particular, hash and digest will share one type value while shash is phased in as the default hash type. 
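For example, once an algorithm's type provides a show callback, such as the crc32c shash conversion later in this series, its /proc/crypto entry would look roughly like the following (module and refcnt values are illustrative):

name         : crc32c
driver       : crc32c-generic
module       : kernel
priority     : 100
refcnt       : 1
selftest     : passed
type         : shash
blocksize    : 1
digestsize   : 4
descsize     : 4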
Signed-off-by: Herbert Xu --- crypto/proc.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'crypto') diff --git a/crypto/proc.c b/crypto/proc.c index 37a13d05636..5dc07e442fc 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -94,6 +94,17 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "selftest : %s\n", (alg->cra_flags & CRYPTO_ALG_TESTED) ? "passed" : "unknown"); + + if (alg->cra_flags & CRYPTO_ALG_LARVAL) { + seq_printf(m, "type : larval\n"); + seq_printf(m, "flags : 0x%x\n", alg->cra_flags); + goto out; + } + + if (alg->cra_type && alg->cra_type->show) { + alg->cra_type->show(m, alg); + goto out; + } switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { case CRYPTO_ALG_TYPE_CIPHER: @@ -115,16 +126,11 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "type : compression\n"); break; default: - if (alg->cra_flags & CRYPTO_ALG_LARVAL) { - seq_printf(m, "type : larval\n"); - seq_printf(m, "flags : 0x%x\n", alg->cra_flags); - } else if (alg->cra_type && alg->cra_type->show) - alg->cra_type->show(m, alg); - else - seq_printf(m, "type : unknown\n"); + seq_printf(m, "type : unknown\n"); break; } +out: seq_putc(m, '\n'); return 0; } -- cgit v1.2.3 From 5f7082ed4f482f05db01d84dbf58190492ebf0ad Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 31 Aug 2008 22:21:09 +1000 Subject: crypto: hash - Export shash through hash This patch allows shash algorithms to be used through the old hash interface. This is a transitional measure so we can convert the underlying algorithms to shash before converting the users across. Signed-off-by: Herbert Xu --- crypto/ahash.c | 16 ++++++++ crypto/authenc.c | 3 ++ crypto/hmac.c | 10 +++-- crypto/shash.c | 109 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 134 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 7d4e33dfe21..9f98956b17f 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -112,6 +112,22 @@ int crypto_hash_walk_first(struct ahash_request *req, } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); +int crypto_hash_walk_first_compat(struct hash_desc *hdesc, + struct crypto_hash_walk *walk, + struct scatterlist *sg, unsigned int len) +{ + walk->total = len; + + if (!walk->total) + return 0; + + walk->alignmask = crypto_hash_alignmask(hdesc->tfm); + walk->sg = sg; + walk->flags = hdesc->flags; + + return hash_walk_new_entry(walk); +} + static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { diff --git a/crypto/authenc.c b/crypto/authenc.c index fd9f06c63d7..40b6e9ec9e3 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -431,6 +432,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? auth->cra_hash.digestsize : + auth->cra_type ? 
+ __crypto_shash_alg(auth)->digestsize : auth->cra_digest.dia_digestsize; inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); diff --git a/crypto/hmac.c b/crypto/hmac.c index 7ff2d6a8c7d..0ad39c37496 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -16,7 +16,7 @@ * */ -#include +#include #include #include #include @@ -238,9 +238,11 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb) return ERR_CAST(alg); inst = ERR_PTR(-EINVAL); - ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == - CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize : - alg->cra_digest.dia_digestsize; + ds = alg->cra_type == &crypto_hash_type ? + alg->cra_hash.digestsize : + alg->cra_type ? + __crypto_shash_alg(alg)->digestsize : + alg->cra_digest.dia_digestsize; if (ds > alg->cra_blocksize) goto out_put_alg; diff --git a/crypto/shash.c b/crypto/shash.c index 26aff3feefc..50d69a4e4b6 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -301,9 +301,114 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) return 0; } +static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, + unsigned int keylen) +{ + struct shash_desc *desc = crypto_hash_ctx(tfm); + + return crypto_shash_setkey(desc->tfm, key, keylen); +} + +static int shash_compat_init(struct hash_desc *hdesc) +{ + struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); + + desc->flags = hdesc->flags; + + return crypto_shash_init(desc); +} + +static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, + unsigned int len) +{ + struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); + struct crypto_hash_walk walk; + int nbytes; + + for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len); + nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes)) + nbytes = crypto_shash_update(desc, walk.data, nbytes); + + return nbytes; +} + +static int shash_compat_final(struct hash_desc *hdesc, u8 *out) +{ + return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); +} + +static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, + unsigned int nbytes, u8 *out) +{ + unsigned int offset = sg->offset; + int err; + + if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { + struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); + void *data; + + desc->flags = hdesc->flags; + + data = crypto_kmap(sg_page(sg), 0); + err = crypto_shash_digest(desc, data + offset, nbytes, out); + crypto_kunmap(data, 0); + crypto_yield(desc->flags); + goto out; + } + + err = shash_compat_init(hdesc); + if (err) + goto out; + + err = shash_compat_update(hdesc, sg, nbytes); + if (err) + goto out; + + err = shash_compat_final(hdesc, out); + +out: + return err; +} + +static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) +{ + struct shash_desc *desc= crypto_tfm_ctx(tfm); + + crypto_free_shash(desc->tfm); +} + +static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) +{ + struct hash_tfm *crt = &tfm->crt_hash; + struct crypto_alg *calg = tfm->__crt_alg; + struct shash_alg *alg = __crypto_shash_alg(calg); + struct shash_desc *desc = crypto_tfm_ctx(tfm); + struct crypto_shash *shash; + + shash = __crypto_shash_cast(crypto_create_tfm( + calg, &crypto_shash_type)); + if (IS_ERR(shash)) + return PTR_ERR(shash); + + desc->tfm = shash; + tfm->exit = crypto_exit_shash_ops_compat; + + crt->init = shash_compat_init; + crt->update = shash_compat_update; + crt->final = shash_compat_final; + crt->digest = shash_compat_digest; + crt->setkey = shash_compat_setkey; + + crt->digestsize = alg->digestsize; + + 
return 0; +} + static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) { switch (mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_HASH_MASK: + return crypto_init_shash_ops_compat(tfm); case CRYPTO_ALG_TYPE_AHASH_MASK: return crypto_init_shash_ops_async(tfm); } @@ -314,7 +419,11 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) { + struct shash_alg *salg = __crypto_shash_alg(alg); + switch (mask & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_HASH_MASK: + return sizeof(struct shash_desc) + salg->descsize; case CRYPTO_ALG_TYPE_AHASH_MASK: return sizeof(struct crypto_shash *); } -- cgit v1.2.3 From faccc4bba160784e834b758f23d598e500ac7108 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 9 Sep 2008 17:23:07 +1000 Subject: crypto: crc32c - Switch to shash This patch changes crc32c to the new shash interface. Signed-off-by: Herbert Xu --- crypto/crc32c.c | 185 ++++++++++++++++---------------------------------------- 1 file changed, 53 insertions(+), 132 deletions(-) (limited to 'crypto') diff --git a/crypto/crc32c.c b/crypto/crc32c.c index a882d9e4e63..b21b93f2bb9 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -25,20 +25,26 @@ #define CHKSUM_DIGEST_SIZE 4 struct chksum_ctx { - u32 crc; u32 key; }; +struct chksum_desc_ctx { + u32 crc; +}; + /* * Steps through buffer one byte at at time, calculates reflected * crc using table. */ -static void chksum_init(struct crypto_tfm *tfm) +static int chksum_init(struct shash_desc *desc) { - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = mctx->key; - mctx->crc = mctx->key; + return 0; } /* @@ -46,180 +52,95 @@ static void chksum_init(struct crypto_tfm *tfm) * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. 
*/ -static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key, +static int chksum_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + struct chksum_ctx *mctx = crypto_shash_ctx(tfm); - if (keylen != sizeof(mctx->crc)) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + if (keylen != sizeof(mctx->key)) { + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } mctx->key = le32_to_cpu(*(__le32 *)key); return 0; } -static void chksum_update(struct crypto_tfm *tfm, const u8 *data, - unsigned int length) +static int chksum_update(struct shash_desc *desc, const u8 *data, + unsigned int length) { - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - mctx->crc = crc32c(mctx->crc, data, length); -} - -static void chksum_final(struct crypto_tfm *tfm, u8 *out) -{ - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - - *(__le32 *)out = ~cpu_to_le32(mctx->crc); -} - -static int crc32c_cra_init_old(struct crypto_tfm *tfm) -{ - struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - - mctx->key = ~0; + ctx->crc = crc32c(ctx->crc, data, length); return 0; } -static struct crypto_alg old_alg = { - .cra_name = "crc32c", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct chksum_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(old_alg.cra_list), - .cra_init = crc32c_cra_init_old, - .cra_u = { - .digest = { - .dia_digestsize= CHKSUM_DIGEST_SIZE, - .dia_setkey = chksum_setkey, - .dia_init = chksum_init, - .dia_update = chksum_update, - .dia_final = chksum_final - } - } -}; - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. 
- */ -static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key, - unsigned int keylen) +static int chksum_final(struct shash_desc *desc, u8 *out) { - u32 *mctx = crypto_ahash_ctx(hash); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - if (keylen != sizeof(u32)) { - crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - *mctx = le32_to_cpup((__le32 *)key); + *(__le32 *)out = ~cpu_to_le32p(&ctx->crc); return 0; } -static int crc32c_init(struct ahash_request *req) +static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { - u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - u32 *crcp = ahash_request_ctx(req); - - *crcp = *mctx; + *(__le32 *)out = ~cpu_to_le32(crc32c(*crcp, data, len)); return 0; } -static int crc32c_update(struct ahash_request *req) +static int chksum_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { - struct crypto_hash_walk walk; - u32 *crcp = ahash_request_ctx(req); - u32 crc = *crcp; - int nbytes; - - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes; - nbytes = crypto_hash_walk_done(&walk, 0)) - crc = crc32c(crc, walk.data, nbytes); + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - *crcp = crc; - return 0; + return __chksum_finup(&ctx->crc, data, len, out); } -static int crc32c_final(struct ahash_request *req) +static int chksum_digest(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) { - u32 *crcp = ahash_request_ctx(req); - - *(__le32 *)req->result = ~cpu_to_le32p(crcp); - return 0; -} + struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); -static int crc32c_digest(struct ahash_request *req) -{ - struct crypto_hash_walk walk; - u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); - u32 crc = *mctx; - int nbytes; - - for (nbytes = crypto_hash_walk_first(req, &walk); nbytes; - nbytes = crypto_hash_walk_done(&walk, 0)) - crc = crc32c(crc, walk.data, nbytes); - - *(__le32 *)req->result = ~cpu_to_le32(crc); - return 0; + return __chksum_finup(&mctx->key, data, length, out); } static int crc32c_cra_init(struct crypto_tfm *tfm) { - u32 *key = crypto_tfm_ctx(tfm); - - *key = ~0; - - tfm->crt_ahash.reqsize = sizeof(u32); + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + mctx->key = ~0; return 0; } -static struct crypto_alg alg = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_AHASH, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_alignmask = 3, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_init = crc32c_cra_init, - .cra_type = &crypto_ahash_type, - .cra_u = { - .ahash = { - .digestsize = CHKSUM_DIGEST_SIZE, - .setkey = crc32c_setkey, - .init = crc32c_init, - .update = crc32c_update, - .final = crc32c_final, - .digest = crc32c_digest, - } +static struct shash_alg alg = { + .digestsize = CHKSUM_DIGEST_SIZE, + .setkey = chksum_setkey, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base = { + .cra_name = "crc32c", + .cra_driver_name = "crc32c-generic", + .cra_priority = 100, + .cra_blocksize = CHKSUM_BLOCK_SIZE, + .cra_alignmask = 3, + .cra_ctxsize = sizeof(struct chksum_ctx), + .cra_module = THIS_MODULE, + .cra_init = crc32c_cra_init, } }; static int __init crc32c_mod_init(void) { - int err; - - err = crypto_register_alg(&old_alg); - if (err) - return err; - - err = 
crypto_register_alg(&alg); - if (err) - crypto_unregister_alg(&old_alg); - - return err; + return crypto_register_shash(&alg); } static void __exit crc32c_mod_fini(void) { - crypto_unregister_alg(&alg); - crypto_unregister_alg(&old_alg); + crypto_unregister_shash(&alg); } module_init(crc32c_mod_init); -- cgit v1.2.3 From 8e3ee85e68c5d5c95451afd3e8f0997eec6f99e5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 7 Nov 2008 14:58:52 +0800 Subject: crypto: crc32c - Test descriptor context format This patch adds a test for the requirement that all crc32c algorithms shall store the partial result in the first four bytes of the descriptor context. Signed-off-by: Herbert Xu --- crypto/testmgr.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 308d9cffdc4..67dce77d49d 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1010,6 +1010,55 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, return err; } +static int alg_test_crc32c(const struct alg_test_desc *desc, + const char *driver, u32 type, u32 mask) +{ + struct crypto_shash *tfm; + u32 val; + int err; + + err = alg_test_hash(desc, driver, type, mask); + if (err) + goto out; + + tfm = crypto_alloc_shash(driver, type, mask); + if (IS_ERR(tfm)) { + printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " + "%ld\n", driver, PTR_ERR(tfm)); + err = PTR_ERR(tfm); + goto out; + } + + do { + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(tfm)]; + } sdesc; + + sdesc.shash.tfm = tfm; + sdesc.shash.flags = 0; + + *(u32 *)sdesc.ctx = le32_to_cpu(420553207); + err = crypto_shash_final(&sdesc.shash, (u8 *)&val); + if (err) { + printk(KERN_ERR "alg: crc32c: Operation failed for " + "%s: %d\n", driver, err); + break; + } + + if (val != ~420553207) { + printk(KERN_ERR "alg: crc32c: Test failed for %s: " + "%d\n", driver, val); + err = -EINVAL; + } + } while (0); + + crypto_free_shash(tfm); + +out: + return err; +} + /* Please keep this list sorted by algorithm name. */ static const struct alg_test_desc alg_test_descs[] = { { @@ -1134,7 +1183,7 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { .alg = "crc32c", - .test = alg_test_hash, + .test = alg_test_crc32c, .suite = { .hash = { .vecs = crc32c_tv_template, -- cgit v1.2.3 From 69c35efcf1576ab5f00cba83e8ca740923afb6c9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 7 Nov 2008 15:11:47 +0800 Subject: libcrc32c: Move implementation to crypto crc32c This patch swaps the role of libcrc32c and crc32c. Previously the implementation was in libcrc32c and crc32c was a wrapper. Now the code is in crc32c and libcrc32c just calls the crypto layer. The reason for the change is to tap into the algorithm selection capability of the crypto API so that optimised implementations such as the one utilising Intel's CRC32C instruction can be used where available. Signed-off-by: Herbert Xu --- crypto/Kconfig | 4 +- crypto/crc32c.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 112 insertions(+), 5 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index dc20a34ba5e..aede80246df 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -256,12 +256,10 @@ comment "Digest" config CRYPTO_CRC32C tristate "CRC32c CRC algorithm" select CRYPTO_HASH - select LIBCRC32C help Castagnoli, et al Cyclic Redundancy-Check Algorithm. 
Used by iSCSI for header and data digests and by others. - See Castagnoli93. This implementation uses lib/libcrc32c. - Module will be crc32c. + See Castagnoli93. Module will be crc32c. config CRYPTO_CRC32C_INTEL tristate "CRC32c INTEL hardware acceleration" diff --git a/crypto/crc32c.c b/crypto/crc32c.c index b21b93f2bb9..973bc2cfab2 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -3,8 +3,29 @@ * * CRC32C chksum * - * This module file is a wrapper to invoke the lib/crc32c routines. + *@Article{castagnoli-crc, + * author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman}, + * title = {{Optimization of Cyclic Redundancy-Check Codes with 24 + * and 32 Parity Bits}}, + * journal = IEEE Transactions on Communication, + * year = {1993}, + * volume = {41}, + * number = {6}, + * pages = {}, + * month = {June}, + *} + * Used by the iSCSI driver, possibly others, and derived from the + * the iscsi-crc.c module of the linux-iscsi driver at + * http://linux-iscsi.sourceforge.net. * + * Following the example of lib/crc32, this function is intended to be + * flexible and useful for all users. Modules that currently have their + * own crc32c, but hopefully may be able to use this one are: + * net/sctp (please add all your doco to here if you change to + * use this one!) + * + * + * Copyright (c) 2004 Cisco Systems, Inc. * Copyright (c) 2008 Herbert Xu * * This program is free software; you can redistribute it and/or modify it @@ -18,7 +39,6 @@ #include #include #include -#include #include #define CHKSUM_BLOCK_SIZE 1 @@ -32,6 +52,95 @@ struct chksum_desc_ctx { u32 crc; }; +/* + * This is the CRC-32C table + * Generated with: + * width = 32 bits + * poly = 0x1EDC6F41 + * reflect input bytes = true + * reflect output bytes = true + */ + +static const u32 crc32c_table[256] = { + 0x00000000L, 0xF26B8303L, 0xE13B70F7L, 0x1350F3F4L, + 0xC79A971FL, 0x35F1141CL, 0x26A1E7E8L, 0xD4CA64EBL, + 0x8AD958CFL, 0x78B2DBCCL, 0x6BE22838L, 0x9989AB3BL, + 0x4D43CFD0L, 0xBF284CD3L, 0xAC78BF27L, 0x5E133C24L, + 0x105EC76FL, 0xE235446CL, 0xF165B798L, 0x030E349BL, + 0xD7C45070L, 0x25AFD373L, 0x36FF2087L, 0xC494A384L, + 0x9A879FA0L, 0x68EC1CA3L, 0x7BBCEF57L, 0x89D76C54L, + 0x5D1D08BFL, 0xAF768BBCL, 0xBC267848L, 0x4E4DFB4BL, + 0x20BD8EDEL, 0xD2D60DDDL, 0xC186FE29L, 0x33ED7D2AL, + 0xE72719C1L, 0x154C9AC2L, 0x061C6936L, 0xF477EA35L, + 0xAA64D611L, 0x580F5512L, 0x4B5FA6E6L, 0xB93425E5L, + 0x6DFE410EL, 0x9F95C20DL, 0x8CC531F9L, 0x7EAEB2FAL, + 0x30E349B1L, 0xC288CAB2L, 0xD1D83946L, 0x23B3BA45L, + 0xF779DEAEL, 0x05125DADL, 0x1642AE59L, 0xE4292D5AL, + 0xBA3A117EL, 0x4851927DL, 0x5B016189L, 0xA96AE28AL, + 0x7DA08661L, 0x8FCB0562L, 0x9C9BF696L, 0x6EF07595L, + 0x417B1DBCL, 0xB3109EBFL, 0xA0406D4BL, 0x522BEE48L, + 0x86E18AA3L, 0x748A09A0L, 0x67DAFA54L, 0x95B17957L, + 0xCBA24573L, 0x39C9C670L, 0x2A993584L, 0xD8F2B687L, + 0x0C38D26CL, 0xFE53516FL, 0xED03A29BL, 0x1F682198L, + 0x5125DAD3L, 0xA34E59D0L, 0xB01EAA24L, 0x42752927L, + 0x96BF4DCCL, 0x64D4CECFL, 0x77843D3BL, 0x85EFBE38L, + 0xDBFC821CL, 0x2997011FL, 0x3AC7F2EBL, 0xC8AC71E8L, + 0x1C661503L, 0xEE0D9600L, 0xFD5D65F4L, 0x0F36E6F7L, + 0x61C69362L, 0x93AD1061L, 0x80FDE395L, 0x72966096L, + 0xA65C047DL, 0x5437877EL, 0x4767748AL, 0xB50CF789L, + 0xEB1FCBADL, 0x197448AEL, 0x0A24BB5AL, 0xF84F3859L, + 0x2C855CB2L, 0xDEEEDFB1L, 0xCDBE2C45L, 0x3FD5AF46L, + 0x7198540DL, 0x83F3D70EL, 0x90A324FAL, 0x62C8A7F9L, + 0xB602C312L, 0x44694011L, 0x5739B3E5L, 0xA55230E6L, + 0xFB410CC2L, 0x092A8FC1L, 0x1A7A7C35L, 0xE811FF36L, + 0x3CDB9BDDL, 0xCEB018DEL, 0xDDE0EB2AL, 0x2F8B6829L, + 0x82F63B78L, 
0x709DB87BL, 0x63CD4B8FL, 0x91A6C88CL, + 0x456CAC67L, 0xB7072F64L, 0xA457DC90L, 0x563C5F93L, + 0x082F63B7L, 0xFA44E0B4L, 0xE9141340L, 0x1B7F9043L, + 0xCFB5F4A8L, 0x3DDE77ABL, 0x2E8E845FL, 0xDCE5075CL, + 0x92A8FC17L, 0x60C37F14L, 0x73938CE0L, 0x81F80FE3L, + 0x55326B08L, 0xA759E80BL, 0xB4091BFFL, 0x466298FCL, + 0x1871A4D8L, 0xEA1A27DBL, 0xF94AD42FL, 0x0B21572CL, + 0xDFEB33C7L, 0x2D80B0C4L, 0x3ED04330L, 0xCCBBC033L, + 0xA24BB5A6L, 0x502036A5L, 0x4370C551L, 0xB11B4652L, + 0x65D122B9L, 0x97BAA1BAL, 0x84EA524EL, 0x7681D14DL, + 0x2892ED69L, 0xDAF96E6AL, 0xC9A99D9EL, 0x3BC21E9DL, + 0xEF087A76L, 0x1D63F975L, 0x0E330A81L, 0xFC588982L, + 0xB21572C9L, 0x407EF1CAL, 0x532E023EL, 0xA145813DL, + 0x758FE5D6L, 0x87E466D5L, 0x94B49521L, 0x66DF1622L, + 0x38CC2A06L, 0xCAA7A905L, 0xD9F75AF1L, 0x2B9CD9F2L, + 0xFF56BD19L, 0x0D3D3E1AL, 0x1E6DCDEEL, 0xEC064EEDL, + 0xC38D26C4L, 0x31E6A5C7L, 0x22B65633L, 0xD0DDD530L, + 0x0417B1DBL, 0xF67C32D8L, 0xE52CC12CL, 0x1747422FL, + 0x49547E0BL, 0xBB3FFD08L, 0xA86F0EFCL, 0x5A048DFFL, + 0x8ECEE914L, 0x7CA56A17L, 0x6FF599E3L, 0x9D9E1AE0L, + 0xD3D3E1ABL, 0x21B862A8L, 0x32E8915CL, 0xC083125FL, + 0x144976B4L, 0xE622F5B7L, 0xF5720643L, 0x07198540L, + 0x590AB964L, 0xAB613A67L, 0xB831C993L, 0x4A5A4A90L, + 0x9E902E7BL, 0x6CFBAD78L, 0x7FAB5E8CL, 0x8DC0DD8FL, + 0xE330A81AL, 0x115B2B19L, 0x020BD8EDL, 0xF0605BEEL, + 0x24AA3F05L, 0xD6C1BC06L, 0xC5914FF2L, 0x37FACCF1L, + 0x69E9F0D5L, 0x9B8273D6L, 0x88D28022L, 0x7AB90321L, + 0xAE7367CAL, 0x5C18E4C9L, 0x4F48173DL, 0xBD23943EL, + 0xF36E6F75L, 0x0105EC76L, 0x12551F82L, 0xE03E9C81L, + 0x34F4F86AL, 0xC69F7B69L, 0xD5CF889DL, 0x27A40B9EL, + 0x79B737BAL, 0x8BDCB4B9L, 0x988C474DL, 0x6AE7C44EL, + 0xBE2DA0A5L, 0x4C4623A6L, 0x5F16D052L, 0xAD7D5351L +}; + +/* + * Steps through buffer one byte at at time, calculates reflected + * crc using table. + */ + +static u32 crc32c(u32 crc, const u8 *data, unsigned int length) +{ + while (length--) + crc = crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); + + return crc; +} + /* * Steps through buffer one byte at at time, calculates reflected * crc using table. -- cgit v1.2.3 From 31a61bfc6e415fbd871317cbee7b8a4158d8ac5b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 13 Nov 2008 21:19:04 +0800 Subject: crypto: md4 - Use ARRAY_SIZE ARRAY_SIZE is more concise to use when the size of an array is divided by the size of its type or the size of its first element. 
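For example (a simplified sketch, not the exact md4_ctx layout), given a buffer declared as an array of u32:

    u32 block[16];

    le32_to_cpu_array(block, sizeof(block) / sizeof(u32));  /* old spelling */
    le32_to_cpu_array(block, ARRAY_SIZE(block));            /* same count, clearer intent */

Both expressions evaluate to the element count, but ARRAY_SIZE() states the intent directly and stays correct if the element type is ever changed.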
The semantic patch that makes this change is as follows: (http://www.emn.fr/x-info/coccinelle/) // @i@ @@ #include @depends on i using "paren.iso"@ type T; T[] E; @@ - (sizeof(E)/sizeof(T)) + ARRAY_SIZE(E) // Signed-off-by: Julia Lawall Signed-off-by: Herbert Xu --- crypto/md4.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/md4.c b/crypto/md4.c index 3c19aa0750f..a143c4aaa39 100644 --- a/crypto/md4.c +++ b/crypto/md4.c @@ -148,7 +148,7 @@ static void md4_transform(u32 *hash, u32 const *in) static inline void md4_transform_helper(struct md4_ctx *ctx) { - le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); + le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block)); md4_transform(ctx->hash, ctx->block); } @@ -214,7 +214,7 @@ static void md4_final(struct crypto_tfm *tfm, u8 *out) le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32)); md4_transform(mctx->hash, mctx->block); - cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); + cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); } -- cgit v1.2.3 From aa1a85dbd1d3265ca36f684026fe7689b7836bed Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Thu, 13 Nov 2008 22:03:20 +0800 Subject: crypto: ansi_cprng - Avoid incorrect extra call to _get_more_prng_bytes While working with some FIPS RNGVS test vectors yesterday, I discovered a little bug in the way the ansi_cprng code works right now. For example, the following test vector (complete with expected result) from http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf ... Key = f3b1666d13607242ed061cabb8d46202 DT = e6b3be782a23fa62d71d4afbb0e922fc V = f0000000000000000000000000000000 R = 88dda456302423e5f69da57e7b95c73a ...when run through ansi_cprng, yields an incorrect R value of e2afe0d794120103d6e86a2b503bdfaa. 
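For reference, each call to _get_more_prng_bytes() performs one step of the ANSI X9.31 (AES-128) generator, roughly as follows (pseudo-C sketch; cipher_encrypt(), xor_block() and increment() are stand-in names, not the helpers ansi_cprng actually uses):

    cipher_encrypt(K, DT, I);      /* I = E_K(DT)                         */
    xor_block(tmp, I, V);
    cipher_encrypt(K, tmp, R);     /* R = E_K(I ^ V), the random output   */
    xor_block(tmp, R, I);
    cipher_encrypt(K, tmp, V);     /* V = E_K(R ^ I), seed for next step  */
    increment(DT);                 /* DT is bumped for the next block     */

These are the quantities the DT, I and V lines in the trace below refer to.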
If I load up ansi_cprng w/dbg=1 though, it was fairly obvious what was going wrong: ----8<---- getting 16 random bytes for context ffff810033fb2b10 Calling _get_more_prng_bytes for context ffff810033fb2b10 Input DT: 00000000: e6 b3 be 78 2a 23 fa 62 d7 1d 4a fb b0 e9 22 fc Input I: 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 Input V: 00000000: f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 tmp stage 0: 00000000: e6 b3 be 78 2a 23 fa 62 d7 1d 4a fb b0 e9 22 fc tmp stage 1: 00000000: f4 8e cb 25 94 3e 8c 31 d6 14 cd 8a 23 f1 3f 84 tmp stage 2: 00000000: 8c 53 6f 73 a4 1a af d4 20 89 68 f4 58 64 f8 be Returning new block for context ffff810033fb2b10 Output DT: 00000000: e7 b3 be 78 2a 23 fa 62 d7 1d 4a fb b0 e9 22 fc Output I: 00000000: 04 8e cb 25 94 3e 8c 31 d6 14 cd 8a 23 f1 3f 84 Output V: 00000000: 48 89 3b 71 bc e4 00 b6 5e 21 ba 37 8a 0a d5 70 New Random Data: 00000000: 88 dd a4 56 30 24 23 e5 f6 9d a5 7e 7b 95 c7 3a Calling _get_more_prng_bytes for context ffff810033fb2b10 Input DT: 00000000: e7 b3 be 78 2a 23 fa 62 d7 1d 4a fb b0 e9 22 fc Input I: 00000000: 04 8e cb 25 94 3e 8c 31 d6 14 cd 8a 23 f1 3f 84 Input V: 00000000: 48 89 3b 71 bc e4 00 b6 5e 21 ba 37 8a 0a d5 70 tmp stage 0: 00000000: e7 b3 be 78 2a 23 fa 62 d7 1d 4a fb b0 e9 22 fc tmp stage 1: 00000000: 80 6b 3a 8c 23 ae 8f 53 be 71 4c 16 fc 13 b2 ea tmp stage 2: 00000000: 2a 4d e1 2a 0b 58 8e e6 36 b8 9c 0a 26 22 b8 30 Returning new block for context ffff810033fb2b10 Output DT: 00000000: e8 b3 be 78 2a 23 fa 62 d7 1d 4a fb b0 e9 22 fc Output I: 00000000: c8 e2 01 fd 9f 4a 8f e5 e0 50 f6 21 76 19 67 9a Output V: 00000000: ba 98 e3 75 c0 1b 81 8d 03 d6 f8 e2 0c c6 54 4b New Random Data: 00000000: e2 af e0 d7 94 12 01 03 d6 e8 6a 2b 50 3b df aa returning 16 from get_prng_bytes in context ffff810033fb2b10 ----8<---- The expected result is there, in the first "New Random Data", but we're incorrectly making a second call to _get_more_prng_bytes, due to some checks that are slightly off, which resulted in our original bytes never being returned anywhere. One approach to fixing this would be to alter some byte_count checks in get_prng_bytes, but it would mean the last DEFAULT_BLK_SZ bytes would be copied a byte at a time, rather than in a single memcpy, so a slightly more involved, equally functional, and ultimately more efficient way of fixing this was suggested to me by Neil, which I'm submitting here. All of the RNGVS ANSI X9.31 AES128 VST test vectors I've passed through ansi_cprng are now returning the expected results with this change. 
Signed-off-by: Jarod Wilson Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 486aa93646f..1b3b1da1fd3 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -223,9 +223,10 @@ remainder: } /* - * Copy up to the next whole block size + * Copy any data less than an entire block */ if (byte_count < DEFAULT_BLK_SZ) { +empty_rbuf: for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) { *ptr = ctx->rand_data[ctx->rand_data_valid]; @@ -240,18 +241,22 @@ remainder: * Now copy whole blocks */ for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { - if (_get_more_prng_bytes(ctx) < 0) { - memset(buf, 0, nbytes); - err = -EINVAL; - goto done; + if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { + if (_get_more_prng_bytes(ctx) < 0) { + memset(buf, 0, nbytes); + err = -EINVAL; + goto done; + } } + if (ctx->rand_data_valid > 0) + goto empty_rbuf; memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); ctx->rand_data_valid += DEFAULT_BLK_SZ; ptr += DEFAULT_BLK_SZ; } /* - * Now copy any extra partial data + * Now go back and get any remaining partial block */ if (byte_count) goto remainder; -- cgit v1.2.3 From 09fbf7c0f24176ef3b450c590f220ed8033dd2c3 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Mon, 24 Nov 2008 21:20:13 +0800 Subject: crypto: ansi_cprng - fix inverted DT increment routine The ANSI X9.31 PRNG docs aren't particularly clear on how to increment DT, but empirical testing shows we're incrementing from the wrong end. A 10,000 iteration Monte Carlo RNG test currently winds up not getting the expected result. From http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf : # CAVS 4.3 # ANSI931 MCT [X9.31] [AES 128-Key] COUNT = 0 Key = 9f5b51200bf334b5d82be8c37255c848 DT = 6376bbe52902ba3b67c925fa701f11ac V = 572c8e76872647977e74fbddc49501d1 R = 48e9bd0d06ee18fbe45790d5c3fc9b73 Currently, we get 0dd08496c4f7178bfa70a2161a79459a after 10000 loops. Inverting the DT increment routine results in us obtaining the expected result of 48e9bd0d06ee18fbe45790d5c3fc9b73. Verified on both x86_64 and ppc64. Signed-off-by: Jarod Wilson Acked-by: Neil Horman Signed-off-by: Herbert Xu --- crypto/ansi_cprng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index 1b3b1da1fd3..0fac8ffc2fb 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -161,7 +161,7 @@ static int _get_more_prng_bytes(struct prng_context *ctx) /* * Now update our DT value */ - for (i = 0; i < DEFAULT_BLK_SZ; i++) { + for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) { ctx->DT[i] += 1; if (ctx->DT[i] != 0) break; -- cgit v1.2.3 From 664134d2916109be76648977705a2bea3ff76427 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 25 Nov 2008 23:19:24 +0800 Subject: crypto: testmgr - Fix error flow of test_comp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This warning: crypto/testmgr.c: In function ‘test_comp’: crypto/testmgr.c:829: warning: ‘ret’ may be used uninitialized in this function triggers because GCC correctly notices that in the ctcount == 0 && dtcount != 0 input condition case this function can return an undefined value, if the second loop fails. Remove the shadowed 'ret' variable from the second loop that was probably unintended. 
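Stripped down, the shadowing pattern looked roughly like this (do_compress()/do_decompress() are illustrative stand-ins, not the real testmgr helpers):

    int ret;                            /* outer ret: the function's return value */

    for (i = 0; i < ctcount; i++)
            ret = do_compress(i);       /* the only place the outer ret is set    */

    for (i = 0; i < dtcount; i++) {
            int ret = do_decompress(i); /* shadows the outer ret                  */
            if (ret)
                    goto out;
    }

out:
    return ret;                         /* undefined when ctcount == 0            */

Dropping the inner declaration makes both loops assign the same ret, so a failure in the second loop is actually reported to the caller.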
Signed-off-by: Ingo Molnar Signed-off-by: Herbert Xu --- crypto/testmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 67dce77d49d..67ff4aaa3c9 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -853,7 +853,7 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, } for (i = 0; i < dtcount; i++) { - int ilen, ret, dlen = COMP_BUF_SIZE; + int ilen, dlen = COMP_BUF_SIZE; memset(result, 0, sizeof (result)); -- cgit v1.2.3 From dad3df2044b78ba68a92bf78e38a408bab80ff61 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 28 Nov 2008 20:49:19 +0800 Subject: crypto: remove uses of __constant_{endian} helpers Base versions handle constant folding just fine. Signed-off-by: Harvey Harrison Signed-off-by: Herbert Xu --- crypto/fcrypt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'crypto') diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index 1302f4cae33..b82d61f4e26 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -73,7 +73,7 @@ do { \ * /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h */ #undef Z -#define Z(x) __constant_cpu_to_be32(x << 3) +#define Z(x) cpu_to_be32(x << 3) static const __be32 sbox0[256] = { Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11), Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06), @@ -110,7 +110,7 @@ static const __be32 sbox0[256] = { }; #undef Z -#define Z(x) __constant_cpu_to_be32((x << 27) | (x >> 5)) +#define Z(x) cpu_to_be32((x << 27) | (x >> 5)) static const __be32 sbox1[256] = { Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e), Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85), @@ -147,7 +147,7 @@ static const __be32 sbox1[256] = { }; #undef Z -#define Z(x) __constant_cpu_to_be32(x << 11) +#define Z(x) cpu_to_be32(x << 11) static const __be32 sbox2[256] = { Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86), Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d), @@ -184,7 +184,7 @@ static const __be32 sbox2[256] = { }; #undef Z -#define Z(x) __constant_cpu_to_be32(x << 19) +#define Z(x) cpu_to_be32(x << 19) static const __be32 sbox3[256] = { Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2), Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12), -- cgit v1.2.3 From b812eb0076235743872b5c9d18714d2324cc668d Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 28 Nov 2008 20:51:28 +0800 Subject: crypto: testmgr - Validate output length in (de)compression tests When self-testing (de)compression algorithms, make sure the actual size of the (de)compressed output data matches the expected output size. Otherwise, in case the actual output size would be smaller than the expected output size, the subsequent buffer compare test would still succeed, and no error would be reported. 
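Without the length check, a broken implementation that reported, say, dlen = 4 for a vector whose expected output is 16 bytes could still pass as long as those four bytes matched, because the existing

    if (memcmp(result, ctemplate[i].output, dlen))

comparison only examines the first dlen bytes. Verifying dlen against the expected outlen first closes that hole.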
Signed-off-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- crypto/testmgr.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'crypto') diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 67ff4aaa3c9..a75f11ffb95 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -843,6 +843,14 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, goto out; } + if (dlen != ctemplate[i].outlen) { + printk(KERN_ERR "alg: comp: Compression test %d " + "failed for %s: output len = %d\n", i + 1, algo, + dlen); + ret = -EINVAL; + goto out; + } + if (memcmp(result, ctemplate[i].output, dlen)) { printk(KERN_ERR "alg: comp: Compression test %d " "failed for %s\n", i + 1, algo); @@ -867,6 +875,14 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate, goto out; } + if (dlen != dtemplate[i].outlen) { + printk(KERN_ERR "alg: comp: Decompression test %d " + "failed for %s: output len = %d\n", i + 1, algo, + dlen); + ret = -EINVAL; + goto out; + } + if (memcmp(result, dtemplate[i].output, dlen)) { printk(KERN_ERR "alg: comp: Decompression test %d " "failed for %s\n", i + 1, algo); -- cgit v1.2.3 From 3751f402e099893c34089ed303dca6f5f92dbfd1 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Nov 2008 08:56:57 +0800 Subject: crypto: hash - Make setkey optional Since most cryptographic hash algorithms have no keys, this patch makes the setkey function optional for ahash and shash. Signed-off-by: Herbert Xu --- crypto/ahash.c | 8 +++++++- crypto/shash.c | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/ahash.c b/crypto/ahash.c index 9f98956b17f..ba5292d69eb 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -162,6 +162,12 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, return ahash->setkey(tfm, key, keylen); } +static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return -ENOSYS; +} + int crypto_ahash_import(struct ahash_request *req, const u8 *in) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -194,7 +200,7 @@ static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) crt->update = alg->update; crt->final = alg->final; crt->digest = alg->digest; - crt->setkey = ahash_setkey; + crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; crt->digestsize = alg->digestsize; return 0; diff --git a/crypto/shash.c b/crypto/shash.c index 50d69a4e4b6..c9df367332f 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -55,6 +55,9 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, struct shash_alg *shash = crypto_shash_alg(tfm); unsigned long alignmask = crypto_shash_alignmask(tfm); + if (!shash->setkey) + return -ENOSYS; + if ((unsigned long)key & alignmask) return shash_setkey_unaligned(tfm, key, keylen); -- cgit v1.2.3 From d35d2454ce2175be77d2a366c2648597fd33a98f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Nov 2008 08:09:56 +0800 Subject: crypto: null - Switch to shash This patch changes digest_null to the new shash interface. 
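The change follows the same shape as the rest of the shash conversions in this series: the dia_* callbacks that took a crypto_tfm and returned void become init/update/final callbacks that take a shash_desc and return int, per-request state moves from cra_ctxsize to descsize, and registration switches to crypto_register_shash()/crypto_unregister_shash(). As a rough skeleton (placeholder names, not the exact digest_null code):

    static int xyz_init(struct shash_desc *desc)
    {
            struct xyz_ctx *ctx = shash_desc_ctx(desc);

            memset(ctx, 0, sizeof(*ctx));   /* reset per-message state */
            return 0;
    }

    /* xyz_update()/xyz_final() get the same treatment: shash_desc argument,
     * int return value. */

    static struct shash_alg alg = {
            .digestsize     = XYZ_DIGEST_SIZE,
            .init           = xyz_init,
            .update         = xyz_update,
            .final          = xyz_final,
            .descsize       = sizeof(struct xyz_ctx),
            .base           = {
                    .cra_name       = "xyz",
                    .cra_flags      = CRYPTO_ALG_TYPE_SHASH,
                    .cra_blocksize  = XYZ_BLOCK_SIZE,
                    .cra_module     = THIS_MODULE,
            }
    };

digest_null itself carries no per-message state, so its callbacks simply return 0.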
Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 + crypto/crypto_null.c | 64 +++++++++++++++++++++++++++++++++------------------- 2 files changed, 42 insertions(+), 23 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index aede80246df..359a7c24af3 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -102,6 +102,7 @@ config CRYPTO_NULL tristate "Null algorithms" select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER + select CRYPTO_HASH help These are 'Null' algorithms, used by IPsec, which do nothing. diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 1f7d53013a2..cb71c9122bc 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -17,6 +17,7 @@ * */ +#include #include #include #include @@ -38,15 +39,31 @@ static int null_compress(struct crypto_tfm *tfm, const u8 *src, return 0; } -static void null_init(struct crypto_tfm *tfm) -{ } +static int null_init(struct shash_desc *desc) +{ + return 0; +} -static void null_update(struct crypto_tfm *tfm, const u8 *data, - unsigned int len) -{ } +static int null_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + return 0; +} -static void null_final(struct crypto_tfm *tfm, u8 *out) -{ } +static int null_final(struct shash_desc *desc, u8 *out) +{ + return 0; +} + +static int null_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + return 0; +} + +static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen) +{ return 0; } static int null_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) @@ -89,19 +106,20 @@ static struct crypto_alg compress_null = { .coa_decompress = null_compress } } }; -static struct crypto_alg digest_null = { - .cra_name = "digest_null", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = NULL_BLOCK_SIZE, - .cra_ctxsize = 0, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(digest_null.cra_list), - .cra_u = { .digest = { - .dia_digestsize = NULL_DIGEST_SIZE, - .dia_setkey = null_setkey, - .dia_init = null_init, - .dia_update = null_update, - .dia_final = null_final } } +static struct shash_alg digest_null = { + .digestsize = NULL_DIGEST_SIZE, + .setkey = null_hash_setkey, + .init = null_init, + .update = null_update, + .finup = null_digest, + .digest = null_digest, + .final = null_final, + .base = { + .cra_name = "digest_null", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = NULL_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static struct crypto_alg cipher_null = { @@ -154,7 +172,7 @@ static int __init crypto_null_mod_init(void) if (ret < 0) goto out_unregister_cipher; - ret = crypto_register_alg(&digest_null); + ret = crypto_register_shash(&digest_null); if (ret < 0) goto out_unregister_skcipher; @@ -166,7 +184,7 @@ out: return ret; out_unregister_digest: - crypto_unregister_alg(&digest_null); + crypto_unregister_shash(&digest_null); out_unregister_skcipher: crypto_unregister_alg(&skcipher_null); out_unregister_cipher: @@ -177,7 +195,7 @@ out_unregister_cipher: static void __exit crypto_null_mod_fini(void) { crypto_unregister_alg(&compress_null); - crypto_unregister_alg(&digest_null); + crypto_unregister_shash(&digest_null); crypto_unregister_alg(&skcipher_null); crypto_unregister_alg(&cipher_null); } -- cgit v1.2.3 From 7c4468bc011131e77d0a872d6d9942390f8217ea Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Nov 2008 09:10:40 +0800 Subject: crypto: rmd128 - Switch to shash This patch changes rmd128 to the new shash interface. 
Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/rmd128.c | 61 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 33 insertions(+), 30 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 359a7c24af3..848c886d667 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -297,7 +297,7 @@ config CRYPTO_MICHAEL_MIC config CRYPTO_RMD128 tristate "RIPEMD-128 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help RIPEMD-128 (ISO/IEC 10118-3:2004). diff --git a/crypto/rmd128.c b/crypto/rmd128.c index 5de6fa2a76f..1ceb6735aa5 100644 --- a/crypto/rmd128.c +++ b/crypto/rmd128.c @@ -13,11 +13,10 @@ * any later version. * */ +#include #include #include #include -#include -#include #include #include @@ -218,9 +217,9 @@ static void rmd128_transform(u32 *state, const __le32 *in) return; } -static void rmd128_init(struct crypto_tfm *tfm) +static int rmd128_init(struct shash_desc *desc) { - struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd128_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; @@ -230,12 +229,14 @@ static void rmd128_init(struct crypto_tfm *tfm) rctx->state[3] = RMD_H3; memset(rctx->buffer, 0, sizeof(rctx->buffer)); + + return 0; } -static void rmd128_update(struct crypto_tfm *tfm, const u8 *data, - unsigned int len) +static int rmd128_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd128_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; @@ -244,7 +245,7 @@ static void rmd128_update(struct crypto_tfm *tfm, const u8 *data, if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); - return; + goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), @@ -262,12 +263,15 @@ static void rmd128_update(struct crypto_tfm *tfm, const u8 *data, } memcpy(rctx->buffer, data, len); + +out: + return 0; } /* Add padding and return the message digest. */ -static void rmd128_final(struct crypto_tfm *tfm, u8 *out) +static int rmd128_final(struct shash_desc *desc, u8 *out) { - struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd128_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; @@ -278,10 +282,10 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out) /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); - rmd128_update(tfm, padding, padlen); + rmd128_update(desc, padding, padlen); /* Append length */ - rmd128_update(tfm, (const u8 *)&bits, sizeof(bits)); + rmd128_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 4; i++) @@ -289,31 +293,32 @@ static void rmd128_final(struct crypto_tfm *tfm, u8 *out) /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "rmd128", - .cra_driver_name = "rmd128", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = RMD128_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct rmd128_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = RMD128_DIGEST_SIZE, - .dia_init = rmd128_init, - .dia_update = rmd128_update, - .dia_final = rmd128_final } } +static struct shash_alg alg = { + .digestsize = RMD128_DIGEST_SIZE, + .init = rmd128_init, + .update = rmd128_update, + .final = rmd128_final, + .descsize = sizeof(struct rmd128_ctx), + .base = { + .cra_name = "rmd128", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = RMD128_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init rmd128_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit rmd128_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(rmd128_mod_init); @@ -321,5 +326,3 @@ module_exit(rmd128_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); - -MODULE_ALIAS("rmd128"); -- cgit v1.2.3 From e5835fba0206a331bdefdf5d805d1a384af37c44 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Nov 2008 09:18:51 +0800 Subject: crypto: rmd160 - Switch to shash This patch changes rmd160 to the new shash interface. Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/rmd160.c | 61 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 33 insertions(+), 30 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 848c886d667..513b9fb6723 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -310,7 +310,7 @@ config CRYPTO_RMD128 config CRYPTO_RMD160 tristate "RIPEMD-160 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help RIPEMD-160 (ISO/IEC 10118-3:2004). diff --git a/crypto/rmd160.c b/crypto/rmd160.c index f001ec775e1..472261fc913 100644 --- a/crypto/rmd160.c +++ b/crypto/rmd160.c @@ -13,11 +13,10 @@ * any later version. 
* */ +#include #include #include #include -#include -#include #include #include @@ -261,9 +260,9 @@ static void rmd160_transform(u32 *state, const __le32 *in) return; } -static void rmd160_init(struct crypto_tfm *tfm) +static int rmd160_init(struct shash_desc *desc) { - struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd160_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; @@ -274,12 +273,14 @@ static void rmd160_init(struct crypto_tfm *tfm) rctx->state[4] = RMD_H4; memset(rctx->buffer, 0, sizeof(rctx->buffer)); + + return 0; } -static void rmd160_update(struct crypto_tfm *tfm, const u8 *data, - unsigned int len) +static int rmd160_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd160_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; @@ -288,7 +289,7 @@ static void rmd160_update(struct crypto_tfm *tfm, const u8 *data, if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); - return; + goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), @@ -306,12 +307,15 @@ static void rmd160_update(struct crypto_tfm *tfm, const u8 *data, } memcpy(rctx->buffer, data, len); + +out: + return 0; } /* Add padding and return the message digest. */ -static void rmd160_final(struct crypto_tfm *tfm, u8 *out) +static int rmd160_final(struct shash_desc *desc, u8 *out) { - struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd160_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; @@ -322,10 +326,10 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out) /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); - rmd160_update(tfm, padding, padlen); + rmd160_update(desc, padding, padlen); /* Append length */ - rmd160_update(tfm, (const u8 *)&bits, sizeof(bits)); + rmd160_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 5; i++) @@ -333,31 +337,32 @@ static void rmd160_final(struct crypto_tfm *tfm, u8 *out) /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "rmd160", - .cra_driver_name = "rmd160", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = RMD160_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct rmd160_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = RMD160_DIGEST_SIZE, - .dia_init = rmd160_init, - .dia_update = rmd160_update, - .dia_final = rmd160_final } } +static struct shash_alg alg = { + .digestsize = RMD160_DIGEST_SIZE, + .init = rmd160_init, + .update = rmd160_update, + .final = rmd160_final, + .descsize = sizeof(struct rmd160_ctx), + .base = { + .cra_name = "rmd160", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = RMD160_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init rmd160_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit rmd160_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(rmd160_mod_init); @@ -365,5 +370,3 @@ module_exit(rmd160_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); - -MODULE_ALIAS("rmd160"); -- cgit v1.2.3 From d8a5e2e9f4e70ade136c67ce8242f0db4c2cddc7 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Nov 2008 09:58:10 +0800 Subject: crypto: rmd256 - Switch to shash This patch changes rmd256 to the new shash interface. Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/rmd256.c | 61 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 33 insertions(+), 30 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 513b9fb6723..7f3d7954c92 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -327,7 +327,7 @@ config CRYPTO_RMD160 config CRYPTO_RMD256 tristate "RIPEMD-256 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help RIPEMD-256 is an optional extension of RIPEMD-128 with a 256 bit hash. It is intended for applications that require diff --git a/crypto/rmd256.c b/crypto/rmd256.c index e3de5b4cb47..72eafa8d2e7 100644 --- a/crypto/rmd256.c +++ b/crypto/rmd256.c @@ -13,11 +13,10 @@ * any later version. 
* */ +#include #include #include #include -#include -#include #include #include @@ -233,9 +232,9 @@ static void rmd256_transform(u32 *state, const __le32 *in) return; } -static void rmd256_init(struct crypto_tfm *tfm) +static int rmd256_init(struct shash_desc *desc) { - struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd256_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; @@ -249,12 +248,14 @@ static void rmd256_init(struct crypto_tfm *tfm) rctx->state[7] = RMD_H8; memset(rctx->buffer, 0, sizeof(rctx->buffer)); + + return 0; } -static void rmd256_update(struct crypto_tfm *tfm, const u8 *data, - unsigned int len) +static int rmd256_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd256_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; @@ -263,7 +264,7 @@ static void rmd256_update(struct crypto_tfm *tfm, const u8 *data, if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); - return; + goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), @@ -281,12 +282,15 @@ static void rmd256_update(struct crypto_tfm *tfm, const u8 *data, } memcpy(rctx->buffer, data, len); + +out: + return 0; } /* Add padding and return the message digest. */ -static void rmd256_final(struct crypto_tfm *tfm, u8 *out) +static int rmd256_final(struct shash_desc *desc, u8 *out) { - struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd256_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; @@ -297,10 +301,10 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out) /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); - rmd256_update(tfm, padding, padlen); + rmd256_update(desc, padding, padlen); /* Append length */ - rmd256_update(tfm, (const u8 *)&bits, sizeof(bits)); + rmd256_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) @@ -308,31 +312,32 @@ static void rmd256_final(struct crypto_tfm *tfm, u8 *out) /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "rmd256", - .cra_driver_name = "rmd256", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = RMD256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct rmd256_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = RMD256_DIGEST_SIZE, - .dia_init = rmd256_init, - .dia_update = rmd256_update, - .dia_final = rmd256_final } } +static struct shash_alg alg = { + .digestsize = RMD256_DIGEST_SIZE, + .init = rmd256_init, + .update = rmd256_update, + .final = rmd256_final, + .descsize = sizeof(struct rmd256_ctx), + .base = { + .cra_name = "rmd256", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = RMD256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init rmd256_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit rmd256_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(rmd256_mod_init); @@ -340,5 +345,3 @@ module_exit(rmd256_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); - -MODULE_ALIAS("rmd256"); -- cgit v1.2.3 From 3b8efb4c4147094652570d7791a516d07b7df8c2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 8 Nov 2008 10:11:09 +0800 Subject: crypto: rmd320 - Switch to shash This patch changes rmd320 to the new shash interface. Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/rmd320.c | 61 ++++++++++++++++++++++++++++++--------------------------- 2 files changed, 33 insertions(+), 30 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 7f3d7954c92..edf6c71b576 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -339,7 +339,7 @@ config CRYPTO_RMD256 config CRYPTO_RMD320 tristate "RIPEMD-320 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help RIPEMD-320 is an optional extension of RIPEMD-160 with a 320 bit hash. It is intended for applications that require diff --git a/crypto/rmd320.c b/crypto/rmd320.c index b143d66e42c..86becaba2f0 100644 --- a/crypto/rmd320.c +++ b/crypto/rmd320.c @@ -13,11 +13,10 @@ * any later version. 
* */ +#include #include #include #include -#include -#include #include #include @@ -280,9 +279,9 @@ static void rmd320_transform(u32 *state, const __le32 *in) return; } -static void rmd320_init(struct crypto_tfm *tfm) +static int rmd320_init(struct shash_desc *desc) { - struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd320_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; @@ -298,12 +297,14 @@ static void rmd320_init(struct crypto_tfm *tfm) rctx->state[9] = RMD_H9; memset(rctx->buffer, 0, sizeof(rctx->buffer)); + + return 0; } -static void rmd320_update(struct crypto_tfm *tfm, const u8 *data, - unsigned int len) +static int rmd320_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { - struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd320_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; @@ -312,7 +313,7 @@ static void rmd320_update(struct crypto_tfm *tfm, const u8 *data, if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); - return; + goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), @@ -330,12 +331,15 @@ static void rmd320_update(struct crypto_tfm *tfm, const u8 *data, } memcpy(rctx->buffer, data, len); + +out: + return 0; } /* Add padding and return the message digest. */ -static void rmd320_final(struct crypto_tfm *tfm, u8 *out) +static int rmd320_final(struct shash_desc *desc, u8 *out) { - struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm); + struct rmd320_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; @@ -346,10 +350,10 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out) /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); - rmd320_update(tfm, padding, padlen); + rmd320_update(desc, padding, padlen); /* Append length */ - rmd320_update(tfm, (const u8 *)&bits, sizeof(bits)); + rmd320_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 10; i++) @@ -357,31 +361,32 @@ static void rmd320_final(struct crypto_tfm *tfm, u8 *out) /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "rmd320", - .cra_driver_name = "rmd320", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = RMD320_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct rmd320_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = RMD320_DIGEST_SIZE, - .dia_init = rmd320_init, - .dia_update = rmd320_update, - .dia_final = rmd320_final } } +static struct shash_alg alg = { + .digestsize = RMD320_DIGEST_SIZE, + .init = rmd320_init, + .update = rmd320_update, + .final = rmd320_final, + .descsize = sizeof(struct rmd320_ctx), + .base = { + .cra_name = "rmd320", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = RMD320_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init rmd320_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit rmd320_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(rmd320_mod_init); @@ -389,5 +394,3 @@ module_exit(rmd320_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); - -MODULE_ALIAS("rmd320"); -- cgit v1.2.3 From 54ccb36776eb7e03b592bfab60393c7800851a0b Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Tue, 2 Dec 2008 21:08:20 +0800 Subject: crypto: sha1 - Switch to shash This patch changes sha1 to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/sha1_generic.c | 56 ++++++++++++++++++++++++++++----------------------- 2 files changed, 32 insertions(+), 26 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index edf6c71b576..5386beb503e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -351,7 +351,7 @@ config CRYPTO_RMD320 config CRYPTO_SHA1 tristate "SHA1 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index c7c6899e1fc..9efef20454c 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -16,10 +16,10 @@ * any later version. 
* */ +#include #include #include #include -#include #include #include #include @@ -31,9 +31,10 @@ struct sha1_ctx { u8 buffer[64]; }; -static void sha1_init(struct crypto_tfm *tfm) +static int sha1_init(struct shash_desc *desc) { - struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha1_ctx *sctx = shash_desc_ctx(desc); + static const struct sha1_ctx initstate = { 0, { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, @@ -41,12 +42,14 @@ static void sha1_init(struct crypto_tfm *tfm) }; *sctx = initstate; + + return 0; } -static void sha1_update(struct crypto_tfm *tfm, const u8 *data, +static int sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha1_ctx *sctx = shash_desc_ctx(desc); unsigned int partial, done; const u8 *src; @@ -74,13 +77,15 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data, partial = 0; } memcpy(sctx->buffer + partial, src, len - done); + + return 0; } /* Add padding and return the message digest. */ -static void sha1_final(struct crypto_tfm *tfm, u8 *out) +static int sha1_final(struct shash_desc *desc, u8 *out) { - struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha1_ctx *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; u32 i, index, padlen; __be64 bits; @@ -91,10 +96,10 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out) /* Pad out to 56 mod 64 */ index = sctx->count & 0x3f; padlen = (index < 56) ? (56 - index) : ((64+56) - index); - sha1_update(tfm, padding, padlen); + sha1_update(desc, padding, padlen); /* Append length */ - sha1_update(tfm, (const u8 *)&bits, sizeof(bits)); + sha1_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 5; i++) @@ -102,32 +107,33 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out) /* Wipe context */ memset(sctx, 0, sizeof *sctx); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "sha1", - .cra_driver_name= "sha1-generic", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sha1_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA1_DIGEST_SIZE, - .dia_init = sha1_init, - .dia_update = sha1_update, - .dia_final = sha1_final } } +static struct shash_alg alg = { + .digestsize = SHA1_DIGEST_SIZE, + .init = sha1_init, + .update = sha1_update, + .final = sha1_final, + .descsize = sizeof(struct sha1_ctx), + .base = { + .cra_name = "sha1", + .cra_driver_name= "sha1-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init sha1_generic_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit sha1_generic_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(sha1_generic_mod_init); -- cgit v1.2.3 From 808a1763cef93bf0f740d7e10dd9a2dfc4065b1a Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 3 Dec 2008 19:55:27 +0800 Subject: crypto: md4 - Switch to shash This patch changes md4 to the new shash interface. 
Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/md4.c | 52 +++++++++++++++++++++++++++++----------------------- 2 files changed, 30 insertions(+), 24 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 5386beb503e..96f8154147a 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -276,7 +276,7 @@ config CRYPTO_CRC32C_INTEL config CRYPTO_MD4 tristate "MD4 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help MD4 message digest algorithm (RFC1320). diff --git a/crypto/md4.c b/crypto/md4.c index a143c4aaa39..7fca1f59a4f 100644 --- a/crypto/md4.c +++ b/crypto/md4.c @@ -20,8 +20,8 @@ * (at your option) any later version. * */ +#include #include -#include #include #include #include @@ -58,7 +58,7 @@ static inline u32 H(u32 x, u32 y, u32 z) { return x ^ y ^ z; } - + #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) @@ -152,20 +152,22 @@ static inline void md4_transform_helper(struct md4_ctx *ctx) md4_transform(ctx->hash, ctx->block); } -static void md4_init(struct crypto_tfm *tfm) +static int md4_init(struct shash_desc *desc) { - struct md4_ctx *mctx = crypto_tfm_ctx(tfm); + struct md4_ctx *mctx = shash_desc_ctx(desc); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; mctx->hash[3] = 0x10325476; mctx->byte_count = 0; + + return 0; } -static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) +static int md4_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md4_ctx *mctx = crypto_tfm_ctx(tfm); + struct md4_ctx *mctx = shash_desc_ctx(desc); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; @@ -173,7 +175,7 @@ static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); - return; + return 0; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), @@ -191,11 +193,13 @@ static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) } memcpy(mctx->block, data, len); + + return 0; } -static void md4_final(struct crypto_tfm *tfm, u8 *out) +static int md4_final(struct shash_desc *desc, u8 *out) { - struct md4_ctx *mctx = crypto_tfm_ctx(tfm); + struct md4_ctx *mctx = shash_desc_ctx(desc); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); @@ -217,30 +221,32 @@ static void md4_final(struct crypto_tfm *tfm, u8 *out) cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "md4", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = MD4_HMAC_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct md4_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = MD4_DIGEST_SIZE, - .dia_init = md4_init, - .dia_update = md4_update, - .dia_final = md4_final } } +static struct shash_alg alg = { + .digestsize = MD4_DIGEST_SIZE, + .init = md4_init, + .update = md4_update, + .final = md4_final, + .descsize = sizeof(struct md4_ctx), + .base = { + .cra_name = "md4", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = MD4_HMAC_BLOCK_SIZE, 
+ .cra_module = THIS_MODULE, + } }; static int __init md4_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit md4_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(md4_mod_init); -- cgit v1.2.3 From 14b75ba70da925a9f040a7575cb46ad7d394b117 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 3 Dec 2008 19:57:12 +0800 Subject: crypto: md5 - Switch to shash This patch changes md5 to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/md5.c | 50 ++++++++++++++++++++++++++++---------------------- 2 files changed, 29 insertions(+), 23 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 96f8154147a..989304ca3ea 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -282,7 +282,7 @@ config CRYPTO_MD4 config CRYPTO_MD5 tristate "MD5 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help MD5 message digest algorithm (RFC1321). diff --git a/crypto/md5.c b/crypto/md5.c index 39268f3d2f1..83eb5296175 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -15,10 +15,10 @@ * any later version. * */ +#include #include #include #include -#include #include #include @@ -147,20 +147,22 @@ static inline void md5_transform_helper(struct md5_ctx *ctx) md5_transform(ctx->hash, ctx->block); } -static void md5_init(struct crypto_tfm *tfm) +static int md5_init(struct shash_desc *desc) { - struct md5_ctx *mctx = crypto_tfm_ctx(tfm); + struct md5_ctx *mctx = shash_desc_ctx(desc); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; mctx->hash[3] = 0x10325476; mctx->byte_count = 0; + + return 0; } -static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) +static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md5_ctx *mctx = crypto_tfm_ctx(tfm); + struct md5_ctx *mctx = shash_desc_ctx(desc); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; @@ -168,7 +170,7 @@ static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); - return; + return 0; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), @@ -186,11 +188,13 @@ static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) } memcpy(mctx->block, data, len); + + return 0; } -static void md5_final(struct crypto_tfm *tfm, u8 *out) +static int md5_final(struct shash_desc *desc, u8 *out) { - struct md5_ctx *mctx = crypto_tfm_ctx(tfm); + struct md5_ctx *mctx = shash_desc_ctx(desc); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); @@ -212,30 +216,32 @@ static void md5_final(struct crypto_tfm *tfm, u8 *out) cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); + + return 0; } -static struct crypto_alg alg = { - .cra_name = "md5", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = MD5_HMAC_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct md5_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = MD5_DIGEST_SIZE, - .dia_init = md5_init, - .dia_update = md5_update, - .dia_final = md5_final } } +static struct shash_alg alg = { + .digestsize = MD5_DIGEST_SIZE, + 
.init = md5_init, + .update = md5_update, + .final = md5_final, + .descsize = sizeof(struct md5_ctx), + .base = { + .cra_name = "md5", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init md5_mod_init(void) { - return crypto_register_alg(&alg); + return crypto_register_shash(&alg); } static void __exit md5_mod_fini(void) { - crypto_unregister_alg(&alg); + crypto_unregister_shash(&alg); } module_init(md5_mod_init); -- cgit v1.2.3 From 50e109b5b9c1f734e91a6e9b557bce48c9a88654 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 3 Dec 2008 19:57:49 +0800 Subject: crypto: sha256 - Switch to shash This patch changes sha256 and sha224 to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/sha256_generic.c | 104 ++++++++++++++++++++++++++---------------------- 2 files changed, 57 insertions(+), 49 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 989304ca3ea..3d04fa880e4 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -357,7 +357,7 @@ config CRYPTO_SHA1 config CRYPTO_SHA256 tristate "SHA224 and SHA256 digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help SHA256 secure hash standard (DFIPS 180-2). diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 5a8dd47558e..caa3542e6ce 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -17,10 +17,10 @@ * any later version. * */ +#include #include #include #include -#include #include #include #include @@ -69,7 +69,7 @@ static void sha256_transform(u32 *state, const u8 *input) /* now blend */ for (i = 16; i < 64; i++) BLEND_OP(i, W); - + /* load the state into our registers */ a=state[0]; b=state[1]; c=state[2]; d=state[3]; e=state[4]; f=state[5]; g=state[6]; h=state[7]; @@ -220,9 +220,9 @@ static void sha256_transform(u32 *state, const u8 *input) } -static void sha224_init(struct crypto_tfm *tfm) +static int sha224_init(struct shash_desc *desc) { - struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha256_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA224_H0; sctx->state[1] = SHA224_H1; sctx->state[2] = SHA224_H2; @@ -233,11 +233,13 @@ static void sha224_init(struct crypto_tfm *tfm) sctx->state[7] = SHA224_H7; sctx->count[0] = 0; sctx->count[1] = 0; + + return 0; } -static void sha256_init(struct crypto_tfm *tfm) +static int sha256_init(struct shash_desc *desc) { - struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha256_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; sctx->state[2] = SHA256_H2; @@ -247,12 +249,14 @@ static void sha256_init(struct crypto_tfm *tfm) sctx->state[6] = SHA256_H6; sctx->state[7] = SHA256_H7; sctx->count[0] = sctx->count[1] = 0; + + return 0; } -static void sha256_update(struct crypto_tfm *tfm, const u8 *data, +static int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha256_ctx *sctx = shash_desc_ctx(desc); unsigned int i, index, part_len; /* Compute number of bytes mod 128 */ @@ -277,14 +281,16 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data, } else { i = 0; } - + /* Buffer remaining input */ memcpy(&sctx->buf[index], &data[i], len-i); + + return 0; } -static void sha256_final(struct crypto_tfm *tfm, u8 *out) +static int sha256_final(struct shash_desc *desc, u8 *out) { - struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); + 
struct sha256_ctx *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; __be32 bits[2]; unsigned int index, pad_len; @@ -298,10 +304,10 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out) /* Pad out to 56 mod 64. */ index = (sctx->count[0] >> 3) & 0x3f; pad_len = (index < 56) ? (56 - index) : ((64+56) - index); - sha256_update(tfm, padding, pad_len); + sha256_update(desc, padding, pad_len); /* Append length (before padding) */ - sha256_update(tfm, (const u8 *)bits, sizeof(bits)); + sha256_update(desc, (const u8 *)bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) @@ -309,71 +315,73 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out) /* Zeroize sensitive information. */ memset(sctx, 0, sizeof(*sctx)); + + return 0; } -static void sha224_final(struct crypto_tfm *tfm, u8 *hash) +static int sha224_final(struct shash_desc *desc, u8 *hash) { u8 D[SHA256_DIGEST_SIZE]; - sha256_final(tfm, D); + sha256_final(desc, D); memcpy(hash, D, SHA224_DIGEST_SIZE); memset(D, 0, SHA256_DIGEST_SIZE); + + return 0; } -static struct crypto_alg sha256 = { - .cra_name = "sha256", - .cra_driver_name= "sha256-generic", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sha256_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(sha256.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA256_DIGEST_SIZE, - .dia_init = sha256_init, - .dia_update = sha256_update, - .dia_final = sha256_final } } +static struct shash_alg sha256 = { + .digestsize = SHA256_DIGEST_SIZE, + .init = sha256_init, + .update = sha256_update, + .final = sha256_final, + .descsize = sizeof(struct sha256_ctx), + .base = { + .cra_name = "sha256", + .cra_driver_name= "sha256-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; -static struct crypto_alg sha224 = { - .cra_name = "sha224", - .cra_driver_name = "sha224-generic", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sha256_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(sha224.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA224_DIGEST_SIZE, - .dia_init = sha224_init, - .dia_update = sha256_update, - .dia_final = sha224_final } } +static struct shash_alg sha224 = { + .digestsize = SHA224_DIGEST_SIZE, + .init = sha224_init, + .update = sha256_update, + .final = sha224_final, + .descsize = sizeof(struct sha256_ctx), + .base = { + .cra_name = "sha224", + .cra_driver_name= "sha224-generic", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init sha256_generic_mod_init(void) { int ret = 0; - ret = crypto_register_alg(&sha224); + ret = crypto_register_shash(&sha224); if (ret < 0) return ret; - ret = crypto_register_alg(&sha256); + ret = crypto_register_shash(&sha256); if (ret < 0) - crypto_unregister_alg(&sha224); + crypto_unregister_shash(&sha224); return ret; } static void __exit sha256_generic_mod_fini(void) { - crypto_unregister_alg(&sha224); - crypto_unregister_alg(&sha256); + crypto_unregister_shash(&sha224); + crypto_unregister_shash(&sha256); } module_init(sha256_generic_mod_init); -- cgit v1.2.3 From f63fbd3d501b4283e1551e195cb74434a838064f Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 3 Dec 2008 19:58:32 +0800 Subject: crypto: tgr192 - Switch to shash This patch changes tgr192, tgr160 and 
tgr128 to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/tgr192.c | 135 +++++++++++++++++++++++++++++--------------------------- 2 files changed, 72 insertions(+), 65 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 3d04fa880e4..a863d7e5f9e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -381,7 +381,7 @@ config CRYPTO_SHA512 config CRYPTO_TGR192 tristate "Tiger digest algorithms" - select CRYPTO_ALGAPI + select CRYPTO_HASH help Tiger hash algorithm 192, 160 and 128-bit hashes diff --git a/crypto/tgr192.c b/crypto/tgr192.c index a92414f24be..cbca4f208c9 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -21,11 +21,11 @@ * (at your option) any later version. * */ +#include #include #include #include #include -#include #include #define TGR192_DIGEST_SIZE 24 @@ -495,24 +495,26 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) tctx->c = c; } -static void tgr192_init(struct crypto_tfm *tfm) +static int tgr192_init(struct shash_desc *desc) { - struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); + struct tgr192_ctx *tctx = shash_desc_ctx(desc); tctx->a = 0x0123456789abcdefULL; tctx->b = 0xfedcba9876543210ULL; tctx->c = 0xf096a5b4c3b2e187ULL; tctx->nblocks = 0; tctx->count = 0; + + return 0; } /* Update the message digest with the contents * of INBUF with length INLEN. */ -static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf, +static int tgr192_update(struct shash_desc *desc, const u8 *inbuf, unsigned int len) { - struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); + struct tgr192_ctx *tctx = shash_desc_ctx(desc); if (tctx->count == 64) { /* flush the buffer */ tgr192_transform(tctx, tctx->hash); @@ -520,15 +522,15 @@ static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf, tctx->nblocks++; } if (!inbuf) { - return; + return 0; } if (tctx->count) { for (; len && tctx->count < 64; len--) { tctx->hash[tctx->count++] = *inbuf++; } - tgr192_update(tfm, NULL, 0); + tgr192_update(desc, NULL, 0); if (!len) { - return; + return 0; } } @@ -543,20 +545,22 @@ static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf, for (; len && tctx->count < 64; len--) { tctx->hash[tctx->count++] = *inbuf++; } + + return 0; } /* The routine terminates the computation */ -static void tgr192_final(struct crypto_tfm *tfm, u8 * out) +static int tgr192_final(struct shash_desc *desc, u8 * out) { - struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); + struct tgr192_ctx *tctx = shash_desc_ctx(desc); __be64 *dst = (__be64 *)out; __be64 *be64p; __le32 *le32p; u32 t, msb, lsb; - tgr192_update(tfm, NULL, 0); /* flush */ ; + tgr192_update(desc, NULL, 0); /* flush */ ; msb = 0; t = tctx->nblocks; @@ -584,7 +588,7 @@ static void tgr192_final(struct crypto_tfm *tfm, u8 * out) while (tctx->count < 64) { tctx->hash[tctx->count++] = 0; } - tgr192_update(tfm, NULL, 0); /* flush */ ; + tgr192_update(desc, NULL, 0); /* flush */ ; memset(tctx->hash, 0, 56); /* fill next block with zeroes */ } /* append the 64 bit count */ @@ -598,91 +602,94 @@ static void tgr192_final(struct crypto_tfm *tfm, u8 * out) dst[0] = be64p[0] = cpu_to_be64(tctx->a); dst[1] = be64p[1] = cpu_to_be64(tctx->b); dst[2] = be64p[2] = cpu_to_be64(tctx->c); + + return 0; } -static void tgr160_final(struct crypto_tfm *tfm, u8 * out) +static int tgr160_final(struct shash_desc *desc, u8 * out) { u8 D[64]; - tgr192_final(tfm, D); + tgr192_final(desc, D); memcpy(out, D, TGR160_DIGEST_SIZE); memset(D, 0, TGR192_DIGEST_SIZE); + 
+ return 0; } -static void tgr128_final(struct crypto_tfm *tfm, u8 * out) +static int tgr128_final(struct shash_desc *desc, u8 * out) { u8 D[64]; - tgr192_final(tfm, D); + tgr192_final(desc, D); memcpy(out, D, TGR128_DIGEST_SIZE); memset(D, 0, TGR192_DIGEST_SIZE); + + return 0; } -static struct crypto_alg tgr192 = { - .cra_name = "tgr192", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = TGR192_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tgr192_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 7, - .cra_list = LIST_HEAD_INIT(tgr192.cra_list), - .cra_u = {.digest = { - .dia_digestsize = TGR192_DIGEST_SIZE, - .dia_init = tgr192_init, - .dia_update = tgr192_update, - .dia_final = tgr192_final}} +static struct shash_alg tgr192 = { + .digestsize = TGR192_DIGEST_SIZE, + .init = tgr192_init, + .update = tgr192_update, + .final = tgr192_final, + .descsize = sizeof(struct tgr192_ctx), + .base = { + .cra_name = "tgr192", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = TGR192_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; -static struct crypto_alg tgr160 = { - .cra_name = "tgr160", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = TGR192_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tgr192_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 7, - .cra_list = LIST_HEAD_INIT(tgr160.cra_list), - .cra_u = {.digest = { - .dia_digestsize = TGR160_DIGEST_SIZE, - .dia_init = tgr192_init, - .dia_update = tgr192_update, - .dia_final = tgr160_final}} +static struct shash_alg tgr160 = { + .digestsize = TGR160_DIGEST_SIZE, + .init = tgr192_init, + .update = tgr192_update, + .final = tgr160_final, + .descsize = sizeof(struct tgr192_ctx), + .base = { + .cra_name = "tgr160", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = TGR192_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; -static struct crypto_alg tgr128 = { - .cra_name = "tgr128", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = TGR192_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct tgr192_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 7, - .cra_list = LIST_HEAD_INIT(tgr128.cra_list), - .cra_u = {.digest = { - .dia_digestsize = TGR128_DIGEST_SIZE, - .dia_init = tgr192_init, - .dia_update = tgr192_update, - .dia_final = tgr128_final}} +static struct shash_alg tgr128 = { + .digestsize = TGR128_DIGEST_SIZE, + .init = tgr192_init, + .update = tgr192_update, + .final = tgr128_final, + .descsize = sizeof(struct tgr192_ctx), + .base = { + .cra_name = "tgr128", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = TGR192_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init tgr192_mod_init(void) { int ret = 0; - ret = crypto_register_alg(&tgr192); + ret = crypto_register_shash(&tgr192); if (ret < 0) { goto out; } - ret = crypto_register_alg(&tgr160); + ret = crypto_register_shash(&tgr160); if (ret < 0) { - crypto_unregister_alg(&tgr192); + crypto_unregister_shash(&tgr192); goto out; } - ret = crypto_register_alg(&tgr128); + ret = crypto_register_shash(&tgr128); if (ret < 0) { - crypto_unregister_alg(&tgr192); - crypto_unregister_alg(&tgr160); + crypto_unregister_shash(&tgr192); + crypto_unregister_shash(&tgr160); } out: return ret; @@ -690,9 +697,9 @@ static int __init tgr192_mod_init(void) static void __exit tgr192_mod_fini(void) { - crypto_unregister_alg(&tgr192); - crypto_unregister_alg(&tgr160); - crypto_unregister_alg(&tgr128); + crypto_unregister_shash(&tgr192); + crypto_unregister_shash(&tgr160); + crypto_unregister_shash(&tgr128); } MODULE_ALIAS("tgr160"); -- cgit v1.2.3 From 
4946510baac6aaa8658528e3deefc7e9ba2951a9 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Sun, 7 Dec 2008 19:34:37 +0800 Subject: crypto: wp512 - Switch to shash This patch changes wp512, wp384 and wp256 to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/wp512.c | 121 +++++++++++++++++++++++++++++++-------------------------- 2 files changed, 66 insertions(+), 57 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index a863d7e5f9e..bc29216aaad 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -394,7 +394,7 @@ config CRYPTO_TGR192 config CRYPTO_WP512 tristate "Whirlpool digest algorithms" - select CRYPTO_ALGAPI + select CRYPTO_HASH help Whirlpool hash algorithm 512, 384 and 256-bit hashes diff --git a/crypto/wp512.c b/crypto/wp512.c index bff28560d66..72342727368 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -19,11 +19,11 @@ * (at your option) any later version. * */ +#include #include #include #include #include -#include #include #define WP512_DIGEST_SIZE 64 @@ -980,8 +980,8 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) { } -static void wp512_init(struct crypto_tfm *tfm) { - struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); +static int wp512_init(struct shash_desc *desc) { + struct wp512_ctx *wctx = shash_desc_ctx(desc); int i; memset(wctx->bitLength, 0, 32); @@ -990,12 +990,14 @@ static void wp512_init(struct crypto_tfm *tfm) { for (i = 0; i < 8; i++) { wctx->hash[i] = 0L; } + + return 0; } -static void wp512_update(struct crypto_tfm *tfm, const u8 *source, +static int wp512_update(struct shash_desc *desc, const u8 *source, unsigned int len) { - struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); + struct wp512_ctx *wctx = shash_desc_ctx(desc); int sourcePos = 0; unsigned int bits_len = len * 8; // convert to number of bits int sourceGap = (8 - ((int)bits_len & 7)) & 7; @@ -1051,11 +1053,12 @@ static void wp512_update(struct crypto_tfm *tfm, const u8 *source, wctx->bufferBits = bufferBits; wctx->bufferPos = bufferPos; + return 0; } -static void wp512_final(struct crypto_tfm *tfm, u8 *out) +static int wp512_final(struct shash_desc *desc, u8 *out) { - struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); + struct wp512_ctx *wctx = shash_desc_ctx(desc); int i; u8 *buffer = wctx->buffer; u8 *bitLength = wctx->bitLength; @@ -1084,89 +1087,95 @@ static void wp512_final(struct crypto_tfm *tfm, u8 *out) digest[i] = cpu_to_be64(wctx->hash[i]); wctx->bufferBits = bufferBits; wctx->bufferPos = bufferPos; + + return 0; } -static void wp384_final(struct crypto_tfm *tfm, u8 *out) +static int wp384_final(struct shash_desc *desc, u8 *out) { u8 D[64]; - wp512_final(tfm, D); + wp512_final(desc, D); memcpy (out, D, WP384_DIGEST_SIZE); memset (D, 0, WP512_DIGEST_SIZE); + + return 0; } -static void wp256_final(struct crypto_tfm *tfm, u8 *out) +static int wp256_final(struct shash_desc *desc, u8 *out) { u8 D[64]; - wp512_final(tfm, D); + wp512_final(desc, D); memcpy (out, D, WP256_DIGEST_SIZE); memset (D, 0, WP512_DIGEST_SIZE); + + return 0; } -static struct crypto_alg wp512 = { - .cra_name = "wp512", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = WP512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct wp512_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(wp512.cra_list), - .cra_u = { .digest = { - .dia_digestsize = WP512_DIGEST_SIZE, - .dia_init = wp512_init, - .dia_update = wp512_update, - .dia_final = wp512_final } } +static struct shash_alg wp512 = { + .digestsize = 
WP512_DIGEST_SIZE, + .init = wp512_init, + .update = wp512_update, + .final = wp512_final, + .descsize = sizeof(struct wp512_ctx), + .base = { + .cra_name = "wp512", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = WP512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; -static struct crypto_alg wp384 = { - .cra_name = "wp384", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = WP512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct wp512_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(wp384.cra_list), - .cra_u = { .digest = { - .dia_digestsize = WP384_DIGEST_SIZE, - .dia_init = wp512_init, - .dia_update = wp512_update, - .dia_final = wp384_final } } +static struct shash_alg wp384 = { + .digestsize = WP384_DIGEST_SIZE, + .init = wp512_init, + .update = wp512_update, + .final = wp384_final, + .descsize = sizeof(struct wp512_ctx), + .base = { + .cra_name = "wp384", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = WP512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; -static struct crypto_alg wp256 = { - .cra_name = "wp256", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = WP512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct wp512_ctx), - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(wp256.cra_list), - .cra_u = { .digest = { - .dia_digestsize = WP256_DIGEST_SIZE, - .dia_init = wp512_init, - .dia_update = wp512_update, - .dia_final = wp256_final } } +static struct shash_alg wp256 = { + .digestsize = WP256_DIGEST_SIZE, + .init = wp512_init, + .update = wp512_update, + .final = wp256_final, + .descsize = sizeof(struct wp512_ctx), + .base = { + .cra_name = "wp256", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = WP512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init wp512_mod_init(void) { int ret = 0; - ret = crypto_register_alg(&wp512); + ret = crypto_register_shash(&wp512); if (ret < 0) goto out; - ret = crypto_register_alg(&wp384); + ret = crypto_register_shash(&wp384); if (ret < 0) { - crypto_unregister_alg(&wp512); + crypto_unregister_shash(&wp512); goto out; } - ret = crypto_register_alg(&wp256); + ret = crypto_register_shash(&wp256); if (ret < 0) { - crypto_unregister_alg(&wp512); - crypto_unregister_alg(&wp384); + crypto_unregister_shash(&wp512); + crypto_unregister_shash(&wp384); } out: return ret; @@ -1174,9 +1183,9 @@ out: static void __exit wp512_mod_fini(void) { - crypto_unregister_alg(&wp512); - crypto_unregister_alg(&wp384); - crypto_unregister_alg(&wp256); + crypto_unregister_shash(&wp512); + crypto_unregister_shash(&wp384); + crypto_unregister_shash(&wp256); } MODULE_ALIAS("wp384"); -- cgit v1.2.3 From 19e2bf146759aea38fd6c2daea08cb7a6367149b Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Sun, 7 Dec 2008 19:35:38 +0800 Subject: crypto: michael_mic - Switch to shash This patch changes michael_mic to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/michael_mic.c | 72 ++++++++++++++++++++++++++++++---------------------- 2 files changed, 42 insertions(+), 32 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index bc29216aaad..3f88a526d2d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -288,7 +288,7 @@ config CRYPTO_MD5 config CRYPTO_MICHAEL_MIC tristate "Michael MIC keyed digest algorithm" - select CRYPTO_ALGAPI + select CRYPTO_HASH help Michael MIC is used for message integrity protection in TKIP (IEEE 802.11i). 
This algorithm is required for TKIP, but it diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 9e917b8011b..079b761bc70 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -9,23 +9,25 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - +#include #include #include #include #include -#include #include struct michael_mic_ctx { + u32 l, r; +}; + +struct michael_mic_desc_ctx { u8 pending[4]; size_t pending_len; u32 l, r; }; - static inline u32 xswap(u32 val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); @@ -45,17 +47,22 @@ do { \ } while (0) -static void michael_init(struct crypto_tfm *tfm) +static int michael_init(struct shash_desc *desc) { - struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); + struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm); mctx->pending_len = 0; + mctx->l = ctx->l; + mctx->r = ctx->r; + + return 0; } -static void michael_update(struct crypto_tfm *tfm, const u8 *data, +static int michael_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); const __le32 *src; if (mctx->pending_len) { @@ -68,7 +75,7 @@ static void michael_update(struct crypto_tfm *tfm, const u8 *data, len -= flen; if (mctx->pending_len < 4) - return; + return 0; src = (const __le32 *)mctx->pending; mctx->l ^= le32_to_cpup(src); @@ -88,12 +95,14 @@ static void michael_update(struct crypto_tfm *tfm, const u8 *data, mctx->pending_len = len; memcpy(mctx->pending, src, len); } + + return 0; } -static void michael_final(struct crypto_tfm *tfm, u8 *out) +static int michael_final(struct shash_desc *desc, u8 *out) { - struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); u8 *data = mctx->pending; __le32 *dst = (__le32 *)out; @@ -119,17 +128,20 @@ static void michael_final(struct crypto_tfm *tfm, u8 *out) dst[0] = cpu_to_le32(mctx->l); dst[1] = cpu_to_le32(mctx->r); + + return 0; } -static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, +static int michael_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { - struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); + struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm); + const __le32 *data = (const __le32 *)key; if (keylen != 8) { - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } @@ -138,33 +150,31 @@ static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, return 0; } - -static struct crypto_alg michael_mic_alg = { - .cra_name = "michael_mic", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = 8, - .cra_ctxsize = sizeof(struct michael_mic_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list), - .cra_u = { .digest = { - .dia_digestsize = 8, - .dia_init = michael_init, - .dia_update = michael_update, - .dia_final = michael_final, - .dia_setkey = michael_setkey } } +static struct shash_alg alg = { + .digestsize = 8, + .setkey = michael_setkey, + .init = michael_init, + .update = michael_update, + .final = michael_final, + .descsize = sizeof(struct michael_mic_desc_ctx), + .base = { + .cra_name = "michael_mic", + .cra_blocksize = 8, + .cra_alignmask = 3, + .cra_ctxsize = sizeof(struct michael_mic_ctx), + .cra_module = THIS_MODULE, + } }; - 
static int __init michael_mic_init(void) { - return crypto_register_alg(&michael_mic_alg); + return crypto_register_shash(&alg); } static void __exit michael_mic_exit(void) { - crypto_unregister_alg(&michael_mic_alg); + crypto_unregister_shash(&alg); } -- cgit v1.2.3 From f9e2bca6c22d75a289a349f869701214d63b5060 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 17 Dec 2008 16:47:52 +1100 Subject: crypto: sha512 - Move message schedule W[80] to static percpu area The message schedule W (u64[80]) is too big for the stack. In order for this algorithm to be used with shash it is moved to a static percpu area. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/sha512_generic.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'crypto') diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index bc3686138ae..cb85516d3a7 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -18,16 +18,17 @@ #include #include #include - +#include #include struct sha512_ctx { u64 state[8]; u32 count[4]; u8 buf[128]; - u64 W[80]; }; +static DEFINE_PER_CPU(u64[80], msg_schedule); + static inline u64 Ch(u64 x, u64 y, u64 z) { return z ^ (x & (y ^ z)); @@ -89,11 +90,12 @@ static inline void BLEND_OP(int I, u64 *W) } static void -sha512_transform(u64 *state, u64 *W, const u8 *input) +sha512_transform(u64 *state, const u8 *input) { u64 a, b, c, d, e, f, g, h, t1, t2; int i; + u64 *W = get_cpu_var(msg_schedule); /* load the input */ for (i = 0; i < 16; i++) @@ -132,6 +134,8 @@ sha512_transform(u64 *state, u64 *W, const u8 *input) /* erase our data */ a = b = c = d = e = f = g = h = t1 = t2 = 0; + memset(W, 0, sizeof(__get_cpu_var(msg_schedule))); + put_cpu_var(msg_schedule); } static void @@ -187,10 +191,10 @@ sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) /* Transform as many times as possible. */ if (len >= part_len) { memcpy(&sctx->buf[index], data, part_len); - sha512_transform(sctx->state, sctx->W, sctx->buf); + sha512_transform(sctx->state, sctx->buf); for (i = part_len; i + 127 < len; i+=128) - sha512_transform(sctx->state, sctx->W, &data[i]); + sha512_transform(sctx->state, &data[i]); index = 0; } else { @@ -199,9 +203,6 @@ sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) /* Buffer remaining input */ memcpy(&sctx->buf[index], &data[i], len - i); - - /* erase our data */ - memset(sctx->W, 0, sizeof(sctx->W)); } static void -- cgit v1.2.3 From bd9d20dba182ce4541b16b083eccd30fb252b9f4 Mon Sep 17 00:00:00 2001 From: Adrian-Ken Rueegsegger Date: Wed, 17 Dec 2008 16:49:02 +1100 Subject: crypto: sha512 - Switch to shash This patch changes sha512 and sha384 to the new shash interface. Signed-off-by: Adrian-Ken Rueegsegger Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- crypto/sha512_generic.c | 112 +++++++++++++++++++++++++----------------------- 2 files changed, 60 insertions(+), 54 deletions(-) (limited to 'crypto') diff --git a/crypto/Kconfig b/crypto/Kconfig index 3f88a526d2d..8dde4fcf99c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -369,7 +369,7 @@ config CRYPTO_SHA256 config CRYPTO_SHA512 tristate "SHA384 and SHA512 digest algorithms" - select CRYPTO_ALGAPI + select CRYPTO_HASH help SHA512 secure hash standard (DFIPS 180-2). diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index cb85516d3a7..3bea38d1224 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -10,7 +10,7 @@ * later version. 
* */ - +#include #include #include #include @@ -138,10 +138,10 @@ sha512_transform(u64 *state, const u8 *input) put_cpu_var(msg_schedule); } -static void -sha512_init(struct crypto_tfm *tfm) +static int +sha512_init(struct shash_desc *desc) { - struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha512_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA512_H0; sctx->state[1] = SHA512_H1; sctx->state[2] = SHA512_H2; @@ -151,12 +151,14 @@ sha512_init(struct crypto_tfm *tfm) sctx->state[6] = SHA512_H6; sctx->state[7] = SHA512_H7; sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; + + return 0; } -static void -sha384_init(struct crypto_tfm *tfm) +static int +sha384_init(struct shash_desc *desc) { - struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha512_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA384_H0; sctx->state[1] = SHA384_H1; sctx->state[2] = SHA384_H2; @@ -166,12 +168,14 @@ sha384_init(struct crypto_tfm *tfm) sctx->state[6] = SHA384_H6; sctx->state[7] = SHA384_H7; sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; + + return 0; } -static void -sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) +static int +sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha512_ctx *sctx = shash_desc_ctx(desc); unsigned int i, index, part_len; @@ -203,12 +207,14 @@ sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) /* Buffer remaining input */ memcpy(&sctx->buf[index], &data[i], len - i); + + return 0; } -static void -sha512_final(struct crypto_tfm *tfm, u8 *hash) +static int +sha512_final(struct shash_desc *desc, u8 *hash) { - struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); + struct sha512_ctx *sctx = shash_desc_ctx(desc); static u8 padding[128] = { 0x80, }; __be64 *dst = (__be64 *)hash; __be32 bits[4]; @@ -224,10 +230,10 @@ sha512_final(struct crypto_tfm *tfm, u8 *hash) /* Pad out to 112 mod 128. */ index = (sctx->count[0] >> 3) & 0x7f; pad_len = (index < 112) ? (112 - index) : ((128+112) - index); - sha512_update(tfm, padding, pad_len); + sha512_update(desc, padding, pad_len); /* Append length (before padding) */ - sha512_update(tfm, (const u8 *)bits, sizeof(bits)); + sha512_update(desc, (const u8 *)bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) @@ -235,66 +241,66 @@ sha512_final(struct crypto_tfm *tfm, u8 *hash) /* Zeroize sensitive information. 
*/ memset(sctx, 0, sizeof(struct sha512_ctx)); + + return 0; } -static void sha384_final(struct crypto_tfm *tfm, u8 *hash) +static int sha384_final(struct shash_desc *desc, u8 *hash) { - u8 D[64]; + u8 D[64]; - sha512_final(tfm, D); + sha512_final(desc, D); - memcpy(hash, D, 48); - memset(D, 0, 64); + memcpy(hash, D, 48); + memset(D, 0, 64); + + return 0; } -static struct crypto_alg sha512 = { - .cra_name = "sha512", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sha512_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(sha512.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA512_DIGEST_SIZE, - .dia_init = sha512_init, - .dia_update = sha512_update, - .dia_final = sha512_final } - } +static struct shash_alg sha512 = { + .digestsize = SHA512_DIGEST_SIZE, + .init = sha512_init, + .update = sha512_update, + .final = sha512_final, + .descsize = sizeof(struct sha512_ctx), + .base = { + .cra_name = "sha512", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA512_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; -static struct crypto_alg sha384 = { - .cra_name = "sha384", - .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sha512_ctx), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(sha384.cra_list), - .cra_u = { .digest = { - .dia_digestsize = SHA384_DIGEST_SIZE, - .dia_init = sha384_init, - .dia_update = sha512_update, - .dia_final = sha384_final } - } +static struct shash_alg sha384 = { + .digestsize = SHA384_DIGEST_SIZE, + .init = sha384_init, + .update = sha512_update, + .final = sha384_final, + .descsize = sizeof(struct sha512_ctx), + .base = { + .cra_name = "sha384", + .cra_flags = CRYPTO_ALG_TYPE_SHASH, + .cra_blocksize = SHA384_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } }; static int __init sha512_generic_mod_init(void) { int ret = 0; - if ((ret = crypto_register_alg(&sha384)) < 0) + if ((ret = crypto_register_shash(&sha384)) < 0) goto out; - if ((ret = crypto_register_alg(&sha512)) < 0) - crypto_unregister_alg(&sha384); + if ((ret = crypto_register_shash(&sha512)) < 0) + crypto_unregister_shash(&sha384); out: return ret; } static void __exit sha512_generic_mod_fini(void) { - crypto_unregister_alg(&sha384); - crypto_unregister_alg(&sha512); + crypto_unregister_shash(&sha384); + crypto_unregister_shash(&sha512); } module_init(sha512_generic_mod_init); -- cgit v1.2.3 From ad79cdd77fc1466e45cf923890f66bcfe7c43f12 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Wed, 17 Dec 2008 16:51:13 +1100 Subject: crypto: des3_ede - permit weak keys unless REQ_WEAK_KEY set While its a slightly insane to bypass the key1 == key2 || key2 == key3 check in triple-des, since it reduces it to the same strength as des, some folks do need to do this from time to time for backwards compatibility with des. My own case is FIPS CAVS test vectors. Many triple-des test vectors use a single key, replicated 3x. In order to get the expected results, des3_ede_setkey() needs to only reject weak keys if the CRYPTO_TFM_REQ_WEAK_KEY flag is set. Also sets a more appropriate RES flag when a weak key is found. 
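For reference, after this change a caller that still wants weak keys rejected has to ask for it explicitly by setting CRYPTO_TFM_REQ_WEAK_KEY on the transform before calling setkey. The snippet below is only an illustrative sketch against the single-block cipher helpers in <linux/crypto.h>; the function name is made up and none of it is part of this patch:

/*
 * Sketch only: opting back in to weak-key rejection for des3_ede.
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/des.h>

static int try_weak_3des_key(void)
{
	/* key1 == key2 == key3, i.e. effectively single DES */
	static const u8 key[DES3_EDE_KEY_SIZE] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
	};
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("des3_ede", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Request the weak-key check; without this the key is accepted. */
	crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);

	/* Fails with -EINVAL and sets CRYPTO_TFM_RES_WEAK_KEY. */
	err = crypto_cipher_setkey(tfm, key, sizeof(key));

	crypto_free_cipher(tfm);
	return err;
}

Leaving out the crypto_cipher_set_flags() call makes setkey accept the replicated key, which is what the CAVS triple-DES vectors rely on.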
Signed-off-by: Jarod Wilson Signed-off-by: Herbert Xu --- crypto/des_generic.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'crypto') diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 5d0e4580f99..5bd3ee345a6 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -868,9 +868,10 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, u32 *flags = &tfm->crt_flags; if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || - !((K[2] ^ K[4]) | (K[3] ^ K[5])))) + !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && + (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; + *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } -- cgit v1.2.3 From f0d1ec3a227e01a27ce20719bf7b58de86d44f0f Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 17 Dec 2008 16:53:49 +1100 Subject: crypto: salsa20 - Remove private wrappers around various operations ROTATE -> rol32 XOR was always used with the same destination, use ^= PLUS/PLUSONE use ++ or += Signed-off-by: Harvey Harrison Signed-off-by: Herbert Xu --- crypto/salsa20_generic.c | 75 +++++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 39 deletions(-) (limited to 'crypto') diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index b07d5598174..eac10c11685 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -42,10 +43,6 @@ D. J. Bernstein Public domain. */ -#define ROTATE(v,n) (((v) << (n)) | ((v) >> (32 - (n)))) -#define XOR(v,w) ((v) ^ (w)) -#define PLUS(v,w) (((v) + (w))) -#define PLUSONE(v) (PLUS((v),1)) #define U32TO8_LITTLE(p, v) \ { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \ (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; } @@ -65,41 +62,41 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) memcpy(x, input, sizeof(x)); for (i = 20; i > 0; i -= 2) { - x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 0],x[12]), 7)); - x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[ 4],x[ 0]), 9)); - x[12] = XOR(x[12],ROTATE(PLUS(x[ 8],x[ 4]),13)); - x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[12],x[ 8]),18)); - x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 5],x[ 1]), 7)); - x[13] = XOR(x[13],ROTATE(PLUS(x[ 9],x[ 5]), 9)); - x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[13],x[ 9]),13)); - x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 1],x[13]),18)); - x[14] = XOR(x[14],ROTATE(PLUS(x[10],x[ 6]), 7)); - x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[14],x[10]), 9)); - x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 2],x[14]),13)); - x[10] = XOR(x[10],ROTATE(PLUS(x[ 6],x[ 2]),18)); - x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[15],x[11]), 7)); - x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 3],x[15]), 9)); - x[11] = XOR(x[11],ROTATE(PLUS(x[ 7],x[ 3]),13)); - x[15] = XOR(x[15],ROTATE(PLUS(x[11],x[ 7]),18)); - x[ 1] = XOR(x[ 1],ROTATE(PLUS(x[ 0],x[ 3]), 7)); - x[ 2] = XOR(x[ 2],ROTATE(PLUS(x[ 1],x[ 0]), 9)); - x[ 3] = XOR(x[ 3],ROTATE(PLUS(x[ 2],x[ 1]),13)); - x[ 0] = XOR(x[ 0],ROTATE(PLUS(x[ 3],x[ 2]),18)); - x[ 6] = XOR(x[ 6],ROTATE(PLUS(x[ 5],x[ 4]), 7)); - x[ 7] = XOR(x[ 7],ROTATE(PLUS(x[ 6],x[ 5]), 9)); - x[ 4] = XOR(x[ 4],ROTATE(PLUS(x[ 7],x[ 6]),13)); - x[ 5] = XOR(x[ 5],ROTATE(PLUS(x[ 4],x[ 7]),18)); - x[11] = XOR(x[11],ROTATE(PLUS(x[10],x[ 9]), 7)); - x[ 8] = XOR(x[ 8],ROTATE(PLUS(x[11],x[10]), 9)); - x[ 9] = XOR(x[ 9],ROTATE(PLUS(x[ 8],x[11]),13)); - x[10] = XOR(x[10],ROTATE(PLUS(x[ 9],x[ 8]),18)); - x[12] = XOR(x[12],ROTATE(PLUS(x[15],x[14]), 7)); - x[13] = XOR(x[13],ROTATE(PLUS(x[12],x[15]), 9)); - x[14] = XOR(x[14],ROTATE(PLUS(x[13],x[12]),13)); - x[15] = 
XOR(x[15],ROTATE(PLUS(x[14],x[13]),18)); + x[ 4] ^= rol32((x[ 0] + x[12]), 7); + x[ 8] ^= rol32((x[ 4] + x[ 0]), 9); + x[12] ^= rol32((x[ 8] + x[ 4]), 13); + x[ 0] ^= rol32((x[12] + x[ 8]), 18); + x[ 9] ^= rol32((x[ 5] + x[ 1]), 7); + x[13] ^= rol32((x[ 9] + x[ 5]), 9); + x[ 1] ^= rol32((x[13] + x[ 9]), 13); + x[ 5] ^= rol32((x[ 1] + x[13]), 18); + x[14] ^= rol32((x[10] + x[ 6]), 7); + x[ 2] ^= rol32((x[14] + x[10]), 9); + x[ 6] ^= rol32((x[ 2] + x[14]), 13); + x[10] ^= rol32((x[ 6] + x[ 2]), 18); + x[ 3] ^= rol32((x[15] + x[11]), 7); + x[ 7] ^= rol32((x[ 3] + x[15]), 9); + x[11] ^= rol32((x[ 7] + x[ 3]), 13); + x[15] ^= rol32((x[11] + x[ 7]), 18); + x[ 1] ^= rol32((x[ 0] + x[ 3]), 7); + x[ 2] ^= rol32((x[ 1] + x[ 0]), 9); + x[ 3] ^= rol32((x[ 2] + x[ 1]), 13); + x[ 0] ^= rol32((x[ 3] + x[ 2]), 18); + x[ 6] ^= rol32((x[ 5] + x[ 4]), 7); + x[ 7] ^= rol32((x[ 6] + x[ 5]), 9); + x[ 4] ^= rol32((x[ 7] + x[ 6]), 13); + x[ 5] ^= rol32((x[ 4] + x[ 7]), 18); + x[11] ^= rol32((x[10] + x[ 9]), 7); + x[ 8] ^= rol32((x[11] + x[10]), 9); + x[ 9] ^= rol32((x[ 8] + x[11]), 13); + x[10] ^= rol32((x[ 9] + x[ 8]), 18); + x[12] ^= rol32((x[15] + x[14]), 7); + x[13] ^= rol32((x[12] + x[15]), 9); + x[14] ^= rol32((x[13] + x[12]), 13); + x[15] ^= rol32((x[14] + x[13]), 18); } for (i = 0; i < 16; ++i) - x[i] = PLUS(x[i],input[i]); + x[i] += input[i]; for (i = 0; i < 16; ++i) U32TO8_LITTLE(output + 4 * i,x[i]); } @@ -150,9 +147,9 @@ static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, while (bytes) { salsa20_wordtobyte(buf, ctx->input); - ctx->input[8] = PLUSONE(ctx->input[8]); + ctx->input[8]++; if (!ctx->input[8]) - ctx->input[9] = PLUSONE(ctx->input[9]); + ctx->input[9]++; if (bytes <= 64) { crypto_xor(dst, buf, bytes); -- cgit v1.2.3 From bcf84a38f05c55180bc1225901950c7e715c0d55 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 18 Dec 2008 17:17:46 +1100 Subject: crypto: testmgr - Correct comment about deflate parameters The comment for the deflate test vectors says the winbits parameter is 11, while the deflate module actually uses -11 (a negative window bits parameter enables the raw deflate format instead of the zlib format). Correct this, to avoid confusion about the format used. Signed-off-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- crypto/testmgr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'crypto') diff --git a/crypto/testmgr.h b/crypto/testmgr.h index dee94d9ecfb..132953e144d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -8349,7 +8349,7 @@ struct comp_testvec { /* * Deflate test vectors (null-terminated strings). - * Params: winbits=11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. + * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL. */ #define DEFLATE_COMP_TEST_VECTORS 2 #define DEFLATE_DECOMP_TEST_VECTORS 2 -- cgit v1.2.3 From 0ee4a96902dd7858e65f378c86f428a0355bd841 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 25 Dec 2008 11:05:13 +1100 Subject: crypto: aes - Precompute tables The tables used by the various AES algorithms are currently computed at run-time. This has created an init ordering problem because some AES algorithms may be registered before the tables have been initialised. This patch gets around this whole thing by precomputing the tables. 
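Since the tables become const data, regenerating them has to happen offline. As an illustration only (not part of this patch), a small user-space program that mirrors the removed gen_tabs() logic can reproduce the GF(2^8) pow/log tables and the S-box; the S-box is the low byte of crypto_fl_tab[0], and the crypto_ft_tab entries combine it with the MixColumns multipliers {02, 01, 01, 03}:

/*
 * Sketch only: offline generator mirroring the removed gen_tabs().
 */
#include <stdio.h>

static unsigned char pow_tab[256], log_tab[256], sbx_tab[256];

int main(void)
{
	unsigned char p, q;
	unsigned int i;

	/*
	 * log and power tables for GF(2**8) with 0x11b as the modular
	 * polynomial; 0x03 is the primitive root, as in gen_tabs().
	 */
	for (i = 0, p = 1; i < 256; ++i) {
		pow_tab[i] = p;
		log_tab[p] = (unsigned char)i;
		p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0);
	}
	log_tab[1] = 0;

	/* S-box: multiplicative inverse followed by the affine transform */
	for (i = 0; i < 256; ++i) {
		p = i ? pow_tab[255 - log_tab[i]] : 0;
		q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2));
		p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2));
		sbx_tab[i] = p;
	}

	/* Print in the crypto_fl_tab[0] layout for easy comparison. */
	for (i = 0; i < 256; ++i)
		printf("0x000000%02x,%s", sbx_tab[i],
		       (i % 4 == 3) ? "\n" : " ");

	return 0;
}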
Signed-off-by: Herbert Xu --- crypto/aes_generic.c | 1145 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 1055 insertions(+), 90 deletions(-) (limited to 'crypto') diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 136dc98d8a0..b8b66ec3883 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -60,102 +60,1068 @@ static inline u8 byte(const u32 x, const unsigned n) return x >> (n << 3); } -static u8 pow_tab[256] __initdata; -static u8 log_tab[256] __initdata; -static u8 sbx_tab[256] __initdata; -static u8 isb_tab[256] __initdata; -static u32 rco_tab[10]; - -u32 crypto_ft_tab[4][256]; -u32 crypto_fl_tab[4][256]; -u32 crypto_it_tab[4][256]; -u32 crypto_il_tab[4][256]; - -EXPORT_SYMBOL_GPL(crypto_ft_tab); -EXPORT_SYMBOL_GPL(crypto_fl_tab); -EXPORT_SYMBOL_GPL(crypto_it_tab); -EXPORT_SYMBOL_GPL(crypto_il_tab); - -static inline u8 __init f_mult(u8 a, u8 b) -{ - u8 aa = log_tab[a], cc = aa + log_tab[b]; - - return pow_tab[cc + (cc < aa ? 1 : 0)]; -} - -#define ff_mult(a, b) (a && b ? f_mult(a, b) : 0) - -static void __init gen_tabs(void) -{ - u32 i, t; - u8 p, q; - - /* - * log and power tables for GF(2**8) finite field with - * 0x011b as modular polynomial - the simplest primitive - * root is 0x03, used here to generate the tables - */ - - for (i = 0, p = 1; i < 256; ++i) { - pow_tab[i] = (u8) p; - log_tab[p] = (u8) i; - - p ^= (p << 1) ^ (p & 0x80 ? 0x01b : 0); +static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 }; + +const u32 crypto_ft_tab[4][256] = { + { + 0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6, + 0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591, + 0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56, + 0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec, + 0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa, + 0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb, + 0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45, + 0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b, + 0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c, + 0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83, + 0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9, + 0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a, + 0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d, + 0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f, + 0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df, + 0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea, + 0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34, + 0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b, + 0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d, + 0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413, + 0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1, + 0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6, + 0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972, + 0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85, + 0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed, + 0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511, + 0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe, + 0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b, + 0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05, + 0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1, + 0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142, + 0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf, + 0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3, + 0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e, + 0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a, + 0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6, + 0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3, + 0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b, + 0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428, + 0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad, + 0x3be0e0db, 
0x56323264, 0x4e3a3a74, 0x1e0a0a14, + 0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8, + 0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4, + 0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2, + 0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda, + 0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949, + 0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf, + 0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810, + 0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c, + 0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697, + 0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e, + 0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f, + 0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc, + 0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c, + 0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969, + 0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27, + 0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122, + 0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433, + 0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9, + 0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5, + 0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a, + 0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0, + 0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e, + 0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c, + }, { + 0x6363c6a5, 0x7c7cf884, 0x7777ee99, 0x7b7bf68d, + 0xf2f2ff0d, 0x6b6bd6bd, 0x6f6fdeb1, 0xc5c59154, + 0x30306050, 0x01010203, 0x6767cea9, 0x2b2b567d, + 0xfefee719, 0xd7d7b562, 0xabab4de6, 0x7676ec9a, + 0xcaca8f45, 0x82821f9d, 0xc9c98940, 0x7d7dfa87, + 0xfafaef15, 0x5959b2eb, 0x47478ec9, 0xf0f0fb0b, + 0xadad41ec, 0xd4d4b367, 0xa2a25ffd, 0xafaf45ea, + 0x9c9c23bf, 0xa4a453f7, 0x7272e496, 0xc0c09b5b, + 0xb7b775c2, 0xfdfde11c, 0x93933dae, 0x26264c6a, + 0x36366c5a, 0x3f3f7e41, 0xf7f7f502, 0xcccc834f, + 0x3434685c, 0xa5a551f4, 0xe5e5d134, 0xf1f1f908, + 0x7171e293, 0xd8d8ab73, 0x31316253, 0x15152a3f, + 0x0404080c, 0xc7c79552, 0x23234665, 0xc3c39d5e, + 0x18183028, 0x969637a1, 0x05050a0f, 0x9a9a2fb5, + 0x07070e09, 0x12122436, 0x80801b9b, 0xe2e2df3d, + 0xebebcd26, 0x27274e69, 0xb2b27fcd, 0x7575ea9f, + 0x0909121b, 0x83831d9e, 0x2c2c5874, 0x1a1a342e, + 0x1b1b362d, 0x6e6edcb2, 0x5a5ab4ee, 0xa0a05bfb, + 0x5252a4f6, 0x3b3b764d, 0xd6d6b761, 0xb3b37dce, + 0x2929527b, 0xe3e3dd3e, 0x2f2f5e71, 0x84841397, + 0x5353a6f5, 0xd1d1b968, 0x00000000, 0xededc12c, + 0x20204060, 0xfcfce31f, 0xb1b179c8, 0x5b5bb6ed, + 0x6a6ad4be, 0xcbcb8d46, 0xbebe67d9, 0x3939724b, + 0x4a4a94de, 0x4c4c98d4, 0x5858b0e8, 0xcfcf854a, + 0xd0d0bb6b, 0xefefc52a, 0xaaaa4fe5, 0xfbfbed16, + 0x434386c5, 0x4d4d9ad7, 0x33336655, 0x85851194, + 0x45458acf, 0xf9f9e910, 0x02020406, 0x7f7ffe81, + 0x5050a0f0, 0x3c3c7844, 0x9f9f25ba, 0xa8a84be3, + 0x5151a2f3, 0xa3a35dfe, 0x404080c0, 0x8f8f058a, + 0x92923fad, 0x9d9d21bc, 0x38387048, 0xf5f5f104, + 0xbcbc63df, 0xb6b677c1, 0xdadaaf75, 0x21214263, + 0x10102030, 0xffffe51a, 0xf3f3fd0e, 0xd2d2bf6d, + 0xcdcd814c, 0x0c0c1814, 0x13132635, 0xececc32f, + 0x5f5fbee1, 0x979735a2, 0x444488cc, 0x17172e39, + 0xc4c49357, 0xa7a755f2, 0x7e7efc82, 0x3d3d7a47, + 0x6464c8ac, 0x5d5dbae7, 0x1919322b, 0x7373e695, + 0x6060c0a0, 0x81811998, 0x4f4f9ed1, 0xdcdca37f, + 0x22224466, 0x2a2a547e, 0x90903bab, 0x88880b83, + 0x46468cca, 0xeeeec729, 0xb8b86bd3, 0x1414283c, + 0xdedea779, 0x5e5ebce2, 0x0b0b161d, 0xdbdbad76, + 0xe0e0db3b, 0x32326456, 0x3a3a744e, 0x0a0a141e, + 0x494992db, 0x06060c0a, 0x2424486c, 0x5c5cb8e4, + 0xc2c29f5d, 0xd3d3bd6e, 0xacac43ef, 0x6262c4a6, + 0x919139a8, 0x959531a4, 0xe4e4d337, 0x7979f28b, + 0xe7e7d532, 0xc8c88b43, 0x37376e59, 0x6d6ddab7, + 0x8d8d018c, 0xd5d5b164, 0x4e4e9cd2, 0xa9a949e0, + 0x6c6cd8b4, 0x5656acfa, 0xf4f4f307, 0xeaeacf25, + 
0x6565caaf, 0x7a7af48e, 0xaeae47e9, 0x08081018, + 0xbaba6fd5, 0x7878f088, 0x25254a6f, 0x2e2e5c72, + 0x1c1c3824, 0xa6a657f1, 0xb4b473c7, 0xc6c69751, + 0xe8e8cb23, 0xdddda17c, 0x7474e89c, 0x1f1f3e21, + 0x4b4b96dd, 0xbdbd61dc, 0x8b8b0d86, 0x8a8a0f85, + 0x7070e090, 0x3e3e7c42, 0xb5b571c4, 0x6666ccaa, + 0x484890d8, 0x03030605, 0xf6f6f701, 0x0e0e1c12, + 0x6161c2a3, 0x35356a5f, 0x5757aef9, 0xb9b969d0, + 0x86861791, 0xc1c19958, 0x1d1d3a27, 0x9e9e27b9, + 0xe1e1d938, 0xf8f8eb13, 0x98982bb3, 0x11112233, + 0x6969d2bb, 0xd9d9a970, 0x8e8e0789, 0x949433a7, + 0x9b9b2db6, 0x1e1e3c22, 0x87871592, 0xe9e9c920, + 0xcece8749, 0x5555aaff, 0x28285078, 0xdfdfa57a, + 0x8c8c038f, 0xa1a159f8, 0x89890980, 0x0d0d1a17, + 0xbfbf65da, 0xe6e6d731, 0x424284c6, 0x6868d0b8, + 0x414182c3, 0x999929b0, 0x2d2d5a77, 0x0f0f1e11, + 0xb0b07bcb, 0x5454a8fc, 0xbbbb6dd6, 0x16162c3a, + }, { + 0x63c6a563, 0x7cf8847c, 0x77ee9977, 0x7bf68d7b, + 0xf2ff0df2, 0x6bd6bd6b, 0x6fdeb16f, 0xc59154c5, + 0x30605030, 0x01020301, 0x67cea967, 0x2b567d2b, + 0xfee719fe, 0xd7b562d7, 0xab4de6ab, 0x76ec9a76, + 0xca8f45ca, 0x821f9d82, 0xc98940c9, 0x7dfa877d, + 0xfaef15fa, 0x59b2eb59, 0x478ec947, 0xf0fb0bf0, + 0xad41ecad, 0xd4b367d4, 0xa25ffda2, 0xaf45eaaf, + 0x9c23bf9c, 0xa453f7a4, 0x72e49672, 0xc09b5bc0, + 0xb775c2b7, 0xfde11cfd, 0x933dae93, 0x264c6a26, + 0x366c5a36, 0x3f7e413f, 0xf7f502f7, 0xcc834fcc, + 0x34685c34, 0xa551f4a5, 0xe5d134e5, 0xf1f908f1, + 0x71e29371, 0xd8ab73d8, 0x31625331, 0x152a3f15, + 0x04080c04, 0xc79552c7, 0x23466523, 0xc39d5ec3, + 0x18302818, 0x9637a196, 0x050a0f05, 0x9a2fb59a, + 0x070e0907, 0x12243612, 0x801b9b80, 0xe2df3de2, + 0xebcd26eb, 0x274e6927, 0xb27fcdb2, 0x75ea9f75, + 0x09121b09, 0x831d9e83, 0x2c58742c, 0x1a342e1a, + 0x1b362d1b, 0x6edcb26e, 0x5ab4ee5a, 0xa05bfba0, + 0x52a4f652, 0x3b764d3b, 0xd6b761d6, 0xb37dceb3, + 0x29527b29, 0xe3dd3ee3, 0x2f5e712f, 0x84139784, + 0x53a6f553, 0xd1b968d1, 0x00000000, 0xedc12ced, + 0x20406020, 0xfce31ffc, 0xb179c8b1, 0x5bb6ed5b, + 0x6ad4be6a, 0xcb8d46cb, 0xbe67d9be, 0x39724b39, + 0x4a94de4a, 0x4c98d44c, 0x58b0e858, 0xcf854acf, + 0xd0bb6bd0, 0xefc52aef, 0xaa4fe5aa, 0xfbed16fb, + 0x4386c543, 0x4d9ad74d, 0x33665533, 0x85119485, + 0x458acf45, 0xf9e910f9, 0x02040602, 0x7ffe817f, + 0x50a0f050, 0x3c78443c, 0x9f25ba9f, 0xa84be3a8, + 0x51a2f351, 0xa35dfea3, 0x4080c040, 0x8f058a8f, + 0x923fad92, 0x9d21bc9d, 0x38704838, 0xf5f104f5, + 0xbc63dfbc, 0xb677c1b6, 0xdaaf75da, 0x21426321, + 0x10203010, 0xffe51aff, 0xf3fd0ef3, 0xd2bf6dd2, + 0xcd814ccd, 0x0c18140c, 0x13263513, 0xecc32fec, + 0x5fbee15f, 0x9735a297, 0x4488cc44, 0x172e3917, + 0xc49357c4, 0xa755f2a7, 0x7efc827e, 0x3d7a473d, + 0x64c8ac64, 0x5dbae75d, 0x19322b19, 0x73e69573, + 0x60c0a060, 0x81199881, 0x4f9ed14f, 0xdca37fdc, + 0x22446622, 0x2a547e2a, 0x903bab90, 0x880b8388, + 0x468cca46, 0xeec729ee, 0xb86bd3b8, 0x14283c14, + 0xdea779de, 0x5ebce25e, 0x0b161d0b, 0xdbad76db, + 0xe0db3be0, 0x32645632, 0x3a744e3a, 0x0a141e0a, + 0x4992db49, 0x060c0a06, 0x24486c24, 0x5cb8e45c, + 0xc29f5dc2, 0xd3bd6ed3, 0xac43efac, 0x62c4a662, + 0x9139a891, 0x9531a495, 0xe4d337e4, 0x79f28b79, + 0xe7d532e7, 0xc88b43c8, 0x376e5937, 0x6ddab76d, + 0x8d018c8d, 0xd5b164d5, 0x4e9cd24e, 0xa949e0a9, + 0x6cd8b46c, 0x56acfa56, 0xf4f307f4, 0xeacf25ea, + 0x65caaf65, 0x7af48e7a, 0xae47e9ae, 0x08101808, + 0xba6fd5ba, 0x78f08878, 0x254a6f25, 0x2e5c722e, + 0x1c38241c, 0xa657f1a6, 0xb473c7b4, 0xc69751c6, + 0xe8cb23e8, 0xdda17cdd, 0x74e89c74, 0x1f3e211f, + 0x4b96dd4b, 0xbd61dcbd, 0x8b0d868b, 0x8a0f858a, + 0x70e09070, 0x3e7c423e, 0xb571c4b5, 0x66ccaa66, + 0x4890d848, 0x03060503, 0xf6f701f6, 0x0e1c120e, 
+ 0x61c2a361, 0x356a5f35, 0x57aef957, 0xb969d0b9, + 0x86179186, 0xc19958c1, 0x1d3a271d, 0x9e27b99e, + 0xe1d938e1, 0xf8eb13f8, 0x982bb398, 0x11223311, + 0x69d2bb69, 0xd9a970d9, 0x8e07898e, 0x9433a794, + 0x9b2db69b, 0x1e3c221e, 0x87159287, 0xe9c920e9, + 0xce8749ce, 0x55aaff55, 0x28507828, 0xdfa57adf, + 0x8c038f8c, 0xa159f8a1, 0x89098089, 0x0d1a170d, + 0xbf65dabf, 0xe6d731e6, 0x4284c642, 0x68d0b868, + 0x4182c341, 0x9929b099, 0x2d5a772d, 0x0f1e110f, + 0xb07bcbb0, 0x54a8fc54, 0xbb6dd6bb, 0x162c3a16, + }, { + 0xc6a56363, 0xf8847c7c, 0xee997777, 0xf68d7b7b, + 0xff0df2f2, 0xd6bd6b6b, 0xdeb16f6f, 0x9154c5c5, + 0x60503030, 0x02030101, 0xcea96767, 0x567d2b2b, + 0xe719fefe, 0xb562d7d7, 0x4de6abab, 0xec9a7676, + 0x8f45caca, 0x1f9d8282, 0x8940c9c9, 0xfa877d7d, + 0xef15fafa, 0xb2eb5959, 0x8ec94747, 0xfb0bf0f0, + 0x41ecadad, 0xb367d4d4, 0x5ffda2a2, 0x45eaafaf, + 0x23bf9c9c, 0x53f7a4a4, 0xe4967272, 0x9b5bc0c0, + 0x75c2b7b7, 0xe11cfdfd, 0x3dae9393, 0x4c6a2626, + 0x6c5a3636, 0x7e413f3f, 0xf502f7f7, 0x834fcccc, + 0x685c3434, 0x51f4a5a5, 0xd134e5e5, 0xf908f1f1, + 0xe2937171, 0xab73d8d8, 0x62533131, 0x2a3f1515, + 0x080c0404, 0x9552c7c7, 0x46652323, 0x9d5ec3c3, + 0x30281818, 0x37a19696, 0x0a0f0505, 0x2fb59a9a, + 0x0e090707, 0x24361212, 0x1b9b8080, 0xdf3de2e2, + 0xcd26ebeb, 0x4e692727, 0x7fcdb2b2, 0xea9f7575, + 0x121b0909, 0x1d9e8383, 0x58742c2c, 0x342e1a1a, + 0x362d1b1b, 0xdcb26e6e, 0xb4ee5a5a, 0x5bfba0a0, + 0xa4f65252, 0x764d3b3b, 0xb761d6d6, 0x7dceb3b3, + 0x527b2929, 0xdd3ee3e3, 0x5e712f2f, 0x13978484, + 0xa6f55353, 0xb968d1d1, 0x00000000, 0xc12ceded, + 0x40602020, 0xe31ffcfc, 0x79c8b1b1, 0xb6ed5b5b, + 0xd4be6a6a, 0x8d46cbcb, 0x67d9bebe, 0x724b3939, + 0x94de4a4a, 0x98d44c4c, 0xb0e85858, 0x854acfcf, + 0xbb6bd0d0, 0xc52aefef, 0x4fe5aaaa, 0xed16fbfb, + 0x86c54343, 0x9ad74d4d, 0x66553333, 0x11948585, + 0x8acf4545, 0xe910f9f9, 0x04060202, 0xfe817f7f, + 0xa0f05050, 0x78443c3c, 0x25ba9f9f, 0x4be3a8a8, + 0xa2f35151, 0x5dfea3a3, 0x80c04040, 0x058a8f8f, + 0x3fad9292, 0x21bc9d9d, 0x70483838, 0xf104f5f5, + 0x63dfbcbc, 0x77c1b6b6, 0xaf75dada, 0x42632121, + 0x20301010, 0xe51affff, 0xfd0ef3f3, 0xbf6dd2d2, + 0x814ccdcd, 0x18140c0c, 0x26351313, 0xc32fecec, + 0xbee15f5f, 0x35a29797, 0x88cc4444, 0x2e391717, + 0x9357c4c4, 0x55f2a7a7, 0xfc827e7e, 0x7a473d3d, + 0xc8ac6464, 0xbae75d5d, 0x322b1919, 0xe6957373, + 0xc0a06060, 0x19988181, 0x9ed14f4f, 0xa37fdcdc, + 0x44662222, 0x547e2a2a, 0x3bab9090, 0x0b838888, + 0x8cca4646, 0xc729eeee, 0x6bd3b8b8, 0x283c1414, + 0xa779dede, 0xbce25e5e, 0x161d0b0b, 0xad76dbdb, + 0xdb3be0e0, 0x64563232, 0x744e3a3a, 0x141e0a0a, + 0x92db4949, 0x0c0a0606, 0x486c2424, 0xb8e45c5c, + 0x9f5dc2c2, 0xbd6ed3d3, 0x43efacac, 0xc4a66262, + 0x39a89191, 0x31a49595, 0xd337e4e4, 0xf28b7979, + 0xd532e7e7, 0x8b43c8c8, 0x6e593737, 0xdab76d6d, + 0x018c8d8d, 0xb164d5d5, 0x9cd24e4e, 0x49e0a9a9, + 0xd8b46c6c, 0xacfa5656, 0xf307f4f4, 0xcf25eaea, + 0xcaaf6565, 0xf48e7a7a, 0x47e9aeae, 0x10180808, + 0x6fd5baba, 0xf0887878, 0x4a6f2525, 0x5c722e2e, + 0x38241c1c, 0x57f1a6a6, 0x73c7b4b4, 0x9751c6c6, + 0xcb23e8e8, 0xa17cdddd, 0xe89c7474, 0x3e211f1f, + 0x96dd4b4b, 0x61dcbdbd, 0x0d868b8b, 0x0f858a8a, + 0xe0907070, 0x7c423e3e, 0x71c4b5b5, 0xccaa6666, + 0x90d84848, 0x06050303, 0xf701f6f6, 0x1c120e0e, + 0xc2a36161, 0x6a5f3535, 0xaef95757, 0x69d0b9b9, + 0x17918686, 0x9958c1c1, 0x3a271d1d, 0x27b99e9e, + 0xd938e1e1, 0xeb13f8f8, 0x2bb39898, 0x22331111, + 0xd2bb6969, 0xa970d9d9, 0x07898e8e, 0x33a79494, + 0x2db69b9b, 0x3c221e1e, 0x15928787, 0xc920e9e9, + 0x8749cece, 0xaaff5555, 0x50782828, 0xa57adfdf, + 0x038f8c8c, 0x59f8a1a1, 0x09808989, 
0x1a170d0d, + 0x65dabfbf, 0xd731e6e6, 0x84c64242, 0xd0b86868, + 0x82c34141, 0x29b09999, 0x5a772d2d, 0x1e110f0f, + 0x7bcbb0b0, 0xa8fc5454, 0x6dd6bbbb, 0x2c3a1616, } +}; - log_tab[1] = 0; - - for (i = 0, p = 1; i < 10; ++i) { - rco_tab[i] = p; - - p = (p << 1) ^ (p & 0x80 ? 0x01b : 0); +const u32 crypto_fl_tab[4][256] = { + { + 0x00000063, 0x0000007c, 0x00000077, 0x0000007b, + 0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5, + 0x00000030, 0x00000001, 0x00000067, 0x0000002b, + 0x000000fe, 0x000000d7, 0x000000ab, 0x00000076, + 0x000000ca, 0x00000082, 0x000000c9, 0x0000007d, + 0x000000fa, 0x00000059, 0x00000047, 0x000000f0, + 0x000000ad, 0x000000d4, 0x000000a2, 0x000000af, + 0x0000009c, 0x000000a4, 0x00000072, 0x000000c0, + 0x000000b7, 0x000000fd, 0x00000093, 0x00000026, + 0x00000036, 0x0000003f, 0x000000f7, 0x000000cc, + 0x00000034, 0x000000a5, 0x000000e5, 0x000000f1, + 0x00000071, 0x000000d8, 0x00000031, 0x00000015, + 0x00000004, 0x000000c7, 0x00000023, 0x000000c3, + 0x00000018, 0x00000096, 0x00000005, 0x0000009a, + 0x00000007, 0x00000012, 0x00000080, 0x000000e2, + 0x000000eb, 0x00000027, 0x000000b2, 0x00000075, + 0x00000009, 0x00000083, 0x0000002c, 0x0000001a, + 0x0000001b, 0x0000006e, 0x0000005a, 0x000000a0, + 0x00000052, 0x0000003b, 0x000000d6, 0x000000b3, + 0x00000029, 0x000000e3, 0x0000002f, 0x00000084, + 0x00000053, 0x000000d1, 0x00000000, 0x000000ed, + 0x00000020, 0x000000fc, 0x000000b1, 0x0000005b, + 0x0000006a, 0x000000cb, 0x000000be, 0x00000039, + 0x0000004a, 0x0000004c, 0x00000058, 0x000000cf, + 0x000000d0, 0x000000ef, 0x000000aa, 0x000000fb, + 0x00000043, 0x0000004d, 0x00000033, 0x00000085, + 0x00000045, 0x000000f9, 0x00000002, 0x0000007f, + 0x00000050, 0x0000003c, 0x0000009f, 0x000000a8, + 0x00000051, 0x000000a3, 0x00000040, 0x0000008f, + 0x00000092, 0x0000009d, 0x00000038, 0x000000f5, + 0x000000bc, 0x000000b6, 0x000000da, 0x00000021, + 0x00000010, 0x000000ff, 0x000000f3, 0x000000d2, + 0x000000cd, 0x0000000c, 0x00000013, 0x000000ec, + 0x0000005f, 0x00000097, 0x00000044, 0x00000017, + 0x000000c4, 0x000000a7, 0x0000007e, 0x0000003d, + 0x00000064, 0x0000005d, 0x00000019, 0x00000073, + 0x00000060, 0x00000081, 0x0000004f, 0x000000dc, + 0x00000022, 0x0000002a, 0x00000090, 0x00000088, + 0x00000046, 0x000000ee, 0x000000b8, 0x00000014, + 0x000000de, 0x0000005e, 0x0000000b, 0x000000db, + 0x000000e0, 0x00000032, 0x0000003a, 0x0000000a, + 0x00000049, 0x00000006, 0x00000024, 0x0000005c, + 0x000000c2, 0x000000d3, 0x000000ac, 0x00000062, + 0x00000091, 0x00000095, 0x000000e4, 0x00000079, + 0x000000e7, 0x000000c8, 0x00000037, 0x0000006d, + 0x0000008d, 0x000000d5, 0x0000004e, 0x000000a9, + 0x0000006c, 0x00000056, 0x000000f4, 0x000000ea, + 0x00000065, 0x0000007a, 0x000000ae, 0x00000008, + 0x000000ba, 0x00000078, 0x00000025, 0x0000002e, + 0x0000001c, 0x000000a6, 0x000000b4, 0x000000c6, + 0x000000e8, 0x000000dd, 0x00000074, 0x0000001f, + 0x0000004b, 0x000000bd, 0x0000008b, 0x0000008a, + 0x00000070, 0x0000003e, 0x000000b5, 0x00000066, + 0x00000048, 0x00000003, 0x000000f6, 0x0000000e, + 0x00000061, 0x00000035, 0x00000057, 0x000000b9, + 0x00000086, 0x000000c1, 0x0000001d, 0x0000009e, + 0x000000e1, 0x000000f8, 0x00000098, 0x00000011, + 0x00000069, 0x000000d9, 0x0000008e, 0x00000094, + 0x0000009b, 0x0000001e, 0x00000087, 0x000000e9, + 0x000000ce, 0x00000055, 0x00000028, 0x000000df, + 0x0000008c, 0x000000a1, 0x00000089, 0x0000000d, + 0x000000bf, 0x000000e6, 0x00000042, 0x00000068, + 0x00000041, 0x00000099, 0x0000002d, 0x0000000f, + 0x000000b0, 0x00000054, 0x000000bb, 0x00000016, + }, { + 0x00006300, 
0x00007c00, 0x00007700, 0x00007b00, + 0x0000f200, 0x00006b00, 0x00006f00, 0x0000c500, + 0x00003000, 0x00000100, 0x00006700, 0x00002b00, + 0x0000fe00, 0x0000d700, 0x0000ab00, 0x00007600, + 0x0000ca00, 0x00008200, 0x0000c900, 0x00007d00, + 0x0000fa00, 0x00005900, 0x00004700, 0x0000f000, + 0x0000ad00, 0x0000d400, 0x0000a200, 0x0000af00, + 0x00009c00, 0x0000a400, 0x00007200, 0x0000c000, + 0x0000b700, 0x0000fd00, 0x00009300, 0x00002600, + 0x00003600, 0x00003f00, 0x0000f700, 0x0000cc00, + 0x00003400, 0x0000a500, 0x0000e500, 0x0000f100, + 0x00007100, 0x0000d800, 0x00003100, 0x00001500, + 0x00000400, 0x0000c700, 0x00002300, 0x0000c300, + 0x00001800, 0x00009600, 0x00000500, 0x00009a00, + 0x00000700, 0x00001200, 0x00008000, 0x0000e200, + 0x0000eb00, 0x00002700, 0x0000b200, 0x00007500, + 0x00000900, 0x00008300, 0x00002c00, 0x00001a00, + 0x00001b00, 0x00006e00, 0x00005a00, 0x0000a000, + 0x00005200, 0x00003b00, 0x0000d600, 0x0000b300, + 0x00002900, 0x0000e300, 0x00002f00, 0x00008400, + 0x00005300, 0x0000d100, 0x00000000, 0x0000ed00, + 0x00002000, 0x0000fc00, 0x0000b100, 0x00005b00, + 0x00006a00, 0x0000cb00, 0x0000be00, 0x00003900, + 0x00004a00, 0x00004c00, 0x00005800, 0x0000cf00, + 0x0000d000, 0x0000ef00, 0x0000aa00, 0x0000fb00, + 0x00004300, 0x00004d00, 0x00003300, 0x00008500, + 0x00004500, 0x0000f900, 0x00000200, 0x00007f00, + 0x00005000, 0x00003c00, 0x00009f00, 0x0000a800, + 0x00005100, 0x0000a300, 0x00004000, 0x00008f00, + 0x00009200, 0x00009d00, 0x00003800, 0x0000f500, + 0x0000bc00, 0x0000b600, 0x0000da00, 0x00002100, + 0x00001000, 0x0000ff00, 0x0000f300, 0x0000d200, + 0x0000cd00, 0x00000c00, 0x00001300, 0x0000ec00, + 0x00005f00, 0x00009700, 0x00004400, 0x00001700, + 0x0000c400, 0x0000a700, 0x00007e00, 0x00003d00, + 0x00006400, 0x00005d00, 0x00001900, 0x00007300, + 0x00006000, 0x00008100, 0x00004f00, 0x0000dc00, + 0x00002200, 0x00002a00, 0x00009000, 0x00008800, + 0x00004600, 0x0000ee00, 0x0000b800, 0x00001400, + 0x0000de00, 0x00005e00, 0x00000b00, 0x0000db00, + 0x0000e000, 0x00003200, 0x00003a00, 0x00000a00, + 0x00004900, 0x00000600, 0x00002400, 0x00005c00, + 0x0000c200, 0x0000d300, 0x0000ac00, 0x00006200, + 0x00009100, 0x00009500, 0x0000e400, 0x00007900, + 0x0000e700, 0x0000c800, 0x00003700, 0x00006d00, + 0x00008d00, 0x0000d500, 0x00004e00, 0x0000a900, + 0x00006c00, 0x00005600, 0x0000f400, 0x0000ea00, + 0x00006500, 0x00007a00, 0x0000ae00, 0x00000800, + 0x0000ba00, 0x00007800, 0x00002500, 0x00002e00, + 0x00001c00, 0x0000a600, 0x0000b400, 0x0000c600, + 0x0000e800, 0x0000dd00, 0x00007400, 0x00001f00, + 0x00004b00, 0x0000bd00, 0x00008b00, 0x00008a00, + 0x00007000, 0x00003e00, 0x0000b500, 0x00006600, + 0x00004800, 0x00000300, 0x0000f600, 0x00000e00, + 0x00006100, 0x00003500, 0x00005700, 0x0000b900, + 0x00008600, 0x0000c100, 0x00001d00, 0x00009e00, + 0x0000e100, 0x0000f800, 0x00009800, 0x00001100, + 0x00006900, 0x0000d900, 0x00008e00, 0x00009400, + 0x00009b00, 0x00001e00, 0x00008700, 0x0000e900, + 0x0000ce00, 0x00005500, 0x00002800, 0x0000df00, + 0x00008c00, 0x0000a100, 0x00008900, 0x00000d00, + 0x0000bf00, 0x0000e600, 0x00004200, 0x00006800, + 0x00004100, 0x00009900, 0x00002d00, 0x00000f00, + 0x0000b000, 0x00005400, 0x0000bb00, 0x00001600, + }, { + 0x00630000, 0x007c0000, 0x00770000, 0x007b0000, + 0x00f20000, 0x006b0000, 0x006f0000, 0x00c50000, + 0x00300000, 0x00010000, 0x00670000, 0x002b0000, + 0x00fe0000, 0x00d70000, 0x00ab0000, 0x00760000, + 0x00ca0000, 0x00820000, 0x00c90000, 0x007d0000, + 0x00fa0000, 0x00590000, 0x00470000, 0x00f00000, + 0x00ad0000, 0x00d40000, 0x00a20000, 0x00af0000, + 
0x009c0000, 0x00a40000, 0x00720000, 0x00c00000, + 0x00b70000, 0x00fd0000, 0x00930000, 0x00260000, + 0x00360000, 0x003f0000, 0x00f70000, 0x00cc0000, + 0x00340000, 0x00a50000, 0x00e50000, 0x00f10000, + 0x00710000, 0x00d80000, 0x00310000, 0x00150000, + 0x00040000, 0x00c70000, 0x00230000, 0x00c30000, + 0x00180000, 0x00960000, 0x00050000, 0x009a0000, + 0x00070000, 0x00120000, 0x00800000, 0x00e20000, + 0x00eb0000, 0x00270000, 0x00b20000, 0x00750000, + 0x00090000, 0x00830000, 0x002c0000, 0x001a0000, + 0x001b0000, 0x006e0000, 0x005a0000, 0x00a00000, + 0x00520000, 0x003b0000, 0x00d60000, 0x00b30000, + 0x00290000, 0x00e30000, 0x002f0000, 0x00840000, + 0x00530000, 0x00d10000, 0x00000000, 0x00ed0000, + 0x00200000, 0x00fc0000, 0x00b10000, 0x005b0000, + 0x006a0000, 0x00cb0000, 0x00be0000, 0x00390000, + 0x004a0000, 0x004c0000, 0x00580000, 0x00cf0000, + 0x00d00000, 0x00ef0000, 0x00aa0000, 0x00fb0000, + 0x00430000, 0x004d0000, 0x00330000, 0x00850000, + 0x00450000, 0x00f90000, 0x00020000, 0x007f0000, + 0x00500000, 0x003c0000, 0x009f0000, 0x00a80000, + 0x00510000, 0x00a30000, 0x00400000, 0x008f0000, + 0x00920000, 0x009d0000, 0x00380000, 0x00f50000, + 0x00bc0000, 0x00b60000, 0x00da0000, 0x00210000, + 0x00100000, 0x00ff0000, 0x00f30000, 0x00d20000, + 0x00cd0000, 0x000c0000, 0x00130000, 0x00ec0000, + 0x005f0000, 0x00970000, 0x00440000, 0x00170000, + 0x00c40000, 0x00a70000, 0x007e0000, 0x003d0000, + 0x00640000, 0x005d0000, 0x00190000, 0x00730000, + 0x00600000, 0x00810000, 0x004f0000, 0x00dc0000, + 0x00220000, 0x002a0000, 0x00900000, 0x00880000, + 0x00460000, 0x00ee0000, 0x00b80000, 0x00140000, + 0x00de0000, 0x005e0000, 0x000b0000, 0x00db0000, + 0x00e00000, 0x00320000, 0x003a0000, 0x000a0000, + 0x00490000, 0x00060000, 0x00240000, 0x005c0000, + 0x00c20000, 0x00d30000, 0x00ac0000, 0x00620000, + 0x00910000, 0x00950000, 0x00e40000, 0x00790000, + 0x00e70000, 0x00c80000, 0x00370000, 0x006d0000, + 0x008d0000, 0x00d50000, 0x004e0000, 0x00a90000, + 0x006c0000, 0x00560000, 0x00f40000, 0x00ea0000, + 0x00650000, 0x007a0000, 0x00ae0000, 0x00080000, + 0x00ba0000, 0x00780000, 0x00250000, 0x002e0000, + 0x001c0000, 0x00a60000, 0x00b40000, 0x00c60000, + 0x00e80000, 0x00dd0000, 0x00740000, 0x001f0000, + 0x004b0000, 0x00bd0000, 0x008b0000, 0x008a0000, + 0x00700000, 0x003e0000, 0x00b50000, 0x00660000, + 0x00480000, 0x00030000, 0x00f60000, 0x000e0000, + 0x00610000, 0x00350000, 0x00570000, 0x00b90000, + 0x00860000, 0x00c10000, 0x001d0000, 0x009e0000, + 0x00e10000, 0x00f80000, 0x00980000, 0x00110000, + 0x00690000, 0x00d90000, 0x008e0000, 0x00940000, + 0x009b0000, 0x001e0000, 0x00870000, 0x00e90000, + 0x00ce0000, 0x00550000, 0x00280000, 0x00df0000, + 0x008c0000, 0x00a10000, 0x00890000, 0x000d0000, + 0x00bf0000, 0x00e60000, 0x00420000, 0x00680000, + 0x00410000, 0x00990000, 0x002d0000, 0x000f0000, + 0x00b00000, 0x00540000, 0x00bb0000, 0x00160000, + }, { + 0x63000000, 0x7c000000, 0x77000000, 0x7b000000, + 0xf2000000, 0x6b000000, 0x6f000000, 0xc5000000, + 0x30000000, 0x01000000, 0x67000000, 0x2b000000, + 0xfe000000, 0xd7000000, 0xab000000, 0x76000000, + 0xca000000, 0x82000000, 0xc9000000, 0x7d000000, + 0xfa000000, 0x59000000, 0x47000000, 0xf0000000, + 0xad000000, 0xd4000000, 0xa2000000, 0xaf000000, + 0x9c000000, 0xa4000000, 0x72000000, 0xc0000000, + 0xb7000000, 0xfd000000, 0x93000000, 0x26000000, + 0x36000000, 0x3f000000, 0xf7000000, 0xcc000000, + 0x34000000, 0xa5000000, 0xe5000000, 0xf1000000, + 0x71000000, 0xd8000000, 0x31000000, 0x15000000, + 0x04000000, 0xc7000000, 0x23000000, 0xc3000000, + 0x18000000, 0x96000000, 0x05000000, 0x9a000000, 
+ 0x07000000, 0x12000000, 0x80000000, 0xe2000000, + 0xeb000000, 0x27000000, 0xb2000000, 0x75000000, + 0x09000000, 0x83000000, 0x2c000000, 0x1a000000, + 0x1b000000, 0x6e000000, 0x5a000000, 0xa0000000, + 0x52000000, 0x3b000000, 0xd6000000, 0xb3000000, + 0x29000000, 0xe3000000, 0x2f000000, 0x84000000, + 0x53000000, 0xd1000000, 0x00000000, 0xed000000, + 0x20000000, 0xfc000000, 0xb1000000, 0x5b000000, + 0x6a000000, 0xcb000000, 0xbe000000, 0x39000000, + 0x4a000000, 0x4c000000, 0x58000000, 0xcf000000, + 0xd0000000, 0xef000000, 0xaa000000, 0xfb000000, + 0x43000000, 0x4d000000, 0x33000000, 0x85000000, + 0x45000000, 0xf9000000, 0x02000000, 0x7f000000, + 0x50000000, 0x3c000000, 0x9f000000, 0xa8000000, + 0x51000000, 0xa3000000, 0x40000000, 0x8f000000, + 0x92000000, 0x9d000000, 0x38000000, 0xf5000000, + 0xbc000000, 0xb6000000, 0xda000000, 0x21000000, + 0x10000000, 0xff000000, 0xf3000000, 0xd2000000, + 0xcd000000, 0x0c000000, 0x13000000, 0xec000000, + 0x5f000000, 0x97000000, 0x44000000, 0x17000000, + 0xc4000000, 0xa7000000, 0x7e000000, 0x3d000000, + 0x64000000, 0x5d000000, 0x19000000, 0x73000000, + 0x60000000, 0x81000000, 0x4f000000, 0xdc000000, + 0x22000000, 0x2a000000, 0x90000000, 0x88000000, + 0x46000000, 0xee000000, 0xb8000000, 0x14000000, + 0xde000000, 0x5e000000, 0x0b000000, 0xdb000000, + 0xe0000000, 0x32000000, 0x3a000000, 0x0a000000, + 0x49000000, 0x06000000, 0x24000000, 0x5c000000, + 0xc2000000, 0xd3000000, 0xac000000, 0x62000000, + 0x91000000, 0x95000000, 0xe4000000, 0x79000000, + 0xe7000000, 0xc8000000, 0x37000000, 0x6d000000, + 0x8d000000, 0xd5000000, 0x4e000000, 0xa9000000, + 0x6c000000, 0x56000000, 0xf4000000, 0xea000000, + 0x65000000, 0x7a000000, 0xae000000, 0x08000000, + 0xba000000, 0x78000000, 0x25000000, 0x2e000000, + 0x1c000000, 0xa6000000, 0xb4000000, 0xc6000000, + 0xe8000000, 0xdd000000, 0x74000000, 0x1f000000, + 0x4b000000, 0xbd000000, 0x8b000000, 0x8a000000, + 0x70000000, 0x3e000000, 0xb5000000, 0x66000000, + 0x48000000, 0x03000000, 0xf6000000, 0x0e000000, + 0x61000000, 0x35000000, 0x57000000, 0xb9000000, + 0x86000000, 0xc1000000, 0x1d000000, 0x9e000000, + 0xe1000000, 0xf8000000, 0x98000000, 0x11000000, + 0x69000000, 0xd9000000, 0x8e000000, 0x94000000, + 0x9b000000, 0x1e000000, 0x87000000, 0xe9000000, + 0xce000000, 0x55000000, 0x28000000, 0xdf000000, + 0x8c000000, 0xa1000000, 0x89000000, 0x0d000000, + 0xbf000000, 0xe6000000, 0x42000000, 0x68000000, + 0x41000000, 0x99000000, 0x2d000000, 0x0f000000, + 0xb0000000, 0x54000000, 0xbb000000, 0x16000000, } +}; - for (i = 0; i < 256; ++i) { - p = (i ? 
pow_tab[255 - log_tab[i]] : 0); - q = ((p >> 7) | (p << 1)) ^ ((p >> 6) | (p << 2)); - p ^= 0x63 ^ q ^ ((q >> 6) | (q << 2)); - sbx_tab[i] = p; - isb_tab[p] = (u8) i; +const u32 crypto_it_tab[4][256] = { + { + 0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a, + 0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b, + 0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5, + 0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5, + 0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d, + 0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b, + 0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295, + 0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e, + 0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927, + 0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d, + 0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362, + 0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9, + 0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52, + 0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566, + 0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3, + 0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed, + 0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e, + 0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4, + 0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4, + 0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd, + 0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d, + 0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060, + 0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967, + 0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879, + 0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000, + 0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c, + 0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36, + 0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624, + 0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b, + 0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c, + 0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12, + 0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14, + 0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3, + 0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b, + 0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8, + 0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684, + 0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7, + 0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177, + 0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947, + 0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322, + 0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498, + 0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f, + 0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54, + 0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382, + 0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf, + 0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb, + 0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83, + 0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef, + 0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029, + 0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235, + 0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733, + 0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117, + 0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4, + 0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546, + 0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb, + 0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d, + 0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb, + 0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a, + 0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773, + 0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478, + 0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2, + 0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff, + 0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664, + 0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0, + }, { + 0xa7f45150, 0x65417e53, 0xa4171ac3, 0x5e273a96, + 0x6bab3bcb, 0x459d1ff1, 0x58faacab, 0x03e34b93, + 0xfa302055, 0x6d76adf6, 0x76cc8891, 
0x4c02f525, + 0xd7e54ffc, 0xcb2ac5d7, 0x44352680, 0xa362b58f, + 0x5ab1de49, 0x1bba2567, 0x0eea4598, 0xc0fe5de1, + 0x752fc302, 0xf04c8112, 0x97468da3, 0xf9d36bc6, + 0x5f8f03e7, 0x9c921595, 0x7a6dbfeb, 0x595295da, + 0x83bed42d, 0x217458d3, 0x69e04929, 0xc8c98e44, + 0x89c2756a, 0x798ef478, 0x3e58996b, 0x71b927dd, + 0x4fe1beb6, 0xad88f017, 0xac20c966, 0x3ace7db4, + 0x4adf6318, 0x311ae582, 0x33519760, 0x7f536245, + 0x7764b1e0, 0xae6bbb84, 0xa081fe1c, 0x2b08f994, + 0x68487058, 0xfd458f19, 0x6cde9487, 0xf87b52b7, + 0xd373ab23, 0x024b72e2, 0x8f1fe357, 0xab55662a, + 0x28ebb207, 0xc2b52f03, 0x7bc5869a, 0x0837d3a5, + 0x872830f2, 0xa5bf23b2, 0x6a0302ba, 0x8216ed5c, + 0x1ccf8a2b, 0xb479a792, 0xf207f3f0, 0xe2694ea1, + 0xf4da65cd, 0xbe0506d5, 0x6234d11f, 0xfea6c48a, + 0x532e349d, 0x55f3a2a0, 0xe18a0532, 0xebf6a475, + 0xec830b39, 0xef6040aa, 0x9f715e06, 0x106ebd51, + 0x8a213ef9, 0x06dd963d, 0x053eddae, 0xbde64d46, + 0x8d5491b5, 0x5dc47105, 0xd406046f, 0x155060ff, + 0xfb981924, 0xe9bdd697, 0x434089cc, 0x9ed96777, + 0x42e8b0bd, 0x8b890788, 0x5b19e738, 0xeec879db, + 0x0a7ca147, 0x0f427ce9, 0x1e84f8c9, 0x00000000, + 0x86800983, 0xed2b3248, 0x70111eac, 0x725a6c4e, + 0xff0efdfb, 0x38850f56, 0xd5ae3d1e, 0x392d3627, + 0xd90f0a64, 0xa65c6821, 0x545b9bd1, 0x2e36243a, + 0x670a0cb1, 0xe757930f, 0x96eeb4d2, 0x919b1b9e, + 0xc5c0804f, 0x20dc61a2, 0x4b775a69, 0x1a121c16, + 0xba93e20a, 0x2aa0c0e5, 0xe0223c43, 0x171b121d, + 0x0d090e0b, 0xc78bf2ad, 0xa8b62db9, 0xa91e14c8, + 0x19f15785, 0x0775af4c, 0xdd99eebb, 0x607fa3fd, + 0x2601f79f, 0xf5725cbc, 0x3b6644c5, 0x7efb5b34, + 0x29438b76, 0xc623cbdc, 0xfcedb668, 0xf1e4b863, + 0xdc31d7ca, 0x85634210, 0x22971340, 0x11c68420, + 0x244a857d, 0x3dbbd2f8, 0x32f9ae11, 0xa129c76d, + 0x2f9e1d4b, 0x30b2dcf3, 0x52860dec, 0xe3c177d0, + 0x16b32b6c, 0xb970a999, 0x489411fa, 0x64e94722, + 0x8cfca8c4, 0x3ff0a01a, 0x2c7d56d8, 0x903322ef, + 0x4e4987c7, 0xd138d9c1, 0xa2ca8cfe, 0x0bd49836, + 0x81f5a6cf, 0xde7aa528, 0x8eb7da26, 0xbfad3fa4, + 0x9d3a2ce4, 0x9278500d, 0xcc5f6a9b, 0x467e5462, + 0x138df6c2, 0xb8d890e8, 0xf7392e5e, 0xafc382f5, + 0x805d9fbe, 0x93d0697c, 0x2dd56fa9, 0x1225cfb3, + 0x99acc83b, 0x7d1810a7, 0x639ce86e, 0xbb3bdb7b, + 0x7826cd09, 0x18596ef4, 0xb79aec01, 0x9a4f83a8, + 0x6e95e665, 0xe6ffaa7e, 0xcfbc2108, 0xe815efe6, + 0x9be7bad9, 0x366f4ace, 0x099fead4, 0x7cb029d6, + 0xb2a431af, 0x233f2a31, 0x94a5c630, 0x66a235c0, + 0xbc4e7437, 0xca82fca6, 0xd090e0b0, 0xd8a73315, + 0x9804f14a, 0xdaec41f7, 0x50cd7f0e, 0xf691172f, + 0xd64d768d, 0xb0ef434d, 0x4daacc54, 0x0496e4df, + 0xb5d19ee3, 0x886a4c1b, 0x1f2cc1b8, 0x5165467f, + 0xea5e9d04, 0x358c015d, 0x7487fa73, 0x410bfb2e, + 0x1d67b35a, 0xd2db9252, 0x5610e933, 0x47d66d13, + 0x61d79a8c, 0x0ca1377a, 0x14f8598e, 0x3c13eb89, + 0x27a9ceee, 0xc961b735, 0xe51ce1ed, 0xb1477a3c, + 0xdfd29c59, 0x73f2553f, 0xce141879, 0x37c773bf, + 0xcdf753ea, 0xaafd5f5b, 0x6f3ddf14, 0xdb447886, + 0xf3afca81, 0xc468b93e, 0x3424382c, 0x40a3c25f, + 0xc31d1672, 0x25e2bc0c, 0x493c288b, 0x950dff41, + 0x01a83971, 0xb30c08de, 0xe4b4d89c, 0xc1566490, + 0x84cb7b61, 0xb632d570, 0x5c6c4874, 0x57b8d042, + }, { + 0xf45150a7, 0x417e5365, 0x171ac3a4, 0x273a965e, + 0xab3bcb6b, 0x9d1ff145, 0xfaacab58, 0xe34b9303, + 0x302055fa, 0x76adf66d, 0xcc889176, 0x02f5254c, + 0xe54ffcd7, 0x2ac5d7cb, 0x35268044, 0x62b58fa3, + 0xb1de495a, 0xba25671b, 0xea45980e, 0xfe5de1c0, + 0x2fc30275, 0x4c8112f0, 0x468da397, 0xd36bc6f9, + 0x8f03e75f, 0x9215959c, 0x6dbfeb7a, 0x5295da59, + 0xbed42d83, 0x7458d321, 0xe0492969, 0xc98e44c8, + 0xc2756a89, 0x8ef47879, 0x58996b3e, 0xb927dd71, + 0xe1beb64f, 0x88f017ad, 
0x20c966ac, 0xce7db43a, + 0xdf63184a, 0x1ae58231, 0x51976033, 0x5362457f, + 0x64b1e077, 0x6bbb84ae, 0x81fe1ca0, 0x08f9942b, + 0x48705868, 0x458f19fd, 0xde94876c, 0x7b52b7f8, + 0x73ab23d3, 0x4b72e202, 0x1fe3578f, 0x55662aab, + 0xebb20728, 0xb52f03c2, 0xc5869a7b, 0x37d3a508, + 0x2830f287, 0xbf23b2a5, 0x0302ba6a, 0x16ed5c82, + 0xcf8a2b1c, 0x79a792b4, 0x07f3f0f2, 0x694ea1e2, + 0xda65cdf4, 0x0506d5be, 0x34d11f62, 0xa6c48afe, + 0x2e349d53, 0xf3a2a055, 0x8a0532e1, 0xf6a475eb, + 0x830b39ec, 0x6040aaef, 0x715e069f, 0x6ebd5110, + 0x213ef98a, 0xdd963d06, 0x3eddae05, 0xe64d46bd, + 0x5491b58d, 0xc471055d, 0x06046fd4, 0x5060ff15, + 0x981924fb, 0xbdd697e9, 0x4089cc43, 0xd967779e, + 0xe8b0bd42, 0x8907888b, 0x19e7385b, 0xc879dbee, + 0x7ca1470a, 0x427ce90f, 0x84f8c91e, 0x00000000, + 0x80098386, 0x2b3248ed, 0x111eac70, 0x5a6c4e72, + 0x0efdfbff, 0x850f5638, 0xae3d1ed5, 0x2d362739, + 0x0f0a64d9, 0x5c6821a6, 0x5b9bd154, 0x36243a2e, + 0x0a0cb167, 0x57930fe7, 0xeeb4d296, 0x9b1b9e91, + 0xc0804fc5, 0xdc61a220, 0x775a694b, 0x121c161a, + 0x93e20aba, 0xa0c0e52a, 0x223c43e0, 0x1b121d17, + 0x090e0b0d, 0x8bf2adc7, 0xb62db9a8, 0x1e14c8a9, + 0xf1578519, 0x75af4c07, 0x99eebbdd, 0x7fa3fd60, + 0x01f79f26, 0x725cbcf5, 0x6644c53b, 0xfb5b347e, + 0x438b7629, 0x23cbdcc6, 0xedb668fc, 0xe4b863f1, + 0x31d7cadc, 0x63421085, 0x97134022, 0xc6842011, + 0x4a857d24, 0xbbd2f83d, 0xf9ae1132, 0x29c76da1, + 0x9e1d4b2f, 0xb2dcf330, 0x860dec52, 0xc177d0e3, + 0xb32b6c16, 0x70a999b9, 0x9411fa48, 0xe9472264, + 0xfca8c48c, 0xf0a01a3f, 0x7d56d82c, 0x3322ef90, + 0x4987c74e, 0x38d9c1d1, 0xca8cfea2, 0xd498360b, + 0xf5a6cf81, 0x7aa528de, 0xb7da268e, 0xad3fa4bf, + 0x3a2ce49d, 0x78500d92, 0x5f6a9bcc, 0x7e546246, + 0x8df6c213, 0xd890e8b8, 0x392e5ef7, 0xc382f5af, + 0x5d9fbe80, 0xd0697c93, 0xd56fa92d, 0x25cfb312, + 0xacc83b99, 0x1810a77d, 0x9ce86e63, 0x3bdb7bbb, + 0x26cd0978, 0x596ef418, 0x9aec01b7, 0x4f83a89a, + 0x95e6656e, 0xffaa7ee6, 0xbc2108cf, 0x15efe6e8, + 0xe7bad99b, 0x6f4ace36, 0x9fead409, 0xb029d67c, + 0xa431afb2, 0x3f2a3123, 0xa5c63094, 0xa235c066, + 0x4e7437bc, 0x82fca6ca, 0x90e0b0d0, 0xa73315d8, + 0x04f14a98, 0xec41f7da, 0xcd7f0e50, 0x91172ff6, + 0x4d768dd6, 0xef434db0, 0xaacc544d, 0x96e4df04, + 0xd19ee3b5, 0x6a4c1b88, 0x2cc1b81f, 0x65467f51, + 0x5e9d04ea, 0x8c015d35, 0x87fa7374, 0x0bfb2e41, + 0x67b35a1d, 0xdb9252d2, 0x10e93356, 0xd66d1347, + 0xd79a8c61, 0xa1377a0c, 0xf8598e14, 0x13eb893c, + 0xa9ceee27, 0x61b735c9, 0x1ce1ede5, 0x477a3cb1, + 0xd29c59df, 0xf2553f73, 0x141879ce, 0xc773bf37, + 0xf753eacd, 0xfd5f5baa, 0x3ddf146f, 0x447886db, + 0xafca81f3, 0x68b93ec4, 0x24382c34, 0xa3c25f40, + 0x1d1672c3, 0xe2bc0c25, 0x3c288b49, 0x0dff4195, + 0xa8397101, 0x0c08deb3, 0xb4d89ce4, 0x566490c1, + 0xcb7b6184, 0x32d570b6, 0x6c48745c, 0xb8d04257, + }, { + 0x5150a7f4, 0x7e536541, 0x1ac3a417, 0x3a965e27, + 0x3bcb6bab, 0x1ff1459d, 0xacab58fa, 0x4b9303e3, + 0x2055fa30, 0xadf66d76, 0x889176cc, 0xf5254c02, + 0x4ffcd7e5, 0xc5d7cb2a, 0x26804435, 0xb58fa362, + 0xde495ab1, 0x25671bba, 0x45980eea, 0x5de1c0fe, + 0xc302752f, 0x8112f04c, 0x8da39746, 0x6bc6f9d3, + 0x03e75f8f, 0x15959c92, 0xbfeb7a6d, 0x95da5952, + 0xd42d83be, 0x58d32174, 0x492969e0, 0x8e44c8c9, + 0x756a89c2, 0xf478798e, 0x996b3e58, 0x27dd71b9, + 0xbeb64fe1, 0xf017ad88, 0xc966ac20, 0x7db43ace, + 0x63184adf, 0xe582311a, 0x97603351, 0x62457f53, + 0xb1e07764, 0xbb84ae6b, 0xfe1ca081, 0xf9942b08, + 0x70586848, 0x8f19fd45, 0x94876cde, 0x52b7f87b, + 0xab23d373, 0x72e2024b, 0xe3578f1f, 0x662aab55, + 0xb20728eb, 0x2f03c2b5, 0x869a7bc5, 0xd3a50837, + 0x30f28728, 0x23b2a5bf, 0x02ba6a03, 0xed5c8216, + 0x8a2b1ccf, 
0xa792b479, 0xf3f0f207, 0x4ea1e269, + 0x65cdf4da, 0x06d5be05, 0xd11f6234, 0xc48afea6, + 0x349d532e, 0xa2a055f3, 0x0532e18a, 0xa475ebf6, + 0x0b39ec83, 0x40aaef60, 0x5e069f71, 0xbd51106e, + 0x3ef98a21, 0x963d06dd, 0xddae053e, 0x4d46bde6, + 0x91b58d54, 0x71055dc4, 0x046fd406, 0x60ff1550, + 0x1924fb98, 0xd697e9bd, 0x89cc4340, 0x67779ed9, + 0xb0bd42e8, 0x07888b89, 0xe7385b19, 0x79dbeec8, + 0xa1470a7c, 0x7ce90f42, 0xf8c91e84, 0x00000000, + 0x09838680, 0x3248ed2b, 0x1eac7011, 0x6c4e725a, + 0xfdfbff0e, 0x0f563885, 0x3d1ed5ae, 0x3627392d, + 0x0a64d90f, 0x6821a65c, 0x9bd1545b, 0x243a2e36, + 0x0cb1670a, 0x930fe757, 0xb4d296ee, 0x1b9e919b, + 0x804fc5c0, 0x61a220dc, 0x5a694b77, 0x1c161a12, + 0xe20aba93, 0xc0e52aa0, 0x3c43e022, 0x121d171b, + 0x0e0b0d09, 0xf2adc78b, 0x2db9a8b6, 0x14c8a91e, + 0x578519f1, 0xaf4c0775, 0xeebbdd99, 0xa3fd607f, + 0xf79f2601, 0x5cbcf572, 0x44c53b66, 0x5b347efb, + 0x8b762943, 0xcbdcc623, 0xb668fced, 0xb863f1e4, + 0xd7cadc31, 0x42108563, 0x13402297, 0x842011c6, + 0x857d244a, 0xd2f83dbb, 0xae1132f9, 0xc76da129, + 0x1d4b2f9e, 0xdcf330b2, 0x0dec5286, 0x77d0e3c1, + 0x2b6c16b3, 0xa999b970, 0x11fa4894, 0x472264e9, + 0xa8c48cfc, 0xa01a3ff0, 0x56d82c7d, 0x22ef9033, + 0x87c74e49, 0xd9c1d138, 0x8cfea2ca, 0x98360bd4, + 0xa6cf81f5, 0xa528de7a, 0xda268eb7, 0x3fa4bfad, + 0x2ce49d3a, 0x500d9278, 0x6a9bcc5f, 0x5462467e, + 0xf6c2138d, 0x90e8b8d8, 0x2e5ef739, 0x82f5afc3, + 0x9fbe805d, 0x697c93d0, 0x6fa92dd5, 0xcfb31225, + 0xc83b99ac, 0x10a77d18, 0xe86e639c, 0xdb7bbb3b, + 0xcd097826, 0x6ef41859, 0xec01b79a, 0x83a89a4f, + 0xe6656e95, 0xaa7ee6ff, 0x2108cfbc, 0xefe6e815, + 0xbad99be7, 0x4ace366f, 0xead4099f, 0x29d67cb0, + 0x31afb2a4, 0x2a31233f, 0xc63094a5, 0x35c066a2, + 0x7437bc4e, 0xfca6ca82, 0xe0b0d090, 0x3315d8a7, + 0xf14a9804, 0x41f7daec, 0x7f0e50cd, 0x172ff691, + 0x768dd64d, 0x434db0ef, 0xcc544daa, 0xe4df0496, + 0x9ee3b5d1, 0x4c1b886a, 0xc1b81f2c, 0x467f5165, + 0x9d04ea5e, 0x015d358c, 0xfa737487, 0xfb2e410b, + 0xb35a1d67, 0x9252d2db, 0xe9335610, 0x6d1347d6, + 0x9a8c61d7, 0x377a0ca1, 0x598e14f8, 0xeb893c13, + 0xceee27a9, 0xb735c961, 0xe1ede51c, 0x7a3cb147, + 0x9c59dfd2, 0x553f73f2, 0x1879ce14, 0x73bf37c7, + 0x53eacdf7, 0x5f5baafd, 0xdf146f3d, 0x7886db44, + 0xca81f3af, 0xb93ec468, 0x382c3424, 0xc25f40a3, + 0x1672c31d, 0xbc0c25e2, 0x288b493c, 0xff41950d, + 0x397101a8, 0x08deb30c, 0xd89ce4b4, 0x6490c156, + 0x7b6184cb, 0xd570b632, 0x48745c6c, 0xd04257b8, } +}; - for (i = 0; i < 256; ++i) { - p = sbx_tab[i]; - - t = p; - crypto_fl_tab[0][i] = t; - crypto_fl_tab[1][i] = rol32(t, 8); - crypto_fl_tab[2][i] = rol32(t, 16); - crypto_fl_tab[3][i] = rol32(t, 24); - - t = ((u32) ff_mult(2, p)) | - ((u32) p << 8) | - ((u32) p << 16) | ((u32) ff_mult(3, p) << 24); - - crypto_ft_tab[0][i] = t; - crypto_ft_tab[1][i] = rol32(t, 8); - crypto_ft_tab[2][i] = rol32(t, 16); - crypto_ft_tab[3][i] = rol32(t, 24); - - p = isb_tab[i]; - - t = p; - crypto_il_tab[0][i] = t; - crypto_il_tab[1][i] = rol32(t, 8); - crypto_il_tab[2][i] = rol32(t, 16); - crypto_il_tab[3][i] = rol32(t, 24); - - t = ((u32) ff_mult(14, p)) | - ((u32) ff_mult(9, p) << 8) | - ((u32) ff_mult(13, p) << 16) | - ((u32) ff_mult(11, p) << 24); - - crypto_it_tab[0][i] = t; - crypto_it_tab[1][i] = rol32(t, 8); - crypto_it_tab[2][i] = rol32(t, 16); - crypto_it_tab[3][i] = rol32(t, 24); +const u32 crypto_il_tab[4][256] = { + { + 0x00000052, 0x00000009, 0x0000006a, 0x000000d5, + 0x00000030, 0x00000036, 0x000000a5, 0x00000038, + 0x000000bf, 0x00000040, 0x000000a3, 0x0000009e, + 0x00000081, 0x000000f3, 0x000000d7, 0x000000fb, + 0x0000007c, 0x000000e3, 0x00000039, 
0x00000082, + 0x0000009b, 0x0000002f, 0x000000ff, 0x00000087, + 0x00000034, 0x0000008e, 0x00000043, 0x00000044, + 0x000000c4, 0x000000de, 0x000000e9, 0x000000cb, + 0x00000054, 0x0000007b, 0x00000094, 0x00000032, + 0x000000a6, 0x000000c2, 0x00000023, 0x0000003d, + 0x000000ee, 0x0000004c, 0x00000095, 0x0000000b, + 0x00000042, 0x000000fa, 0x000000c3, 0x0000004e, + 0x00000008, 0x0000002e, 0x000000a1, 0x00000066, + 0x00000028, 0x000000d9, 0x00000024, 0x000000b2, + 0x00000076, 0x0000005b, 0x000000a2, 0x00000049, + 0x0000006d, 0x0000008b, 0x000000d1, 0x00000025, + 0x00000072, 0x000000f8, 0x000000f6, 0x00000064, + 0x00000086, 0x00000068, 0x00000098, 0x00000016, + 0x000000d4, 0x000000a4, 0x0000005c, 0x000000cc, + 0x0000005d, 0x00000065, 0x000000b6, 0x00000092, + 0x0000006c, 0x00000070, 0x00000048, 0x00000050, + 0x000000fd, 0x000000ed, 0x000000b9, 0x000000da, + 0x0000005e, 0x00000015, 0x00000046, 0x00000057, + 0x000000a7, 0x0000008d, 0x0000009d, 0x00000084, + 0x00000090, 0x000000d8, 0x000000ab, 0x00000000, + 0x0000008c, 0x000000bc, 0x000000d3, 0x0000000a, + 0x000000f7, 0x000000e4, 0x00000058, 0x00000005, + 0x000000b8, 0x000000b3, 0x00000045, 0x00000006, + 0x000000d0, 0x0000002c, 0x0000001e, 0x0000008f, + 0x000000ca, 0x0000003f, 0x0000000f, 0x00000002, + 0x000000c1, 0x000000af, 0x000000bd, 0x00000003, + 0x00000001, 0x00000013, 0x0000008a, 0x0000006b, + 0x0000003a, 0x00000091, 0x00000011, 0x00000041, + 0x0000004f, 0x00000067, 0x000000dc, 0x000000ea, + 0x00000097, 0x000000f2, 0x000000cf, 0x000000ce, + 0x000000f0, 0x000000b4, 0x000000e6, 0x00000073, + 0x00000096, 0x000000ac, 0x00000074, 0x00000022, + 0x000000e7, 0x000000ad, 0x00000035, 0x00000085, + 0x000000e2, 0x000000f9, 0x00000037, 0x000000e8, + 0x0000001c, 0x00000075, 0x000000df, 0x0000006e, + 0x00000047, 0x000000f1, 0x0000001a, 0x00000071, + 0x0000001d, 0x00000029, 0x000000c5, 0x00000089, + 0x0000006f, 0x000000b7, 0x00000062, 0x0000000e, + 0x000000aa, 0x00000018, 0x000000be, 0x0000001b, + 0x000000fc, 0x00000056, 0x0000003e, 0x0000004b, + 0x000000c6, 0x000000d2, 0x00000079, 0x00000020, + 0x0000009a, 0x000000db, 0x000000c0, 0x000000fe, + 0x00000078, 0x000000cd, 0x0000005a, 0x000000f4, + 0x0000001f, 0x000000dd, 0x000000a8, 0x00000033, + 0x00000088, 0x00000007, 0x000000c7, 0x00000031, + 0x000000b1, 0x00000012, 0x00000010, 0x00000059, + 0x00000027, 0x00000080, 0x000000ec, 0x0000005f, + 0x00000060, 0x00000051, 0x0000007f, 0x000000a9, + 0x00000019, 0x000000b5, 0x0000004a, 0x0000000d, + 0x0000002d, 0x000000e5, 0x0000007a, 0x0000009f, + 0x00000093, 0x000000c9, 0x0000009c, 0x000000ef, + 0x000000a0, 0x000000e0, 0x0000003b, 0x0000004d, + 0x000000ae, 0x0000002a, 0x000000f5, 0x000000b0, + 0x000000c8, 0x000000eb, 0x000000bb, 0x0000003c, + 0x00000083, 0x00000053, 0x00000099, 0x00000061, + 0x00000017, 0x0000002b, 0x00000004, 0x0000007e, + 0x000000ba, 0x00000077, 0x000000d6, 0x00000026, + 0x000000e1, 0x00000069, 0x00000014, 0x00000063, + 0x00000055, 0x00000021, 0x0000000c, 0x0000007d, + }, { + 0x00005200, 0x00000900, 0x00006a00, 0x0000d500, + 0x00003000, 0x00003600, 0x0000a500, 0x00003800, + 0x0000bf00, 0x00004000, 0x0000a300, 0x00009e00, + 0x00008100, 0x0000f300, 0x0000d700, 0x0000fb00, + 0x00007c00, 0x0000e300, 0x00003900, 0x00008200, + 0x00009b00, 0x00002f00, 0x0000ff00, 0x00008700, + 0x00003400, 0x00008e00, 0x00004300, 0x00004400, + 0x0000c400, 0x0000de00, 0x0000e900, 0x0000cb00, + 0x00005400, 0x00007b00, 0x00009400, 0x00003200, + 0x0000a600, 0x0000c200, 0x00002300, 0x00003d00, + 0x0000ee00, 0x00004c00, 0x00009500, 0x00000b00, + 0x00004200, 0x0000fa00, 
0x0000c300, 0x00004e00, + 0x00000800, 0x00002e00, 0x0000a100, 0x00006600, + 0x00002800, 0x0000d900, 0x00002400, 0x0000b200, + 0x00007600, 0x00005b00, 0x0000a200, 0x00004900, + 0x00006d00, 0x00008b00, 0x0000d100, 0x00002500, + 0x00007200, 0x0000f800, 0x0000f600, 0x00006400, + 0x00008600, 0x00006800, 0x00009800, 0x00001600, + 0x0000d400, 0x0000a400, 0x00005c00, 0x0000cc00, + 0x00005d00, 0x00006500, 0x0000b600, 0x00009200, + 0x00006c00, 0x00007000, 0x00004800, 0x00005000, + 0x0000fd00, 0x0000ed00, 0x0000b900, 0x0000da00, + 0x00005e00, 0x00001500, 0x00004600, 0x00005700, + 0x0000a700, 0x00008d00, 0x00009d00, 0x00008400, + 0x00009000, 0x0000d800, 0x0000ab00, 0x00000000, + 0x00008c00, 0x0000bc00, 0x0000d300, 0x00000a00, + 0x0000f700, 0x0000e400, 0x00005800, 0x00000500, + 0x0000b800, 0x0000b300, 0x00004500, 0x00000600, + 0x0000d000, 0x00002c00, 0x00001e00, 0x00008f00, + 0x0000ca00, 0x00003f00, 0x00000f00, 0x00000200, + 0x0000c100, 0x0000af00, 0x0000bd00, 0x00000300, + 0x00000100, 0x00001300, 0x00008a00, 0x00006b00, + 0x00003a00, 0x00009100, 0x00001100, 0x00004100, + 0x00004f00, 0x00006700, 0x0000dc00, 0x0000ea00, + 0x00009700, 0x0000f200, 0x0000cf00, 0x0000ce00, + 0x0000f000, 0x0000b400, 0x0000e600, 0x00007300, + 0x00009600, 0x0000ac00, 0x00007400, 0x00002200, + 0x0000e700, 0x0000ad00, 0x00003500, 0x00008500, + 0x0000e200, 0x0000f900, 0x00003700, 0x0000e800, + 0x00001c00, 0x00007500, 0x0000df00, 0x00006e00, + 0x00004700, 0x0000f100, 0x00001a00, 0x00007100, + 0x00001d00, 0x00002900, 0x0000c500, 0x00008900, + 0x00006f00, 0x0000b700, 0x00006200, 0x00000e00, + 0x0000aa00, 0x00001800, 0x0000be00, 0x00001b00, + 0x0000fc00, 0x00005600, 0x00003e00, 0x00004b00, + 0x0000c600, 0x0000d200, 0x00007900, 0x00002000, + 0x00009a00, 0x0000db00, 0x0000c000, 0x0000fe00, + 0x00007800, 0x0000cd00, 0x00005a00, 0x0000f400, + 0x00001f00, 0x0000dd00, 0x0000a800, 0x00003300, + 0x00008800, 0x00000700, 0x0000c700, 0x00003100, + 0x0000b100, 0x00001200, 0x00001000, 0x00005900, + 0x00002700, 0x00008000, 0x0000ec00, 0x00005f00, + 0x00006000, 0x00005100, 0x00007f00, 0x0000a900, + 0x00001900, 0x0000b500, 0x00004a00, 0x00000d00, + 0x00002d00, 0x0000e500, 0x00007a00, 0x00009f00, + 0x00009300, 0x0000c900, 0x00009c00, 0x0000ef00, + 0x0000a000, 0x0000e000, 0x00003b00, 0x00004d00, + 0x0000ae00, 0x00002a00, 0x0000f500, 0x0000b000, + 0x0000c800, 0x0000eb00, 0x0000bb00, 0x00003c00, + 0x00008300, 0x00005300, 0x00009900, 0x00006100, + 0x00001700, 0x00002b00, 0x00000400, 0x00007e00, + 0x0000ba00, 0x00007700, 0x0000d600, 0x00002600, + 0x0000e100, 0x00006900, 0x00001400, 0x00006300, + 0x00005500, 0x00002100, 0x00000c00, 0x00007d00, + }, { + 0x00520000, 0x00090000, 0x006a0000, 0x00d50000, + 0x00300000, 0x00360000, 0x00a50000, 0x00380000, + 0x00bf0000, 0x00400000, 0x00a30000, 0x009e0000, + 0x00810000, 0x00f30000, 0x00d70000, 0x00fb0000, + 0x007c0000, 0x00e30000, 0x00390000, 0x00820000, + 0x009b0000, 0x002f0000, 0x00ff0000, 0x00870000, + 0x00340000, 0x008e0000, 0x00430000, 0x00440000, + 0x00c40000, 0x00de0000, 0x00e90000, 0x00cb0000, + 0x00540000, 0x007b0000, 0x00940000, 0x00320000, + 0x00a60000, 0x00c20000, 0x00230000, 0x003d0000, + 0x00ee0000, 0x004c0000, 0x00950000, 0x000b0000, + 0x00420000, 0x00fa0000, 0x00c30000, 0x004e0000, + 0x00080000, 0x002e0000, 0x00a10000, 0x00660000, + 0x00280000, 0x00d90000, 0x00240000, 0x00b20000, + 0x00760000, 0x005b0000, 0x00a20000, 0x00490000, + 0x006d0000, 0x008b0000, 0x00d10000, 0x00250000, + 0x00720000, 0x00f80000, 0x00f60000, 0x00640000, + 0x00860000, 0x00680000, 0x00980000, 0x00160000, + 0x00d40000, 
0x00a40000, 0x005c0000, 0x00cc0000, + 0x005d0000, 0x00650000, 0x00b60000, 0x00920000, + 0x006c0000, 0x00700000, 0x00480000, 0x00500000, + 0x00fd0000, 0x00ed0000, 0x00b90000, 0x00da0000, + 0x005e0000, 0x00150000, 0x00460000, 0x00570000, + 0x00a70000, 0x008d0000, 0x009d0000, 0x00840000, + 0x00900000, 0x00d80000, 0x00ab0000, 0x00000000, + 0x008c0000, 0x00bc0000, 0x00d30000, 0x000a0000, + 0x00f70000, 0x00e40000, 0x00580000, 0x00050000, + 0x00b80000, 0x00b30000, 0x00450000, 0x00060000, + 0x00d00000, 0x002c0000, 0x001e0000, 0x008f0000, + 0x00ca0000, 0x003f0000, 0x000f0000, 0x00020000, + 0x00c10000, 0x00af0000, 0x00bd0000, 0x00030000, + 0x00010000, 0x00130000, 0x008a0000, 0x006b0000, + 0x003a0000, 0x00910000, 0x00110000, 0x00410000, + 0x004f0000, 0x00670000, 0x00dc0000, 0x00ea0000, + 0x00970000, 0x00f20000, 0x00cf0000, 0x00ce0000, + 0x00f00000, 0x00b40000, 0x00e60000, 0x00730000, + 0x00960000, 0x00ac0000, 0x00740000, 0x00220000, + 0x00e70000, 0x00ad0000, 0x00350000, 0x00850000, + 0x00e20000, 0x00f90000, 0x00370000, 0x00e80000, + 0x001c0000, 0x00750000, 0x00df0000, 0x006e0000, + 0x00470000, 0x00f10000, 0x001a0000, 0x00710000, + 0x001d0000, 0x00290000, 0x00c50000, 0x00890000, + 0x006f0000, 0x00b70000, 0x00620000, 0x000e0000, + 0x00aa0000, 0x00180000, 0x00be0000, 0x001b0000, + 0x00fc0000, 0x00560000, 0x003e0000, 0x004b0000, + 0x00c60000, 0x00d20000, 0x00790000, 0x00200000, + 0x009a0000, 0x00db0000, 0x00c00000, 0x00fe0000, + 0x00780000, 0x00cd0000, 0x005a0000, 0x00f40000, + 0x001f0000, 0x00dd0000, 0x00a80000, 0x00330000, + 0x00880000, 0x00070000, 0x00c70000, 0x00310000, + 0x00b10000, 0x00120000, 0x00100000, 0x00590000, + 0x00270000, 0x00800000, 0x00ec0000, 0x005f0000, + 0x00600000, 0x00510000, 0x007f0000, 0x00a90000, + 0x00190000, 0x00b50000, 0x004a0000, 0x000d0000, + 0x002d0000, 0x00e50000, 0x007a0000, 0x009f0000, + 0x00930000, 0x00c90000, 0x009c0000, 0x00ef0000, + 0x00a00000, 0x00e00000, 0x003b0000, 0x004d0000, + 0x00ae0000, 0x002a0000, 0x00f50000, 0x00b00000, + 0x00c80000, 0x00eb0000, 0x00bb0000, 0x003c0000, + 0x00830000, 0x00530000, 0x00990000, 0x00610000, + 0x00170000, 0x002b0000, 0x00040000, 0x007e0000, + 0x00ba0000, 0x00770000, 0x00d60000, 0x00260000, + 0x00e10000, 0x00690000, 0x00140000, 0x00630000, + 0x00550000, 0x00210000, 0x000c0000, 0x007d0000, + }, { + 0x52000000, 0x09000000, 0x6a000000, 0xd5000000, + 0x30000000, 0x36000000, 0xa5000000, 0x38000000, + 0xbf000000, 0x40000000, 0xa3000000, 0x9e000000, + 0x81000000, 0xf3000000, 0xd7000000, 0xfb000000, + 0x7c000000, 0xe3000000, 0x39000000, 0x82000000, + 0x9b000000, 0x2f000000, 0xff000000, 0x87000000, + 0x34000000, 0x8e000000, 0x43000000, 0x44000000, + 0xc4000000, 0xde000000, 0xe9000000, 0xcb000000, + 0x54000000, 0x7b000000, 0x94000000, 0x32000000, + 0xa6000000, 0xc2000000, 0x23000000, 0x3d000000, + 0xee000000, 0x4c000000, 0x95000000, 0x0b000000, + 0x42000000, 0xfa000000, 0xc3000000, 0x4e000000, + 0x08000000, 0x2e000000, 0xa1000000, 0x66000000, + 0x28000000, 0xd9000000, 0x24000000, 0xb2000000, + 0x76000000, 0x5b000000, 0xa2000000, 0x49000000, + 0x6d000000, 0x8b000000, 0xd1000000, 0x25000000, + 0x72000000, 0xf8000000, 0xf6000000, 0x64000000, + 0x86000000, 0x68000000, 0x98000000, 0x16000000, + 0xd4000000, 0xa4000000, 0x5c000000, 0xcc000000, + 0x5d000000, 0x65000000, 0xb6000000, 0x92000000, + 0x6c000000, 0x70000000, 0x48000000, 0x50000000, + 0xfd000000, 0xed000000, 0xb9000000, 0xda000000, + 0x5e000000, 0x15000000, 0x46000000, 0x57000000, + 0xa7000000, 0x8d000000, 0x9d000000, 0x84000000, + 0x90000000, 0xd8000000, 0xab000000, 0x00000000, + 
0x8c000000, 0xbc000000, 0xd3000000, 0x0a000000, + 0xf7000000, 0xe4000000, 0x58000000, 0x05000000, + 0xb8000000, 0xb3000000, 0x45000000, 0x06000000, + 0xd0000000, 0x2c000000, 0x1e000000, 0x8f000000, + 0xca000000, 0x3f000000, 0x0f000000, 0x02000000, + 0xc1000000, 0xaf000000, 0xbd000000, 0x03000000, + 0x01000000, 0x13000000, 0x8a000000, 0x6b000000, + 0x3a000000, 0x91000000, 0x11000000, 0x41000000, + 0x4f000000, 0x67000000, 0xdc000000, 0xea000000, + 0x97000000, 0xf2000000, 0xcf000000, 0xce000000, + 0xf0000000, 0xb4000000, 0xe6000000, 0x73000000, + 0x96000000, 0xac000000, 0x74000000, 0x22000000, + 0xe7000000, 0xad000000, 0x35000000, 0x85000000, + 0xe2000000, 0xf9000000, 0x37000000, 0xe8000000, + 0x1c000000, 0x75000000, 0xdf000000, 0x6e000000, + 0x47000000, 0xf1000000, 0x1a000000, 0x71000000, + 0x1d000000, 0x29000000, 0xc5000000, 0x89000000, + 0x6f000000, 0xb7000000, 0x62000000, 0x0e000000, + 0xaa000000, 0x18000000, 0xbe000000, 0x1b000000, + 0xfc000000, 0x56000000, 0x3e000000, 0x4b000000, + 0xc6000000, 0xd2000000, 0x79000000, 0x20000000, + 0x9a000000, 0xdb000000, 0xc0000000, 0xfe000000, + 0x78000000, 0xcd000000, 0x5a000000, 0xf4000000, + 0x1f000000, 0xdd000000, 0xa8000000, 0x33000000, + 0x88000000, 0x07000000, 0xc7000000, 0x31000000, + 0xb1000000, 0x12000000, 0x10000000, 0x59000000, + 0x27000000, 0x80000000, 0xec000000, 0x5f000000, + 0x60000000, 0x51000000, 0x7f000000, 0xa9000000, + 0x19000000, 0xb5000000, 0x4a000000, 0x0d000000, + 0x2d000000, 0xe5000000, 0x7a000000, 0x9f000000, + 0x93000000, 0xc9000000, 0x9c000000, 0xef000000, + 0xa0000000, 0xe0000000, 0x3b000000, 0x4d000000, + 0xae000000, 0x2a000000, 0xf5000000, 0xb0000000, + 0xc8000000, 0xeb000000, 0xbb000000, 0x3c000000, + 0x83000000, 0x53000000, 0x99000000, 0x61000000, + 0x17000000, 0x2b000000, 0x04000000, 0x7e000000, + 0xba000000, 0x77000000, 0xd6000000, 0x26000000, + 0xe1000000, 0x69000000, 0x14000000, 0x63000000, + 0x55000000, 0x21000000, 0x0c000000, 0x7d000000, } -} +}; + +EXPORT_SYMBOL_GPL(crypto_ft_tab); +EXPORT_SYMBOL_GPL(crypto_fl_tab); +EXPORT_SYMBOL_GPL(crypto_it_tab); +EXPORT_SYMBOL_GPL(crypto_il_tab); /* initialise the key schedule from the user supplied key */ @@ -491,7 +1457,6 @@ static struct crypto_alg aes_alg = { static int __init aes_init(void) { - gen_tabs(); return crypto_register_alg(&aes_alg); } -- cgit v1.2.3 From 07f2211e4fbce6990722d78c4f04225da9c0e9cf Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 5 Jan 2009 17:14:31 -0700 Subject: dmaengine: remove dependency on async_tx async_tx.ko is a consumer of dma channels. A circular dependency arises if modules in drivers/dma rely on common code in async_tx.ko. It prevents either module from being unloaded. Move dma_wait_for_async_tx and async_tx_run_dependencies to dmaeninge.o where they should have been from the beginning. 
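As a rough usage sketch only (the helper name dma_run_dependencies() and the cleanup function below are assumptions for illustration, not taken from this diff), a DMA driver's descriptor clean-up path can now kick dependent operations by calling into dmaengine directly, without needing async_tx.ko loaded:

/* Illustrative only: a driver-side completion path calling the dependency
 * helper that now lives in dmaengine rather than async_tx.  The helper
 * name is assumed; tx->callback/callback_param are the usual descriptor
 * completion hooks.
 */
static void example_clean_completed_desc(struct dma_async_tx_descriptor *tx)
{
	/* start any operations that were chained behind this descriptor */
	dma_run_dependencies(tx);

	/* then notify the submitter as before */
	if (tx->callback)
		tx->callback(tx->callback_param);
}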
Reviewed-by: Andrew Morton Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 75 ---------------------------------------------- 1 file changed, 75 deletions(-) (limited to 'crypto') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index dcbf1be149f..8cfac182165 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -72,81 +72,6 @@ void async_tx_issue_pending_all(void) } EXPORT_SYMBOL_GPL(async_tx_issue_pending_all); -/* dma_wait_for_async_tx - spin wait for a transcation to complete - * @tx: transaction to wait on - */ -enum dma_status -dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) -{ - enum dma_status status; - struct dma_async_tx_descriptor *iter; - struct dma_async_tx_descriptor *parent; - - if (!tx) - return DMA_SUCCESS; - - /* poll through the dependency chain, return when tx is complete */ - do { - iter = tx; - - /* find the root of the unsubmitted dependency chain */ - do { - parent = iter->parent; - if (!parent) - break; - else - iter = parent; - } while (parent); - - /* there is a small window for ->parent == NULL and - * ->cookie == -EBUSY - */ - while (iter->cookie == -EBUSY) - cpu_relax(); - - status = dma_sync_wait(iter->chan, iter->cookie); - } while (status == DMA_IN_PROGRESS || (iter != tx)); - - return status; -} -EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); - -/* async_tx_run_dependencies - helper routine for dma drivers to process - * (start) dependent operations on their target channel - * @tx: transaction with dependencies - */ -void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx) -{ - struct dma_async_tx_descriptor *dep = tx->next; - struct dma_async_tx_descriptor *dep_next; - struct dma_chan *chan; - - if (!dep) - return; - - chan = dep->chan; - - /* keep submitting up until a channel switch is detected - * in that case we will be called again as a result of - * processing the interrupt from async_tx_channel_switch - */ - for (; dep; dep = dep_next) { - spin_lock_bh(&dep->lock); - dep->parent = NULL; - dep_next = dep->next; - if (dep_next && dep_next->chan == chan) - dep->next = NULL; /* ->next will be submitted */ - else - dep_next = NULL; /* submit current dep and terminate */ - spin_unlock_bh(&dep->lock); - - dep->tx_submit(dep); - } - - chan->device->device_issue_pending(chan); -} -EXPORT_SYMBOL_GPL(async_tx_run_dependencies); - static void free_dma_chan_ref(struct rcu_head *rcu) { -- cgit v1.2.3 From 6f49a57aa5a0c6d4e4e27c85f7af6c83325a12d1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 6 Jan 2009 11:38:14 -0700 Subject: dmaengine: up-level reference counting to the module level Simply, if a client wants any dmaengine channel then prevent all dmaengine modules from being removed. Once the clients are done re-enable module removal. Why?, beyond reducing complication: 1/ Tracking reference counts per-transaction in an efficient manner, as is currently done, requires a complicated scheme to avoid cache-line bouncing effects. 2/ Per-transaction ref-counting gives the false impression that a dma-driver can be gracefully removed ahead of its user (net, md, or dma-slave) 3/ None of the in-tree dma-drivers talk to hot pluggable hardware, but if such an engine were built one day we still would not need to notify clients of remove events. The driver can simply return NULL to a ->prep() request, something that is much easier for a client to handle. 
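A minimal sketch of the module-level scheme described above, assuming a single global reference count and a per-device module pin; the identifiers (dmaengine_ref_count, dma_device_list, dma_list_mutex) are assumptions for illustration, and the real drivers/dma/dmaengine.c change is outside this crypto-limited diff:

/* Sketch only: one global count replaces per-transaction references.
 * The first client pins every registered DMA driver module; the last
 * client to leave unpins them, allowing module removal again.
 */
static atomic_t dmaengine_ref_count = ATOMIC_INIT(0);

void dmaengine_get(void)
{
	struct dma_device *device;

	mutex_lock(&dma_list_mutex);
	if (atomic_inc_return(&dmaengine_ref_count) == 1)
		list_for_each_entry(device, &dma_device_list, global_node)
			try_module_get(device->dev->driver->owner);
	mutex_unlock(&dma_list_mutex);
}

void dmaengine_put(void)
{
	struct dma_device *device;

	mutex_lock(&dma_list_mutex);
	if (atomic_dec_and_test(&dmaengine_ref_count))
		list_for_each_entry(device, &dma_device_list, global_node)
			module_put(device->dev->driver->owner);
	mutex_unlock(&dma_list_mutex);
}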
Reviewed-by: Andrew Morton Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'crypto') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 8cfac182165..43fe4cbe71e 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -198,8 +198,6 @@ dma_channel_add_remove(struct dma_client *client, /* add the channel to the generic management list */ master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL); if (master_ref) { - /* keep a reference until async_tx is unloaded */ - dma_chan_get(chan); init_dma_chan_ref(master_ref, chan); spin_lock_irqsave(&async_tx_lock, flags); list_add_tail_rcu(&master_ref->node, @@ -221,8 +219,6 @@ dma_channel_add_remove(struct dma_client *client, spin_lock_irqsave(&async_tx_lock, flags); list_for_each_entry(ref, &async_tx_master_list, node) if (ref->chan == chan) { - /* permit backing devices to go away */ - dma_chan_put(ref->chan); list_del_rcu(&ref->node); call_rcu(&ref->rcu, free_dma_chan_ref); found = 1; -- cgit v1.2.3 From bec085134e446577a983f17f57d642a88d1af53b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 6 Jan 2009 11:38:14 -0700 Subject: dmaengine: centralize channel allocation, introduce dma_find_channel Allowing multiple clients to each define their own channel allocation scheme quickly leads to a pathological situation. For memory-to-memory offload all clients can share a central allocator. This simply moves the existing async_tx allocator to dmaengine with minimal fixups: * async_tx.c:get_chan_ref_by_cap --> dmaengine.c:nth_chan * async_tx.c:async_tx_rebalance --> dmaengine.c:dma_channel_rebalance * split out common code from async_tx.c:__async_tx_find_channel --> dma_find_channel Reviewed-by: Andrew Morton Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 146 ++------------------------------------------- 1 file changed, 4 insertions(+), 142 deletions(-) (limited to 'crypto') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 43fe4cbe71e..b88bb1f608f 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -37,26 +37,11 @@ static struct dma_client async_tx_dma = { /* .cap_mask == 0 defaults to all channels */ }; -/** - * dma_cap_mask_all - enable iteration over all operation types - */ -static dma_cap_mask_t dma_cap_mask_all; - -/** - * chan_ref_percpu - tracks channel allocations per core/opertion - */ -struct chan_ref_percpu { - struct dma_chan_ref *ref; -}; - -static int channel_table_initialized; -static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END]; - /** * async_tx_lock - protect modification of async_tx_master_list and serialize * rebalance operations */ -static spinlock_t async_tx_lock; +static DEFINE_SPINLOCK(async_tx_lock); static LIST_HEAD(async_tx_master_list); @@ -89,85 +74,6 @@ init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan) atomic_set(&ref->count, 0); } -/** - * get_chan_ref_by_cap - returns the nth channel of the given capability - * defaults to returning the channel with the desired capability and the - * lowest reference count if the index can not be satisfied - * @cap: capability to match - * @index: nth channel desired, passing -1 has the effect of forcing the - * default return value - */ -static struct dma_chan_ref * -get_chan_ref_by_cap(enum dma_transaction_type cap, int index) -{ - struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref; - - rcu_read_lock(); - list_for_each_entry_rcu(ref, &async_tx_master_list, 
node) - if (dma_has_cap(cap, ref->chan->device->cap_mask)) { - if (!min_ref) - min_ref = ref; - else if (atomic_read(&ref->count) < - atomic_read(&min_ref->count)) - min_ref = ref; - - if (index-- == 0) { - ret_ref = ref; - break; - } - } - rcu_read_unlock(); - - if (!ret_ref) - ret_ref = min_ref; - - if (ret_ref) - atomic_inc(&ret_ref->count); - - return ret_ref; -} - -/** - * async_tx_rebalance - redistribute the available channels, optimize - * for cpu isolation in the SMP case, and opertaion isolation in the - * uniprocessor case - */ -static void async_tx_rebalance(void) -{ - int cpu, cap, cpu_idx = 0; - unsigned long flags; - - if (!channel_table_initialized) - return; - - spin_lock_irqsave(&async_tx_lock, flags); - - /* undo the last distribution */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_possible_cpu(cpu) { - struct dma_chan_ref *ref = - per_cpu_ptr(channel_table[cap], cpu)->ref; - if (ref) { - atomic_set(&ref->count, 0); - per_cpu_ptr(channel_table[cap], cpu)->ref = - NULL; - } - } - - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_online_cpu(cpu) { - struct dma_chan_ref *new; - if (NR_CPUS > 1) - new = get_chan_ref_by_cap(cap, cpu_idx++); - else - new = get_chan_ref_by_cap(cap, -1); - - per_cpu_ptr(channel_table[cap], cpu)->ref = new; - } - - spin_unlock_irqrestore(&async_tx_lock, flags); -} - static enum dma_state_client dma_channel_add_remove(struct dma_client *client, struct dma_chan *chan, enum dma_state state) @@ -211,8 +117,6 @@ dma_channel_add_remove(struct dma_client *client, " (-ENOMEM)\n"); return 0; } - - async_tx_rebalance(); break; case DMA_RESOURCE_REMOVED: found = 0; @@ -233,8 +137,6 @@ dma_channel_add_remove(struct dma_client *client, ack = DMA_ACK; else break; - - async_tx_rebalance(); break; case DMA_RESOURCE_SUSPEND: case DMA_RESOURCE_RESUME: @@ -248,51 +150,18 @@ dma_channel_add_remove(struct dma_client *client, return ack; } -static int __init -async_tx_init(void) +static int __init async_tx_init(void) { - enum dma_transaction_type cap; - - spin_lock_init(&async_tx_lock); - bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); - - /* an interrupt will never be an explicit operation type. - * clearing this bit prevents allocation to a slot in 'channel_table' - */ - clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); - - for_each_dma_cap_mask(cap, dma_cap_mask_all) { - channel_table[cap] = alloc_percpu(struct chan_ref_percpu); - if (!channel_table[cap]) - goto err; - } - - channel_table_initialized = 1; dma_async_client_register(&async_tx_dma); dma_async_client_chan_request(&async_tx_dma); printk(KERN_INFO "async_tx: api initialized (async)\n"); return 0; -err: - printk(KERN_ERR "async_tx: initialization failure\n"); - - while (--cap >= 0) - free_percpu(channel_table[cap]); - - return 1; } static void __exit async_tx_exit(void) { - enum dma_transaction_type cap; - - channel_table_initialized = 0; - - for_each_dma_cap_mask(cap, dma_cap_mask_all) - if (channel_table[cap]) - free_percpu(channel_table[cap]); - dma_async_client_unregister(&async_tx_dma); } @@ -308,16 +177,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, { /* see if we can keep the chain on one channel */ if (depend_tx && - dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) + dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) return depend_tx->chan; - else if (likely(channel_table_initialized)) { - struct dma_chan_ref *ref; - int cpu = get_cpu(); - ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref; - put_cpu(); - return ref ? 
ref->chan : NULL; - } else - return NULL; + return dma_find_channel(tx_type); } EXPORT_SYMBOL_GPL(__async_tx_find_channel); #else -- cgit v1.2.3 From 2ba05622b8b143b0c95968ba59bddfbd6d2f2559 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 6 Jan 2009 11:38:14 -0700 Subject: dmaengine: provide a common 'issue_pending_all' implementation async_tx and net_dma each have open-coded versions of issue_pending_all, so provide a common routine in dmaengine. The implementation needs to walk the global device list, so implement rcu to allow dma_issue_pending_all to run lockless. Clients protect themselves from channel removal events by holding a dmaengine reference. Reviewed-by: Andrew Morton Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'crypto') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index b88bb1f608f..2cdf7a0867b 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -45,18 +45,6 @@ static DEFINE_SPINLOCK(async_tx_lock); static LIST_HEAD(async_tx_master_list); -/* async_tx_issue_pending_all - start all transactions on all channels */ -void async_tx_issue_pending_all(void) -{ - struct dma_chan_ref *ref; - - rcu_read_lock(); - list_for_each_entry_rcu(ref, &async_tx_master_list, node) - ref->chan->device->device_issue_pending(ref->chan); - rcu_read_unlock(); -} -EXPORT_SYMBOL_GPL(async_tx_issue_pending_all); - static void free_dma_chan_ref(struct rcu_head *rcu) { -- cgit v1.2.3 From 209b84a88fe81341b4d8d465acc4a67cb7c3feb3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 6 Jan 2009 11:38:17 -0700 Subject: dmaengine: replace dma_async_client_register with dmaengine_get Now that clients no longer need to be notified of channel arrival dma_async_client_register can simply increment the dmaengine_ref_count. 
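A sketch of what that simplification could look like on the dmaengine side (the drivers/dma change itself is outside this crypto-limited diff); the function bodies below are illustrative, not the exact code:

/* Illustrative only: with no channel-arrival notification left to do,
 * client registration reduces to taking and dropping the module-level
 * reference introduced earlier in this series.
 */
void dma_async_client_register(struct dma_client *client)
{
	dmaengine_get();
}

void dma_async_client_unregister(struct dma_client *client)
{
	dmaengine_put();
}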
Reviewed-by: Andrew Morton Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 115 +-------------------------------------------- 1 file changed, 2 insertions(+), 113 deletions(-) (limited to 'crypto') diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 2cdf7a0867b..f21147f3626 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -28,120 +28,9 @@ #include #ifdef CONFIG_DMA_ENGINE -static enum dma_state_client -dma_channel_add_remove(struct dma_client *client, - struct dma_chan *chan, enum dma_state state); - -static struct dma_client async_tx_dma = { - .event_callback = dma_channel_add_remove, - /* .cap_mask == 0 defaults to all channels */ -}; - -/** - * async_tx_lock - protect modification of async_tx_master_list and serialize - * rebalance operations - */ -static DEFINE_SPINLOCK(async_tx_lock); - -static LIST_HEAD(async_tx_master_list); - -static void -free_dma_chan_ref(struct rcu_head *rcu) -{ - struct dma_chan_ref *ref; - ref = container_of(rcu, struct dma_chan_ref, rcu); - kfree(ref); -} - -static void -init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan) -{ - INIT_LIST_HEAD(&ref->node); - INIT_RCU_HEAD(&ref->rcu); - ref->chan = chan; - atomic_set(&ref->count, 0); -} - -static enum dma_state_client -dma_channel_add_remove(struct dma_client *client, - struct dma_chan *chan, enum dma_state state) -{ - unsigned long found, flags; - struct dma_chan_ref *master_ref, *ref; - enum dma_state_client ack = DMA_DUP; /* default: take no action */ - - switch (state) { - case DMA_RESOURCE_AVAILABLE: - found = 0; - rcu_read_lock(); - list_for_each_entry_rcu(ref, &async_tx_master_list, node) - if (ref->chan == chan) { - found = 1; - break; - } - rcu_read_unlock(); - - pr_debug("async_tx: dma resource available [%s]\n", - found ? "old" : "new"); - - if (!found) - ack = DMA_ACK; - else - break; - - /* add the channel to the generic management list */ - master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL); - if (master_ref) { - init_dma_chan_ref(master_ref, chan); - spin_lock_irqsave(&async_tx_lock, flags); - list_add_tail_rcu(&master_ref->node, - &async_tx_master_list); - spin_unlock_irqrestore(&async_tx_lock, - flags); - } else { - printk(KERN_WARNING "async_tx: unable to create" - " new master entry in response to" - " a DMA_RESOURCE_ADDED event" - " (-ENOMEM)\n"); - return 0; - } - break; - case DMA_RESOURCE_REMOVED: - found = 0; - spin_lock_irqsave(&async_tx_lock, flags); - list_for_each_entry(ref, &async_tx_master_list, node) - if (ref->chan == chan) { - list_del_rcu(&ref->node); - call_rcu(&ref->rcu, free_dma_chan_ref); - found = 1; - break; - } - spin_unlock_irqrestore(&async_tx_lock, flags); - - pr_debug("async_tx: dma resource removed [%s]\n", - found ? "ours" : "not ours"); - - if (found) - ack = DMA_ACK; - else - break; - break; - case DMA_RESOURCE_SUSPEND: - case DMA_RESOURCE_RESUME: - printk(KERN_WARNING "async_tx: does not support dma channel" - " suspend/resume\n"); - break; - default: - BUG(); - } - - return ack; -} - static int __init async_tx_init(void) { - dma_async_client_register(&async_tx_dma); - dma_async_client_chan_request(&async_tx_dma); + dmaengine_get(); printk(KERN_INFO "async_tx: api initialized (async)\n"); @@ -150,7 +39,7 @@ static int __init async_tx_init(void) static void __exit async_tx_exit(void) { - dma_async_client_unregister(&async_tx_dma); + dmaengine_put(); } /** -- cgit v1.2.3
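For completeness, the 'issue_pending_all' commit earlier in this series has async_tx rely on a common, lockless walk of the global device list in dmaengine; a rough sketch of such a walk under RCU could look like the following (list and field names are assumed here, and this is not the verbatim dmaengine.c implementation):

/* Sketch: kick every channel of every registered DMA device.  The global
 * dma_device_list is assumed to be RCU-protected so no lock is taken;
 * clients protect themselves against device removal by holding a
 * dmaengine reference (dmaengine_get/dmaengine_put).
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	rcu_read_unlock();
}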