check in v3.8.0 source

This commit is contained in:
2023-08-31 00:49:24 -07:00
parent 3ef498f9e6
commit 316795abde
1218 changed files with 562506 additions and 0 deletions

202
crypto/modes/cbc128.c Normal file

@@ -0,0 +1,202 @@
/* $OpenBSD: cbc128.c,v 1.6 2022/11/26 16:08:53 tb Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
*/
#include <openssl/crypto.h>
#include "modes_local.h"
#include <string.h>
#ifndef MODES_DEBUG
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
#undef STRICT_ALIGNMENT
#ifdef __STRICT_ALIGNMENT
#define STRICT_ALIGNMENT 1
#else
#define STRICT_ALIGNMENT 0
#endif
void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key,
unsigned char ivec[16], block128_f block)
{
size_t n;
const unsigned char *iv = ivec;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (STRICT_ALIGNMENT &&
((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) {
while (len>=16) {
for(n=0; n<16; ++n)
out[n] = in[n] ^ iv[n];
(*block)(out, out, key);
iv = out;
len -= 16;
in += 16;
out += 16;
}
} else {
while (len>=16) {
for(n=0; n<16; n+=sizeof(size_t))
*(size_t*)(out+n) =
*(size_t*)(in+n) ^ *(size_t*)(iv+n);
(*block)(out, out, key);
iv = out;
len -= 16;
in += 16;
out += 16;
}
}
#endif
while (len) {
for(n=0; n<16 && n<len; ++n)
out[n] = in[n] ^ iv[n];
for(; n<16; ++n)
out[n] = iv[n];
(*block)(out, out, key);
iv = out;
if (len<=16) break;
len -= 16;
in += 16;
out += 16;
}
memmove(ivec,iv,16);
}
void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key,
unsigned char ivec[16], block128_f block)
{
size_t n;
union { size_t t[16/sizeof(size_t)]; unsigned char c[16]; } tmp;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (in != out) {
const unsigned char *iv = ivec;
if (STRICT_ALIGNMENT &&
((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) {
while (len>=16) {
(*block)(in, out, key);
for(n=0; n<16; ++n)
out[n] ^= iv[n];
iv = in;
len -= 16;
in += 16;
out += 16;
}
} else if (16%sizeof(size_t) == 0) { /* always true */
while (len>=16) {
size_t *out_t=(size_t *)out, *iv_t=(size_t *)iv;
(*block)(in, out, key);
for(n=0; n<16/sizeof(size_t); n++)
out_t[n] ^= iv_t[n];
iv = in;
len -= 16;
in += 16;
out += 16;
}
}
memmove(ivec,iv,16);
} else {
if (STRICT_ALIGNMENT &&
((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0) {
unsigned char c;
while (len>=16) {
(*block)(in, tmp.c, key);
for(n=0; n<16; ++n) {
c = in[n];
out[n] = tmp.c[n] ^ ivec[n];
ivec[n] = c;
}
len -= 16;
in += 16;
out += 16;
}
} else if (16%sizeof(size_t) == 0) { /* always true */
while (len>=16) {
size_t c, *out_t=(size_t *)out, *ivec_t=(size_t *)ivec;
const size_t *in_t=(const size_t *)in;
(*block)(in, tmp.c, key);
for(n=0; n<16/sizeof(size_t); n++) {
c = in_t[n];
out_t[n] = tmp.t[n] ^ ivec_t[n];
ivec_t[n] = c;
}
len -= 16;
in += 16;
out += 16;
}
}
}
#endif
while (len) {
unsigned char c;
(*block)(in, tmp.c, key);
for(n=0; n<16 && n<len; ++n) {
c = in[n];
out[n] = tmp.c[n] ^ ivec[n];
ivec[n] = c;
}
if (len<=16) {
for (; n<16; ++n)
ivec[n] = in[n];
break;
}
len -= 16;
in += 16;
out += 16;
}
}
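
As a rough usage sketch, assuming AES-128 via <openssl/aes.h> as the block cipher (the key, IV and buffer names below are illustrative, not part of this commit): CRYPTO_cbc128_encrypt() takes any block128_f plus its key schedule, and it writes the final ciphertext block back into ivec, so consecutive calls continue the chain.

#include <stddef.h>

#include <openssl/aes.h>
#include <openssl/modes.h>

/* Sketch: CBC-encrypt len bytes (a multiple of 16) with AES-128. */
static int
cbc_encrypt_example(const unsigned char key[16], unsigned char iv[16],
    const unsigned char *pt, unsigned char *ct, size_t len)
{
	AES_KEY aes;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	/* iv is updated in place, so a further call continues the chain. */
	CRYPTO_cbc128_encrypt(pt, ct, len, &aes, iv, (block128_f)AES_encrypt);
	return 0;
}

Decryption mirrors this: build the schedule with AES_set_decrypt_key() and pass AES_decrypt to CRYPTO_cbc128_decrypt().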

441
crypto/modes/ccm128.c Normal file

@@ -0,0 +1,441 @@
/* $OpenBSD: ccm128.c,v 1.6 2022/11/26 16:08:53 tb Exp $ */
/* ====================================================================
* Copyright (c) 2011 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*/
#include <openssl/crypto.h>
#include "modes_local.h"
#include <string.h>
#ifndef MODES_DEBUG
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
/* First you set up the M and L parameters and pass the key schedule.
* This is called once per session setup... */
void CRYPTO_ccm128_init(CCM128_CONTEXT *ctx,
unsigned int M,unsigned int L,void *key,block128_f block)
{
memset(ctx->nonce.c,0,sizeof(ctx->nonce.c));
ctx->nonce.c[0] = ((u8)(L-1)&7) | (u8)(((M-2)/2)&7)<<3;
ctx->blocks = 0;
ctx->block = block;
ctx->key = key;
}
/* !!! The following interfaces are to be called *once* per packet !!! */
/* Then you set up the per-message nonce and pass the length of the message. */
int CRYPTO_ccm128_setiv(CCM128_CONTEXT *ctx,
const unsigned char *nonce,size_t nlen,size_t mlen)
{
unsigned int L = ctx->nonce.c[0]&7; /* the L parameter */
if (nlen<(14-L)) return -1; /* nonce is too short */
if (sizeof(mlen)==8 && L>=3) {
ctx->nonce.c[8] = (u8)(mlen>>(56%(sizeof(mlen)*8)));
ctx->nonce.c[9] = (u8)(mlen>>(48%(sizeof(mlen)*8)));
ctx->nonce.c[10] = (u8)(mlen>>(40%(sizeof(mlen)*8)));
ctx->nonce.c[11] = (u8)(mlen>>(32%(sizeof(mlen)*8)));
}
else
ctx->nonce.u[1] = 0;
ctx->nonce.c[12] = (u8)(mlen>>24);
ctx->nonce.c[13] = (u8)(mlen>>16);
ctx->nonce.c[14] = (u8)(mlen>>8);
ctx->nonce.c[15] = (u8)mlen;
ctx->nonce.c[0] &= ~0x40; /* clear Adata flag */
memcpy(&ctx->nonce.c[1],nonce,14-L);
return 0;
}
/* Then you pass the additional authentication data; this is optional. */
void CRYPTO_ccm128_aad(CCM128_CONTEXT *ctx,
const unsigned char *aad,size_t alen)
{ unsigned int i;
block128_f block = ctx->block;
if (alen==0) return;
ctx->nonce.c[0] |= 0x40; /* set Adata flag */
(*block)(ctx->nonce.c,ctx->cmac.c,ctx->key),
ctx->blocks++;
if (alen<(0x10000-0x100)) {
ctx->cmac.c[0] ^= (u8)(alen>>8);
ctx->cmac.c[1] ^= (u8)alen;
i=2;
}
else if (sizeof(alen)==8 && alen>=(size_t)1<<(32%(sizeof(alen)*8))) {
ctx->cmac.c[0] ^= 0xFF;
ctx->cmac.c[1] ^= 0xFF;
ctx->cmac.c[2] ^= (u8)(alen>>(56%(sizeof(alen)*8)));
ctx->cmac.c[3] ^= (u8)(alen>>(48%(sizeof(alen)*8)));
ctx->cmac.c[4] ^= (u8)(alen>>(40%(sizeof(alen)*8)));
ctx->cmac.c[5] ^= (u8)(alen>>(32%(sizeof(alen)*8)));
ctx->cmac.c[6] ^= (u8)(alen>>24);
ctx->cmac.c[7] ^= (u8)(alen>>16);
ctx->cmac.c[8] ^= (u8)(alen>>8);
ctx->cmac.c[9] ^= (u8)alen;
i=10;
}
else {
ctx->cmac.c[0] ^= 0xFF;
ctx->cmac.c[1] ^= 0xFE;
ctx->cmac.c[2] ^= (u8)(alen>>24);
ctx->cmac.c[3] ^= (u8)(alen>>16);
ctx->cmac.c[4] ^= (u8)(alen>>8);
ctx->cmac.c[5] ^= (u8)alen;
i=6;
}
do {
for(;i<16 && alen;++i,++aad,--alen)
ctx->cmac.c[i] ^= *aad;
(*block)(ctx->cmac.c,ctx->cmac.c,ctx->key),
ctx->blocks++;
i=0;
} while (alen);
}
/* Finally you encrypt or decrypt the message */
/* The counter part of the nonce may not be larger than L*8 bits;
* L is not larger than 8, therefore a 64-bit counter suffices... */
static void ctr64_inc(unsigned char *counter) {
unsigned int n=8;
u8 c;
counter += 8;
do {
--n;
c = counter[n];
++c;
counter[n] = c;
if (c) return;
} while (n);
}
int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
const unsigned char *inp, unsigned char *out,
size_t len)
{
size_t n;
unsigned int i,L;
unsigned char flags0 = ctx->nonce.c[0];
block128_f block = ctx->block;
void * key = ctx->key;
union { u64 u[2]; u8 c[16]; } scratch;
if (!(flags0&0x40))
(*block)(ctx->nonce.c,ctx->cmac.c,key),
ctx->blocks++;
ctx->nonce.c[0] = L = flags0&7;
for (n=0,i=15-L;i<15;++i) {
n |= ctx->nonce.c[i];
ctx->nonce.c[i]=0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
ctx->nonce.c[15]=1;
if (n!=len) return -1; /* length mismatch */
ctx->blocks += ((len+15)>>3)|1;
if (ctx->blocks > (U64(1)<<61)) return -2; /* too much data */
while (len>=16) {
#ifdef __STRICT_ALIGNMENT
union { u64 u[2]; u8 c[16]; } temp;
memcpy (temp.c,inp,16);
ctx->cmac.u[0] ^= temp.u[0];
ctx->cmac.u[1] ^= temp.u[1];
#else
ctx->cmac.u[0] ^= ((u64*)inp)[0];
ctx->cmac.u[1] ^= ((u64*)inp)[1];
#endif
(*block)(ctx->cmac.c,ctx->cmac.c,key);
(*block)(ctx->nonce.c,scratch.c,key);
ctr64_inc(ctx->nonce.c);
#ifdef __STRICT_ALIGNMENT
temp.u[0] ^= scratch.u[0];
temp.u[1] ^= scratch.u[1];
memcpy(out,temp.c,16);
#else
((u64*)out)[0] = scratch.u[0]^((u64*)inp)[0];
((u64*)out)[1] = scratch.u[1]^((u64*)inp)[1];
#endif
inp += 16;
out += 16;
len -= 16;
}
if (len) {
for (i=0; i<len; ++i) ctx->cmac.c[i] ^= inp[i];
(*block)(ctx->cmac.c,ctx->cmac.c,key);
(*block)(ctx->nonce.c,scratch.c,key);
for (i=0; i<len; ++i) out[i] = scratch.c[i]^inp[i];
}
for (i=15-L;i<16;++i)
ctx->nonce.c[i]=0;
(*block)(ctx->nonce.c,scratch.c,key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
ctx->nonce.c[0] = flags0;
return 0;
}
int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
const unsigned char *inp, unsigned char *out,
size_t len)
{
size_t n;
unsigned int i,L;
unsigned char flags0 = ctx->nonce.c[0];
block128_f block = ctx->block;
void * key = ctx->key;
union { u64 u[2]; u8 c[16]; } scratch;
if (!(flags0&0x40))
(*block)(ctx->nonce.c,ctx->cmac.c,key);
ctx->nonce.c[0] = L = flags0&7;
for (n=0,i=15-L;i<15;++i) {
n |= ctx->nonce.c[i];
ctx->nonce.c[i]=0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
ctx->nonce.c[15]=1;
if (n!=len) return -1;
while (len>=16) {
#ifdef __STRICT_ALIGNMENT
union { u64 u[2]; u8 c[16]; } temp;
#endif
(*block)(ctx->nonce.c,scratch.c,key);
ctr64_inc(ctx->nonce.c);
#ifdef __STRICT_ALIGNMENT
memcpy (temp.c,inp,16);
ctx->cmac.u[0] ^= (scratch.u[0] ^= temp.u[0]);
ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]);
memcpy (out,scratch.c,16);
#else
ctx->cmac.u[0] ^= (((u64*)out)[0] = scratch.u[0]^((u64*)inp)[0]);
ctx->cmac.u[1] ^= (((u64*)out)[1] = scratch.u[1]^((u64*)inp)[1]);
#endif
(*block)(ctx->cmac.c,ctx->cmac.c,key);
inp += 16;
out += 16;
len -= 16;
}
if (len) {
(*block)(ctx->nonce.c,scratch.c,key);
for (i=0; i<len; ++i)
ctx->cmac.c[i] ^= (out[i] = scratch.c[i]^inp[i]);
(*block)(ctx->cmac.c,ctx->cmac.c,key);
}
for (i=15-L;i<16;++i)
ctx->nonce.c[i]=0;
(*block)(ctx->nonce.c,scratch.c,key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
ctx->nonce.c[0] = flags0;
return 0;
}
static void ctr64_add (unsigned char *counter,size_t inc)
{ size_t n=8, val=0;
counter += 8;
do {
--n;
val += counter[n] + (inc&0xff);
counter[n] = (unsigned char)val;
val >>= 8; /* carry bit */
inc >>= 8;
} while(n && (inc || val));
}
int CRYPTO_ccm128_encrypt_ccm64(CCM128_CONTEXT *ctx,
const unsigned char *inp, unsigned char *out,
size_t len,ccm128_f stream)
{
size_t n;
unsigned int i,L;
unsigned char flags0 = ctx->nonce.c[0];
block128_f block = ctx->block;
void * key = ctx->key;
union { u64 u[2]; u8 c[16]; } scratch;
if (!(flags0&0x40))
(*block)(ctx->nonce.c,ctx->cmac.c,key),
ctx->blocks++;
ctx->nonce.c[0] = L = flags0&7;
for (n=0,i=15-L;i<15;++i) {
n |= ctx->nonce.c[i];
ctx->nonce.c[i]=0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
ctx->nonce.c[15]=1;
if (n!=len) return -1; /* length mismatch */
ctx->blocks += ((len+15)>>3)|1;
if (ctx->blocks > (U64(1)<<61)) return -2; /* too much data */
if ((n=len/16)) {
(*stream)(inp,out,n,key,ctx->nonce.c,ctx->cmac.c);
n *= 16;
inp += n;
out += n;
len -= n;
if (len) ctr64_add(ctx->nonce.c,n/16);
}
if (len) {
for (i=0; i<len; ++i) ctx->cmac.c[i] ^= inp[i];
(*block)(ctx->cmac.c,ctx->cmac.c,key);
(*block)(ctx->nonce.c,scratch.c,key);
for (i=0; i<len; ++i) out[i] = scratch.c[i]^inp[i];
}
for (i=15-L;i<16;++i)
ctx->nonce.c[i]=0;
(*block)(ctx->nonce.c,scratch.c,key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
ctx->nonce.c[0] = flags0;
return 0;
}
int CRYPTO_ccm128_decrypt_ccm64(CCM128_CONTEXT *ctx,
const unsigned char *inp, unsigned char *out,
size_t len,ccm128_f stream)
{
size_t n;
unsigned int i,L;
unsigned char flags0 = ctx->nonce.c[0];
block128_f block = ctx->block;
void * key = ctx->key;
union { u64 u[2]; u8 c[16]; } scratch;
if (!(flags0&0x40))
(*block)(ctx->nonce.c,ctx->cmac.c,key);
ctx->nonce.c[0] = L = flags0&7;
for (n=0,i=15-L;i<15;++i) {
n |= ctx->nonce.c[i];
ctx->nonce.c[i]=0;
n <<= 8;
}
n |= ctx->nonce.c[15]; /* reconstructed length */
ctx->nonce.c[15]=1;
if (n!=len) return -1;
if ((n=len/16)) {
(*stream)(inp,out,n,key,ctx->nonce.c,ctx->cmac.c);
n *= 16;
inp += n;
out += n;
len -= n;
if (len) ctr64_add(ctx->nonce.c,n/16);
}
if (len) {
(*block)(ctx->nonce.c,scratch.c,key);
for (i=0; i<len; ++i)
ctx->cmac.c[i] ^= (out[i] = scratch.c[i]^inp[i]);
(*block)(ctx->cmac.c,ctx->cmac.c,key);
}
for (i=15-L;i<16;++i)
ctx->nonce.c[i]=0;
(*block)(ctx->nonce.c,scratch.c,key);
ctx->cmac.u[0] ^= scratch.u[0];
ctx->cmac.u[1] ^= scratch.u[1];
ctx->nonce.c[0] = flags0;
return 0;
}
size_t CRYPTO_ccm128_tag(CCM128_CONTEXT *ctx,unsigned char *tag,size_t len)
{ unsigned int M = (ctx->nonce.c[0]>>3)&7; /* the M parameter */
M *= 2; M += 2;
if (len != M) return 0;
memcpy(tag,ctx->cmac.c,M);
return M;
}
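
The comments above describe the intended calling sequence: init once per session, then setiv, optional aad, encrypt/decrypt and tag once per packet. A minimal sketch of that sequence, assuming AES-128 as the block cipher and a build that can see struct ccm128_context (e.g. compiled next to modes_local.h); the nonce size, tag size and buffer names are illustrative:

#include <stddef.h>

#include <openssl/aes.h>
#include "modes_local.h"	/* for struct ccm128_context / CCM128_CONTEXT */

/*
 * Sketch: CCM-seal a packet with AES-128, a 12-byte nonce (L = 3) and a
 * 16-byte tag (M = 16).  All buffers and sizes are illustrative only.
 */
static int
ccm_seal_example(const unsigned char key[16], const unsigned char nonce[12],
    const unsigned char *aad, size_t alen,
    const unsigned char *pt, size_t len,
    unsigned char *ct, unsigned char tag[16])
{
	AES_KEY aes;
	CCM128_CONTEXT ccm;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;

	/* Once per session: fix M (tag bytes) and L (length-field bytes). */
	CRYPTO_ccm128_init(&ccm, 16, 3, &aes, (block128_f)AES_encrypt);

	/* Once per packet: nonce plus total message length, then the AAD. */
	if (CRYPTO_ccm128_setiv(&ccm, nonce, 12, len) != 0)
		return -1;
	if (alen > 0)
		CRYPTO_ccm128_aad(&ccm, aad, alen);

	if (CRYPTO_ccm128_encrypt(&ccm, pt, ct, len) != 0)
		return -1;

	return CRYPTO_ccm128_tag(&ccm, tag, 16) == 16 ? 0 : -1;
}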

234
crypto/modes/cfb128.c Normal file

@@ -0,0 +1,234 @@
/* $OpenBSD: cfb128.c,v 1.5 2022/11/26 16:08:53 tb Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
*/
#include <openssl/crypto.h>
#include "modes_local.h"
#include <string.h>
#ifndef MODES_DEBUG
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
/* The input and output are encrypted as though 128-bit CFB mode is being
* used. The extra state information recording how much of the
* 128-bit block we have used is contained in *num.
*/
void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key,
unsigned char ivec[16], int *num,
int enc, block128_f block)
{
unsigned int n;
size_t l = 0;
n = *num;
if (enc) {
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (16%sizeof(size_t) == 0) do { /* always true actually */
while (n && len) {
*(out++) = ivec[n] ^= *(in++);
--len;
n = (n+1) % 16;
}
#ifdef __STRICT_ALIGNMENT
if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
break;
#endif
while (len>=16) {
(*block)(ivec, ivec, key);
for (; n<16; n+=sizeof(size_t)) {
*(size_t*)(out+n) =
*(size_t*)(ivec+n) ^= *(size_t*)(in+n);
}
len -= 16;
out += 16;
in += 16;
n = 0;
}
if (len) {
(*block)(ivec, ivec, key);
while (len--) {
out[n] = ivec[n] ^= in[n];
++n;
}
}
*num = n;
return;
} while (0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
while (l<len) {
if (n == 0) {
(*block)(ivec, ivec, key);
}
out[l] = ivec[n] ^= in[l];
++l;
n = (n+1) % 16;
}
*num = n;
} else {
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (16%sizeof(size_t) == 0) do { /* always true actually */
while (n && len) {
unsigned char c;
*(out++) = ivec[n] ^ (c = *(in++)); ivec[n] = c;
--len;
n = (n+1) % 16;
}
#ifdef __STRICT_ALIGNMENT
if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
break;
#endif
while (len>=16) {
(*block)(ivec, ivec, key);
for (; n<16; n+=sizeof(size_t)) {
size_t t = *(size_t*)(in+n);
*(size_t*)(out+n) = *(size_t*)(ivec+n) ^ t;
*(size_t*)(ivec+n) = t;
}
len -= 16;
out += 16;
in += 16;
n = 0;
}
if (len) {
(*block)(ivec, ivec, key);
while (len--) {
unsigned char c;
out[n] = ivec[n] ^ (c = in[n]); ivec[n] = c;
++n;
}
}
*num = n;
return;
} while (0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
while (l<len) {
unsigned char c;
if (n == 0) {
(*block)(ivec, ivec, key);
}
out[l] = ivec[n] ^ (c = in[l]); ivec[n] = c;
++l;
n = (n+1) % 16;
}
*num=n;
}
}
/* This expects a single block of size nbits for both in and out. Note that
it corrupts any extra bits in the last byte of out */
static void cfbr_encrypt_block(const unsigned char *in,unsigned char *out,
int nbits,const void *key,
unsigned char ivec[16],int enc,
block128_f block)
{
int n,rem,num;
unsigned char ovec[16*2 + 1]; /* +1 because we dereference (but don't use) one byte off the end */
if (nbits<=0 || nbits>128) return;
/* fill in the first half of the new IV with the current IV */
memcpy(ovec,ivec,16);
/* construct the new IV */
(*block)(ivec,ivec,key);
num = (nbits+7)/8;
if (enc) /* encrypt the input */
for(n=0 ; n < num ; ++n)
out[n] = (ovec[16+n] = in[n] ^ ivec[n]);
else /* decrypt the input */
for(n=0 ; n < num ; ++n)
out[n] = (ovec[16+n] = in[n]) ^ ivec[n];
/* shift ovec left... */
rem = nbits%8;
num = nbits/8;
if(rem==0)
memcpy(ivec,ovec+num,16);
else
for(n=0 ; n < 16 ; ++n)
ivec[n] = ovec[n+num]<<rem | ovec[n+num+1]>>(8-rem);
/* it is not necessary to cleanse ovec, since the IV is not secret */
}
/* N.B. This expects the input to be packed, MS bit first */
void CRYPTO_cfb128_1_encrypt(const unsigned char *in, unsigned char *out,
size_t bits, const void *key,
unsigned char ivec[16], int *num,
int enc, block128_f block)
{
size_t n;
unsigned char c[1],d[1];
for(n=0 ; n<bits ; ++n)
{
c[0]=(in[n/8]&(1 << (7-n%8))) ? 0x80 : 0;
cfbr_encrypt_block(c,d,1,key,ivec,enc,block);
out[n/8]=(out[n/8]&~(1 << (unsigned int)(7-n%8))) |
((d[0]&0x80) >> (unsigned int)(n%8));
}
}
void CRYPTO_cfb128_8_encrypt(const unsigned char *in, unsigned char *out,
size_t length, const void *key,
unsigned char ivec[16], int *num,
int enc, block128_f block)
{
size_t n;
for(n=0 ; n<length ; ++n)
cfbr_encrypt_block(&in[n],&out[n],8,key,ivec,enc,block);
}
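
Because *num records how far into the current 128-bit keystream block the caller is, a CFB stream can be fed in arbitrary-sized pieces, as in this sketch (AES-128 assumed; key and buffer names are illustrative):

#include <stddef.h>

#include <openssl/aes.h>
#include <openssl/modes.h>

/* Sketch: CFB128-encrypt a stream in two arbitrary-sized pieces. */
static int
cfb_stream_example(const unsigned char key[16], unsigned char iv[16],
    const unsigned char *pt, unsigned char *ct, size_t len)
{
	AES_KEY aes;
	int num = 0;
	size_t first = len / 2;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	/* First piece... */
	CRYPTO_cfb128_encrypt(pt, ct, first, &aes, iv, &num,
	    1 /* encrypt */, (block128_f)AES_encrypt);
	/* ...and the remainder; iv and num carry the state across calls. */
	CRYPTO_cfb128_encrypt(pt + first, ct + first, len - first, &aes, iv,
	    &num, 1 /* encrypt */, (block128_f)AES_encrypt);
	return 0;
}

Decryption is the same call with enc set to 0; CFB only ever uses the forward (encrypt) direction of the block cipher.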

251
crypto/modes/ctr128.c Normal file

@@ -0,0 +1,251 @@
/* $OpenBSD: ctr128.c,v 1.9 2022/12/26 07:18:52 jmc Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
*/
#include <openssl/crypto.h>
#include "modes_local.h"
#include <string.h>
#ifndef MODES_DEBUG
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
#include <assert.h>
/* NOTE: the IV/counter CTR mode is big-endian. The code itself
* is endian-neutral. */
/* increment counter (128-bit int) by 1 */
static void ctr128_inc(unsigned char *counter) {
u32 n=16;
u8 c;
do {
--n;
c = counter[n];
++c;
counter[n] = c;
if (c) return;
} while (n);
}
#if !defined(OPENSSL_SMALL_FOOTPRINT)
static void
ctr128_inc_aligned(unsigned char *counter)
{
#if BYTE_ORDER == LITTLE_ENDIAN
ctr128_inc(counter);
#else
size_t *data, c, n;
data = (size_t *)counter;
n = 16 / sizeof(size_t);
do {
--n;
c = data[n];
++c;
data[n] = c;
if (c)
return;
} while (n);
#endif
}
#endif
/* The input is encrypted as though 128-bit counter mode is being
* used. The extra state information recording how much of the
* 128-bit block we have used is contained in *num, and the
* encrypted counter is kept in ecount_buf. Both *num and
* ecount_buf must be initialised with zeros before the first
* call to CRYPTO_ctr128_encrypt().
*
* This algorithm assumes that the counter is in the x lower bits
* of the IV (ivec), and that the application has full control over
* overflow and the rest of the IV. This implementation takes NO
* responsibility for checking that the counter doesn't overflow
* into the rest of the IV when incremented.
*/
void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key,
unsigned char ivec[16], unsigned char ecount_buf[16],
unsigned int *num, block128_f block)
{
unsigned int n;
size_t l=0;
assert(*num < 16);
n = *num;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (16%sizeof(size_t) == 0) do { /* always true actually */
while (n && len) {
*(out++) = *(in++) ^ ecount_buf[n];
--len;
n = (n+1) % 16;
}
#ifdef __STRICT_ALIGNMENT
if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
break;
#endif
while (len>=16) {
(*block)(ivec, ecount_buf, key);
ctr128_inc_aligned(ivec);
for (; n<16; n+=sizeof(size_t))
*(size_t *)(out+n) =
*(size_t *)(in+n) ^ *(size_t *)(ecount_buf+n);
len -= 16;
out += 16;
in += 16;
n = 0;
}
if (len) {
(*block)(ivec, ecount_buf, key);
ctr128_inc_aligned(ivec);
while (len--) {
out[n] = in[n] ^ ecount_buf[n];
++n;
}
}
*num = n;
return;
} while(0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
while (l<len) {
if (n==0) {
(*block)(ivec, ecount_buf, key);
ctr128_inc(ivec);
}
out[l] = in[l] ^ ecount_buf[n];
++l;
n = (n+1) % 16;
}
*num=n;
}
/* increment upper 96 bits of 128-bit counter by 1 */
static void ctr96_inc(unsigned char *counter) {
u32 n=12;
u8 c;
do {
--n;
c = counter[n];
++c;
counter[n] = c;
if (c) return;
} while (n);
}
void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
size_t len, const void *key,
unsigned char ivec[16], unsigned char ecount_buf[16],
unsigned int *num, ctr128_f func)
{
unsigned int n,ctr32;
assert(*num < 16);
n = *num;
while (n && len) {
*(out++) = *(in++) ^ ecount_buf[n];
--len;
n = (n+1) % 16;
}
ctr32 = GETU32(ivec+12);
while (len>=16) {
size_t blocks = len/16;
/*
* 1<<28 is just a not-so-small yet not-so-large number...
* Below condition is practically never met, but it has to
* be checked for code correctness.
*/
if (sizeof(size_t)>sizeof(unsigned int) && blocks>(1U<<28))
blocks = (1U<<28);
/*
* As (*func) operates on 32-bit counter, caller
* has to handle overflow. 'if' below detects the
* overflow, which is then handled by limiting the
* amount of blocks to the exact overflow point...
*/
ctr32 += (u32)blocks;
if (ctr32 < blocks) {
blocks -= ctr32;
ctr32 = 0;
}
(*func)(in,out,blocks,key,ivec);
/* (*func) does not update ivec, the caller does: */
PUTU32(ivec+12,ctr32);
/* ... overflow was detected, propagate carry. */
if (ctr32 == 0) ctr96_inc(ivec);
blocks *= 16;
len -= blocks;
out += blocks;
in += blocks;
}
if (len) {
memset(ecount_buf,0,16);
(*func)(ecount_buf,ecount_buf,1,key,ivec);
++ctr32;
PUTU32(ivec+12,ctr32);
if (ctr32 == 0) ctr96_inc(ivec);
while (len--) {
out[n] = in[n] ^ ecount_buf[n];
++n;
}
}
*num=n;
}
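
A sketch of the zero-initialisation requirement spelled out above, assuming AES-128 as the block cipher; the ivec layout (nonce followed by a big-endian counter in the low bytes) and the names are illustrative:

#include <stddef.h>
#include <string.h>

#include <openssl/aes.h>
#include <openssl/modes.h>

/* Sketch: CTR encrypt (or decrypt, the operation is identical). */
static int
ctr_example(const unsigned char key[16], unsigned char ivec[16],
    const unsigned char *in, unsigned char *out, size_t len)
{
	AES_KEY aes;
	unsigned char ecount[16];
	unsigned int num = 0;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	/* Both num and ecount must start out zeroed, as required above. */
	memset(ecount, 0, sizeof(ecount));
	CRYPTO_ctr128_encrypt(in, out, len, &aes, ivec, ecount, &num,
	    (block128_f)AES_encrypt);
	return 0;
}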

1562
crypto/modes/gcm128.c Normal file

File diff suppressed because it is too large

@@ -0,0 +1,412 @@
#include "arm_arch.h"
.text
.syntax unified
.code 32
.type rem_4bit,%object
.align 5
rem_4bit:
.short 0x0000,0x1C20,0x3840,0x2460
.short 0x7080,0x6CA0,0x48C0,0x54E0
.short 0xE100,0xFD20,0xD940,0xC560
.short 0x9180,0x8DA0,0xA9C0,0xB5E0
.size rem_4bit,.-rem_4bit
.type rem_4bit_get,%function
rem_4bit_get:
sub r2,pc,#8
sub r2,r2,#32 @ &rem_4bit
b .Lrem_4bit_got
nop
.size rem_4bit_get,.-rem_4bit_get
.global gcm_ghash_4bit
.type gcm_ghash_4bit,%function
gcm_ghash_4bit:
sub r12,pc,#8
add r3,r2,r3 @ r3 to point at the end
stmdb sp!,{r3-r11,lr} @ save r3/end too
sub r12,r12,#48 @ &rem_4bit
ldmia r12,{r4-r11} @ copy rem_4bit ...
stmdb sp!,{r4-r11} @ ... to stack
ldrb r12,[r2,#15]
ldrb r14,[r0,#15]
.Louter:
eor r12,r12,r14
and r14,r12,#0xf0
and r12,r12,#0x0f
mov r3,#14
add r7,r1,r12,lsl#4
ldmia r7,{r4-r7} @ load Htbl[nlo]
add r11,r1,r14
ldrb r12,[r2,#14]
and r14,r4,#0xf @ rem
ldmia r11,{r8-r11} @ load Htbl[nhi]
add r14,r14,r14
eor r4,r8,r4,lsr#4
ldrh r8,[sp,r14] @ rem_4bit[rem]
eor r4,r4,r5,lsl#28
ldrb r14,[r0,#14]
eor r5,r9,r5,lsr#4
eor r5,r5,r6,lsl#28
eor r6,r10,r6,lsr#4
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
eor r12,r12,r14
and r14,r12,#0xf0
and r12,r12,#0x0f
eor r7,r7,r8,lsl#16
.Linner:
add r11,r1,r12,lsl#4
and r12,r4,#0xf @ rem
subs r3,r3,#1
add r12,r12,r12
ldmia r11,{r8-r11} @ load Htbl[nlo]
eor r4,r8,r4,lsr#4
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
eor r5,r5,r6,lsl#28
ldrh r8,[sp,r12] @ rem_4bit[rem]
eor r6,r10,r6,lsr#4
ldrbpl r12,[r2,r3]
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
add r11,r1,r14
and r14,r4,#0xf @ rem
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
add r14,r14,r14
ldmia r11,{r8-r11} @ load Htbl[nhi]
eor r4,r8,r4,lsr#4
ldrbpl r8,[r0,r3]
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
ldrh r9,[sp,r14]
eor r5,r5,r6,lsl#28
eor r6,r10,r6,lsr#4
eor r6,r6,r7,lsl#28
eorpl r12,r12,r8
eor r7,r11,r7,lsr#4
andpl r14,r12,#0xf0
andpl r12,r12,#0x0f
eor r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
bpl .Linner
ldr r3,[sp,#32] @ re-load r3/end
add r2,r2,#16
mov r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r4,r4
str r4,[r0,#12]
#elif defined(__ARMEB__)
str r4,[r0,#12]
#else
mov r9,r4,lsr#8
strb r4,[r0,#12+3]
mov r10,r4,lsr#16
strb r9,[r0,#12+2]
mov r11,r4,lsr#24
strb r10,[r0,#12+1]
strb r11,[r0,#12]
#endif
cmp r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r5,r5
str r5,[r0,#8]
#elif defined(__ARMEB__)
str r5,[r0,#8]
#else
mov r9,r5,lsr#8
strb r5,[r0,#8+3]
mov r10,r5,lsr#16
strb r9,[r0,#8+2]
mov r11,r5,lsr#24
strb r10,[r0,#8+1]
strb r11,[r0,#8]
#endif
ldrbne r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r6,r6
str r6,[r0,#4]
#elif defined(__ARMEB__)
str r6,[r0,#4]
#else
mov r9,r6,lsr#8
strb r6,[r0,#4+3]
mov r10,r6,lsr#16
strb r9,[r0,#4+2]
mov r11,r6,lsr#24
strb r10,[r0,#4+1]
strb r11,[r0,#4]
#endif
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r7,r7
str r7,[r0,#0]
#elif defined(__ARMEB__)
str r7,[r0,#0]
#else
mov r9,r7,lsr#8
strb r7,[r0,#0+3]
mov r10,r7,lsr#16
strb r9,[r0,#0+2]
mov r11,r7,lsr#24
strb r10,[r0,#0+1]
strb r11,[r0,#0]
#endif
bne .Louter
add sp,sp,#36
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r11,pc}
#else
ldmia sp!,{r4-r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size gcm_ghash_4bit,.-gcm_ghash_4bit
.global gcm_gmult_4bit
.type gcm_gmult_4bit,%function
gcm_gmult_4bit:
stmdb sp!,{r4-r11,lr}
ldrb r12,[r0,#15]
b rem_4bit_get
.Lrem_4bit_got:
and r14,r12,#0xf0
and r12,r12,#0x0f
mov r3,#14
add r7,r1,r12,lsl#4
ldmia r7,{r4-r7} @ load Htbl[nlo]
ldrb r12,[r0,#14]
add r11,r1,r14
and r14,r4,#0xf @ rem
ldmia r11,{r8-r11} @ load Htbl[nhi]
add r14,r14,r14
eor r4,r8,r4,lsr#4
ldrh r8,[r2,r14] @ rem_4bit[rem]
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
eor r5,r5,r6,lsl#28
eor r6,r10,r6,lsr#4
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
and r14,r12,#0xf0
eor r7,r7,r8,lsl#16
and r12,r12,#0x0f
.Loop:
add r11,r1,r12,lsl#4
and r12,r4,#0xf @ rem
subs r3,r3,#1
add r12,r12,r12
ldmia r11,{r8-r11} @ load Htbl[nlo]
eor r4,r8,r4,lsr#4
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
eor r5,r5,r6,lsl#28
ldrh r8,[r2,r12] @ rem_4bit[rem]
eor r6,r10,r6,lsr#4
ldrbpl r12,[r0,r3]
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
add r11,r1,r14
and r14,r4,#0xf @ rem
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
add r14,r14,r14
ldmia r11,{r8-r11} @ load Htbl[nhi]
eor r4,r8,r4,lsr#4
eor r4,r4,r5,lsl#28
eor r5,r9,r5,lsr#4
ldrh r8,[r2,r14] @ rem_4bit[rem]
eor r5,r5,r6,lsl#28
eor r6,r10,r6,lsr#4
eor r6,r6,r7,lsl#28
eor r7,r11,r7,lsr#4
andpl r14,r12,#0xf0
andpl r12,r12,#0x0f
eor r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
bpl .Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r4,r4
str r4,[r0,#12]
#elif defined(__ARMEB__)
str r4,[r0,#12]
#else
mov r9,r4,lsr#8
strb r4,[r0,#12+3]
mov r10,r4,lsr#16
strb r9,[r0,#12+2]
mov r11,r4,lsr#24
strb r10,[r0,#12+1]
strb r11,[r0,#12]
#endif
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r5,r5
str r5,[r0,#8]
#elif defined(__ARMEB__)
str r5,[r0,#8]
#else
mov r9,r5,lsr#8
strb r5,[r0,#8+3]
mov r10,r5,lsr#16
strb r9,[r0,#8+2]
mov r11,r5,lsr#24
strb r10,[r0,#8+1]
strb r11,[r0,#8]
#endif
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r6,r6
str r6,[r0,#4]
#elif defined(__ARMEB__)
str r6,[r0,#4]
#else
mov r9,r6,lsr#8
strb r6,[r0,#4+3]
mov r10,r6,lsr#16
strb r9,[r0,#4+2]
mov r11,r6,lsr#24
strb r10,[r0,#4+1]
strb r11,[r0,#4]
#endif
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
rev r7,r7
str r7,[r0,#0]
#elif defined(__ARMEB__)
str r7,[r0,#0]
#else
mov r9,r7,lsr#8
strb r7,[r0,#0+3]
mov r10,r7,lsr#16
strb r9,[r0,#0+2]
mov r11,r7,lsr#24
strb r10,[r0,#0+1]
strb r11,[r0,#0]
#endif
#if __ARM_ARCH__>=5
ldmia sp!,{r4-r11,pc}
#else
ldmia sp!,{r4-r11,lr}
tst lr,#1
moveq pc,lr @ be binary compatible with V4, yet
.word 0xe12fff1e @ interoperable with Thumb ISA:-)
#endif
.size gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
.fpu neon
.global gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
sub r1,#16 @ point at H in GCM128_CTX
vld1.64 d29,[r0,:64]! @ load Xi
vmov.i32 d5,#0xe1 @ our irreducible polynomial
vld1.64 d28,[r0,:64]!
vshr.u64 d5,#32
vldmia r1,{d0-d1} @ load H
veor q12,q12
#ifdef __ARMEL__
vrev64.8 q14,q14
#endif
veor q13,q13
veor q11,q11
mov r1,#16
veor q10,q10
mov r3,#16
veor d2,d2
vdup.8 d4,d28[0] @ broadcast lowest byte
b .Linner_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.global gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
vld1.64 d21,[r0,:64]! @ load Xi
vmov.i32 d5,#0xe1 @ our irreducible polynomial
vld1.64 d20,[r0,:64]!
vshr.u64 d5,#32
vldmia r0,{d0-d1} @ load H
veor q12,q12
nop
#ifdef __ARMEL__
vrev64.8 q10,q10
#endif
.Louter_neon:
vld1.64 d29,[r2]! @ load inp
veor q13,q13
vld1.64 d28,[r2]!
veor q11,q11
mov r1,#16
#ifdef __ARMEL__
vrev64.8 q14,q14
#endif
veor d2,d2
veor q14,q10 @ inp^=Xi
veor q10,q10
vdup.8 d4,d28[0] @ broadcast lowest byte
.Linner_neon:
subs r1,r1,#1
vmull.p8 q9,d1,d4 @ H.lo·Xi[i]
vmull.p8 q8,d0,d4 @ H.hi·Xi[i]
vext.8 q14,q12,#1 @ IN>>=8
veor q10,q13 @ modulo-scheduled part
vshl.i64 d22,#48
vdup.8 d4,d28[0] @ broadcast lowest byte
veor d3,d18,d20
veor d21,d22
vuzp.8 q9,q8
vsli.8 d2,d3,#1 @ compose the "carry" byte
vext.8 q10,q12,#1 @ Z>>=8
vmull.p8 q11,d2,d5 @ "carry"·0xe1
vshr.u8 d2,d3,#7 @ save Z's bottom bit
vext.8 q13,q9,q12,#1 @ Qlo>>=8
veor q10,q8
bne .Linner_neon
veor q10,q13 @ modulo-scheduled artefact
vshl.i64 d22,#48
veor d21,d22
@ finalization, normalize Z:Zo
vand d2,d5 @ suffices to mask the bit
vshr.u64 d3,d20,#63
vshl.i64 q10,#1
subs r3,#16
vorr q10,q1 @ Z=Z:Zo<<1
bne .Louter_neon
#ifdef __ARMEL__
vrev64.8 q10,q10
#endif
sub r0,#16
vst1.64 d21,[r0,:64]! @ write out Xi
vst1.64 d20,[r0,:64]
.word 0xe12fff1e
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
.align 2
#if defined(HAVE_GNU_STACK)
.section .note.GNU-stack,"",%progbits
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

113
crypto/modes/modes_local.h Normal file

@@ -0,0 +1,113 @@
/* $OpenBSD: modes_local.h,v 1.1 2022/11/26 16:08:53 tb Exp $ */
/* ====================================================================
* Copyright (c) 2010 The OpenSSL Project. All rights reserved.
*
* Redistribution and use is governed by OpenSSL license.
* ====================================================================
*/
#include <endian.h>
#include <openssl/opensslconf.h>
#include <openssl/modes.h>
__BEGIN_HIDDEN_DECLS
#if defined(_LP64)
typedef long i64;
typedef unsigned long u64;
#define U64(C) C##UL
#else
typedef long long i64;
typedef unsigned long long u64;
#define U64(C) C##ULL
#endif
typedef unsigned int u32;
typedef unsigned char u8;
#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#if defined(__GNUC__) && __GNUC__>=2
# if defined(__x86_64) || defined(__x86_64__)
# define BSWAP8(x) ({ u64 ret=(x); \
asm ("bswapq %0" \
: "+r"(ret)); ret; })
# define BSWAP4(x) ({ u32 ret=(x); \
asm ("bswapl %0" \
: "+r"(ret)); ret; })
# elif (defined(__i386) || defined(__i386__))
# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
asm ("bswapl %0; bswapl %1" \
: "+r"(hi),"+r"(lo)); \
(u64)hi<<32|lo; })
# define BSWAP4(x) ({ u32 ret=(x); \
asm ("bswapl %0" \
: "+r"(ret)); ret; })
# elif (defined(__arm__) || defined(__arm)) && !defined(__STRICT_ALIGNMENT)
# if (__ARM_ARCH >= 6)
# define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
asm ("rev %0,%0; rev %1,%1" \
: "+r"(hi),"+r"(lo)); \
(u64)hi<<32|lo; })
# define BSWAP4(x) ({ u32 ret; \
asm ("rev %0,%1" \
: "=r"(ret) : "r"((u32)(x))); \
ret; })
# endif
# endif
#endif
#endif
#if defined(BSWAP4) && !defined(__STRICT_ALIGNMENT)
#define GETU32(p) BSWAP4(*(const u32 *)(p))
#define PUTU32(p,v) *(u32 *)(p) = BSWAP4(v)
#else
#define GETU32(p) ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
#define PUTU32(p,v) ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
#endif
/* GCM definitions */
typedef struct { u64 hi,lo; } u128;
#ifdef TABLE_BITS
#undef TABLE_BITS
#endif
/*
* Even though permitted values for TABLE_BITS are 8, 4 and 1, it should
* never be set to 8 [or 1]. For further information see gcm128.c.
*/
#define TABLE_BITS 4
struct gcm128_context {
/* The following 6 names follow the names in the GCM specification */
union { u64 u[2]; u32 d[4]; u8 c[16]; size_t t[16/sizeof(size_t)]; }
Yi,EKi,EK0,len,Xi,H;
/* Relative position of Xi, H and pre-computed Htable is used
* in some assembler modules, i.e. don't change the order! */
#if TABLE_BITS==8
u128 Htable[256];
#else
u128 Htable[16];
void (*gmult)(u64 Xi[2],const u128 Htable[16]);
void (*ghash)(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#endif
unsigned int mres, ares;
block128_f block;
void *key;
};
struct xts128_context {
void *key1, *key2;
block128_f block1,block2;
};
struct ccm128_context {
union { u64 u[2]; u8 c[16]; } nonce, cmac;
u64 blocks;
block128_f block;
void *key;
};
__END_HIDDEN_DECLS

119
crypto/modes/ofb128.c Normal file

@@ -0,0 +1,119 @@
/* $OpenBSD: ofb128.c,v 1.5 2022/11/26 16:08:53 tb Exp $ */
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
*/
#include <openssl/crypto.h>
#include "modes_local.h"
#include <string.h>
#ifndef MODES_DEBUG
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
/* The input and output are encrypted as though 128-bit OFB mode is being
* used. The extra state information recording how much of the
* 128-bit block we have used is contained in *num.
*/
void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
size_t len, const void *key,
unsigned char ivec[16], int *num,
block128_f block)
{
unsigned int n;
size_t l=0;
n = *num;
#if !defined(OPENSSL_SMALL_FOOTPRINT)
if (16%sizeof(size_t) == 0) do { /* always true actually */
while (n && len) {
*(out++) = *(in++) ^ ivec[n];
--len;
n = (n+1) % 16;
}
#ifdef __STRICT_ALIGNMENT
if (((size_t)in|(size_t)out|(size_t)ivec)%sizeof(size_t) != 0)
break;
#endif
while (len>=16) {
(*block)(ivec, ivec, key);
for (; n<16; n+=sizeof(size_t))
*(size_t*)(out+n) =
*(size_t*)(in+n) ^ *(size_t*)(ivec+n);
len -= 16;
out += 16;
in += 16;
n = 0;
}
if (len) {
(*block)(ivec, ivec, key);
while (len--) {
out[n] = in[n] ^ ivec[n];
++n;
}
}
*num = n;
return;
} while(0);
/* the rest would be commonly eliminated by x86* compiler */
#endif
while (l<len) {
if (n==0) {
(*block)(ivec, ivec, key);
}
out[l] = in[l] ^ ivec[n];
++l;
n = (n+1) % 16;
}
*num=n;
}
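
Since the OFB keystream depends only on the key and IV, the same call performs both encryption and decryption; *num tracks the position within the current keystream block. A sketch assuming AES-128, with illustrative names:

#include <stddef.h>

#include <openssl/aes.h>
#include <openssl/modes.h>

/* Sketch: OFB128 with AES-128; encrypting and decrypting are the same call. */
static int
ofb_example(const unsigned char key[16], unsigned char iv[16],
    const unsigned char *in, unsigned char *out, size_t len)
{
	AES_KEY aes;
	int num = 0;

	if (AES_set_encrypt_key(key, 128, &aes) != 0)
		return -1;
	/* iv and num carry the keystream state across successive calls. */
	CRYPTO_ofb128_encrypt(in, out, len, &aes, iv, &num,
	    (block128_f)AES_encrypt);
	return 0;
}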

186
crypto/modes/xts128.c Normal file

@@ -0,0 +1,186 @@
/* $OpenBSD: xts128.c,v 1.10 2023/05/07 14:38:04 tb Exp $ */
/* ====================================================================
* Copyright (c) 2011 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* openssl-core@openssl.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.openssl.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*/
#include <openssl/crypto.h>
#include "modes_local.h"
#include <endian.h>
#include <string.h>
#ifndef MODES_DEBUG
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx, const unsigned char iv[16],
const unsigned char *inp, unsigned char *out,
size_t len, int enc)
{
union { u64 u[2]; u32 d[4]; u8 c[16]; } tweak, scratch;
unsigned int i;
if (len<16) return -1;
memcpy(tweak.c, iv, 16);
(*ctx->block2)(tweak.c,tweak.c,ctx->key2);
if (!enc && (len%16)) len-=16;
while (len>=16) {
#ifdef __STRICT_ALIGNMENT
memcpy(scratch.c,inp,16);
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
#else
scratch.u[0] = ((u64*)inp)[0]^tweak.u[0];
scratch.u[1] = ((u64*)inp)[1]^tweak.u[1];
#endif
(*ctx->block1)(scratch.c,scratch.c,ctx->key1);
#ifdef __STRICT_ALIGNMENT
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
memcpy(out,scratch.c,16);
#else
((u64*)out)[0] = scratch.u[0]^=tweak.u[0];
((u64*)out)[1] = scratch.u[1]^=tweak.u[1];
#endif
inp += 16;
out += 16;
len -= 16;
if (len==0) return 0;
#if BYTE_ORDER == LITTLE_ENDIAN
unsigned int carry,res;
res = 0x87&(((int)tweak.d[3])>>31);
carry = (unsigned int)(tweak.u[0]>>63);
tweak.u[0] = (tweak.u[0]<<1)^res;
tweak.u[1] = (tweak.u[1]<<1)|carry;
#else /* BIG_ENDIAN */
size_t c;
for (c=0,i=0;i<16;++i) {
/*+ substitutes for |, because c is 1 bit */
c += ((size_t)tweak.c[i])<<1;
tweak.c[i] = (u8)c;
c = c>>8;
}
tweak.c[0] ^= (u8)(0x87&(0-c));
#endif
}
if (enc) {
for (i=0;i<len;++i) {
u8 ch = inp[i];
out[i] = scratch.c[i];
scratch.c[i] = ch;
}
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
(*ctx->block1)(scratch.c,scratch.c,ctx->key1);
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
memcpy(out-16,scratch.c,16);
}
else {
union { u64 u[2]; u8 c[16]; } tweak1;
#if BYTE_ORDER == LITTLE_ENDIAN
unsigned int carry,res;
res = 0x87&(((int)tweak.d[3])>>31);
carry = (unsigned int)(tweak.u[0]>>63);
tweak1.u[0] = (tweak.u[0]<<1)^res;
tweak1.u[1] = (tweak.u[1]<<1)|carry;
#else
size_t c;
for (c=0,i=0;i<16;++i) {
/*+ substitutes for |, because c is 1 bit */
c += ((size_t)tweak.c[i])<<1;
tweak1.c[i] = (u8)c;
c = c>>8;
}
tweak1.c[0] ^= (u8)(0x87&(0-c));
#endif
#ifdef __STRICT_ALIGNMENT
memcpy(scratch.c,inp,16);
scratch.u[0] ^= tweak1.u[0];
scratch.u[1] ^= tweak1.u[1];
#else
scratch.u[0] = ((u64*)inp)[0]^tweak1.u[0];
scratch.u[1] = ((u64*)inp)[1]^tweak1.u[1];
#endif
(*ctx->block1)(scratch.c,scratch.c,ctx->key1);
scratch.u[0] ^= tweak1.u[0];
scratch.u[1] ^= tweak1.u[1];
for (i=0;i<len;++i) {
u8 ch = inp[16+i];
out[16+i] = scratch.c[i];
scratch.c[i] = ch;
}
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
(*ctx->block1)(scratch.c,scratch.c,ctx->key1);
#ifdef __STRICT_ALIGNMENT
scratch.u[0] ^= tweak.u[0];
scratch.u[1] ^= tweak.u[1];
memcpy (out,scratch.c,16);
#else
((u64*)out)[0] = scratch.u[0]^tweak.u[0];
((u64*)out)[1] = scratch.u[1]^tweak.u[1];
#endif
}
return 0;
}
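
CRYPTO_xts128_encrypt() takes its two key schedules through the XTS128_CONTEXT declared in modes_local.h above: key1/block1 process the data, key2/block2 encrypt the tweak, and the tweak cipher always runs in the encrypt direction. A sketch assuming AES-128 halves and a build that can see struct xts128_context (e.g. compiled next to modes_local.h); all names are illustrative, and len must be at least 16:

#include <stddef.h>

#include <openssl/aes.h>
#include "modes_local.h"	/* for struct xts128_context / XTS128_CONTEXT */

/* Sketch: XTS-AES-128 encryption of one data unit. */
static int
xts_encrypt_example(const unsigned char key1[16], const unsigned char key2[16],
    const unsigned char iv[16],
    const unsigned char *pt, unsigned char *ct, size_t len)
{
	AES_KEY k1, k2;
	XTS128_CONTEXT xts;

	if (AES_set_encrypt_key(key1, 128, &k1) != 0 ||
	    AES_set_encrypt_key(key2, 128, &k2) != 0)
		return -1;

	xts.key1 = &k1;				/* data key */
	xts.key2 = &k2;				/* tweak key */
	xts.block1 = (block128_f)AES_encrypt;
	xts.block2 = (block128_f)AES_encrypt;	/* tweak always uses encrypt */

	/* iv is the 16-byte tweak, e.g. derived from the sector number. */
	return CRYPTO_xts128_encrypt(&xts, iv, pt, ct, len, 1 /* enc */);
}

For decryption, key1/block1 would switch to the AES decrypt direction (AES_set_decrypt_key/AES_decrypt) while key2/block2 stay as above.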