Update BoringSSL to 2e68a05c9943a8dec1758d4a393b2ae906fd3295 (#88)

Also update the vendor script to pass an explicit destination.
This commit is contained in:
Cory Benfield 2021-08-17 09:37:33 +01:00 committed by GitHub
parent 8273610176
commit 684952cafa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
171 changed files with 8333 additions and 6434 deletions

View File

@ -20,7 +20,7 @@
// Sources/CCryptoBoringSSL directory. The source repository is at
// https://boringssl.googlesource.com/boringssl.
//
// BoringSSL Commit: 04b3213d43492b6c9e0434d8e2a4530a9938f958
// BoringSSL Commit: 2e68a05c9943a8dec1758d4a393b2ae906fd3295
import PackageDescription

View File

@ -25,6 +25,7 @@ add_library(CCryptoBoringSSL STATIC
"crypto/asn1/a_object.c"
"crypto/asn1/a_octet.c"
"crypto/asn1/a_print.c"
"crypto/asn1/a_strex.c"
"crypto/asn1/a_strnid.c"
"crypto/asn1/a_time.c"
"crypto/asn1/a_type.c"
@ -169,7 +170,6 @@ add_library(CCryptoBoringSSL STATIC
"crypto/fipsmodule/ecdsa/ecdsa.c"
"crypto/fipsmodule/fips_shared_support.c"
"crypto/fipsmodule/hmac/hmac.c"
"crypto/fipsmodule/is_fips.c"
"crypto/fipsmodule/md4/md4.c"
"crypto/fipsmodule/md5/md5.c"
"crypto/fipsmodule/modes/cbc.c"
@ -187,6 +187,7 @@ add_library(CCryptoBoringSSL STATIC
"crypto/fipsmodule/rsa/padding.c"
"crypto/fipsmodule/rsa/rsa.c"
"crypto/fipsmodule/rsa/rsa_impl.c"
"crypto/fipsmodule/self_check/fips.c"
"crypto/fipsmodule/self_check/self_check.c"
"crypto/fipsmodule/sha/sha1-altivec.c"
"crypto/fipsmodule/sha/sha1.c"
@ -239,13 +240,13 @@ add_library(CCryptoBoringSSL STATIC
"crypto/trust_token/voprf.c"
"crypto/x509/a_digest.c"
"crypto/x509/a_sign.c"
"crypto/x509/a_strex.c"
"crypto/x509/a_verify.c"
"crypto/x509/algorithm.c"
"crypto/x509/asn1_gen.c"
"crypto/x509/by_dir.c"
"crypto/x509/by_file.c"
"crypto/x509/i2d_pr.c"
"crypto/x509/name_print.c"
"crypto/x509/rsa_pss.c"
"crypto/x509/t_crl.c"
"crypto/x509/t_req.c"

View File

@ -65,64 +65,69 @@
#include "../internal.h"
int ASN1_BIT_STRING_set(ASN1_BIT_STRING *x, unsigned char *d, int len)
int ASN1_BIT_STRING_set(ASN1_BIT_STRING *x, const unsigned char *d, int len)
{
return ASN1_STRING_set(x, d, len);
}
/* asn1_bit_string_length computes the number of significant bytes in |str|
 * and the number of trailing padding bits. It writes the padding-bit count
 * to |*out_padding_bits| and returns the byte length. */
static int asn1_bit_string_length(const ASN1_BIT_STRING *str,
                                  uint8_t *out_padding_bits) {
  int num_bytes = str->length;

  if (str->flags & ASN1_STRING_FLAG_BITS_LEFT) {
    /* The explicit padding count lives in the low three bits of |flags|.
     * If the string is already empty, it cannot have padding bits. */
    *out_padding_bits = (num_bytes == 0) ? 0 : (str->flags & 0x07);
    return num_bytes;
  }

  /* TODO(davidben): If we move this logic to |ASN1_BIT_STRING_set_bit|, can
   * we remove this representation? */
  /* Trailing zero bytes contribute no set bits; drop them. */
  while (num_bytes > 0 && str->data[num_bytes - 1] == 0) {
    num_bytes--;
  }

  /* The padding count is the number of trailing zero bits in the last
   * remaining byte. */
  uint8_t padding = 0;
  if (num_bytes > 0) {
    uint8_t last_byte = str->data[num_bytes - 1];
    assert(last_byte != 0);
    while (padding < 7 && (last_byte & (1u << padding)) == 0) {
      padding++;
    }
  }
  *out_padding_bits = padding;
  return num_bytes;
}
/* ASN1_BIT_STRING_num_bytes writes the byte length of |str| to |*out| and
 * returns one, provided the BIT STRING is a whole number of bytes (i.e. has
 * no padding bits). Otherwise it returns zero and leaves |*out| untouched. */
int ASN1_BIT_STRING_num_bytes(const ASN1_BIT_STRING *str, size_t *out) {
  uint8_t padding_bits;
  const int num_bytes = asn1_bit_string_length(str, &padding_bits);
  if (padding_bits != 0) {
    return 0;
  }
  *out = (size_t)num_bytes;
  return 1;
}
int i2c_ASN1_BIT_STRING(const ASN1_BIT_STRING *a, unsigned char **pp)
{
int ret, j, bits, len;
unsigned char *p, *d;
if (a == NULL) {
return 0;
}
if (a == NULL)
return (0);
len = a->length;
uint8_t bits;
int len = asn1_bit_string_length(a, &bits);
int ret = 1 + len;
if (pp == NULL) {
return ret;
}
uint8_t *p = *pp;
*(p++) = bits;
OPENSSL_memcpy(p, a->data, len);
if (len > 0) {
if (a->flags & ASN1_STRING_FLAG_BITS_LEFT) {
bits = (int)a->flags & 0x07;
} else {
for (; len > 0; len--) {
if (a->data[len - 1])
break;
}
j = a->data[len - 1];
if (j & 0x01)
bits = 0;
else if (j & 0x02)
bits = 1;
else if (j & 0x04)
bits = 2;
else if (j & 0x08)
bits = 3;
else if (j & 0x10)
bits = 4;
else if (j & 0x20)
bits = 5;
else if (j & 0x40)
bits = 6;
else if (j & 0x80)
bits = 7;
else
bits = 0; /* should not happen */
}
} else
bits = 0;
ret = 1 + len;
if (pp == NULL)
return (ret);
p = *pp;
*(p++) = (unsigned char)bits;
d = a->data;
OPENSSL_memcpy(p, d, len);
p[len - 1] &= (0xff << bits);
}
p += len;
if (len > 0)
p[-1] &= (0xff << bits);
*pp = p;
return (ret);
}
@ -251,7 +256,7 @@ int ASN1_BIT_STRING_get_bit(const ASN1_BIT_STRING *a, int n)
* 'len' is the length of 'flags'.
*/
int ASN1_BIT_STRING_check(const ASN1_BIT_STRING *a,
unsigned char *flags, int flags_len)
const unsigned char *flags, int flags_len)
{
int i, ok;
/* Check if there is one bit set at all. */

View File

@ -78,7 +78,7 @@ int i2d_ASN1_BOOLEAN(int a, unsigned char **pp)
}
ASN1_put_object(&p, 0, 1, V_ASN1_BOOLEAN, V_ASN1_UNIVERSAL);
*p = (unsigned char)a;
*p = a ? 0xff : 0x00;
/*
* If a new buffer was allocated, just return it back.

View File

@ -78,7 +78,6 @@ void *ASN1_item_d2i_bio(const ASN1_ITEM *it, BIO *in, void *x)
return ret;
}
#ifndef OPENSSL_NO_FP_API
void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x)
{
BIO *b = BIO_new_fp(in, BIO_NOCLOSE);
@ -90,4 +89,3 @@ void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x)
BIO_free(b);
return ret;
}
#endif

View File

@ -62,7 +62,7 @@
#include <CCryptoBoringSSL_err.h>
#include <CCryptoBoringSSL_mem.h>
#include "asn1_locl.h"
#include "internal.h"
int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d)
{
@ -237,6 +237,11 @@ ASN1_GENERALIZEDTIME *ASN1_GENERALIZEDTIME_adj(ASN1_GENERALIZEDTIME *s,
goto err;
}
if (ts->tm_year < 0 - 1900 || ts->tm_year > 9999 - 1900) {
OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_TIME_VALUE);
goto err;
}
p = (char *)tmps->data;
if ((p == NULL) || ((size_t)tmps->length < len)) {
p = OPENSSL_malloc(len);

View File

@ -63,7 +63,7 @@
#include <CCryptoBoringSSL_err.h>
#include <CCryptoBoringSSL_mem.h>
#include "asn1_locl.h"
#include "internal.h"
#include "../bytestring/internal.h"
static int is_printable(uint32_t value);
@ -208,11 +208,14 @@ int ASN1_mbstring_ncopy(ASN1_STRING **out, const unsigned char *in, int len,
encode_func = cbb_add_utf32_be;
size_estimate = 4 * nchar;
outform = MBSTRING_UNIV;
} else {
} else if (mask & B_ASN1_UTF8STRING) {
str_type = V_ASN1_UTF8STRING;
outform = MBSTRING_UTF8;
encode_func = cbb_add_utf8;
size_estimate = utf8_len;
} else {
OPENSSL_PUT_ERROR(ASN1, ASN1_R_ILLEGAL_CHARACTERS);
return -1;
}
if (!out)

View File

@ -64,6 +64,7 @@
#include <CCryptoBoringSSL_obj.h>
#include "../internal.h"
#include "internal.h"
int i2d_ASN1_OBJECT(const ASN1_OBJECT *a, unsigned char **pp)
@ -180,16 +181,13 @@ ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp,
}
}
/*
* only the ASN1_OBJECTs from the 'table' will have values for ->sn or
* ->ln
*/
if ((a == NULL) || ((*a) == NULL) ||
!((*a)->flags & ASN1_OBJECT_FLAG_DYNAMIC)) {
if ((ret = ASN1_OBJECT_new()) == NULL)
return (NULL);
} else
} else {
ret = (*a);
}
p = *pp;
/* detach data from object */
@ -208,12 +206,17 @@ ASN1_OBJECT *c2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp,
ret->flags |= ASN1_OBJECT_FLAG_DYNAMIC_DATA;
}
OPENSSL_memcpy(data, p, length);
/* If there are dynamic strings, free them here, and clear the flag */
if ((ret->flags & ASN1_OBJECT_FLAG_DYNAMIC_STRINGS) != 0) {
OPENSSL_free((char *)ret->sn);
OPENSSL_free((char *)ret->ln);
ret->flags &= ~ASN1_OBJECT_FLAG_DYNAMIC_STRINGS;
}
/* reattach data to object, after which it remains const */
ret->data = data;
ret->length = length;
ret->sn = NULL;
ret->ln = NULL;
/* ret->flags=ASN1_OBJECT_FLAG_DYNAMIC; we know it is dynamic */
p += length;
if (a != NULL)
@ -263,7 +266,7 @@ void ASN1_OBJECT_free(ASN1_OBJECT *a)
OPENSSL_free(a);
}
ASN1_OBJECT *ASN1_OBJECT_create(int nid, unsigned char *data, int len,
ASN1_OBJECT *ASN1_OBJECT_create(int nid, const unsigned char *data, int len,
const char *sn, const char *ln)
{
ASN1_OBJECT o;

View File

@ -54,23 +54,27 @@
* copied and put under another distribution licence
* [including the GNU Public Licence.] */
#include <CCryptoBoringSSL_x509.h>
#include <CCryptoBoringSSL_asn1.h>
#include <ctype.h>
#include <inttypes.h>
#include <string.h>
#include <CCryptoBoringSSL_asn1.h>
#include <CCryptoBoringSSL_bio.h>
#include <CCryptoBoringSSL_mem.h>
#include <CCryptoBoringSSL_obj.h>
#include "charmap.h"
#include "../asn1/asn1_locl.h"
#include "internal.h"
/*
* ASN1_STRING_print_ex() and X509_NAME_print_ex(). Enhanced string and name
* printing routines handling multibyte characters, RFC2253 and a host of
* other options.
*/
// These flags must be distinct from |ESC_FLAGS| and fit in a byte.
// Character is a valid PrintableString character
#define CHARTYPE_PRINTABLESTRING 0x10
// Character needs escaping if it is the first character
#define CHARTYPE_FIRST_ESC_2253 0x20
// Character needs escaping if it is the last character
#define CHARTYPE_LAST_ESC_2253 0x40
#define CHARTYPE_BS_ESC (ASN1_STRFLGS_ESC_2253 | CHARTYPE_FIRST_ESC_2253 | CHARTYPE_LAST_ESC_2253)
@ -79,26 +83,12 @@
ASN1_STRFLGS_ESC_CTRL | \
ASN1_STRFLGS_ESC_MSB)
static int send_bio_chars(void *arg, const void *buf, int len)
static int maybe_write(BIO *out, const void *buf, int len)
{
if (!arg)
return 1;
if (BIO_write(arg, buf, len) != len)
return 0;
return 1;
/* If |out| is NULL, ignore the output but report the length. */
return out == NULL || BIO_write(out, buf, len) == len;
}
static int send_fp_chars(void *arg, const void *buf, int len)
{
if (!arg)
return 1;
if (fwrite(buf, 1, len, arg) != (unsigned int)len)
return 0;
return 1;
}
typedef int char_io (void *arg, const void *buf, int len);
/*
* This function handles display of strings, one character at a time. It is
* passed an unsigned long for each character because it could come from 2 or
@ -108,20 +98,20 @@ typedef int char_io (void *arg, const void *buf, int len);
#define HEX_SIZE(type) (sizeof(type)*2)
static int do_esc_char(uint32_t c, unsigned char flags, char *do_quotes,
char_io *io_ch, void *arg)
BIO *out)
{
unsigned char chflgs, chtmp;
char tmphex[HEX_SIZE(uint32_t) + 3];
if (c > 0xffff) {
BIO_snprintf(tmphex, sizeof tmphex, "\\W%08" PRIX32, c);
if (!io_ch(arg, tmphex, 10))
if (!maybe_write(out, tmphex, 10))
return -1;
return 10;
}
if (c > 0xff) {
BIO_snprintf(tmphex, sizeof tmphex, "\\U%04" PRIX32, c);
if (!io_ch(arg, tmphex, 6))
if (!maybe_write(out, tmphex, 6))
return -1;
return 6;
}
@ -135,19 +125,19 @@ static int do_esc_char(uint32_t c, unsigned char flags, char *do_quotes,
if (chflgs & ASN1_STRFLGS_ESC_QUOTE) {
if (do_quotes)
*do_quotes = 1;
if (!io_ch(arg, &chtmp, 1))
if (!maybe_write(out, &chtmp, 1))
return -1;
return 1;
}
if (!io_ch(arg, "\\", 1))
if (!maybe_write(out, "\\", 1))
return -1;
if (!io_ch(arg, &chtmp, 1))
if (!maybe_write(out, &chtmp, 1))
return -1;
return 2;
}
if (chflgs & (ASN1_STRFLGS_ESC_CTRL | ASN1_STRFLGS_ESC_MSB)) {
BIO_snprintf(tmphex, 11, "\\%02X", chtmp);
if (!io_ch(arg, tmphex, 3))
if (!maybe_write(out, tmphex, 3))
return -1;
return 3;
}
@ -156,11 +146,11 @@ static int do_esc_char(uint32_t c, unsigned char flags, char *do_quotes,
* character itself: backslash.
*/
if (chtmp == '\\' && flags & ESC_FLAGS) {
if (!io_ch(arg, "\\\\", 2))
if (!maybe_write(out, "\\\\", 2))
return -1;
return 2;
}
if (!io_ch(arg, &chtmp, 1))
if (!maybe_write(out, &chtmp, 1))
return -1;
return 1;
}
@ -175,8 +165,7 @@ static int do_esc_char(uint32_t c, unsigned char flags, char *do_quotes,
*/
static int do_buf(unsigned char *buf, int buflen,
int type, unsigned char flags, char *quotes, char_io *io_ch,
void *arg)
int type, unsigned char flags, char *quotes, BIO *out)
{
int i, outlen, len, charwidth;
unsigned char orflags, *p, *q;
@ -208,6 +197,8 @@ static int do_buf(unsigned char *buf, int buflen,
orflags = CHARTYPE_FIRST_ESC_2253;
else
orflags = 0;
/* TODO(davidben): Replace this with |cbs_get_ucs2_be|, etc., to check
* for invalid codepoints. */
switch (charwidth) {
case 4:
c = ((uint32_t)*p++) << 24;
@ -248,17 +239,14 @@ static int do_buf(unsigned char *buf, int buflen,
* otherwise each character will be > 0x7f and so the
* character will never be escaped on first and last.
*/
len =
do_esc_char(utfbuf[i], (unsigned char)(flags | orflags),
quotes, io_ch, arg);
len = do_esc_char(utfbuf[i], (unsigned char)(flags | orflags),
quotes, out);
if (len < 0)
return -1;
outlen += len;
}
} else {
len =
do_esc_char(c, (unsigned char)(flags | orflags), quotes,
io_ch, arg);
len = do_esc_char(c, (unsigned char)(flags | orflags), quotes, out);
if (len < 0)
return -1;
outlen += len;
@ -269,19 +257,18 @@ static int do_buf(unsigned char *buf, int buflen,
/* This function hex dumps a buffer of characters */
static int do_hex_dump(char_io *io_ch, void *arg, unsigned char *buf,
int buflen)
static int do_hex_dump(BIO *out, unsigned char *buf, int buflen)
{
static const char hexdig[] = "0123456789ABCDEF";
unsigned char *p, *q;
char hextmp[2];
if (arg) {
if (out) {
p = buf;
q = buf + buflen;
while (p != q) {
hextmp[0] = hexdig[*p >> 4];
hextmp[1] = hexdig[*p & 0xf];
if (!io_ch(arg, hextmp, 2))
if (!maybe_write(out, hextmp, 2))
return -1;
p++;
}
@ -295,38 +282,52 @@ static int do_hex_dump(char_io *io_ch, void *arg, unsigned char *buf,
* encoding. This uses the RFC2253 #01234 format.
*/
static int do_dump(unsigned long lflags, char_io *io_ch, void *arg,
const ASN1_STRING *str)
static int do_dump(unsigned long lflags, BIO *out, const ASN1_STRING *str)
{
/*
* Placing the ASN1_STRING in a temp ASN1_TYPE allows the DER encoding to
* readily obtained
*/
ASN1_TYPE t;
unsigned char *der_buf, *p;
int outlen, der_len;
if (!io_ch(arg, "#", 1))
if (!maybe_write(out, "#", 1)) {
return -1;
}
/* If we don't dump DER encoding just dump content octets */
if (!(lflags & ASN1_STRFLGS_DUMP_DER)) {
outlen = do_hex_dump(io_ch, arg, str->data, str->length);
if (outlen < 0)
int outlen = do_hex_dump(out, str->data, str->length);
if (outlen < 0) {
return -1;
}
return outlen + 1;
}
/*
* Placing the ASN1_STRING in a temporary ASN1_TYPE allows the DER encoding
* to readily obtained.
*/
ASN1_TYPE t;
t.type = str->type;
t.value.ptr = (char *)str;
der_len = i2d_ASN1_TYPE(&t, NULL);
der_buf = OPENSSL_malloc(der_len);
if (!der_buf)
/* Negative INTEGER and ENUMERATED values are the only case where
* |ASN1_STRING| and |ASN1_TYPE| types do not match.
*
* TODO(davidben): There are also some type fields which, in |ASN1_TYPE|, do
* not correspond to |ASN1_STRING|. It is unclear whether those are allowed
* in |ASN1_STRING| at all, or what the space of allowed types is.
* |ASN1_item_ex_d2i| will never produce such a value so, for now, we say
* this is an invalid input. But this corner of the library in general
* should be more robust. */
if (t.type == V_ASN1_NEG_INTEGER) {
t.type = V_ASN1_INTEGER;
} else if (t.type == V_ASN1_NEG_ENUMERATED) {
t.type = V_ASN1_ENUMERATED;
}
t.value.asn1_string = (ASN1_STRING *)str;
unsigned char *der_buf = NULL;
int der_len = i2d_ASN1_TYPE(&t, &der_buf);
if (der_len < 0) {
return -1;
p = der_buf;
i2d_ASN1_TYPE(&t, &p);
outlen = do_hex_dump(io_ch, arg, der_buf, der_len);
}
int outlen = do_hex_dump(out, der_buf, der_len);
OPENSSL_free(der_buf);
if (outlen < 0)
if (outlen < 0) {
return -1;
}
return outlen + 1;
}
@ -353,8 +354,7 @@ static const signed char tag2nbyte[] = {
* an error occurred.
*/
static int do_print_ex(char_io *io_ch, void *arg, unsigned long lflags,
const ASN1_STRING *str)
int ASN1_STRING_print_ex(BIO *out, const ASN1_STRING *str, unsigned long lflags)
{
int outlen, len;
int type;
@ -372,7 +372,7 @@ static int do_print_ex(char_io *io_ch, void *arg, unsigned long lflags,
const char *tagname;
tagname = ASN1_tag2str(type);
outlen += strlen(tagname);
if (!io_ch(arg, tagname, outlen) || !io_ch(arg, ":", 1))
if (!maybe_write(out, tagname, outlen) || !maybe_write(out, ":", 1))
return -1;
outlen++;
}
@ -396,7 +396,7 @@ static int do_print_ex(char_io *io_ch, void *arg, unsigned long lflags,
}
if (type == -1) {
len = do_dump(lflags, io_ch, arg, str);
len = do_dump(lflags, out, str);
if (len < 0)
return -1;
outlen += len;
@ -415,219 +415,41 @@ static int do_print_ex(char_io *io_ch, void *arg, unsigned long lflags,
type |= BUF_TYPE_CONVUTF8;
}
len = do_buf(str->data, str->length, type, flags, &quotes, io_ch, NULL);
len = do_buf(str->data, str->length, type, flags, &quotes, NULL);
if (len < 0)
return -1;
outlen += len;
if (quotes)
outlen += 2;
if (!arg)
if (!out)
return outlen;
if (quotes && !io_ch(arg, "\"", 1))
if (quotes && !maybe_write(out, "\"", 1))
return -1;
if (do_buf(str->data, str->length, type, flags, NULL, io_ch, arg) < 0)
if (do_buf(str->data, str->length, type, flags, NULL, out) < 0)
return -1;
if (quotes && !io_ch(arg, "\"", 1))
if (quotes && !maybe_write(out, "\"", 1))
return -1;
return outlen;
}
/* Used for line indenting: print 'indent' spaces */
static int do_indent(char_io *io_ch, void *arg, int indent)
int ASN1_STRING_print_ex_fp(FILE *fp, const ASN1_STRING *str,
unsigned long flags)
{
int i;
for (i = 0; i < indent; i++)
if (!io_ch(arg, " ", 1))
return 0;
return 1;
}
#define FN_WIDTH_LN 25
#define FN_WIDTH_SN 10
static int do_name_ex(char_io *io_ch, void *arg, const X509_NAME *n,
int indent, unsigned long flags)
{
int i, prev = -1, orflags, cnt;
int fn_opt, fn_nid;
ASN1_OBJECT *fn;
ASN1_STRING *val;
X509_NAME_ENTRY *ent;
char objtmp[80];
const char *objbuf;
int outlen, len;
const char *sep_dn, *sep_mv, *sep_eq;
int sep_dn_len, sep_mv_len, sep_eq_len;
if (indent < 0)
indent = 0;
outlen = indent;
if (!do_indent(io_ch, arg, indent))
return -1;
switch (flags & XN_FLAG_SEP_MASK) {
case XN_FLAG_SEP_MULTILINE:
sep_dn = "\n";
sep_dn_len = 1;
sep_mv = " + ";
sep_mv_len = 3;
break;
case XN_FLAG_SEP_COMMA_PLUS:
sep_dn = ",";
sep_dn_len = 1;
sep_mv = "+";
sep_mv_len = 1;
indent = 0;
break;
case XN_FLAG_SEP_CPLUS_SPC:
sep_dn = ", ";
sep_dn_len = 2;
sep_mv = " + ";
sep_mv_len = 3;
indent = 0;
break;
case XN_FLAG_SEP_SPLUS_SPC:
sep_dn = "; ";
sep_dn_len = 2;
sep_mv = " + ";
sep_mv_len = 3;
indent = 0;
break;
default:
return -1;
}
if (flags & XN_FLAG_SPC_EQ) {
sep_eq = " = ";
sep_eq_len = 3;
} else {
sep_eq = "=";
sep_eq_len = 1;
}
fn_opt = flags & XN_FLAG_FN_MASK;
cnt = X509_NAME_entry_count(n);
for (i = 0; i < cnt; i++) {
if (flags & XN_FLAG_DN_REV)
ent = X509_NAME_get_entry(n, cnt - i - 1);
else
ent = X509_NAME_get_entry(n, i);
if (prev != -1) {
if (prev == ent->set) {
if (!io_ch(arg, sep_mv, sep_mv_len))
return -1;
outlen += sep_mv_len;
} else {
if (!io_ch(arg, sep_dn, sep_dn_len))
return -1;
outlen += sep_dn_len;
if (!do_indent(io_ch, arg, indent))
return -1;
outlen += indent;
}
}
prev = ent->set;
fn = X509_NAME_ENTRY_get_object(ent);
val = X509_NAME_ENTRY_get_data(ent);
fn_nid = OBJ_obj2nid(fn);
if (fn_opt != XN_FLAG_FN_NONE) {
int objlen, fld_len;
if ((fn_opt == XN_FLAG_FN_OID) || (fn_nid == NID_undef)) {
OBJ_obj2txt(objtmp, sizeof objtmp, fn, 1);
fld_len = 0; /* XXX: what should this be? */
objbuf = objtmp;
} else {
if (fn_opt == XN_FLAG_FN_SN) {
fld_len = FN_WIDTH_SN;
objbuf = OBJ_nid2sn(fn_nid);
} else if (fn_opt == XN_FLAG_FN_LN) {
fld_len = FN_WIDTH_LN;
objbuf = OBJ_nid2ln(fn_nid);
} else {
fld_len = 0; /* XXX: what should this be? */
objbuf = "";
}
}
objlen = strlen(objbuf);
if (!io_ch(arg, objbuf, objlen))
return -1;
if ((objlen < fld_len) && (flags & XN_FLAG_FN_ALIGN)) {
if (!do_indent(io_ch, arg, fld_len - objlen))
return -1;
outlen += fld_len - objlen;
}
if (!io_ch(arg, sep_eq, sep_eq_len))
return -1;
outlen += objlen + sep_eq_len;
}
/*
* If the field name is unknown then fix up the DER dump flag. We
* might want to limit this further so it will DER dump on anything
* other than a few 'standard' fields.
*/
if ((fn_nid == NID_undef) && (flags & XN_FLAG_DUMP_UNKNOWN_FIELDS))
orflags = ASN1_STRFLGS_DUMP_ALL;
else
orflags = 0;
len = do_print_ex(io_ch, arg, flags | orflags, val);
if (len < 0)
BIO *bio = NULL;
if (fp != NULL) {
/* If |fp| is NULL, this function returns the number of bytes without
* writing. */
bio = BIO_new_fp(fp, BIO_NOCLOSE);
if (bio == NULL) {
return -1;
outlen += len;
}
}
return outlen;
int ret = ASN1_STRING_print_ex(bio, str, flags);
BIO_free(bio);
return ret;
}
/* Wrappers round the main functions */
int X509_NAME_print_ex(BIO *out, const X509_NAME *nm, int indent,
unsigned long flags)
{
if (flags == XN_FLAG_COMPAT)
return X509_NAME_print(out, nm, indent);
return do_name_ex(send_bio_chars, out, nm, indent, flags);
}
#ifndef OPENSSL_NO_FP_API
int X509_NAME_print_ex_fp(FILE *fp, const X509_NAME *nm, int indent,
unsigned long flags)
{
if (flags == XN_FLAG_COMPAT) {
BIO *btmp;
int ret;
btmp = BIO_new_fp(fp, BIO_NOCLOSE);
if (!btmp)
return -1;
ret = X509_NAME_print(btmp, nm, indent);
BIO_free(btmp);
return ret;
}
return do_name_ex(send_fp_chars, fp, nm, indent, flags);
}
#endif
int ASN1_STRING_print_ex(BIO *out, const ASN1_STRING *str, unsigned long flags)
{
return do_print_ex(send_bio_chars, out, flags, str);
}
#ifndef OPENSSL_NO_FP_API
int ASN1_STRING_print_ex_fp(FILE *fp, const ASN1_STRING *str, unsigned long flags)
{
return do_print_ex(send_fp_chars, fp, flags, str);
}
#endif
/*
* Utility function: convert any string type to UTF8, returns number of bytes
* in output string or a negative error code
*/
int ASN1_STRING_to_UTF8(unsigned char **out, ASN1_STRING *in)
int ASN1_STRING_to_UTF8(unsigned char **out, const ASN1_STRING *in)
{
ASN1_STRING stmp, *str = &stmp;
int mbflag, type, ret;
@ -643,11 +465,186 @@ int ASN1_STRING_to_UTF8(unsigned char **out, ASN1_STRING *in)
stmp.data = NULL;
stmp.length = 0;
stmp.flags = 0;
ret =
ASN1_mbstring_copy(&str, in->data, in->length, mbflag,
B_ASN1_UTF8STRING);
ret = ASN1_mbstring_copy(&str, in->data, in->length, mbflag,
B_ASN1_UTF8STRING);
if (ret < 0)
return ret;
*out = stmp.data;
return stmp.length;
}
/* ASN1_STRING_print writes |v| to |bp|, replacing every byte outside
 * printable ASCII — except CR and LF — with '.'. Returns one on success and
 * zero on a write failure or NULL input. */
int ASN1_STRING_print(BIO *bp, const ASN1_STRING *v)
{
    char chunk[80];
    int fill = 0;

    if (v == NULL) {
        return 0;
    }

    const char *data = (const char *)v->data;
    for (int i = 0; i < v->length; i++) {
        char c = data[i];
        /* Keep printable ASCII plus CR/LF; everything else becomes '.'. */
        int printable = (c <= '~') && (c >= ' ' || c == '\n' || c == '\r');
        chunk[fill++] = printable ? c : '.';
        /* Flush the staging buffer whenever it fills up. */
        if (fill >= (int)sizeof(chunk)) {
            if (BIO_write(bp, chunk, fill) <= 0) {
                return 0;
            }
            fill = 0;
        }
    }
    if (fill > 0 && BIO_write(bp, chunk, fill) <= 0) {
        return 0;
    }
    return 1;
}
/* ASN1_TIME_print dispatches to the printer matching |tm|'s type. On an
 * unrecognized type it writes "Bad time value" and returns zero. */
int ASN1_TIME_print(BIO *bp, const ASN1_TIME *tm)
{
    switch (tm->type) {
    case V_ASN1_UTCTIME:
        return ASN1_UTCTIME_print(bp, tm);
    case V_ASN1_GENERALIZEDTIME:
        return ASN1_GENERALIZEDTIME_print(bp, tm);
    default:
        BIO_write(bp, "Bad time value", 14);
        return 0;
    }
}
/* Three-letter English month abbreviations, indexed by month number minus
 * one. Shared by the time-printing functions below. */
static const char *const mon[12] = {
    "Jan", "Feb", "Mar", "Apr", "May", "Jun",
    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
};
/* ASN1_GENERALIZEDTIME_print writes |tm| to |bp| in a human-readable form
 * such as "Jan  2 03:04:05 2000 GMT". Returns one on success; on a malformed
 * time it writes "Bad time value" and returns zero. */
int ASN1_GENERALIZEDTIME_print(BIO *bp, const ASN1_GENERALIZEDTIME *tm)
{
    char *v;
    int gmt = 0;
    int i;
    int y = 0, M = 0, d = 0, h = 0, m = 0, s = 0;
    char *f = NULL;   /* Fractional seconds, starting at the '.'; NULL if absent. */
    int f_len = 0;

    i = tm->length;
    v = (char *)tm->data;

    /* YYYYMMDDHHMM is the minimum form: twelve characters. */
    if (i < 12)
        goto err;
    /* A trailing 'Z' marks the time as GMT. */
    if (v[i - 1] == 'Z')
        gmt = 1;
    /* The first twelve characters must all be ASCII digits.
     * (|i| is reused as a loop index from here on.) */
    for (i = 0; i < 12; i++)
        if ((v[i] > '9') || (v[i] < '0'))
            goto err;
    y = (v[0] - '0') * 1000 + (v[1] - '0') * 100 + (v[2] - '0') * 10 + (v[3] - '0');
    M = (v[4] - '0') * 10 + (v[5] - '0');
    if ((M > 12) || (M < 1))
        goto err;
    d = (v[6] - '0') * 10 + (v[7] - '0');
    h = (v[8] - '0') * 10 + (v[9] - '0');
    m = (v[10] - '0') * 10 + (v[11] - '0');
    /* Seconds are optional: only consumed when two more digits follow. */
    if (tm->length >= 14 &&
        (v[12] >= '0') && (v[12] <= '9') &&
        (v[13] >= '0') && (v[13] <= '9')) {
        s = (v[12] - '0') * 10 + (v[13] - '0');
        /* Check for fractions of seconds. */
        if (tm->length >= 15 && v[14] == '.') {
            int l = tm->length;
            f = &v[14];     /* The decimal point. */
            f_len = 1;
            /* Extend |f_len| over every following digit. */
            while (14 + f_len < l && f[f_len] >= '0' && f[f_len] <= '9')
                ++f_len;
        }
    }

    /* "%.*s" prints the fraction (or nothing when |f_len| is zero). */
    if (BIO_printf(bp, "%s %2d %02d:%02d:%02d%.*s %d%s",
                   mon[M - 1], d, h, m, s, f_len, f, y,
                   (gmt) ? " GMT" : "") <= 0)
        return (0);
    else
        return (1);
 err:
    BIO_write(bp, "Bad time value", 14);
    return (0);
}
// consume_two_digits is a helper function for ASN1_UTCTIME_print. If |*v|,
// assumed to be |*len| bytes long, has two leading digits, updates |*out| with
// their value, updates |v| and |len|, and returns one. Otherwise, returns
// zero.
static int consume_two_digits(int *out, const char **v, int *len) {
  // Cast through |unsigned char| before calling |isdigit|: passing a
  // negative |char| value is undefined behavior (C11 7.4p1).
  if (*len < 2 || !isdigit((unsigned char)(*v)[0]) ||
      !isdigit((unsigned char)(*v)[1])) {
    return 0;
  }
  *out = ((*v)[0] - '0') * 10 + ((*v)[1] - '0');
  *len -= 2;
  *v += 2;
  return 1;
}
// consume_zulu_timezone is a helper function for ASN1_UTCTIME_print. If |*v|,
// assumed to be |*len| bytes long, starts with "Z" then it updates |*v| and
// |*len| and returns one. Otherwise returns zero.
static int consume_zulu_timezone(const char **v, int *len) {
  int has_zulu = *len > 0 && (*v)[0] == 'Z';
  if (has_zulu) {
    *v += 1;
    *len -= 1;
  }
  return has_zulu;
}
/* ASN1_UTCTIME_print writes |tm| to |bp| in a human-readable form such as
 * "Jan  2 03:04:05 2000 GMT". Returns one on success; on a malformed time it
 * writes "Bad time value" and returns zero. */
int ASN1_UTCTIME_print(BIO *bp, const ASN1_UTCTIME *tm) {
  const char *p = (const char *)tm->data;
  int remaining = tm->length;
  int year = 0, month = 0, day = 0, hour = 0, minute = 0, second = 0;

  // YYMMDDhhmm are required to be present.
  if (!consume_two_digits(&year, &p, &remaining) ||
      !consume_two_digits(&month, &p, &remaining) ||
      !consume_two_digits(&day, &p, &remaining) ||
      !consume_two_digits(&hour, &p, &remaining) ||
      !consume_two_digits(&minute, &p, &remaining)) {
    goto err;
  }
  // https://tools.ietf.org/html/rfc5280, section 4.1.2.5.1, requires seconds
  // to be present, but historically this code has forgiven its absence.
  consume_two_digits(&second, &p, &remaining);
  // https://tools.ietf.org/html/rfc5280, section 4.1.2.5.1, specifies this
  // interpretation of the two-digit year.
  year += (year < 50) ? 2000 : 1900;
  // Reject out-of-range fields. Seconds may reach 60 to allow for leap
  // seconds.
  if (month < 1 || month > 12 || day < 1 || day > 31 ||
      hour > 23 || minute > 59 || second > 60) {
    goto err;
  }
  // https://tools.ietf.org/html/rfc5280, section 4.1.2.5.1, requires the "Z"
  // to be present, but historically this code has forgiven its absence.
  const int is_gmt = consume_zulu_timezone(&p, &remaining);
  // https://tools.ietf.org/html/rfc5280, section 4.1.2.5.1, does not permit
  // the specification of timezones using the +hhmm / -hhmm syntax, which is
  // the only other thing that might legitimately be found at the end.
  if (remaining != 0) {
    goto err;
  }
  return BIO_printf(bp, "%s %2d %02d:%02d:%02d %d%s", mon[month - 1], day,
                    hour, minute, second, year, is_gmt ? " GMT" : "") > 0;

err:
  BIO_write(bp, "Bad time value", 14);
  return 0;
}

View File

@ -69,53 +69,17 @@ DEFINE_STACK_OF(ASN1_STRING_TABLE)
static STACK_OF(ASN1_STRING_TABLE) *stable = NULL;
static void st_free(ASN1_STRING_TABLE *tbl);
/*
* This is the global mask for the mbstring functions: this is use to mask
* out certain types (such as BMPString and UTF8String) because certain
* software (e.g. Netscape) has problems with them.
*/
static unsigned long global_mask = B_ASN1_UTF8STRING;
void ASN1_STRING_set_default_mask(unsigned long mask)
{
global_mask = mask;
}
unsigned long ASN1_STRING_get_default_mask(void)
{
return global_mask;
return B_ASN1_UTF8STRING;
}
/*
* This function sets the default to various "flavours" of configuration.
* based on an ASCII string. Currently this is: MASK:XXXX : a numerical mask
* value. nobmp : Don't use BMPStrings (just Printable, T61). pkix : PKIX
* recommendation in RFC2459. utf8only : only use UTF8Strings (RFC2459
* recommendation for 2004). default: the default value, Printable, T61, BMP.
*/
int ASN1_STRING_set_default_mask_asc(const char *p)
{
unsigned long mask;
char *end;
if (!strncmp(p, "MASK:", 5)) {
if (!p[5])
return 0;
mask = strtoul(p + 5, &end, 0);
if (*end)
return 0;
} else if (!strcmp(p, "nombstr"))
mask = ~((unsigned long)(B_ASN1_BMPSTRING | B_ASN1_UTF8STRING));
else if (!strcmp(p, "pkix"))
mask = ~((unsigned long)B_ASN1_T61STRING);
else if (!strcmp(p, "utf8only"))
mask = B_ASN1_UTF8STRING;
else if (!strcmp(p, "default"))
mask = 0xFFFFFFFFL;
else
return 0;
ASN1_STRING_set_default_mask(mask);
return 1;
}
@ -139,13 +103,12 @@ ASN1_STRING *ASN1_STRING_set_by_NID(ASN1_STRING **out,
if (tbl) {
mask = tbl->mask;
if (!(tbl->flags & STABLE_NO_MASK))
mask &= global_mask;
mask &= B_ASN1_UTF8STRING;
ret = ASN1_mbstring_ncopy(out, in, inlen, inform, mask,
tbl->minsize, tbl->maxsize);
} else
ret =
ASN1_mbstring_copy(out, in, inlen, inform,
DIRSTRING_TYPE & global_mask);
} else {
ret = ASN1_mbstring_copy(out, in, inlen, inform, B_ASN1_UTF8STRING);
}
if (ret <= 0)
return NULL;
return *out;

View File

@ -63,7 +63,7 @@
#include <CCryptoBoringSSL_err.h>
#include <CCryptoBoringSSL_mem.h>
#include "asn1_locl.h"
#include "internal.h"
/*
* This is an implementation of the ASN1 Time structure which is: Time ::=
@ -200,7 +200,7 @@ static int asn1_time_to_tm(struct tm *tm, const ASN1_TIME *t)
return 0;
}
int ASN1_TIME_diff(int *pday, int *psec,
int ASN1_TIME_diff(int *out_days, int *out_seconds,
const ASN1_TIME *from, const ASN1_TIME *to)
{
struct tm tm_from, tm_to;
@ -208,5 +208,5 @@ int ASN1_TIME_diff(int *pday, int *psec,
return 0;
if (!asn1_time_to_tm(&tm_to, to))
return 0;
return OPENSSL_gmtime_diff(pday, psec, &tm_from, &tm_to);
return OPENSSL_gmtime_diff(out_days, out_seconds, &tm_from, &tm_to);
}

View File

@ -61,23 +61,33 @@
#include <CCryptoBoringSSL_mem.h>
#include <CCryptoBoringSSL_obj.h>
#include "asn1_locl.h"
#include "internal.h"
int ASN1_TYPE_get(const ASN1_TYPE *a)
{
if ((a->value.ptr != NULL) || (a->type == V_ASN1_NULL))
return (a->type);
else
return (0);
if (a->type == V_ASN1_BOOLEAN || a->type == V_ASN1_NULL ||
a->value.ptr != NULL) {
return a->type;
}
return 0;
}
/* asn1_type_value_as_pointer returns |a|'s value in pointer form. Booleans
 * have no pointer representation, so a true value is encoded as 0xff cast to
 * a pointer and a false value (and NULL type) as NULL. */
const void *asn1_type_value_as_pointer(const ASN1_TYPE *a)
{
    switch (a->type) {
    case V_ASN1_BOOLEAN:
        return a->value.boolean ? (void *)0xff : NULL;
    case V_ASN1_NULL:
        return NULL;
    default:
        return a->value.ptr;
    }
}
void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value)
{
if (a->value.ptr != NULL) {
ASN1_TYPE **tmp_a = &a;
ASN1_primitive_free((ASN1_VALUE **)tmp_a, NULL);
}
ASN1_TYPE **tmp_a = &a;
ASN1_primitive_free((ASN1_VALUE **)tmp_a, NULL);
a->type = type;
if (type == V_ASN1_BOOLEAN)
a->value.boolean = value ? 0xff : 0;

View File

@ -62,7 +62,7 @@
#include <CCryptoBoringSSL_err.h>
#include <CCryptoBoringSSL_mem.h>
#include "asn1_locl.h"
#include "internal.h"
int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d)
@ -262,42 +262,3 @@ int ASN1_UTCTIME_cmp_time_t(const ASN1_UTCTIME *s, time_t t)
return -1;
return 0;
}
#if 0
time_t ASN1_UTCTIME_get(const ASN1_UTCTIME *s)
{
struct tm tm;
int offset;
OPENSSL_memset(&tm, '\0', sizeof tm);
# define g2(p) (((p)[0]-'0')*10+(p)[1]-'0')
tm.tm_year = g2(s->data);
if (tm.tm_year < 50)
tm.tm_year += 100;
tm.tm_mon = g2(s->data + 2) - 1;
tm.tm_mday = g2(s->data + 4);
tm.tm_hour = g2(s->data + 6);
tm.tm_min = g2(s->data + 8);
tm.tm_sec = g2(s->data + 10);
if (s->data[12] == 'Z')
offset = 0;
else {
offset = g2(s->data + 13) * 60 + g2(s->data + 15);
if (s->data[12] == '-')
offset = -offset;
}
# undef g2
return mktime(&tm) - offset * 60; /* FIXME: mktime assumes the current
* timezone instead of UTC, and unless
* we rewrite OpenSSL in Lisp we cannot
* locally change the timezone without
* possibly interfering with other
* parts of the program. timegm, which
* uses UTC, is non-standard. Also
* time_t is inappropriate for general
* UTC times because it may a 32 bit
* type. */
}
#endif

View File

@ -59,7 +59,7 @@
#include <CCryptoBoringSSL_err.h>
#include <CCryptoBoringSSL_mem.h>
#include "asn1_locl.h"
#include "internal.h"
/* UTF8 utilities */

View File

@ -370,8 +370,7 @@ int ASN1_STRING_set(ASN1_STRING *str, const void *_data, int len)
void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len)
{
if (str->data)
OPENSSL_free(str->data);
OPENSSL_free(str->data);
str->data = data;
str->length = len;
}

View File

@ -72,7 +72,7 @@ const char *ASN1_tag2str(int tag)
};
if ((tag == V_ASN1_NEG_INTEGER) || (tag == V_ASN1_NEG_ENUMERATED))
tag &= ~0x100;
tag &= ~V_ASN1_NEG;
if (tag < 0 || tag > 30)
return "(unknown)";

View File

@ -1,4 +1,3 @@
/* asn1t.h */
/*
* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project
* 2006.
@ -87,6 +86,26 @@ int OPENSSL_gmtime_diff(int *out_days, int *out_secs, const struct tm *from,
/* Internal ASN1 structures and functions: not for application use */
/* These are used internally in the ASN1_OBJECT to keep track of
* whether the names and data need to be free()ed */
#define ASN1_OBJECT_FLAG_DYNAMIC 0x01 /* internal use */
#define ASN1_OBJECT_FLAG_DYNAMIC_STRINGS 0x04 /* internal use */
#define ASN1_OBJECT_FLAG_DYNAMIC_DATA 0x08 /* internal use */
/* An asn1_object_st (aka |ASN1_OBJECT|) represents an ASN.1 OBJECT IDENTIFIER.
* Note: Mutating an |ASN1_OBJECT| is only permitted when initializing it. The
* library maintains a table of static |ASN1_OBJECT|s, which may be referenced
* by non-const |ASN1_OBJECT| pointers. Code which receives an |ASN1_OBJECT|
* pointer externally must assume it is immutable, even if the pointer is not
* const. */
struct asn1_object_st {
const char *sn, *ln;
int nid;
int length;
const unsigned char *data; /* data remains const after init */
int flags; /* Should we free this one */
};
int asn1_utctime_to_tm(struct tm *tm, const ASN1_UTCTIME *d);
int asn1_generalizedtime_to_tm(struct tm *tm, const ASN1_GENERALIZEDTIME *d);
@ -126,6 +145,11 @@ int asn1_enc_restore(int *len, unsigned char **out, ASN1_VALUE **pval,
int asn1_enc_save(ASN1_VALUE **pval, const unsigned char *in, int inlen,
const ASN1_ITEM *it);
/* asn1_type_value_as_pointer returns |a|'s value in pointer form. This is
* usually the value object but, for BOOLEAN values, is 0 or 0xff cast to
* a pointer. */
const void *asn1_type_value_as_pointer(const ASN1_TYPE *a);
#if defined(__cplusplus)
} /* extern C */

View File

@ -65,7 +65,7 @@
#include <CCryptoBoringSSL_mem.h>
#include "../internal.h"
#include "asn1_locl.h"
#include "internal.h"
/*
* Constructed types with a recursive definition (such as can be found in PKCS7)

View File

@ -63,7 +63,7 @@
#include <CCryptoBoringSSL_mem.h>
#include "../internal.h"
#include "asn1_locl.h"
#include "internal.h"
static int asn1_i2d_ex_primitive(ASN1_VALUE **pval, unsigned char **out,
@ -295,11 +295,12 @@ static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out,
if (flags & ASN1_TFLG_SET_OF) {
isset = 1;
/* 2 means we reorder */
if (flags & ASN1_TFLG_SEQUENCE_OF)
isset = 2;
} else
/* Historically, types with both bits set were mutated when
* serialized to apply the sort. We no longer support this. */
assert((flags & ASN1_TFLG_SEQUENCE_OF) == 0);
} else {
isset = 0;
}
/*
* Work out inner tag value: if EXPLICIT or no tagging use underlying
@ -378,7 +379,6 @@ static int asn1_template_ex_i2d(ASN1_VALUE **pval, unsigned char **out,
typedef struct {
unsigned char *data;
int length;
ASN1_VALUE *field;
} DER_ENC;
static int der_cmp(const void *a, const void *b)
@ -433,7 +433,6 @@ static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out,
skitem = sk_ASN1_VALUE_value(sk, i);
tder->data = p;
tder->length = ASN1_item_ex_i2d(&skitem, &p, item, -1, iclass);
tder->field = skitem;
}
/* Now sort them */
@ -445,11 +444,6 @@ static int asn1_set_seq_out(STACK_OF(ASN1_VALUE) *sk, unsigned char **out,
p += tder->length;
}
*out = p;
/* If do_sort is 2 then reorder the STACK */
if (do_sort == 2) {
for (i = 0, tder = derlst; i < sk_ASN1_VALUE_num(sk); i++, tder++)
(void)sk_ASN1_VALUE_set(sk, i, tder->field);
}
OPENSSL_free(derlst);
OPENSSL_free(tmpdat);
return 1;
@ -531,6 +525,20 @@ static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype,
/* If MSTRING type set the underlying type */
strtmp = (ASN1_STRING *)*pval;
utype = strtmp->type;
/* Negative INTEGER and ENUMERATED values use |ASN1_STRING| type values
* that do not match their corresponding utype values. INTEGERs cannot
* participate in MSTRING types, but ENUMERATEDs can.
*
* TODO(davidben): Is this a bug? Although arguably one of the MSTRING
* types should contain more values, rather than less. See
* https://crbug.com/boringssl/412. But it is not possible to fit all
* possible ANY values into an |ASN1_STRING|, so matching the spec here
* is somewhat hopeless. */
if (utype == V_ASN1_NEG_INTEGER) {
utype = V_ASN1_INTEGER;
} else if (utype == V_ASN1_NEG_ENUMERATED) {
utype = V_ASN1_ENUMERATED;
}
*putype = utype;
} else if (it->utype == V_ASN1_ANY) {
/* If ANY set type and pointer to value */
@ -569,7 +577,7 @@ static int asn1_ex_i2c(ASN1_VALUE **pval, unsigned char *cout, int *putype,
if (!*tbool && !it->size)
return -1;
}
c = (unsigned char)*tbool;
c = *tbool ? 0xff : 0x00;
cont = &c;
len = 1;
break;

View File

@ -61,7 +61,7 @@
#include <CCryptoBoringSSL_asn1t.h>
#include <CCryptoBoringSSL_mem.h>
#include "asn1_locl.h"
#include "internal.h"
/* Free up an ASN1 structure */
@ -192,7 +192,7 @@ void ASN1_primitive_free(ASN1_VALUE **pval, const ASN1_ITEM *it)
ASN1_TYPE *typ = (ASN1_TYPE *)*pval;
utype = typ->type;
pval = &typ->value.asn1_value;
if (!*pval)
if (utype != V_ASN1_BOOLEAN && !*pval)
return;
} else if (it->itype == ASN1_ITYPE_MSTRING) {
utype = -1;

View File

@ -63,7 +63,7 @@
#include <CCryptoBoringSSL_mem.h>
#include <CCryptoBoringSSL_obj.h>
#include "asn1_locl.h"
#include "internal.h"
#include "../internal.h"
@ -271,7 +271,6 @@ static void asn1_template_clear(ASN1_VALUE **pval, const ASN1_TEMPLATE *tt)
static int ASN1_primitive_new(ASN1_VALUE **pval, const ASN1_ITEM *it)
{
ASN1_TYPE *typ;
ASN1_STRING *str;
int utype;
if (!it)
@ -308,10 +307,7 @@ static int ASN1_primitive_new(ASN1_VALUE **pval, const ASN1_ITEM *it)
break;
default:
str = ASN1_STRING_type_new(utype);
if (it->itype == ASN1_ITYPE_MSTRING && str)
str->flags |= ASN1_STRING_FLAG_MSTRING;
*pval = (ASN1_VALUE *)str;
*pval = (ASN1_VALUE *)ASN1_STRING_type_new(utype);
break;
}
if (*pval)

View File

@ -66,7 +66,7 @@
#include <CCryptoBoringSSL_thread.h>
#include "../internal.h"
#include "asn1_locl.h"
#include "internal.h"
/* Utility functions for manipulating fields and offsets */

View File

@ -59,7 +59,7 @@
#define _POSIX_C_SOURCE 201410L /* for gmtime_r */
#endif
#include "asn1_locl.h"
#include "internal.h"
#include <time.h>

View File

@ -116,17 +116,11 @@ static int mem_new(BIO *bio) {
}
static int mem_free(BIO *bio) {
BUF_MEM *b;
if (bio == NULL) {
return 0;
}
if (!bio->shutdown || !bio->init || bio->ptr == NULL) {
return 1;
}
b = (BUF_MEM *)bio->ptr;
BUF_MEM *b = (BUF_MEM *)bio->ptr;
if (bio->flags & BIO_FLAGS_MEM_RDONLY) {
b->data = NULL;
}

View File

@ -320,7 +320,7 @@ static int conn_new(BIO *bio) {
bio->init = 0;
bio->num = -1;
bio->flags = 0;
bio->ptr = (char *)BIO_CONNECT_new();
bio->ptr = BIO_CONNECT_new();
return bio->ptr != NULL;
}
@ -340,10 +340,6 @@ static void conn_close_socket(BIO *bio) {
}
static int conn_free(BIO *bio) {
if (bio == NULL) {
return 0;
}
if (bio->shutdown) {
conn_close_socket(bio);
}

View File

@ -146,10 +146,6 @@ static int fd_new(BIO *bio) {
}
static int fd_free(BIO *bio) {
if (bio == NULL) {
return 0;
}
if (bio->shutdown) {
if (bio->init) {
BORINGSSL_CLOSE(bio->num);

View File

@ -126,13 +126,7 @@ BIO *BIO_new_fp(FILE *stream, int close_flag) {
return ret;
}
static int file_new(BIO *bio) { return 1; }
static int file_free(BIO *bio) {
if (bio == NULL) {
return 0;
}
if (!bio->shutdown) {
return 1;
}
@ -279,7 +273,7 @@ static const BIO_METHOD methods_filep = {
BIO_TYPE_FILE, "FILE pointer",
file_write, file_read,
NULL /* puts */, file_gets,
file_ctrl, file_new,
file_ctrl, NULL /* create */,
file_free, NULL /* callback_ctrl */,
};

View File

@ -127,12 +127,7 @@ static void bio_destroy_pair(BIO *bio) {
}
static int bio_free(BIO *bio) {
struct bio_bio_st *b;
if (bio == NULL) {
return 0;
}
b = bio->ptr;
struct bio_bio_st *b = bio->ptr;
assert(b != NULL);

View File

@ -81,19 +81,7 @@ static int closesocket(int sock) {
}
#endif
static int sock_new(BIO *bio) {
bio->init = 0;
bio->num = 0;
bio->ptr = NULL;
bio->flags = 0;
return 1;
}
static int sock_free(BIO *bio) {
if (bio == NULL) {
return 0;
}
if (bio->shutdown) {
if (bio->init) {
closesocket(bio->num);
@ -105,17 +93,15 @@ static int sock_free(BIO *bio) {
}
static int sock_read(BIO *b, char *out, int outl) {
int ret = 0;
if (out == NULL) {
return 0;
}
bio_clear_socket_error();
#if defined(OPENSSL_WINDOWS)
ret = recv(b->num, out, outl, 0);
int ret = recv(b->num, out, outl, 0);
#else
ret = read(b->num, out, outl);
int ret = read(b->num, out, outl);
#endif
BIO_clear_retry_flags(b);
if (ret <= 0) {
@ -186,7 +172,7 @@ static const BIO_METHOD methods_sockp = {
BIO_TYPE_SOCKET, "socket",
sock_write, sock_read,
NULL /* puts */, NULL /* gets, */,
sock_ctrl, sock_new,
sock_ctrl, NULL /* create */,
sock_free, NULL /* callback_ctrl */,
};

View File

@ -89,6 +89,10 @@ const EVP_CIPHER *EVP_get_cipherbynid(int nid) {
}
const EVP_CIPHER *EVP_get_cipherbyname(const char *name) {
if (name == NULL) {
return NULL;
}
if (OPENSSL_strcasecmp(name, "rc4") == 0) {
return EVP_rc4();
} else if (OPENSSL_strcasecmp(name, "des-cbc") == 0) {

View File

@ -343,7 +343,7 @@ static int aead_tls_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
if (EVP_CIPHER_CTX_mode(&tls_ctx->cipher_ctx) == EVP_CIPH_CBC_MODE &&
EVP_tls_cbc_record_digest_supported(tls_ctx->hmac_ctx.md)) {
if (!EVP_tls_cbc_digest_record(tls_ctx->hmac_ctx.md, mac, &mac_len,
ad_fixed, out, data_plus_mac_len, total,
ad_fixed, out, data_len, total,
tls_ctx->mac_key, tls_ctx->mac_key_len)) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_DECRYPT);
return 0;
@ -406,14 +406,6 @@ static int aead_aes_128_cbc_sha1_tls_implicit_iv_init(
EVP_sha1(), 1);
}
static int aead_aes_128_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
const uint8_t *key, size_t key_len,
size_t tag_len,
enum evp_aead_direction_t dir) {
return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_128_cbc(),
EVP_sha256(), 0);
}
static int aead_aes_256_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
size_t key_len, size_t tag_len,
enum evp_aead_direction_t dir) {
@ -428,22 +420,6 @@ static int aead_aes_256_cbc_sha1_tls_implicit_iv_init(
EVP_sha1(), 1);
}
static int aead_aes_256_cbc_sha256_tls_init(EVP_AEAD_CTX *ctx,
const uint8_t *key, size_t key_len,
size_t tag_len,
enum evp_aead_direction_t dir) {
return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
EVP_sha256(), 0);
}
static int aead_aes_256_cbc_sha384_tls_init(EVP_AEAD_CTX *ctx,
const uint8_t *key, size_t key_len,
size_t tag_len,
enum evp_aead_direction_t dir) {
return aead_tls_init(ctx, key, key_len, tag_len, dir, EVP_aes_256_cbc(),
EVP_sha384(), 0);
}
static int aead_des_ede3_cbc_sha1_tls_init(EVP_AEAD_CTX *ctx,
const uint8_t *key, size_t key_len,
size_t tag_len,
@ -513,23 +489,6 @@ static const EVP_AEAD aead_aes_128_cbc_sha1_tls_implicit_iv = {
aead_tls_tag_len,
};
static const EVP_AEAD aead_aes_128_cbc_sha256_tls = {
SHA256_DIGEST_LENGTH + 16, // key len (SHA256 + AES128)
16, // nonce len (IV)
16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256)
SHA256_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in
NULL, // init
aead_aes_128_cbc_sha256_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};
static const EVP_AEAD aead_aes_256_cbc_sha1_tls = {
SHA_DIGEST_LENGTH + 32, // key len (SHA1 + AES256)
16, // nonce len (IV)
@ -564,40 +523,6 @@ static const EVP_AEAD aead_aes_256_cbc_sha1_tls_implicit_iv = {
aead_tls_tag_len,
};
static const EVP_AEAD aead_aes_256_cbc_sha256_tls = {
SHA256_DIGEST_LENGTH + 32, // key len (SHA256 + AES256)
16, // nonce len (IV)
16 + SHA256_DIGEST_LENGTH, // overhead (padding + SHA256)
SHA256_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in
NULL, // init
aead_aes_256_cbc_sha256_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};
static const EVP_AEAD aead_aes_256_cbc_sha384_tls = {
SHA384_DIGEST_LENGTH + 32, // key len (SHA384 + AES256)
16, // nonce len (IV)
16 + SHA384_DIGEST_LENGTH, // overhead (padding + SHA384)
SHA384_DIGEST_LENGTH, // max tag length
0, // seal_scatter_supports_extra_in
NULL, // init
aead_aes_256_cbc_sha384_tls_init,
aead_tls_cleanup,
aead_tls_open,
aead_tls_seal_scatter,
NULL, // open_gather
NULL, // get_iv
aead_tls_tag_len,
};
static const EVP_AEAD aead_des_ede3_cbc_sha1_tls = {
SHA_DIGEST_LENGTH + 24, // key len (SHA1 + 3DES)
8, // nonce len (IV)
@ -657,10 +582,6 @@ const EVP_AEAD *EVP_aead_aes_128_cbc_sha1_tls_implicit_iv(void) {
return &aead_aes_128_cbc_sha1_tls_implicit_iv;
}
const EVP_AEAD *EVP_aead_aes_128_cbc_sha256_tls(void) {
return &aead_aes_128_cbc_sha256_tls;
}
const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls(void) {
return &aead_aes_256_cbc_sha1_tls;
}
@ -669,14 +590,6 @@ const EVP_AEAD *EVP_aead_aes_256_cbc_sha1_tls_implicit_iv(void) {
return &aead_aes_256_cbc_sha1_tls_implicit_iv;
}
const EVP_AEAD *EVP_aead_aes_256_cbc_sha256_tls(void) {
return &aead_aes_256_cbc_sha256_tls;
}
const EVP_AEAD *EVP_aead_aes_256_cbc_sha384_tls(void) {
return &aead_aes_256_cbc_sha384_tls;
}
const EVP_AEAD *EVP_aead_des_ede3_cbc_sha1_tls(void) {
return &aead_des_ede3_cbc_sha1_tls;
}

View File

@ -99,6 +99,17 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
// which EVP_tls_cbc_digest_record supports.
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md);
// EVP_sha1_final_with_secret_suffix computes the result of hashing |len| bytes
// from |in| to |ctx| and writes the resulting hash to |out|. |len| is treated
// as secret and must be at most |max_len|, which is treated as public. |in|
// must point to a buffer of at least |max_len| bytes. It returns one on success
// and zero if inputs are too long.
//
// This function is exported for unit tests.
OPENSSL_EXPORT int EVP_sha1_final_with_secret_suffix(
SHA_CTX *ctx, uint8_t out[SHA_DIGEST_LENGTH], const uint8_t *in, size_t len,
size_t max_len);
// EVP_tls_cbc_digest_record computes the MAC of a decrypted, padded TLS
// record.
//
@ -108,8 +119,8 @@ int EVP_tls_cbc_record_digest_supported(const EVP_MD *md);
// md_out_size: the number of output bytes is written here.
// header: the 13-byte, TLS record header.
// data: the record data itself
// data_plus_mac_size: the secret, reported length of the data and MAC
// once the padding has been removed.
// data_size: the secret, reported length of the data once the padding and MAC
// have been removed.
// data_plus_mac_plus_padding_size: the public length of the whole
// record, including padding.
//
@ -119,7 +130,7 @@ int EVP_tls_cbc_record_digest_supported(const EVP_MD *md);
// padding too. )
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
size_t *md_out_size, const uint8_t header[13],
const uint8_t *data, size_t data_plus_mac_size,
const uint8_t *data, size_t data_size,
size_t data_plus_mac_plus_padding_size,
const uint8_t *mac_secret,
unsigned mac_secret_length);

View File

@ -62,15 +62,6 @@
#include "../fipsmodule/cipher/internal.h"
// MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
// field. (SHA-384/512 have 128-bit length.)
#define MAX_HASH_BIT_COUNT_BYTES 16
// MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
// Currently SHA-384/512 has a 128-byte block size and that's the largest
// supported by TLS.)
#define MAX_HASH_BLOCK_SIZE 128
int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
const uint8_t *in, size_t in_len,
size_t block_size, size_t mac_size) {
@ -183,134 +174,110 @@ void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
OPENSSL_memcpy(out, rotated_mac, md_size);
}
// u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
// big-endian order. The value of p is advanced by four.
#define u32toBE(n, p) \
do { \
*((p)++) = (uint8_t)((n) >> 24); \
*((p)++) = (uint8_t)((n) >> 16); \
*((p)++) = (uint8_t)((n) >> 8); \
*((p)++) = (uint8_t)((n)); \
} while (0)
// u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in
// big-endian order. The value of p is advanced by eight.
#define u64toBE(n, p) \
do { \
*((p)++) = (uint8_t)((n) >> 56); \
*((p)++) = (uint8_t)((n) >> 48); \
*((p)++) = (uint8_t)((n) >> 40); \
*((p)++) = (uint8_t)((n) >> 32); \
*((p)++) = (uint8_t)((n) >> 24); \
*((p)++) = (uint8_t)((n) >> 16); \
*((p)++) = (uint8_t)((n) >> 8); \
*((p)++) = (uint8_t)((n)); \
} while (0)
typedef union {
SHA_CTX sha1;
SHA256_CTX sha256;
SHA512_CTX sha512;
} HASH_CTX;
static void tls1_sha1_transform(HASH_CTX *ctx, const uint8_t *block) {
SHA1_Transform(&ctx->sha1, block);
}
static void tls1_sha256_transform(HASH_CTX *ctx, const uint8_t *block) {
SHA256_Transform(&ctx->sha256, block);
}
static void tls1_sha512_transform(HASH_CTX *ctx, const uint8_t *block) {
SHA512_Transform(&ctx->sha512, block);
}
// These functions serialize the state of a hash and thus perform the standard
// "final" operation without adding the padding and length that such a function
// typically does.
static void tls1_sha1_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
SHA_CTX *sha1 = &ctx->sha1;
u32toBE(sha1->h[0], md_out);
u32toBE(sha1->h[1], md_out);
u32toBE(sha1->h[2], md_out);
u32toBE(sha1->h[3], md_out);
u32toBE(sha1->h[4], md_out);
}
static void tls1_sha256_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
SHA256_CTX *sha256 = &ctx->sha256;
for (unsigned i = 0; i < 8; i++) {
u32toBE(sha256->h[i], md_out);
int EVP_sha1_final_with_secret_suffix(SHA_CTX *ctx,
uint8_t out[SHA_DIGEST_LENGTH],
const uint8_t *in, size_t len,
size_t max_len) {
// Bound the input length so |total_bits| below fits in four bytes. This is
// redundant with TLS record size limits. This also ensures |input_idx| below
// does not overflow.
size_t max_len_bits = max_len << 3;
if (ctx->Nh != 0 ||
(max_len_bits >> 3) != max_len || // Overflow
ctx->Nl + max_len_bits < max_len_bits ||
ctx->Nl + max_len_bits > UINT32_MAX) {
return 0;
}
}
static void tls1_sha512_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
SHA512_CTX *sha512 = &ctx->sha512;
for (unsigned i = 0; i < 8; i++) {
u64toBE(sha512->h[i], md_out);
// We need to hash the following into |ctx|:
//
// - ctx->data[:ctx->num]
// - in[:len]
// - A 0x80 byte
// - However many zero bytes are needed to pad up to a block.
// - Eight bytes of length.
size_t num_blocks = (ctx->num + len + 1 + 8 + SHA_CBLOCK - 1) >> 6;
size_t last_block = num_blocks - 1;
size_t max_blocks = (ctx->num + max_len + 1 + 8 + SHA_CBLOCK - 1) >> 6;
// The bounds above imply |total_bits| fits in four bytes.
size_t total_bits = ctx->Nl + (len << 3);
uint8_t length_bytes[4];
length_bytes[0] = (uint8_t)(total_bits >> 24);
length_bytes[1] = (uint8_t)(total_bits >> 16);
length_bytes[2] = (uint8_t)(total_bits >> 8);
length_bytes[3] = (uint8_t)total_bits;
// We now construct and process each expected block in constant-time.
uint8_t block[SHA_CBLOCK] = {0};
uint32_t result[5] = {0};
// input_idx is the index into |in| corresponding to the current block.
// However, we allow this index to overflow beyond |max_len|, to simplify the
// 0x80 byte.
size_t input_idx = 0;
for (size_t i = 0; i < max_blocks; i++) {
// Fill |block| with data from the partial block in |ctx| and |in|. We copy
// as if we were hashing up to |max_len| and then zero the excess later.
size_t block_start = 0;
if (i == 0) {
OPENSSL_memcpy(block, ctx->data, ctx->num);
block_start = ctx->num;
}
if (input_idx < max_len) {
size_t to_copy = SHA_CBLOCK - block_start;
if (to_copy > max_len - input_idx) {
to_copy = max_len - input_idx;
}
OPENSSL_memcpy(block + block_start, in + input_idx, to_copy);
}
// Zero any bytes beyond |len| and add the 0x80 byte.
for (size_t j = block_start; j < SHA_CBLOCK; j++) {
// input[idx] corresponds to block[j].
size_t idx = input_idx + j - block_start;
// The barriers on |len| are not strictly necessary. However, without
// them, GCC compiles this code by incorporating |len| into the loop
// counter and subtracting it out later. This is still constant-time, but
// it frustrates attempts to validate this.
uint8_t is_in_bounds = constant_time_lt_8(idx, value_barrier_w(len));
uint8_t is_padding_byte = constant_time_eq_8(idx, value_barrier_w(len));
block[j] &= is_in_bounds;
block[j] |= 0x80 & is_padding_byte;
}
input_idx += SHA_CBLOCK - block_start;
// Fill in the length if this is the last block.
crypto_word_t is_last_block = constant_time_eq_w(i, last_block);
for (size_t j = 0; j < 4; j++) {
block[SHA_CBLOCK - 4 + j] |= is_last_block & length_bytes[j];
}
// Process the block and save the hash state if it is the final value.
SHA1_Transform(ctx, block);
for (size_t j = 0; j < 5; j++) {
result[j] |= is_last_block & ctx->h[j];
}
}
// Write the output.
for (size_t i = 0; i < 5; i++) {
CRYPTO_store_u32_be(out + 4 * i, result[i]);
}
return 1;
}
int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) {
switch (EVP_MD_type(md)) {
case NID_sha1:
case NID_sha256:
case NID_sha384:
return 1;
default:
return 0;
}
return EVP_MD_type(md) == NID_sha1;
}
int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
size_t *md_out_size, const uint8_t header[13],
const uint8_t *data, size_t data_plus_mac_size,
const uint8_t *data, size_t data_size,
size_t data_plus_mac_plus_padding_size,
const uint8_t *mac_secret,
unsigned mac_secret_length) {
HASH_CTX md_state;
void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out);
void (*md_transform)(HASH_CTX *ctx, const uint8_t *block);
unsigned md_size, md_block_size = 64, md_block_shift = 6;
// md_length_size is the number of bytes in the length field that terminates
// the hash.
unsigned md_length_size = 8;
// Bound the acceptable input so we can forget about many possible overflows
// later in this function. This is redundant with the record size limits in
// TLS.
if (data_plus_mac_plus_padding_size >= 1024 * 1024) {
assert(0);
return 0;
}
switch (EVP_MD_type(md)) {
case NID_sha1:
SHA1_Init(&md_state.sha1);
md_final_raw = tls1_sha1_final_raw;
md_transform = tls1_sha1_transform;
md_size = SHA_DIGEST_LENGTH;
break;
case NID_sha256:
SHA256_Init(&md_state.sha256);
md_final_raw = tls1_sha256_final_raw;
md_transform = tls1_sha256_transform;
md_size = SHA256_DIGEST_LENGTH;
break;
case NID_sha384:
SHA384_Init(&md_state.sha512);
md_final_raw = tls1_sha512_final_raw;
md_transform = tls1_sha512_transform;
md_size = SHA384_DIGEST_LENGTH;
md_block_size = 128;
md_block_shift = 7;
md_length_size = 16;
break;
default:
if (EVP_MD_type(md) != NID_sha1) {
// EVP_tls_cbc_record_digest_supported should have been called first to
// check that the hash function is supported.
assert(0);
@ -318,175 +285,54 @@ int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
return 0;
}
assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
assert(md_block_size == (1u << md_block_shift));
assert(md_size <= EVP_MAX_MD_SIZE);
static const size_t kHeaderLength = 13;
// kVarianceBlocks is the number of blocks of the hash that we have to
// calculate in constant time because they could be altered by the
// padding value.
//
// TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
// required to be minimal. Therefore we say that the final |kVarianceBlocks|
// blocks can vary based on the padding and on the hash used. This value
// must be derived from public information.
const size_t kVarianceBlocks =
( 255 + 1 + // maximum padding bytes + padding length
md_size + // length of hash's output
md_block_size - 1 // ceiling
) / md_block_size
+ 1; // the 0x80 marker and the encoded message length could or not
// require an extra block; since the exact value depends on the
// message length; thus, one extra block is always added to run
// in constant time.
// From now on we're dealing with the MAC, which conceptually has 13
// bytes of `header' before the start of the data.
size_t len = data_plus_mac_plus_padding_size + kHeaderLength;
// max_mac_bytes contains the maximum bytes of bytes in the MAC, including
// |header|, assuming that there's no padding.
size_t max_mac_bytes = len - md_size - 1;
// num_blocks is the maximum number of hash blocks.
size_t num_blocks =
(max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
// In order to calculate the MAC in constant time we have to handle
// the final blocks specially because the padding value could cause the
// end to appear somewhere in the final |kVarianceBlocks| blocks and we
// can't leak where. However, |num_starting_blocks| worth of data can
// be hashed right away because no padding value can affect whether
// they are plaintext.
size_t num_starting_blocks = 0;
// k is the starting byte offset into the conceptual header||data where
// we start processing.
size_t k = 0;
// mac_end_offset is the index just past the end of the data to be MACed.
size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size;
// c is the index of the 0x80 byte in the final hash block that contains
// application data.
size_t c = mac_end_offset & (md_block_size - 1);
// index_a is the hash block number that contains the 0x80 terminating value.
size_t index_a = mac_end_offset >> md_block_shift;
// index_b is the hash block number that contains the 64-bit hash length, in
// bits.
size_t index_b = (mac_end_offset + md_length_size) >> md_block_shift;
if (num_blocks > kVarianceBlocks) {
num_starting_blocks = num_blocks - kVarianceBlocks;
k = md_block_size * num_starting_blocks;
if (mac_secret_length > SHA_CBLOCK) {
// HMAC pads small keys with zeros and hashes large keys down. This function
// should never reach the large key case.
assert(0);
return 0;
}
// bits is the hash-length in bits. It includes the additional hash
// block for the masked HMAC key.
size_t bits = 8 * mac_end_offset; // at most 18 bits to represent
// Compute the initial HMAC block.
bits += 8 * md_block_size;
// hmac_pad is the masked HMAC key.
uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE];
OPENSSL_memset(hmac_pad, 0, md_block_size);
assert(mac_secret_length <= sizeof(hmac_pad));
uint8_t hmac_pad[SHA_CBLOCK];
OPENSSL_memset(hmac_pad, 0, sizeof(hmac_pad));
OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
for (size_t i = 0; i < md_block_size; i++) {
for (size_t i = 0; i < SHA_CBLOCK; i++) {
hmac_pad[i] ^= 0x36;
}
md_transform(&md_state, hmac_pad);
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
SHA1_Update(&ctx, header, 13);
// The length check means |bits| fits in four bytes.
uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES];
OPENSSL_memset(length_bytes, 0, md_length_size - 4);
length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24);
length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16);
length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8);
length_bytes[md_length_size - 1] = (uint8_t)bits;
if (k > 0) {
// k is a multiple of md_block_size.
uint8_t first_block[MAX_HASH_BLOCK_SIZE];
OPENSSL_memcpy(first_block, header, 13);
OPENSSL_memcpy(first_block + 13, data, md_block_size - 13);
md_transform(&md_state, first_block);
for (size_t i = 1; i < k / md_block_size; i++) {
md_transform(&md_state, data + md_block_size * i - 13);
}
// There are at most 256 bytes of padding, so we can compute the public
// minimum length for |data_size|.
size_t min_data_size = 0;
if (data_plus_mac_plus_padding_size > SHA_DIGEST_LENGTH + 256) {
min_data_size = data_plus_mac_plus_padding_size - SHA_DIGEST_LENGTH - 256;
}
uint8_t mac_out[EVP_MAX_MD_SIZE];
OPENSSL_memset(mac_out, 0, sizeof(mac_out));
// Hash the public minimum length directly. This reduces the number of blocks
// that must be computed in constant-time.
SHA1_Update(&ctx, data, min_data_size);
// We now process the final hash blocks. For each block, we construct
// it in constant time. If the |i==index_a| then we'll include the 0x80
// bytes and zero pad etc. For each block we selectively copy it, in
// constant time, to |mac_out|.
for (size_t i = num_starting_blocks;
i <= num_starting_blocks + kVarianceBlocks; i++) {
uint8_t block[MAX_HASH_BLOCK_SIZE];
uint8_t is_block_a = constant_time_eq_8(i, index_a);
uint8_t is_block_b = constant_time_eq_8(i, index_b);
for (size_t j = 0; j < md_block_size; j++) {
uint8_t b = 0;
if (k < kHeaderLength) {
b = header[k];
} else if (k < data_plus_mac_plus_padding_size + kHeaderLength) {
b = data[k - kHeaderLength];
}
k++;
uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c);
uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1);
// If this is the block containing the end of the
// application data, and we are at the offset for the
// 0x80 value, then overwrite b with 0x80.
b = constant_time_select_8(is_past_c, 0x80, b);
// If this the the block containing the end of the
// application data and we're past the 0x80 value then
// just write zero.
b = b & ~is_past_cp1;
// If this is index_b (the final block), but not
// index_a (the end of the data), then the 64-bit
// length didn't fit into index_a and we're having to
// add an extra block of zeros.
b &= ~is_block_b | is_block_a;
// The final bytes of one of the blocks contains the
// length.
if (j >= md_block_size - md_length_size) {
// If this is index_b, write a length byte.
b = constant_time_select_8(
is_block_b, length_bytes[j - (md_block_size - md_length_size)], b);
}
block[j] = b;
}
md_transform(&md_state, block);
md_final_raw(&md_state, block);
// If this is index_b, copy the hash value to |mac_out|.
for (size_t j = 0; j < md_size; j++) {
mac_out[j] |= block[j] & is_block_b;
}
}
EVP_MD_CTX md_ctx;
EVP_MD_CTX_init(&md_ctx);
if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) {
EVP_MD_CTX_cleanup(&md_ctx);
// Hash the remaining data without leaking |data_size|.
uint8_t mac_out[SHA_DIGEST_LENGTH];
if (!EVP_sha1_final_with_secret_suffix(
&ctx, mac_out, data + min_data_size, data_size - min_data_size,
data_plus_mac_plus_padding_size - min_data_size)) {
return 0;
}
// Complete the HMAC in the standard manner.
for (size_t i = 0; i < md_block_size; i++) {
SHA1_Init(&ctx);
for (size_t i = 0; i < SHA_CBLOCK; i++) {
hmac_pad[i] ^= 0x6a;
}
EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
EVP_DigestUpdate(&md_ctx, mac_out, md_size);
unsigned md_out_size_u;
EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
*md_out_size = md_out_size_u;
EVP_MD_CTX_cleanup(&md_ctx);
SHA1_Update(&ctx, hmac_pad, SHA_CBLOCK);
SHA1_Update(&ctx, mac_out, SHA_DIGEST_LENGTH);
SHA1_Final(md_out, &ctx);
*md_out_size = SHA_DIGEST_LENGTH;
return 1;
}

View File

@ -68,6 +68,7 @@
#include "conf_def.h"
#include "internal.h"
#include "../internal.h"
#include "../lhash/internal.h"
DEFINE_LHASH_OF(CONF_VALUE)
@ -76,12 +77,16 @@ struct conf_st {
LHASH_OF(CONF_VALUE) *data;
};
static const char kDefaultSectionName[] = "default";
// The maximum length we can grow a value to after variable expansion. 64k
// should be more than enough for all reasonable uses.
#define MAX_CONF_VALUE_LENGTH 65536
static uint32_t conf_value_hash(const CONF_VALUE *v) {
return (lh_strhash(v->section) << 2) ^ lh_strhash(v->name);
const uint32_t section_hash = v->section ? OPENSSL_strhash(v->section) : 0;
const uint32_t name_hash = v->name ? OPENSSL_strhash(v->name) : 0;
return (section_hash << 2) ^ name_hash;
}
static int conf_value_cmp(const CONF_VALUE *a, const CONF_VALUE *b) {
@ -155,12 +160,14 @@ static void value_free(CONF_VALUE *value) {
OPENSSL_free(value);
}
static void value_free_arg(CONF_VALUE *value, void *arg) { value_free(value); }
void NCONF_free(CONF *conf) {
if (conf == NULL || conf->data == NULL) {
return;
}
lh_CONF_VALUE_doall(conf->data, value_free);
lh_CONF_VALUE_doall_arg(conf->data, value_free_arg, NULL);
lh_CONF_VALUE_free(conf->data);
OPENSSL_free(conf);
}
@ -390,6 +397,10 @@ const char *NCONF_get_string(const CONF *conf, const char *section,
const char *name) {
CONF_VALUE template, *value;
if (section == NULL) {
section = kDefaultSectionName;
}
OPENSSL_memset(&template, 0, sizeof(template));
template.section = (char *) section;
template.name = (char *) name;
@ -538,7 +549,7 @@ static int def_load_bio(CONF *conf, BIO *in, long *out_error_line) {
goto err;
}
section = OPENSSL_strdup("default");
section = OPENSSL_strdup(kDefaultSectionName);
if (section == NULL) {
OPENSSL_PUT_ERROR(CONF, ERR_R_MALLOC_FAILURE);
goto err;

View File

@ -820,7 +820,7 @@ static void table_select(ge_precomp *t, int pos, signed char b) {
//
// Preconditions:
// a[31] <= 127
void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) {
void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {
signed char e[64];
signed char carry;
ge_p1p1 r;

View File

@ -106,7 +106,7 @@ typedef struct {
} ge_cached;
void x25519_ge_tobytes(uint8_t s[32], const ge_p2 *h);
int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s);
int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t s[32]);
void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p);
void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p);
void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p);

View File

@ -58,11 +58,12 @@
#include <string.h>
#include <CCryptoBoringSSL_asn1.h>
#include <CCryptoBoringSSL_blake2.h>
#include <CCryptoBoringSSL_bytestring.h>
#include <CCryptoBoringSSL_obj.h>
#include <CCryptoBoringSSL_nid.h>
#include "../asn1/internal.h"
#include "../internal.h"
#include "../fipsmodule/digest/internal.h"
@ -152,13 +153,14 @@ static const EVP_MD *cbs_to_md(const CBS *cbs) {
}
const EVP_MD *EVP_get_digestbyobj(const ASN1_OBJECT *obj) {
// Handle objects with no corresponding OID.
// Handle objects with no corresponding OID. Note we don't use |OBJ_obj2nid|
// here to avoid pulling in the OID table.
if (obj->nid != NID_undef) {
return EVP_get_digestbynid(obj->nid);
}
CBS cbs;
CBS_init(&cbs, obj->data, obj->length);
CBS_init(&cbs, OBJ_get0_data(obj), OBJ_length(obj));
return cbs_to_md(&cbs);
}

View File

@ -368,84 +368,6 @@ void ERR_clear_system_error(void) {
errno = 0;
}
char *ERR_error_string(uint32_t packed_error, char *ret) {
static char buf[ERR_ERROR_STRING_BUF_LEN];
if (ret == NULL) {
// TODO(fork): remove this.
ret = buf;
}
#if !defined(NDEBUG)
// This is aimed to help catch callers who don't provide
// |ERR_ERROR_STRING_BUF_LEN| bytes of space.
OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN);
#endif
return ERR_error_string_n(packed_error, ret, ERR_ERROR_STRING_BUF_LEN);
}
char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) {
char lib_buf[64], reason_buf[64];
const char *lib_str, *reason_str;
unsigned lib, reason;
if (len == 0) {
return NULL;
}
lib = ERR_GET_LIB(packed_error);
reason = ERR_GET_REASON(packed_error);
lib_str = ERR_lib_error_string(packed_error);
reason_str = ERR_reason_error_string(packed_error);
if (lib_str == NULL) {
BIO_snprintf(lib_buf, sizeof(lib_buf), "lib(%u)", lib);
lib_str = lib_buf;
}
if (reason_str == NULL) {
BIO_snprintf(reason_buf, sizeof(reason_buf), "reason(%u)", reason);
reason_str = reason_buf;
}
BIO_snprintf(buf, len, "error:%08" PRIx32 ":%s:OPENSSL_internal:%s",
packed_error, lib_str, reason_str);
if (strlen(buf) == len - 1) {
// output may be truncated; make sure we always have 5 colon-separated
// fields, i.e. 4 colons.
static const unsigned num_colons = 4;
unsigned i;
char *s = buf;
if (len <= num_colons) {
// In this situation it's not possible to ensure that the correct number
// of colons are included in the output.
return buf;
}
for (i = 0; i < num_colons; i++) {
char *colon = strchr(s, ':');
char *last_pos = &buf[len - 1] - num_colons + i;
if (colon == NULL || colon > last_pos) {
// set colon |i| at last possible position (buf[len-1] is the
// terminating 0). If we're setting this colon, then all whole of the
// rest of the string must be colons in order to have the correct
// number.
OPENSSL_memset(last_pos, ':', num_colons - i);
break;
}
s = colon + 1;
}
}
return buf;
}
// err_string_cmp is a compare function for searching error values with
// |bsearch| in |err_string_lookup|.
static int err_string_cmp(const void *a, const void *b) {
@ -530,7 +452,7 @@ static const char *const kLibraryNames[ERR_NUM_LIBS] = {
"User defined functions", // ERR_LIB_USER
};
const char *ERR_lib_error_string(uint32_t packed_error) {
static const char *err_lib_error_string(uint32_t packed_error) {
const uint32_t lib = ERR_GET_LIB(packed_error);
if (lib >= ERR_NUM_LIBS) {
@ -539,11 +461,16 @@ const char *ERR_lib_error_string(uint32_t packed_error) {
return kLibraryNames[lib];
}
const char *ERR_lib_error_string(uint32_t packed_error) {
const char *ret = err_lib_error_string(packed_error);
return ret == NULL ? "unknown library" : ret;
}
const char *ERR_func_error_string(uint32_t packed_error) {
return "OPENSSL_internal";
}
const char *ERR_reason_error_string(uint32_t packed_error) {
static const char *err_reason_error_string(uint32_t packed_error) {
const uint32_t lib = ERR_GET_LIB(packed_error);
const uint32_t reason = ERR_GET_REASON(packed_error);
@ -579,6 +506,86 @@ const char *ERR_reason_error_string(uint32_t packed_error) {
kOpenSSLReasonValuesLen, kOpenSSLReasonStringData);
}
const char *ERR_reason_error_string(uint32_t packed_error) {
const char *ret = err_reason_error_string(packed_error);
return ret == NULL ? "unknown error" : ret;
}
char *ERR_error_string(uint32_t packed_error, char *ret) {
static char buf[ERR_ERROR_STRING_BUF_LEN];
if (ret == NULL) {
// TODO(fork): remove this.
ret = buf;
}
#if !defined(NDEBUG)
// This is aimed to help catch callers who don't provide
// |ERR_ERROR_STRING_BUF_LEN| bytes of space.
OPENSSL_memset(ret, 0, ERR_ERROR_STRING_BUF_LEN);
#endif
return ERR_error_string_n(packed_error, ret, ERR_ERROR_STRING_BUF_LEN);
}
char *ERR_error_string_n(uint32_t packed_error, char *buf, size_t len) {
if (len == 0) {
return NULL;
}
unsigned lib = ERR_GET_LIB(packed_error);
unsigned reason = ERR_GET_REASON(packed_error);
const char *lib_str = err_lib_error_string(packed_error);
const char *reason_str = err_reason_error_string(packed_error);
char lib_buf[64], reason_buf[64];
if (lib_str == NULL) {
BIO_snprintf(lib_buf, sizeof(lib_buf), "lib(%u)", lib);
lib_str = lib_buf;
}
if (reason_str == NULL) {
BIO_snprintf(reason_buf, sizeof(reason_buf), "reason(%u)", reason);
reason_str = reason_buf;
}
BIO_snprintf(buf, len, "error:%08" PRIx32 ":%s:OPENSSL_internal:%s",
packed_error, lib_str, reason_str);
if (strlen(buf) == len - 1) {
// output may be truncated; make sure we always have 5 colon-separated
// fields, i.e. 4 colons.
static const unsigned num_colons = 4;
unsigned i;
char *s = buf;
if (len <= num_colons) {
// In this situation it's not possible to ensure that the correct number
// of colons are included in the output.
return buf;
}
for (i = 0; i < num_colons; i++) {
char *colon = strchr(s, ':');
char *last_pos = &buf[len - 1] - num_colons + i;
if (colon == NULL || colon > last_pos) {
// set colon |i| at last possible position (buf[len-1] is the
// terminating 0). If we're setting this colon, then all whole of the
// rest of the string must be colons in order to have the correct
// number.
OPENSSL_memset(last_pos, ':', num_colons - i);
break;
}
s = colon + 1;
}
}
return buf;
}
void ERR_print_errors_cb(ERR_print_errors_callback_t callback, void *ctx) {
char buf[ERR_ERROR_STRING_BUF_LEN];
char buf2[1024];

File diff suppressed because it is too large Load Diff

View File

@ -429,6 +429,15 @@ int EVP_PKEY_CTX_get_signature_md(EVP_PKEY_CTX *ctx, const EVP_MD **out_md) {
0, (void *)out_md);
}
void *EVP_PKEY_get0(const EVP_PKEY *pkey) {
// Node references, but never calls this function, so for now we return NULL.
// If other projects require complete support, call |EVP_PKEY_get0_RSA|, etc.,
// rather than reading |pkey->pkey.ptr| directly. This avoids problems if our
// internal representation does not match the type the caller expects from
// OpenSSL.
return NULL;
}
void OpenSSL_add_all_algorithms(void) {}
void OPENSSL_add_all_algorithms_conf(void) {}

View File

@ -285,8 +285,10 @@ int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator,
// pointer to the 'top' of snum
wnump = &(snum->d[num_n - 1]);
// Setup to 'res'
res->neg = (numerator->neg ^ divisor->neg);
// Setup |res|. |numerator| and |res| may alias, so we save |numerator->neg|
// for later.
const int numerator_neg = numerator->neg;
res->neg = (numerator_neg ^ divisor->neg);
if (!bn_wexpand(res, loop + 1)) {
goto err;
}
@ -379,14 +381,11 @@ int BN_div(BIGNUM *quotient, BIGNUM *rem, const BIGNUM *numerator,
bn_set_minimal_width(snum);
if (rem != NULL) {
// Keep a copy of the neg flag in numerator because if |rem| == |numerator|
// |BN_rshift| will overwrite it.
int neg = numerator->neg;
if (!BN_rshift(rem, snum, norm_shift)) {
goto err;
}
if (!BN_is_zero(rem)) {
rem->neg = neg;
rem->neg = numerator_neg;
}
}

View File

@ -297,7 +297,7 @@ void bn_mul_comba4(BN_ULONG r[8], const BN_ULONG a[4], const BN_ULONG b[4]);
void bn_mul_comba8(BN_ULONG r[16], const BN_ULONG a[8], const BN_ULONG b[8]);
// bn_sqr_comba8 sets |r| to |a|^2.
void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[4]);
void bn_sqr_comba8(BN_ULONG r[16], const BN_ULONG a[8]);
// bn_sqr_comba4 sets |r| to |a|^2.
void bn_sqr_comba4(BN_ULONG r[8], const BN_ULONG a[4]);

View File

@ -115,10 +115,6 @@
#include "../../internal.h"
// The quick sieve algorithm approach to weeding out primes is Philip
// Zimmermann's, as implemented in PGP. I have had a read of his comments and
// implemented my own version.
// kPrimes contains the first 1024 primes.
static const uint16_t kPrimes[] = {
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37,

View File

@ -141,10 +141,22 @@ typedef struct {
static int aes_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
const uint8_t *iv, int enc) {
int ret, mode;
int ret;
EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;
const int mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK;
if (mode == EVP_CIPH_CTR_MODE) {
switch (ctx->key_len) {
case 16:
boringssl_fips_inc_counter(fips_counter_evp_aes_128_ctr);
break;
case 32:
boringssl_fips_inc_counter(fips_counter_evp_aes_256_ctr);
break;
}
}
mode = ctx->cipher->flags & EVP_CIPH_MODE_MASK;
if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE) && !enc) {
if (hwaes_capable()) {
ret = aes_hw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
@ -353,6 +365,17 @@ static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
if (!iv && !key) {
return 1;
}
switch (ctx->key_len) {
case 16:
boringssl_fips_inc_counter(fips_counter_evp_aes_128_gcm);
break;
case 32:
boringssl_fips_inc_counter(fips_counter_evp_aes_256_gcm);
break;
}
if (key) {
OPENSSL_memset(&gctx->gcm, 0, sizeof(gctx->gcm));
gctx->ctr = aes_ctr_set_key(&gctx->ks.ks, &gctx->gcm.gcm_key, NULL, key,

View File

@ -68,6 +68,8 @@
int EVP_MD_type(const EVP_MD *md) { return md->type; }
int EVP_MD_nid(const EVP_MD *md) { return EVP_MD_type(md); }
uint32_t EVP_MD_flags(const EVP_MD *md) { return md->flags; }
size_t EVP_MD_size(const EVP_MD *md) { return md->md_size; }
@ -177,6 +179,13 @@ int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) {
return 1;
}
void EVP_MD_CTX_move(EVP_MD_CTX *out, EVP_MD_CTX *in) {
EVP_MD_CTX_cleanup(out);
// While not guaranteed, |EVP_MD_CTX| is currently safe to move with |memcpy|.
OPENSSL_memcpy(out, in, sizeof(EVP_MD_CTX));
EVP_MD_CTX_init(in);
}
int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in) {
EVP_MD_CTX_init(out);
return EVP_MD_CTX_copy_ex(out, in);

View File

@ -247,13 +247,21 @@ static void sha512_256_init(EVP_MD_CTX *ctx) {
CHECK(SHA512_256_Init(ctx->md_data));
}
static void sha512_256_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
CHECK(SHA512_256_Update(ctx->md_data, data, count));
}
static void sha512_256_final(EVP_MD_CTX *ctx, uint8_t *md) {
CHECK(SHA512_256_Final(md, ctx->md_data));
}
DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha512_256) {
out->type = NID_sha512_256;
out->md_size = SHA512_256_DIGEST_LENGTH;
out->flags = 0;
out->init = sha512_256_init;
out->update = sha512_update;
out->final = sha512_final;
out->update = sha512_256_update;
out->final = sha512_256_final;
out->block_size = 128;
out->ctx_size = sizeof(SHA512_CTX);
}

View File

@ -46,6 +46,9 @@
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ==================================================================== */
#ifndef OPENSSL_HEADER_DIGEST_MD32_COMMON_H
#define OPENSSL_HEADER_DIGEST_MD32_COMMON_H
#include <CCryptoBoringSSL_base.h>
#include <assert.h>
@ -59,22 +62,15 @@ extern "C" {
// This is a generic 32-bit "collector" for message digest algorithms. It
// collects input character stream into chunks of 32-bit values and invokes the
// block function that performs the actual hash calculations. To make use of
// this mechanism, the following macros must be defined before including
// md32_common.h.
// block function that performs the actual hash calculations.
//
// One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be
// defined to specify the byte order of the input stream.
//
// |HASH_CBLOCK| must be defined as the integer block size, in bytes.
//
// |HASH_CTX| must be defined as the name of the context structure, which must
// have at least the following members:
// To make use of this mechanism, the hash context should be defined with the
// following parameters.
//
// typedef struct <name>_state_st {
// uint32_t h[<chaining length> / sizeof(uint32_t)];
// uint32_t Nl, Nh;
// uint8_t data[HASH_CBLOCK];
// uint8_t data[<block size>];
// unsigned num;
// ...
// } <NAME>_CTX;
@ -83,186 +79,117 @@ extern "C" {
// any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and
// SHA-512).
//
// |HASH_UPDATE| must be defined as the name of the "Update" function to
// generate.
//
// |HASH_TRANSFORM| must be defined as the the name of the "Transform"
// function to generate.
//
// |HASH_FINAL| must be defined as the name of "Final" function to generate.
//
// |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function.
// That function must be implemented manually. It must be capable of operating
// on *unaligned* input data in its original (data) byte order. It must have
// this signature:
//
// void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data,
// size_t num);
//
// It must update the hash state |state| with |num| blocks of data from |data|,
// where each block is |HASH_CBLOCK| bytes; i.e. |data| points to a array of
// |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|,
// and so will have |<chaining length> / sizeof(uint32_t)| elements.
//
// |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts
// the hash state |c->h| into the output byte order, storing the result in |s|.
// |h| is the hash state and is updated by a function of type
// |crypto_md32_block_func|. |data| is the partial unprocessed block and has
// |num| bytes. |Nl| and |Nh| maintain the number of bits processed so far.
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif
#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif
#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#endif
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
#define HOST_c2l(c, l) \
do { \
(l) = (((uint32_t)(*((c)++))) << 24); \
(l) |= (((uint32_t)(*((c)++))) << 16); \
(l) |= (((uint32_t)(*((c)++))) << 8); \
(l) |= (((uint32_t)(*((c)++)))); \
} while (0)
#define HOST_l2c(l, c) \
do { \
*((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 16) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 8) & 0xff); \
*((c)++) = (uint8_t)(((l)) & 0xff); \
} while (0)
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#define HOST_c2l(c, l) \
do { \
(l) = (((uint32_t)(*((c)++)))); \
(l) |= (((uint32_t)(*((c)++))) << 8); \
(l) |= (((uint32_t)(*((c)++))) << 16); \
(l) |= (((uint32_t)(*((c)++))) << 24); \
} while (0)
#define HOST_l2c(l, c) \
do { \
*((c)++) = (uint8_t)(((l)) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 8) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 16) & 0xff); \
*((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
} while (0)
#endif // DATA_ORDER
int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
const uint8_t *data = data_;
// A crypto_md32_block_func should incorporate |num_blocks| of input from |data|
// into |state|. It is assumed the caller has sized |state| and |data| for the
// hash function.
typedef void (*crypto_md32_block_func)(uint32_t *state, const uint8_t *data,
size_t num_blocks);
// crypto_md32_update adds |len| bytes from |in| to the digest. |data| must be a
// buffer of length |block_size| with the first |*num| bytes containing a
// partial block. This function combines the partial block with |in| and
// incorporates any complete blocks into the digest state |h|. It then updates
// |data| and |*num| with the new partial block and updates |*Nh| and |*Nl| with
// the data consumed.
static inline void crypto_md32_update(crypto_md32_block_func block_func,
uint32_t *h, uint8_t *data,
size_t block_size, unsigned *num,
uint32_t *Nh, uint32_t *Nl,
const uint8_t *in, size_t len) {
if (len == 0) {
return 1;
return;
}
uint32_t l = c->Nl + (((uint32_t)len) << 3);
if (l < c->Nl) {
uint32_t l = *Nl + (((uint32_t)len) << 3);
if (l < *Nl) {
// Handle carries.
c->Nh++;
(*Nh)++;
}
c->Nh += (uint32_t)(len >> 29);
c->Nl = l;
*Nh += (uint32_t)(len >> 29);
*Nl = l;
size_t n = c->num;
size_t n = *num;
if (n != 0) {
if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
OPENSSL_memcpy(c->data + n, data, HASH_CBLOCK - n);
HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
n = HASH_CBLOCK - n;
data += n;
if (len >= block_size || len + n >= block_size) {
OPENSSL_memcpy(data + n, in, block_size - n);
block_func(h, data, 1);
n = block_size - n;
in += n;
len -= n;
c->num = 0;
// Keep |c->data| zeroed when unused.
OPENSSL_memset(c->data, 0, HASH_CBLOCK);
*num = 0;
// Keep |data| zeroed when unused.
OPENSSL_memset(data, 0, block_size);
} else {
OPENSSL_memcpy(c->data + n, data, len);
c->num += (unsigned)len;
return 1;
OPENSSL_memcpy(data + n, in, len);
*num += (unsigned)len;
return;
}
}
n = len / HASH_CBLOCK;
n = len / block_size;
if (n > 0) {
HASH_BLOCK_DATA_ORDER(c->h, data, n);
n *= HASH_CBLOCK;
data += n;
block_func(h, in, n);
n *= block_size;
in += n;
len -= n;
}
if (len != 0) {
c->num = (unsigned)len;
OPENSSL_memcpy(c->data, data, len);
*num = (unsigned)len;
OPENSSL_memcpy(data, in, len);
}
return 1;
}
void HASH_TRANSFORM(HASH_CTX *c, const uint8_t data[HASH_CBLOCK]) {
HASH_BLOCK_DATA_ORDER(c->h, data, 1);
}
int HASH_FINAL(uint8_t out[HASH_DIGEST_LENGTH], HASH_CTX *c) {
// |c->data| always has room for at least one byte. A full block would have
// crypto_md32_final incorporates the partial block and trailing length into the
// digest state |h|. The trailing length is encoded in little-endian if
// |is_big_endian| is zero and big-endian otherwise. |data| must be a buffer of
// length |block_size| with the first |*num| bytes containing a partial block.
// |Nh| and |Nl| contain the total number of bits processed. On return, this
// function clears the partial block in |data| and
// |*num|.
//
// This function does not serialize |h| into a final digest. This is the
// responsibility of the caller.
static inline void crypto_md32_final(crypto_md32_block_func block_func,
uint32_t *h, uint8_t *data,
size_t block_size, unsigned *num,
uint32_t Nh, uint32_t Nl,
int is_big_endian) {
// |data| always has room for at least one byte. A full block would have
// been consumed.
size_t n = c->num;
assert(n < HASH_CBLOCK);
c->data[n] = 0x80;
size_t n = *num;
assert(n < block_size);
data[n] = 0x80;
n++;
// Fill the block with zeros if there isn't room for a 64-bit length.
if (n > (HASH_CBLOCK - 8)) {
OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - n);
if (n > block_size - 8) {
OPENSSL_memset(data + n, 0, block_size - n);
n = 0;
HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
block_func(h, data, 1);
}
OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - 8 - n);
OPENSSL_memset(data + n, 0, block_size - 8 - n);
// Append a 64-bit length to the block and process it.
uint8_t *p = c->data + HASH_CBLOCK - 8;
#if defined(DATA_ORDER_IS_BIG_ENDIAN)
HOST_l2c(c->Nh, p);
HOST_l2c(c->Nl, p);
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
HOST_l2c(c->Nl, p);
HOST_l2c(c->Nh, p);
#endif
assert(p == c->data + HASH_CBLOCK);
HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
c->num = 0;
OPENSSL_memset(c->data, 0, HASH_CBLOCK);
HASH_MAKE_STRING(c, out);
return 1;
if (is_big_endian) {
CRYPTO_store_u32_be(data + block_size - 8, Nh);
CRYPTO_store_u32_be(data + block_size - 4, Nl);
} else {
CRYPTO_store_u32_le(data + block_size - 8, Nl);
CRYPTO_store_u32_le(data + block_size - 4, Nh);
}
block_func(h, data, 1);
*num = 0;
OPENSSL_memset(data, 0, block_size);
}
#if defined(__cplusplus)
} // extern C
#endif
#endif // OPENSSL_HEADER_DIGEST_MD32_COMMON_H

View File

@ -16,6 +16,7 @@
#endif
#include <CCryptoBoringSSL_arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.code 32
@ -70,8 +71,7 @@ _gcm_init_v8:
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
veor q9,q9,q14
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
vst1.64 {q13,q14},[r0] @ store Htable[1..2]
vst1.64 {q13,q14},[r0]! @ store Htable[1..2]
bx lr
.globl _gcm_gmult_v8
@ -258,6 +258,7 @@ Ldone_v8:
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM
#endif // defined(__arm__) && defined(__APPLE__)
#if defined(__linux__) && defined(__ELF__)

View File

@ -17,6 +17,7 @@
#endif
#include <CCryptoBoringSSL_arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.fpu neon
.code 32
@ -69,8 +70,7 @@ gcm_init_v8:
vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing
veor q9,q9,q14
vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed
vst1.64 {q13,q14},[r0] @ store Htable[1..2]
vst1.64 {q13,q14},[r0]! @ store Htable[1..2]
bx lr
.size gcm_init_v8,.-gcm_init_v8
.globl gcm_gmult_v8
@ -254,6 +254,7 @@ gcm_ghash_v8:
.align 2
.align 2
#endif
#endif
#endif // !OPENSSL_NO_ASM
.section .note.GNU-stack,"",%progbits
#endif // defined(__arm__) && defined(__linux__)

View File

@ -16,6 +16,7 @@
#endif
#include <CCryptoBoringSSL_arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.globl _gcm_init_v8
@ -66,8 +67,48 @@ _gcm_init_v8:
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0] //store Htable[1..2]
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.globl _gcm_gmult_v8
@ -119,6 +160,8 @@ _gcm_gmult_v8:
.align 4
_gcm_ghash_v8:
AARCH64_VALID_CALL_TARGET
cmp x3,#64
b.hs Lgcm_ghash_v8_4x
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
@ -245,9 +288,290 @@ Ldone_v8:
ret
.align 4
gcm_ghash_v8_4x:
Lgcm_ghash_v8_4x:
ld1 {v0.2d},[x0] //load [rotated] Xi
ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
ext v25.16b,v7.16b,v7.16b,#8
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
pmull2 v31.1q,v20.2d,v25.2d
pmull v30.1q,v21.1d,v7.1d
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#128
b.lo Ltail4x
b Loop4x
.align 4
Loop4x:
eor v16.16b,v4.16b,v0.16b
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
ext v3.16b,v16.16b,v16.16b,#8
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
ext v25.16b,v7.16b,v7.16b,#8
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
ext v24.16b,v6.16b,v6.16b,#8
eor v1.16b,v1.16b,v30.16b
ext v23.16b,v5.16b,v5.16b,#8
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
eor v1.16b,v1.16b,v17.16b
pmull2 v31.1q,v20.2d,v25.2d
eor v1.16b,v1.16b,v18.16b
pmull v30.1q,v21.1d,v7.1d
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
eor v0.16b,v1.16b,v18.16b
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v18.16b,v18.16b,v2.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v0.16b,v0.16b,v18.16b
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#64
b.hs Loop4x
Ltail4x:
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
adds x3,x3,#64
b.eq Ldone4x
cmp x3,#32
b.lo Lone
b.eq Ltwo
Lthree:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d,v6.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v29.1q,v20.1d,v24.1d //H·Ii+2
eor v6.16b,v6.16b,v24.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
pmull2 v31.1q,v20.2d,v24.2d
pmull v30.1q,v21.1d,v6.1d
eor v0.16b,v0.16b,v18.16b
pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1
eor v5.16b,v5.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull2 v23.1q,v22.2d,v23.2d
eor v16.16b,v4.16b,v0.16b
pmull2 v5.1q,v21.2d,v5.2d
ext v3.16b,v16.16b,v16.16b,#8
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v26.2d,v3.2d
pmull v1.1q,v27.1d,v16.1d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Ltwo:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull v29.1q,v20.1d,v23.1d //H·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull2 v31.1q,v20.2d,v23.2d
pmull v30.1q,v21.1d,v5.1d
pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v22.2d,v3.2d
pmull2 v1.1q,v21.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b Ldone4x
.align 4
Lone:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v20.1d,v3.1d
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v20.2d,v3.2d
pmull v1.1q,v21.1d,v16.1d
Ldone4x:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
#endif
st1 {v0.2d},[x0] //write out Xi
ret
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif // !OPENSSL_NO_ASM
#endif // defined(__aarch64__) && defined(__APPLE__)
#if defined(__linux__) && defined(__ELF__)

View File

@ -17,6 +17,7 @@
#endif
#include <CCryptoBoringSSL_arm_arch.h>
#if __ARM_MAX_ARCH__>=7
.text
.arch armv8-a+crypto
.globl gcm_init_v8
@ -67,8 +68,48 @@ gcm_init_v8:
ext v17.16b,v22.16b,v22.16b,#8 //Karatsuba pre-processing
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v21.2d,v22.2d},[x0] //store Htable[1..2]
st1 {v21.2d,v22.2d},[x0],#32 //store Htable[1..2]
//calculate H^3 and H^4
pmull v0.1q,v20.1d, v22.1d
pmull v5.1q,v22.1d,v22.1d
pmull2 v2.1q,v20.2d, v22.2d
pmull2 v7.1q,v22.2d,v22.2d
pmull v1.1q,v16.1d,v17.1d
pmull v6.1q,v17.1d,v17.1d
ext v16.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
ext v17.16b,v5.16b,v7.16b,#8
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v16.16b
eor v4.16b,v5.16b,v7.16b
eor v6.16b,v6.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase
eor v6.16b,v6.16b,v4.16b
pmull v4.1q,v5.1d,v19.1d
ins v2.d[0],v1.d[1]
ins v7.d[0],v6.d[1]
ins v1.d[1],v0.d[0]
ins v6.d[1],v5.d[0]
eor v0.16b,v1.16b,v18.16b
eor v5.16b,v6.16b,v4.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
ext v4.16b,v5.16b,v5.16b,#8
pmull v0.1q,v0.1d,v19.1d
pmull v5.1q,v5.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v4.16b,v4.16b,v7.16b
eor v20.16b, v0.16b,v18.16b //H^3
eor v22.16b,v5.16b,v4.16b //H^4
ext v16.16b,v20.16b, v20.16b,#8 //Karatsuba pre-processing
ext v17.16b,v22.16b,v22.16b,#8
eor v16.16b,v16.16b,v20.16b
eor v17.16b,v17.16b,v22.16b
ext v21.16b,v16.16b,v17.16b,#8 //pack Karatsuba pre-processed
st1 {v20.2d,v21.2d,v22.2d},[x0] //store Htable[3..5]
ret
.size gcm_init_v8,.-gcm_init_v8
.globl gcm_gmult_v8
@ -120,6 +161,8 @@ gcm_gmult_v8:
.align 4
gcm_ghash_v8:
AARCH64_VALID_CALL_TARGET
cmp x3,#64
b.hs .Lgcm_ghash_v8_4x
ld1 {v0.2d},[x0] //load [rotated] Xi
//"[rotated]" means that
//loaded value would have
@ -246,10 +289,291 @@ gcm_ghash_v8:
ret
.size gcm_ghash_v8,.-gcm_ghash_v8
.type gcm_ghash_v8_4x,%function
.align 4
gcm_ghash_v8_4x:
.Lgcm_ghash_v8_4x:
ld1 {v0.2d},[x0] //load [rotated] Xi
ld1 {v20.2d,v21.2d,v22.2d},[x1],#48 //load twisted H, ..., H^2
movi v19.16b,#0xe1
ld1 {v26.2d,v27.2d,v28.2d},[x1] //load twisted H^3, ..., H^4
shl v19.2d,v19.2d,#57 //compose 0xc2.0 constant
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
ext v25.16b,v7.16b,v7.16b,#8
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
pmull2 v31.1q,v20.2d,v25.2d
pmull v30.1q,v21.1d,v7.1d
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#128
b.lo .Ltail4x
b .Loop4x
.align 4
.Loop4x:
eor v16.16b,v4.16b,v0.16b
ld1 {v4.2d,v5.2d,v6.2d,v7.2d},[x2],#64
ext v3.16b,v16.16b,v16.16b,#8
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v7.16b,v7.16b
rev64 v4.16b,v4.16b
#endif
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
ext v25.16b,v7.16b,v7.16b,#8
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
ext v24.16b,v6.16b,v6.16b,#8
eor v1.16b,v1.16b,v30.16b
ext v23.16b,v5.16b,v5.16b,#8
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
pmull v29.1q,v20.1d,v25.1d //H·Ii+3
eor v7.16b,v7.16b,v25.16b
eor v1.16b,v1.16b,v17.16b
pmull2 v31.1q,v20.2d,v25.2d
eor v1.16b,v1.16b,v18.16b
pmull v30.1q,v21.1d,v7.1d
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
pmull v16.1q,v22.1d,v24.1d //H^2·Ii+2
eor v6.16b,v6.16b,v24.16b
pmull2 v24.1q,v22.2d,v24.2d
eor v0.16b,v1.16b,v18.16b
pmull2 v6.1q,v21.2d,v6.2d
eor v29.16b,v29.16b,v16.16b
eor v31.16b,v31.16b,v24.16b
eor v30.16b,v30.16b,v6.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
pmull v7.1q,v26.1d,v23.1d //H^3·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v18.16b,v18.16b,v2.16b
pmull2 v23.1q,v26.2d,v23.2d
pmull v5.1q,v27.1d,v5.1d
eor v0.16b,v0.16b,v18.16b
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v30.16b,v30.16b,v5.16b
subs x3,x3,#64
b.hs .Loop4x
.Ltail4x:
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v28.1d,v3.1d //H^4·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v28.2d,v3.2d
pmull2 v1.1q,v27.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
adds x3,x3,#64
b.eq .Ldone4x
cmp x3,#32
b.lo .Lone
b.eq .Ltwo
.Lthree:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d,v6.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v6.16b,v6.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v24.16b,v6.16b,v6.16b,#8
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
pmull v29.1q,v20.1d,v24.1d //H·Ii+2
eor v6.16b,v6.16b,v24.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
pmull2 v31.1q,v20.2d,v24.2d
pmull v30.1q,v21.1d,v6.1d
eor v0.16b,v0.16b,v18.16b
pmull v7.1q,v22.1d,v23.1d //H^2·Ii+1
eor v5.16b,v5.16b,v23.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull2 v23.1q,v22.2d,v23.2d
eor v16.16b,v4.16b,v0.16b
pmull2 v5.1q,v21.2d,v5.2d
ext v3.16b,v16.16b,v16.16b,#8
eor v29.16b,v29.16b,v7.16b
eor v31.16b,v31.16b,v23.16b
eor v30.16b,v30.16b,v5.16b
pmull v0.1q,v26.1d,v3.1d //H^3·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v26.2d,v3.2d
pmull v1.1q,v27.1d,v16.1d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b .Ldone4x
.align 4
.Ltwo:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d,v5.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v5.16b,v5.16b
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
ext v23.16b,v5.16b,v5.16b,#8
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
pmull v29.1q,v20.1d,v23.1d //H·Ii+1
eor v5.16b,v5.16b,v23.16b
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull2 v31.1q,v20.2d,v23.2d
pmull v30.1q,v21.1d,v5.1d
pmull v0.1q,v22.1d,v3.1d //H^2·(Xi+Ii)
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v22.2d,v3.2d
pmull2 v1.1q,v21.2d,v16.2d
eor v0.16b,v0.16b,v29.16b
eor v2.16b,v2.16b,v31.16b
eor v1.16b,v1.16b,v30.16b
b .Ldone4x
.align 4
.Lone:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
ld1 {v4.2d},[x2]
eor v1.16b,v1.16b,v18.16b
#ifndef __ARMEB__
rev64 v4.16b,v4.16b
#endif
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
eor v16.16b,v4.16b,v0.16b
ext v3.16b,v16.16b,v16.16b,#8
pmull v0.1q,v20.1d,v3.1d
eor v16.16b,v16.16b,v3.16b
pmull2 v2.1q,v20.2d,v3.2d
pmull v1.1q,v21.1d,v16.1d
.Ldone4x:
ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
eor v18.16b,v0.16b,v2.16b
eor v1.16b,v1.16b,v17.16b
eor v1.16b,v1.16b,v18.16b
pmull v18.1q,v0.1d,v19.1d //1st phase of reduction
ins v2.d[0],v1.d[1]
ins v1.d[1],v0.d[0]
eor v0.16b,v1.16b,v18.16b
ext v18.16b,v0.16b,v0.16b,#8 //2nd phase of reduction
pmull v0.1q,v0.1d,v19.1d
eor v18.16b,v18.16b,v2.16b
eor v0.16b,v0.16b,v18.16b
ext v0.16b,v0.16b,v0.16b,#8
#ifndef __ARMEB__
rev64 v0.16b,v0.16b
#endif
st1 {v0.2d},[x0] //write out Xi
ret
.size gcm_ghash_v8_4x,.-gcm_ghash_v8_4x
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
#endif
#endif
#endif // !OPENSSL_NO_ASM
.section .note.GNU-stack,"",%progbits
#endif // defined(__aarch64__) && defined(__linux__)

View File

@ -1,29 +0,0 @@
/* Copyright (c) 2017, Google Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include <CCryptoBoringSSL_crypto.h>
// This file exists in order to give the fipsmodule target, in non-FIPS mode,
// something to compile.
int FIPS_mode(void) {
#if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN)
return 1;
#else
return 0;
#endif
}
int FIPS_mode_set(int on) { return on == FIPS_mode(); }

View File

@ -60,6 +60,7 @@
#include <string.h>
#include "../../internal.h"
#include "../digest/md32_common.h"
uint8_t *MD4(const uint8_t *data, size_t len, uint8_t out[MD4_DIGEST_LENGTH]) {
@ -84,29 +85,26 @@ int MD4_Init(MD4_CTX *md4) {
void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
#define DATA_ORDER_IS_LITTLE_ENDIAN
void MD4_Transform(MD4_CTX *c, const uint8_t data[MD4_CBLOCK]) {
md4_block_data_order(c->h, data, 1);
}
#define HASH_CTX MD4_CTX
#define HASH_CBLOCK 64
#define HASH_DIGEST_LENGTH 16
#define HASH_UPDATE MD4_Update
#define HASH_TRANSFORM MD4_Transform
#define HASH_FINAL MD4_Final
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
} while (0)
#define HASH_BLOCK_DATA_ORDER md4_block_data_order
int MD4_Update(MD4_CTX *c, const void *data, size_t len) {
crypto_md32_update(&md4_block_data_order, c->h, c->data, MD4_CBLOCK, &c->num,
&c->Nh, &c->Nl, data, len);
return 1;
}
#include "../digest/md32_common.h"
int MD4_Final(uint8_t out[MD4_DIGEST_LENGTH], MD4_CTX *c) {
crypto_md32_final(&md4_block_data_order, c->h, c->data, MD4_CBLOCK, &c->num,
c->Nh, c->Nl, /*is_big_endian=*/0);
CRYPTO_store_u32_le(out, c->h[0]);
CRYPTO_store_u32_le(out + 4, c->h[1]);
CRYPTO_store_u32_le(out + 8, c->h[2]);
CRYPTO_store_u32_le(out + 12, c->h[3]);
return 1;
}
// As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
// simplified to the code below. Wei attributes these optimizations
@ -136,7 +134,7 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
} while (0)
void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
uint32_t A, B, C, D, l;
uint32_t A, B, C, D;
uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15;
A = state[0];
@ -145,53 +143,53 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
D = state[3];
for (; num--;) {
HOST_c2l(data, l);
X0 = l;
HOST_c2l(data, l);
X1 = l;
X0 = CRYPTO_load_u32_le(data);
data += 4;
X1 = CRYPTO_load_u32_le(data);
data += 4;
// Round 0
R0(A, B, C, D, X0, 3, 0);
HOST_c2l(data, l);
X2 = l;
X2 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X1, 7, 0);
HOST_c2l(data, l);
X3 = l;
X3 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X2, 11, 0);
HOST_c2l(data, l);
X4 = l;
X4 = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X3, 19, 0);
HOST_c2l(data, l);
X5 = l;
X5 = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X4, 3, 0);
HOST_c2l(data, l);
X6 = l;
X6 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X5, 7, 0);
HOST_c2l(data, l);
X7 = l;
X7 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X6, 11, 0);
HOST_c2l(data, l);
X8 = l;
X8 = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X7, 19, 0);
HOST_c2l(data, l);
X9 = l;
X9 = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X8, 3, 0);
HOST_c2l(data, l);
X10 = l;
X10 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X9, 7, 0);
HOST_c2l(data, l);
X11 = l;
X11 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X10, 11, 0);
HOST_c2l(data, l);
X12 = l;
X12 = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X11, 19, 0);
HOST_c2l(data, l);
X13 = l;
X13 = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X12, 3, 0);
HOST_c2l(data, l);
X14 = l;
X14 = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X13, 7, 0);
HOST_c2l(data, l);
X15 = l;
X15 = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X14, 11, 0);
R0(B, C, D, A, X15, 19, 0);
// Round 1
@ -236,15 +234,6 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
}
}
#undef DATA_ORDER_IS_LITTLE_ENDIAN
#undef HASH_CTX
#undef HASH_CBLOCK
#undef HASH_DIGEST_LENGTH
#undef HASH_UPDATE
#undef HASH_TRANSFORM
#undef HASH_FINAL
#undef HASH_MAKE_STRING
#undef HASH_BLOCK_DATA_ORDER
#undef F
#undef G
#undef H
@ -252,5 +241,3 @@ void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
#undef R0
#undef R1
#undef R2
#undef HOST_c2l
#undef HOST_l2c

View File

@ -60,8 +60,9 @@
#include <CCryptoBoringSSL_mem.h>
#include "internal.h"
#include "../../internal.h"
#include "../digest/md32_common.h"
#include "internal.h"
uint8_t *MD5(const uint8_t *data, size_t len, uint8_t out[MD5_DIGEST_LENGTH]) {
@ -89,30 +90,26 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
size_t num);
#endif
void MD5_Transform(MD5_CTX *c, const uint8_t data[MD5_CBLOCK]) {
md5_block_data_order(c->h, data, 1);
}
#define DATA_ORDER_IS_LITTLE_ENDIAN
int MD5_Update(MD5_CTX *c, const void *data, size_t len) {
crypto_md32_update(&md5_block_data_order, c->h, c->data, MD5_CBLOCK, &c->num,
&c->Nh, &c->Nl, data, len);
return 1;
}
#define HASH_CTX MD5_CTX
#define HASH_CBLOCK 64
#define HASH_DIGEST_LENGTH 16
#define HASH_UPDATE MD5_Update
#define HASH_TRANSFORM MD5_Transform
#define HASH_FINAL MD5_Final
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
} while (0)
#define HASH_BLOCK_DATA_ORDER md5_block_data_order
int MD5_Final(uint8_t out[MD5_DIGEST_LENGTH], MD5_CTX *c) {
crypto_md32_final(&md5_block_data_order, c->h, c->data, MD5_CBLOCK, &c->num,
c->Nh, c->Nl, /*is_big_endian=*/0);
#include "../digest/md32_common.h"
CRYPTO_store_u32_le(out, c->h[0]);
CRYPTO_store_u32_le(out + 4, c->h[1]);
CRYPTO_store_u32_le(out + 8, c->h[2]);
CRYPTO_store_u32_le(out + 12, c->h[3]);
return 1;
}
// As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
// simplified to the code below. Wei attributes these optimizations
@ -158,7 +155,7 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
#endif
static void md5_block_data_order(uint32_t *state, const uint8_t *data,
size_t num) {
uint32_t A, B, C, D, l;
uint32_t A, B, C, D;
uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12,
XX13, XX14, XX15;
#define X(i) XX##i
@ -169,53 +166,53 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
D = state[3];
for (; num--;) {
HOST_c2l(data, l);
X(0) = l;
HOST_c2l(data, l);
X(1) = l;
X(0) = CRYPTO_load_u32_le(data);
data += 4;
X(1) = CRYPTO_load_u32_le(data);
data += 4;
// Round 0
R0(A, B, C, D, X(0), 7, 0xd76aa478L);
HOST_c2l(data, l);
X(2) = l;
X(2) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(1), 12, 0xe8c7b756L);
HOST_c2l(data, l);
X(3) = l;
X(3) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(2), 17, 0x242070dbL);
HOST_c2l(data, l);
X(4) = l;
X(4) = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X(3), 22, 0xc1bdceeeL);
HOST_c2l(data, l);
X(5) = l;
X(5) = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X(4), 7, 0xf57c0fafL);
HOST_c2l(data, l);
X(6) = l;
X(6) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(5), 12, 0x4787c62aL);
HOST_c2l(data, l);
X(7) = l;
X(7) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(6), 17, 0xa8304613L);
HOST_c2l(data, l);
X(8) = l;
X(8) = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X(7), 22, 0xfd469501L);
HOST_c2l(data, l);
X(9) = l;
X(9) = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X(8), 7, 0x698098d8L);
HOST_c2l(data, l);
X(10) = l;
X(10) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(9), 12, 0x8b44f7afL);
HOST_c2l(data, l);
X(11) = l;
X(11) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(10), 17, 0xffff5bb1L);
HOST_c2l(data, l);
X(12) = l;
X(12) = CRYPTO_load_u32_le(data);
data += 4;
R0(B, C, D, A, X(11), 22, 0x895cd7beL);
HOST_c2l(data, l);
X(13) = l;
X(13) = CRYPTO_load_u32_le(data);
data += 4;
R0(A, B, C, D, X(12), 7, 0x6b901122L);
HOST_c2l(data, l);
X(14) = l;
X(14) = CRYPTO_load_u32_le(data);
data += 4;
R0(D, A, B, C, X(13), 12, 0xfd987193L);
HOST_c2l(data, l);
X(15) = l;
X(15) = CRYPTO_load_u32_le(data);
data += 4;
R0(C, D, A, B, X(14), 17, 0xa679438eL);
R0(B, C, D, A, X(15), 22, 0x49b40821L);
// Round 1
@ -279,15 +276,6 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
#undef X
#endif
#undef DATA_ORDER_IS_LITTLE_ENDIAN
#undef HASH_CTX
#undef HASH_CBLOCK
#undef HASH_DIGEST_LENGTH
#undef HASH_UPDATE
#undef HASH_TRANSFORM
#undef HASH_FINAL
#undef HASH_MAKE_STRING
#undef HASH_BLOCK_DATA_ORDER
#undef F
#undef G
#undef H
@ -297,5 +285,3 @@ static void md5_block_data_order(uint32_t *state, const uint8_t *data,
#undef R1
#undef R2
#undef R3
#undef HOST_c2l
#undef HOST_l2c

View File

@ -52,20 +52,25 @@
#include <CCryptoBoringSSL_type_check.h>
#include "internal.h"
#include "../../internal.h"
void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, uint8_t ivec[16],
block128_f block) {
assert(key != NULL && ivec != NULL);
if (len == 0) {
// Avoid |ivec| == |iv| in the |memcpy| below, which is not legal in C.
return;
}
assert(in != NULL && out != NULL);
size_t n;
const uint8_t *iv = ivec;
assert(key != NULL && ivec != NULL);
assert(len == 0 || (in != NULL && out != NULL));
while (len >= 16) {
for (n = 0; n < 16; n += sizeof(size_t)) {
store_word_le(out + n, load_word_le(in + n) ^ load_word_le(iv + n));
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(
out + n, CRYPTO_load_word_le(in + n) ^ CRYPTO_load_word_le(iv + n));
}
(*block)(out, out, key);
iv = out;
@ -97,30 +102,36 @@ void CRYPTO_cbc128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, uint8_t ivec[16],
block128_f block) {
size_t n;
union {
size_t t[16 / sizeof(size_t)];
uint8_t c[16];
} tmp;
assert(key != NULL && ivec != NULL);
assert(len == 0 || (in != NULL && out != NULL));
if (len == 0) {
// Avoid |ivec| == |iv| in the |memcpy| below, which is not legal in C.
return;
}
assert(in != NULL && out != NULL);
const uintptr_t inptr = (uintptr_t) in;
const uintptr_t outptr = (uintptr_t) out;
// If |in| and |out| alias, |in| must be ahead.
assert(inptr >= outptr || inptr + len <= outptr);
size_t n;
union {
crypto_word_t t[16 / sizeof(crypto_word_t)];
uint8_t c[16];
} tmp;
if ((inptr >= 32 && outptr <= inptr - 32) || inptr < outptr) {
// If |out| is at least two blocks behind |in| or completely disjoint, there
// is no need to decrypt to a temporary block.
OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
OPENSSL_STATIC_ASSERT(16 % sizeof(crypto_word_t) == 0,
"block cannot be evenly divided into words");
const uint8_t *iv = ivec;
while (len >= 16) {
(*block)(in, out, key);
for (n = 0; n < 16; n += sizeof(size_t)) {
store_word_le(out + n, load_word_le(out + n) ^ load_word_le(iv + n));
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(out + n) ^
CRYPTO_load_word_le(iv + n));
}
iv = in;
len -= 16;
@ -129,16 +140,16 @@ void CRYPTO_cbc128_decrypt(const uint8_t *in, uint8_t *out, size_t len,
}
OPENSSL_memcpy(ivec, iv, 16);
} else {
OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
OPENSSL_STATIC_ASSERT(16 % sizeof(crypto_word_t) == 0,
"block cannot be evenly divided into words");
while (len >= 16) {
(*block)(in, tmp.c, key);
for (n = 0; n < 16; n += sizeof(size_t)) {
size_t c = load_word_le(in + n);
store_word_le(out + n,
tmp.t[n / sizeof(size_t)] ^ load_word_le(ivec + n));
store_word_le(ivec + n, c);
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
crypto_word_t c = CRYPTO_load_word_le(in + n);
CRYPTO_store_word_le(out + n, tmp.t[n / sizeof(crypto_word_t)] ^
CRYPTO_load_word_le(ivec + n));
CRYPTO_store_word_le(ivec + n, c);
}
len -= 16;
in += 16;

View File

@ -72,10 +72,11 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
}
while (len >= 16) {
(*block)(ivec, ivec, key);
for (; n < 16; n += sizeof(size_t)) {
size_t tmp = load_word_le(ivec + n) ^ load_word_le(in + n);
store_word_le(ivec + n, tmp);
store_word_le(out + n, tmp);
for (; n < 16; n += sizeof(crypto_word_t)) {
crypto_word_t tmp =
CRYPTO_load_word_le(ivec + n) ^ CRYPTO_load_word_le(in + n);
CRYPTO_store_word_le(ivec + n, tmp);
CRYPTO_store_word_le(out + n, tmp);
}
len -= 16;
out += 16;
@ -101,10 +102,10 @@ void CRYPTO_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
}
while (len >= 16) {
(*block)(ivec, ivec, key);
for (; n < 16; n += sizeof(size_t)) {
size_t t = load_word_le(in + n);
store_word_le(out + n, load_word_le(ivec + n) ^ t);
store_word_le(ivec + n, t);
for (; n < 16; n += sizeof(crypto_word_t)) {
crypto_word_t t = CRYPTO_load_word_le(in + n);
CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(ivec + n) ^ t);
CRYPTO_store_word_le(ivec + n, t);
}
len -= 16;
out += 16;

View File

@ -52,6 +52,7 @@
#include <string.h>
#include "internal.h"
#include "../../internal.h"
// NOTE: the IV/counter CTR mode is big-endian. The code itself
@ -69,8 +70,8 @@ static void ctr128_inc(uint8_t *counter) {
} while (n);
}
OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
"block cannot be divided into size_t");
OPENSSL_STATIC_ASSERT(16 % sizeof(crypto_word_t) == 0,
"block cannot be divided into crypto_word_t");
// The input encrypted as though 128bit counter mode is being used. The extra
// state information to record how much of the 128bit block we have used is
@ -102,9 +103,9 @@ void CRYPTO_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
while (len >= 16) {
(*block)(ivec, ecount_buf, key);
ctr128_inc(ivec);
for (n = 0; n < 16; n += sizeof(size_t)) {
store_word_le(out + n,
load_word_le(in + n) ^ load_word_le(ecount_buf + n));
for (n = 0; n < 16; n += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(out + n, CRYPTO_load_word_le(in + n) ^
CRYPTO_load_word_le(ecount_buf + n));
}
len -= 16;
out += 16;
@ -152,7 +153,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
n = (n + 1) % 16;
}
ctr32 = GETU32(ivec + 12);
ctr32 = CRYPTO_load_u32_be(ivec + 12);
while (len >= 16) {
size_t blocks = len / 16;
// 1<<28 is just a not-so-small yet not-so-large number...
@ -172,7 +173,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
}
(*func)(in, out, blocks, key, ivec);
// (*func) does not update ivec, caller does:
PUTU32(ivec + 12, ctr32);
CRYPTO_store_u32_be(ivec + 12, ctr32);
// ... overflow was detected, propogate carry.
if (ctr32 == 0) {
ctr96_inc(ivec);
@ -186,7 +187,7 @@ void CRYPTO_ctr128_encrypt_ctr32(const uint8_t *in, uint8_t *out, size_t len,
OPENSSL_memset(ecount_buf, 0, 16);
(*func)(ecount_buf, ecount_buf, 1, key, ivec);
++ctr32;
PUTU32(ivec + 12, ctr32);
CRYPTO_store_u32_be(ivec + 12, ctr32);
if (ctr32 == 0) {
ctr96_inc(ivec);
}

View File

@ -73,7 +73,7 @@ static const size_t kSizeTWithoutLower4Bits = (size_t) -16;
#if defined(GHASH_ASM_X86_64) || defined(GHASH_ASM_X86)
static inline void gcm_reduce_1bit(u128 *V) {
if (sizeof(size_t) == 8) {
if (sizeof(crypto_word_t) == 8) {
uint64_t T = UINT64_C(0xe100000000000000) & (0 - (V->hi & 1));
V->hi = (V->lo << 63) | (V->hi >> 1);
V->lo = (V->lo >> 1) ^ T;
@ -377,9 +377,10 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
(*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(size_t)) {
store_word_le(out + i,
load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;
@ -394,9 +395,10 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
(*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(size_t)) {
store_word_le(out + i,
load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;
@ -468,9 +470,10 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
(*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(size_t)) {
store_word_le(out + i,
load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;
@ -485,9 +488,10 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx, const AES_KEY *key,
(*block)(ctx->Yi.c, ctx->EKi.c, key);
++ctr;
ctx->Yi.d[3] = CRYPTO_bswap4(ctr);
for (size_t i = 0; i < 16; i += sizeof(size_t)) {
store_word_le(out + i,
load_word_le(in + i) ^ ctx->EKi.t[i / sizeof(size_t)]);
for (size_t i = 0; i < 16; i += sizeof(crypto_word_t)) {
CRYPTO_store_word_le(out + i,
CRYPTO_load_word_le(in + i) ^
ctx->EKi.t[i / sizeof(crypto_word_t)]);
}
out += 16;
in += 16;

View File

@ -64,27 +64,6 @@ extern "C" {
#endif
static inline uint32_t GETU32(const void *in) {
uint32_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return CRYPTO_bswap4(v);
}
static inline void PUTU32(void *out, uint32_t v) {
v = CRYPTO_bswap4(v);
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline size_t load_word_le(const void *in) {
size_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return v;
}
static inline void store_word_le(void *out, size_t v) {
OPENSSL_memcpy(out, &v, sizeof(v));
}
// block128_f is the type of an AES block cipher implementation.
//
// Unlike upstream OpenSSL, it and the other functions in this file hard-code
@ -171,7 +150,7 @@ typedef struct {
uint64_t u[2];
uint32_t d[4];
uint8_t c[16];
size_t t[16 / sizeof(size_t)];
crypto_word_t t[16 / sizeof(crypto_word_t)];
} Yi, EKi, EK0, len, Xi;
// Note that the order of |Xi| and |gcm_key| is fixed by the MOVBE-based,

View File

@ -60,7 +60,8 @@ OPENSSL_STATIC_ASSERT(16 % sizeof(size_t) == 0,
void CRYPTO_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, uint8_t ivec[16], unsigned *num,
block128_f block) {
assert(in && out && key && ivec && num);
assert(key != NULL && ivec != NULL && num != NULL);
assert(len == 0 || (in != NULL && out != NULL));
unsigned n = *num;

View File

@ -45,12 +45,10 @@ void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len,
// for seeding a DRBG, to |out_entropy|. It sets |*out_used_cpu| to one if the
// entropy came directly from the CPU and zero if it came from the OS. It
// actively obtains entropy from the CPU/OS and so should not be called from
// within the FIPS module if |BORINGSSL_FIPS_PASSIVE_ENTROPY| is defined.
// within the FIPS module.
void CRYPTO_get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len,
int *out_used_cpu);
#if defined(BORINGSSL_FIPS_PASSIVE_ENTROPY)
// RAND_load_entropy supplies |entropy_len| bytes of entropy to the module. The
// |from_cpu| parameter is true iff the entropy was obtained directly from the
// CPU.
@ -61,23 +59,22 @@ void RAND_load_entropy(const uint8_t *entropy, size_t entropy_len,
// when the module has stopped because it has run out of entropy.
void RAND_need_entropy(size_t bytes_needed);
#endif // BORINGSSL_FIPS_PASSIVE_ENTROPY
#endif // BORINGSSL_FIPS
// CRYPTO_sysrand fills |len| bytes at |buf| with entropy from the operating
// system.
void CRYPTO_sysrand(uint8_t *buf, size_t len);
#if defined(OPENSSL_URANDOM)
// CRYPTO_init_sysrand initializes long-lived resources needed to draw entropy
// from the operating system.
void CRYPTO_init_sysrand(void);
// CRYPTO_sysrand_for_seed fills |len| bytes at |buf| with entropy from the
// operating system. It may draw from the |GRND_RANDOM| pool on Android,
// depending on the vendor's configuration.
void CRYPTO_sysrand_for_seed(uint8_t *buf, size_t len);
#if defined(OPENSSL_URANDOM)
// CRYPTO_init_sysrand initializes long-lived resources needed to draw entropy
// from the operating system.
void CRYPTO_init_sysrand(void);
// CRYPTO_sysrand_if_available fills |len| bytes at |buf| with entropy from the
// operating system, or early /dev/urandom data, and returns 1, _if_ the entropy
// pool is initialized or if getrandom() is not available and not in FIPS mode.
@ -87,10 +84,6 @@ int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len);
#else
OPENSSL_INLINE void CRYPTO_init_sysrand(void) {}
OPENSSL_INLINE void CRYPTO_sysrand_for_seed(uint8_t *buf, size_t len) {
CRYPTO_sysrand(buf, len);
}
OPENSSL_INLINE int CRYPTO_sysrand_if_available(uint8_t *buf, size_t len) {
CRYPTO_sysrand(buf, len);
return 1;

View File

@ -178,8 +178,6 @@ void CRYPTO_get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len,
#endif
}
#if defined(BORINGSSL_FIPS_PASSIVE_ENTROPY)
// In passive entropy mode, entropy is supplied from outside of the module via
// |RAND_load_entropy| and is stored in global instance of the following
// structure.
@ -242,17 +240,6 @@ static void get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len,
CRYPTO_STATIC_MUTEX_unlock_write(entropy_buffer_lock_bss_get());
}
#else
// In the active case, |get_seed_entropy| simply calls |CRYPTO_get_seed_entropy|
// in order to obtain entropy from the CPU or OS.
static void get_seed_entropy(uint8_t *out_entropy, size_t out_entropy_len,
int *out_used_cpu) {
CRYPTO_get_seed_entropy(out_entropy, out_entropy_len, out_used_cpu);
}
#endif // !BORINGSSL_FIPS_PASSIVE_ENTROPY
// rand_get_seed fills |seed| with entropy and sets |*out_used_cpu| to one if
// that entropy came directly from the CPU and zero otherwise.
static void rand_get_seed(struct rand_thread_state *state,
@ -306,7 +293,7 @@ static void rand_get_seed(struct rand_thread_state *state,
int *out_used_cpu) {
// If not in FIPS mode, we don't overread from the system entropy source and
// we don't depend only on the hardware RDRAND.
CRYPTO_sysrand(seed, CTR_DRBG_ENTROPY_LEN);
CRYPTO_sysrand_for_seed(seed, CTR_DRBG_ENTROPY_LEN);
*out_used_cpu = 0;
}

View File

@ -62,6 +62,15 @@
#include <sys/random.h>
#endif
#if defined(OPENSSL_FREEBSD)
#define URANDOM_BLOCKS_FOR_ENTROPY
#if __FreeBSD__ >= 12
// getrandom is supported in FreeBSD 12 and up.
#define FREEBSD_GETRANDOM
#include <sys/random.h>
#endif
#endif
#include <CCryptoBoringSSL_thread.h>
#include <CCryptoBoringSSL_mem.h>
@ -176,6 +185,11 @@ static void init_once(void) {
}
#endif
#if defined(FREEBSD_GETRANDOM)
*urandom_fd_bss_get() = kHaveGetrandom;
return;
#endif
// Android FIPS builds must support getrandom.
#if defined(BORINGSSL_FIPS) && defined(OPENSSL_ANDROID)
perror("getrandom not found");
@ -256,11 +270,11 @@ static void wait_for_entropy(void) {
return;
}
#if defined(BORINGSSL_FIPS)
// In FIPS mode we ensure that the kernel has sufficient entropy before
// continuing. This is automatically handled by getrandom, which requires
// that the entropy pool has been initialised, but for urandom we have to
// poll.
#if defined(BORINGSSL_FIPS) && !defined(URANDOM_BLOCKS_FOR_ENTROPY)
// In FIPS mode on platforms where urandom doesn't block at startup, we ensure
// that the kernel has sufficient entropy before continuing. This is
// automatically handled by getrandom, which requires that the entropy pool
// has been initialised, but for urandom we have to poll.
for (;;) {
int entropy_bits;
if (ioctl(fd, RNDGETENTCNT, &entropy_bits)) {
@ -277,7 +291,7 @@ static void wait_for_entropy(void) {
usleep(250000);
}
#endif // BORINGSSL_FIPS
#endif // BORINGSSL_FIPS && !URANDOM_BLOCKS_FOR_ENTROPY
}
// fill_with_entropy writes |len| bytes of entropy into |out|. It returns one
@ -291,11 +305,14 @@ static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) {
return 1;
}
#if defined(USE_NR_getrandom)
#if defined(USE_NR_getrandom) || defined(FREEBSD_GETRANDOM)
int getrandom_flags = 0;
if (!block) {
getrandom_flags |= GRND_NONBLOCK;
}
#endif
#if defined (USE_NR_getrandom)
if (seed) {
getrandom_flags |= *extra_getrandom_flags_for_seed_bss_get();
}
@ -315,6 +332,8 @@ static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) {
if (*urandom_fd_bss_get() == kHaveGetrandom) {
#if defined(USE_NR_getrandom)
r = boringssl_getrandom(out, len, getrandom_flags);
#elif defined(FREEBSD_GETRANDOM)
r = getrandom(out, len, getrandom_flags);
#elif defined(OPENSSL_MACOS)
if (__builtin_available(macos 10.12, *)) {
// |getentropy| can only request 256 bytes at a time.
@ -348,6 +367,10 @@ static int fill_with_entropy(uint8_t *out, size_t len, int block, int seed) {
return 1;
}
void CRYPTO_init_sysrand(void) {
CRYPTO_once(rand_once_bss_get(), init_once);
}
// CRYPTO_sysrand puts |requested| random bytes into |out|.
void CRYPTO_sysrand(uint8_t *out, size_t requested) {
if (!fill_with_entropy(out, requested, /*block=*/1, /*seed=*/0)) {
@ -356,18 +379,12 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) {
}
}
void CRYPTO_init_sysrand(void) {
CRYPTO_once(rand_once_bss_get(), init_once);
}
#if defined(BORINGSSL_FIPS)
void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) {
if (!fill_with_entropy(out, requested, /*block=*/1, /*seed=*/1)) {
perror("entropy fill failed");
abort();
}
}
#endif // BORINGSSL_FIPS
int CRYPTO_sysrand_if_available(uint8_t *out, size_t requested) {
if (fill_with_entropy(out, requested, /*block=*/0, /*seed=*/0)) {

View File

@ -458,18 +458,18 @@ static const struct pkcs1_sig_prefix kPKCS1SigPrefixes[] = {
};
int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len,
int *is_alloced, int hash_nid, const uint8_t *msg,
size_t msg_len) {
int *is_alloced, int hash_nid, const uint8_t *digest,
size_t digest_len) {
unsigned i;
if (hash_nid == NID_md5_sha1) {
// Special case: SSL signature, just check the length.
if (msg_len != SSL_SIG_LENGTH) {
if (digest_len != SSL_SIG_LENGTH) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
return 0;
}
*out_msg = (uint8_t*) msg;
*out_msg = (uint8_t *)digest;
*out_msg_len = SSL_SIG_LENGTH;
*is_alloced = 0;
return 1;
@ -481,7 +481,7 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len,
continue;
}
if (msg_len != sig_prefix->hash_len) {
if (digest_len != sig_prefix->hash_len) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
return 0;
}
@ -491,7 +491,7 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len,
unsigned signed_msg_len;
uint8_t *signed_msg;
signed_msg_len = prefix_len + msg_len;
signed_msg_len = prefix_len + digest_len;
if (signed_msg_len < prefix_len) {
OPENSSL_PUT_ERROR(RSA, RSA_R_TOO_LONG);
return 0;
@ -504,7 +504,7 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len,
}
OPENSSL_memcpy(signed_msg, prefix, prefix_len);
OPENSSL_memcpy(signed_msg + prefix_len, msg, msg_len);
OPENSSL_memcpy(signed_msg + prefix_len, digest, digest_len);
*out_msg = signed_msg;
*out_msg_len = signed_msg_len;
@ -517,8 +517,8 @@ int RSA_add_pkcs1_prefix(uint8_t **out_msg, size_t *out_msg_len,
return 0;
}
int RSA_sign(int hash_nid, const uint8_t *in, unsigned in_len, uint8_t *out,
unsigned *out_len, RSA *rsa) {
int RSA_sign(int hash_nid, const uint8_t *digest, unsigned digest_len,
uint8_t *out, unsigned *out_len, RSA *rsa) {
const unsigned rsa_size = RSA_size(rsa);
int ret = 0;
uint8_t *signed_msg = NULL;
@ -527,11 +527,12 @@ int RSA_sign(int hash_nid, const uint8_t *in, unsigned in_len, uint8_t *out,
size_t size_t_out_len;
if (rsa->meth->sign) {
return rsa->meth->sign(hash_nid, in, in_len, out, out_len, rsa);
return rsa->meth->sign(hash_nid, digest, digest_len, out, out_len, rsa);
}
if (!RSA_add_pkcs1_prefix(&signed_msg, &signed_msg_len,
&signed_msg_is_alloced, hash_nid, in, in_len) ||
&signed_msg_is_alloced, hash_nid, digest,
digest_len) ||
!RSA_sign_raw(rsa, &size_t_out_len, out, rsa_size, signed_msg,
signed_msg_len, RSA_PKCS1_PADDING)) {
goto err;
@ -548,9 +549,9 @@ err:
}
int RSA_sign_pss_mgf1(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out,
const uint8_t *in, size_t in_len, const EVP_MD *md,
const EVP_MD *mgf1_md, int salt_len) {
if (in_len != EVP_MD_size(md)) {
const uint8_t *digest, size_t digest_len,
const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len) {
if (digest_len != EVP_MD_size(md)) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
return 0;
}
@ -562,15 +563,15 @@ int RSA_sign_pss_mgf1(RSA *rsa, size_t *out_len, uint8_t *out, size_t max_out,
return 0;
}
int ret =
RSA_padding_add_PKCS1_PSS_mgf1(rsa, padded, in, md, mgf1_md, salt_len) &&
RSA_sign_raw(rsa, out_len, out, max_out, padded, padded_len,
RSA_NO_PADDING);
int ret = RSA_padding_add_PKCS1_PSS_mgf1(rsa, padded, digest, md, mgf1_md,
salt_len) &&
RSA_sign_raw(rsa, out_len, out, max_out, padded, padded_len,
RSA_NO_PADDING);
OPENSSL_free(padded);
return ret;
}
int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len,
int RSA_verify(int hash_nid, const uint8_t *digest, size_t digest_len,
const uint8_t *sig, size_t sig_len, RSA *rsa) {
if (rsa->n == NULL || rsa->e == NULL) {
OPENSSL_PUT_ERROR(RSA, RSA_R_VALUE_MISSING);
@ -584,7 +585,7 @@ int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len,
size_t signed_msg_len = 0, len;
int signed_msg_is_alloced = 0;
if (hash_nid == NID_md5_sha1 && msg_len != SSL_SIG_LENGTH) {
if (hash_nid == NID_md5_sha1 && digest_len != SSL_SIG_LENGTH) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
return 0;
}
@ -601,7 +602,8 @@ int RSA_verify(int hash_nid, const uint8_t *msg, size_t msg_len,
}
if (!RSA_add_pkcs1_prefix(&signed_msg, &signed_msg_len,
&signed_msg_is_alloced, hash_nid, msg, msg_len)) {
&signed_msg_is_alloced, hash_nid, digest,
digest_len)) {
goto out;
}
@ -622,10 +624,10 @@ out:
return ret;
}
int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *msg, size_t msg_len,
int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *digest, size_t digest_len,
const EVP_MD *md, const EVP_MD *mgf1_md, int salt_len,
const uint8_t *sig, size_t sig_len) {
if (msg_len != EVP_MD_size(md)) {
if (digest_len != EVP_MD_size(md)) {
OPENSSL_PUT_ERROR(RSA, RSA_R_INVALID_MESSAGE_LENGTH);
return 0;
}
@ -647,7 +649,7 @@ int RSA_verify_pss_mgf1(RSA *rsa, const uint8_t *msg, size_t msg_len,
goto err;
}
ret = RSA_verify_PKCS1_PSS_mgf1(rsa, msg, md, mgf1_md, em, salt_len);
ret = RSA_verify_PKCS1_PSS_mgf1(rsa, digest, md, mgf1_md, em, salt_len);
err:
OPENSSL_free(em);

View File

@ -79,9 +79,8 @@ int rsa_check_public_key(const RSA *rsa) {
return 0;
}
unsigned rsa_bits = BN_num_bits(rsa->n);
if (rsa_bits > 16 * 1024) {
unsigned n_bits = BN_num_bits(rsa->n);
if (n_bits > 16 * 1024) {
OPENSSL_PUT_ERROR(RSA, RSA_R_MODULUS_TOO_LARGE);
return 0;
}
@ -96,17 +95,21 @@ int rsa_check_public_key(const RSA *rsa) {
// [2] https://www.imperialviolet.org/2012/03/17/rsados.html
// [3] https://msdn.microsoft.com/en-us/library/aa387685(VS.85).aspx
static const unsigned kMaxExponentBits = 33;
if (BN_num_bits(rsa->e) > kMaxExponentBits) {
unsigned e_bits = BN_num_bits(rsa->e);
if (e_bits > kMaxExponentBits ||
// Additionally reject e = 1 or even e. e must be odd to be relatively
// prime with phi(n).
e_bits < 2 ||
!BN_is_odd(rsa->e)) {
OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_E_VALUE);
return 0;
}
// Verify |n > e|. Comparing |rsa_bits| to |kMaxExponentBits| is a small
// Verify |n > e|. Comparing |n_bits| to |kMaxExponentBits| is a small
// shortcut to comparing |n| and |e| directly. In reality, |kMaxExponentBits|
// is much smaller than the minimum RSA key size that any application should
// accept.
if (rsa_bits <= kMaxExponentBits) {
if (n_bits <= kMaxExponentBits) {
OPENSSL_PUT_ERROR(RSA, RSA_R_KEY_SIZE_TOO_SMALL);
return 0;
}

View File

@ -0,0 +1,79 @@
/* Copyright (c) 2017, Google Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include <CCryptoBoringSSL_crypto.h>
#include "../../internal.h"
#include "../delocate.h"
int FIPS_mode(void) {
#if defined(BORINGSSL_FIPS) && !defined(OPENSSL_ASAN)
return 1;
#else
return 0;
#endif
}
int FIPS_mode_set(int on) { return on == FIPS_mode(); }
#if defined(BORINGSSL_FIPS_COUNTERS)
size_t FIPS_read_counter(enum fips_counter_t counter) {
if (counter < 0 || counter > fips_counter_max) {
abort();
}
const size_t *array =
CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_FIPS_COUNTERS);
if (!array) {
return 0;
}
return array[counter];
}
void boringssl_fips_inc_counter(enum fips_counter_t counter) {
if (counter < 0 || counter > fips_counter_max) {
abort();
}
size_t *array =
CRYPTO_get_thread_local(OPENSSL_THREAD_LOCAL_FIPS_COUNTERS);
if (!array) {
const size_t num_bytes = sizeof(size_t) * (fips_counter_max + 1);
array = OPENSSL_malloc(num_bytes);
if (!array) {
return;
}
OPENSSL_memset(array, 0, num_bytes);
if (!CRYPTO_set_thread_local(OPENSSL_THREAD_LOCAL_FIPS_COUNTERS, array,
OPENSSL_free)) {
// |OPENSSL_free| has already been called by |CRYPTO_set_thread_local|.
return;
}
}
array[counter]++;
}
#else
size_t FIPS_read_counter(enum fips_counter_t counter) { return 0; }
// boringssl_fips_inc_counter is a no-op, inline function in internal.h in this
// case. That should let the compiler optimise away the callsites.
#endif

View File

@ -38,8 +38,14 @@
// MSVC wants to put a NUL byte at the end of non-char arrays and so cannot
// compile this.
#if !defined(_MSC_VER)
// compile the real logic.
#if defined(_MSC_VER)
int BORINGSSL_self_test(void) {
return 0;
}
#else
#if defined(BORINGSSL_FIPS) && defined(OPENSSL_ANDROID)
// FIPS builds on Android will test for flag files, named after the module hash,

View File

@ -60,8 +60,9 @@
#include <CCryptoBoringSSL_mem.h>
#include "internal.h"
#include "../../internal.h"
#include "../digest/md32_common.h"
#include "internal.h"
int SHA1_Init(SHA_CTX *sha) {
@ -83,30 +84,33 @@ uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t out[SHA_DIGEST_LENGTH]) {
return out;
}
#define DATA_ORDER_IS_BIG_ENDIAN
#if !defined(SHA1_ASM)
static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
size_t num);
#endif
#define HASH_CTX SHA_CTX
#define HASH_CBLOCK 64
#define HASH_DIGEST_LENGTH 20
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
ll = (c)->h[0]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[1]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[2]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[3]; \
HOST_l2c(ll, (s)); \
ll = (c)->h[4]; \
HOST_l2c(ll, (s)); \
} while (0)
void SHA1_Transform(SHA_CTX *c, const uint8_t data[SHA_CBLOCK]) {
sha1_block_data_order(c->h, data, 1);
}
int SHA1_Update(SHA_CTX *c, const void *data, size_t len) {
crypto_md32_update(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num,
&c->Nh, &c->Nl, data, len);
return 1;
}
int SHA1_Final(uint8_t out[SHA_DIGEST_LENGTH], SHA_CTX *c) {
crypto_md32_final(&sha1_block_data_order, c->h, c->data, SHA_CBLOCK, &c->num,
c->Nh, c->Nl, /*is_big_endian=*/1);
CRYPTO_store_u32_be(out, c->h[0]);
CRYPTO_store_u32_be(out + 4, c->h[1]);
CRYPTO_store_u32_be(out + 8, c->h[2]);
CRYPTO_store_u32_be(out + 12, c->h[3]);
CRYPTO_store_u32_be(out + 16, c->h[4]);
return 1;
}
#define HASH_UPDATE SHA1_Update
#define HASH_TRANSFORM SHA1_Transform
#define HASH_FINAL SHA1_Final
#define HASH_BLOCK_DATA_ORDER sha1_block_data_order
#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
#define Xupdate(a, ix, ia, ib, ic, id) \
do { \
@ -114,13 +118,6 @@ uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t out[SHA_DIGEST_LENGTH]) {
(ix) = (a) = ROTATE((a), 1); \
} while (0)
#if !defined(SHA1_ASM)
static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
size_t num);
#endif
#include "../digest/md32_common.h"
#define K_00_19 0x5a827999UL
#define K_20_39 0x6ed9eba1UL
#define K_40_59 0x8f1bbcdcUL
@ -193,7 +190,7 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
#if !defined(SHA1_ASM)
static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
size_t num) {
register uint32_t A, B, C, D, E, T, l;
register uint32_t A, B, C, D, E, T;
uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
XX11, XX12, XX13, XX14, XX15;
@ -204,52 +201,52 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
E = state[4];
for (;;) {
HOST_c2l(data, l);
X(0) = l;
HOST_c2l(data, l);
X(1) = l;
X(0) = CRYPTO_load_u32_be(data);
data += 4;
X(1) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(0, A, B, C, D, E, T, X(0));
HOST_c2l(data, l);
X(2) = l;
X(2) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(1, T, A, B, C, D, E, X(1));
HOST_c2l(data, l);
X(3) = l;
X(3) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(2, E, T, A, B, C, D, X(2));
HOST_c2l(data, l);
X(4) = l;
X(4) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(3, D, E, T, A, B, C, X(3));
HOST_c2l(data, l);
X(5) = l;
X(5) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(4, C, D, E, T, A, B, X(4));
HOST_c2l(data, l);
X(6) = l;
X(6) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(5, B, C, D, E, T, A, X(5));
HOST_c2l(data, l);
X(7) = l;
X(7) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(6, A, B, C, D, E, T, X(6));
HOST_c2l(data, l);
X(8) = l;
X(8) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(7, T, A, B, C, D, E, X(7));
HOST_c2l(data, l);
X(9) = l;
X(9) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(8, E, T, A, B, C, D, X(8));
HOST_c2l(data, l);
X(10) = l;
X(10) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(9, D, E, T, A, B, C, X(9));
HOST_c2l(data, l);
X(11) = l;
X(11) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(10, C, D, E, T, A, B, X(10));
HOST_c2l(data, l);
X(12) = l;
X(12) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(11, B, C, D, E, T, A, X(11));
HOST_c2l(data, l);
X(13) = l;
X(13) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(12, A, B, C, D, E, T, X(12));
HOST_c2l(data, l);
X(14) = l;
X(14) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(13, T, A, B, C, D, E, X(13));
HOST_c2l(data, l);
X(15) = l;
X(15) = CRYPTO_load_u32_be(data);
data += 4;
BODY_00_15(14, E, T, A, B, C, D, X(14));
BODY_00_15(15, D, E, T, A, B, C, X(15));
@ -341,15 +338,6 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
}
#endif
#undef DATA_ORDER_IS_BIG_ENDIAN
#undef HASH_CTX
#undef HASH_CBLOCK
#undef HASH_DIGEST_LENGTH
#undef HASH_MAKE_STRING
#undef HASH_UPDATE
#undef HASH_TRANSFORM
#undef HASH_FINAL
#undef HASH_BLOCK_DATA_ORDER
#undef ROTATE
#undef Xupdate
#undef K_00_19
@ -367,5 +355,3 @@ static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
#undef BODY_40_59
#undef BODY_60_79
#undef X
#undef HOST_c2l
#undef HOST_l2c

View File

@ -60,8 +60,9 @@
#include <CCryptoBoringSSL_mem.h>
#include "internal.h"
#include "../../internal.h"
#include "../digest/md32_common.h"
#include "internal.h"
int SHA224_Init(SHA256_CTX *sha) {
@ -112,71 +113,60 @@ uint8_t *SHA256(const uint8_t *data, size_t len,
return out;
}
int SHA224_Update(SHA256_CTX *ctx, const void *data, size_t len) {
return SHA256_Update(ctx, data, len);
}
int SHA224_Final(uint8_t out[SHA224_DIGEST_LENGTH], SHA256_CTX *ctx) {
// SHA224_Init sets |ctx->md_len| to |SHA224_DIGEST_LENGTH|, so this has a
// smaller output.
return SHA256_Final(out, ctx);
}
#define DATA_ORDER_IS_BIG_ENDIAN
#define HASH_CTX SHA256_CTX
#define HASH_CBLOCK 64
#define HASH_DIGEST_LENGTH 32
// Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
// default: case below covers for it. It's not clear however if it's permitted
// to truncate to amount of bytes not divisible by 4. I bet not, but if it is,
// then default: case shall be extended. For reference. Idea behind separate
// cases for pre-defined lenghts is to let the compiler decide if it's
// appropriate to unroll small loops.
//
// TODO(davidben): The small |md_len| case is one of the few places a low-level
// hash 'final' function can fail. This should never happen.
#define HASH_MAKE_STRING(c, s) \
do { \
uint32_t ll; \
unsigned int nn; \
switch ((c)->md_len) { \
case SHA224_DIGEST_LENGTH: \
for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) { \
ll = (c)->h[nn]; \
HOST_l2c(ll, (s)); \
} \
break; \
case SHA256_DIGEST_LENGTH: \
for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) { \
ll = (c)->h[nn]; \
HOST_l2c(ll, (s)); \
} \
break; \
default: \
if ((c)->md_len > SHA256_DIGEST_LENGTH) { \
return 0; \
} \
for (nn = 0; nn < (c)->md_len / 4; nn++) { \
ll = (c)->h[nn]; \
HOST_l2c(ll, (s)); \
} \
break; \
} \
} while (0)
#define HASH_UPDATE SHA256_Update
#define HASH_TRANSFORM SHA256_Transform
#define HASH_FINAL SHA256_Final
#define HASH_BLOCK_DATA_ORDER sha256_block_data_order
#ifndef SHA256_ASM
static void sha256_block_data_order(uint32_t *state, const uint8_t *in,
size_t num);
#endif
#include "../digest/md32_common.h"
void SHA256_Transform(SHA256_CTX *c, const uint8_t data[SHA256_CBLOCK]) {
sha256_block_data_order(c->h, data, 1);
}
int SHA256_Update(SHA256_CTX *c, const void *data, size_t len) {
crypto_md32_update(&sha256_block_data_order, c->h, c->data, SHA256_CBLOCK,
&c->num, &c->Nh, &c->Nl, data, len);
return 1;
}
int SHA224_Update(SHA256_CTX *ctx, const void *data, size_t len) {
return SHA256_Update(ctx, data, len);
}
static int sha256_final_impl(uint8_t *out, SHA256_CTX *c) {
crypto_md32_final(&sha256_block_data_order, c->h, c->data, SHA256_CBLOCK,
&c->num, c->Nh, c->Nl, /*is_big_endian=*/1);
// TODO(davidben): This overflow check one of the few places a low-level hash
// 'final' function can fail. SHA-512 does not have a corresponding check.
// These functions already misbehave if the caller arbitrarily mutates |c|, so
// can we assume one of |SHA256_Init| or |SHA224_Init| was used?
if (c->md_len > SHA256_DIGEST_LENGTH) {
return 0;
}
assert(c->md_len % 4 == 0);
const size_t out_words = c->md_len / 4;
for (size_t i = 0; i < out_words; i++) {
CRYPTO_store_u32_be(out, c->h[i]);
out += 4;
}
return 1;
}
int SHA256_Final(uint8_t out[SHA256_DIGEST_LENGTH], SHA256_CTX *c) {
// Ideally we would assert |sha->md_len| is |SHA256_DIGEST_LENGTH| to match
// the size hint, but calling code often pairs |SHA224_Init| with
// |SHA256_Final| and expects |sha->md_len| to carry the size over.
//
// TODO(davidben): Add an assert and fix code to match them up.
return sha256_final_impl(out, c);
}
int SHA224_Final(uint8_t out[SHA224_DIGEST_LENGTH], SHA256_CTX *ctx) {
// SHA224_Init sets |ctx->md_len| to |SHA224_DIGEST_LENGTH|, so this has a
// smaller output.
assert(ctx->md_len == SHA224_DIGEST_LENGTH);
return sha256_final_impl(out, ctx);
}
#ifndef SHA256_ASM
static const uint32_t K256[64] = {
@ -241,55 +231,53 @@ static void sha256_block_data_order(uint32_t *state, const uint8_t *data,
g = state[6];
h = state[7];
uint32_t l;
HOST_c2l(data, l);
T1 = X[0] = l;
T1 = X[0] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(0, a, b, c, d, e, f, g, h);
HOST_c2l(data, l);
T1 = X[1] = l;
T1 = X[1] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(1, h, a, b, c, d, e, f, g);
HOST_c2l(data, l);
T1 = X[2] = l;
T1 = X[2] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(2, g, h, a, b, c, d, e, f);
HOST_c2l(data, l);
T1 = X[3] = l;
T1 = X[3] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(3, f, g, h, a, b, c, d, e);
HOST_c2l(data, l);
T1 = X[4] = l;
T1 = X[4] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(4, e, f, g, h, a, b, c, d);
HOST_c2l(data, l);
T1 = X[5] = l;
T1 = X[5] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(5, d, e, f, g, h, a, b, c);
HOST_c2l(data, l);
T1 = X[6] = l;
T1 = X[6] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(6, c, d, e, f, g, h, a, b);
HOST_c2l(data, l);
T1 = X[7] = l;
T1 = X[7] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(7, b, c, d, e, f, g, h, a);
HOST_c2l(data, l);
T1 = X[8] = l;
T1 = X[8] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(8, a, b, c, d, e, f, g, h);
HOST_c2l(data, l);
T1 = X[9] = l;
T1 = X[9] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(9, h, a, b, c, d, e, f, g);
HOST_c2l(data, l);
T1 = X[10] = l;
T1 = X[10] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(10, g, h, a, b, c, d, e, f);
HOST_c2l(data, l);
T1 = X[11] = l;
T1 = X[11] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(11, f, g, h, a, b, c, d, e);
HOST_c2l(data, l);
T1 = X[12] = l;
T1 = X[12] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(12, e, f, g, h, a, b, c, d);
HOST_c2l(data, l);
T1 = X[13] = l;
T1 = X[13] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(13, d, e, f, g, h, a, b, c);
HOST_c2l(data, l);
T1 = X[14] = l;
T1 = X[14] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(14, c, d, e, f, g, h, a, b);
HOST_c2l(data, l);
T1 = X[15] = l;
T1 = X[15] = CRYPTO_load_u32_be(data);
data += 4;
ROUND_00_15(15, b, c, d, e, f, g, h, a);
for (i = 16; i < 64; i += 8) {
@ -321,15 +309,6 @@ void SHA256_TransformBlocks(uint32_t state[8], const uint8_t *data,
sha256_block_data_order(state, data, num_blocks);
}
#undef DATA_ORDER_IS_BIG_ENDIAN
#undef HASH_CTX
#undef HASH_CBLOCK
#undef HASH_DIGEST_LENGTH
#undef HASH_MAKE_STRING
#undef HASH_UPDATE
#undef HASH_TRANSFORM
#undef HASH_FINAL
#undef HASH_BLOCK_DATA_ORDER
#undef ROTATE
#undef Sigma0
#undef Sigma1
@ -339,5 +318,3 @@ void SHA256_TransformBlocks(uint32_t state[8], const uint8_t *data,
#undef Maj
#undef ROUND_00_15
#undef ROUND_16_63
#undef HOST_c2l
#undef HOST_l2c

View File

@ -70,6 +70,8 @@
// this writing, so there is no need for a common collector/padding
// implementation yet.
static int sha512_final_impl(uint8_t *out, SHA512_CTX *sha);
int SHA384_Init(SHA512_CTX *sha) {
sha->h[0] = UINT64_C(0xcbbb9d5dc1059ed8);
sha->h[1] = UINT64_C(0x629a292a367cd507);
@ -146,8 +148,8 @@ uint8_t *SHA512_256(const uint8_t *data, size_t len,
uint8_t out[SHA512_256_DIGEST_LENGTH]) {
SHA512_CTX ctx;
SHA512_256_Init(&ctx);
SHA512_Update(&ctx, data, len);
SHA512_Final(out, &ctx);
SHA512_256_Update(&ctx, data, len);
SHA512_256_Final(out, &ctx);
OPENSSL_cleanse(&ctx, sizeof(ctx));
return out;
}
@ -160,8 +162,9 @@ static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
int SHA384_Final(uint8_t out[SHA384_DIGEST_LENGTH], SHA512_CTX *sha) {
// |SHA384_Init| sets |sha->md_len| to |SHA384_DIGEST_LENGTH|, so this has a
// |smaller output.
return SHA512_Final(out, sha);
// smaller output.
assert(sha->md_len == SHA384_DIGEST_LENGTH);
return sha512_final_impl(out, sha);
}
int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len) {
@ -172,11 +175,11 @@ int SHA512_256_Update(SHA512_CTX *sha, const void *data, size_t len) {
return SHA512_Update(sha, data, len);
}
int SHA512_256_Final(uint8_t out[SHA512_256_DIGEST_LENGTH],
SHA512_CTX *sha) {
int SHA512_256_Final(uint8_t out[SHA512_256_DIGEST_LENGTH], SHA512_CTX *sha) {
// |SHA512_256_Init| sets |sha->md_len| to |SHA512_256_DIGEST_LENGTH|, so this
// has a |smaller output.
return SHA512_Final(out, sha);
assert(sha->md_len == SHA512_256_DIGEST_LENGTH);
return sha512_final_impl(out, sha);
}
void SHA512_Transform(SHA512_CTX *c, const uint8_t block[SHA512_CBLOCK]) {
@ -232,6 +235,15 @@ int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
}
int SHA512_Final(uint8_t out[SHA512_DIGEST_LENGTH], SHA512_CTX *sha) {
// Ideally we would assert |sha->md_len| is |SHA512_DIGEST_LENGTH| to match
// the size hint, but calling code often pairs |SHA384_Init| with
// |SHA512_Final| and expects |sha->md_len| to carry the size over.
//
// TODO(davidben): Add an assert and fix code to match them up.
return sha512_final_impl(out, sha);
}
static int sha512_final_impl(uint8_t *out, SHA512_CTX *sha) {
uint8_t *p = sha->p;
size_t n = sha->num;
@ -244,22 +256,8 @@ int SHA512_Final(uint8_t out[SHA512_DIGEST_LENGTH], SHA512_CTX *sha) {
}
OPENSSL_memset(p + n, 0, sizeof(sha->p) - 16 - n);
p[sizeof(sha->p) - 1] = (uint8_t)(sha->Nl);
p[sizeof(sha->p) - 2] = (uint8_t)(sha->Nl >> 8);
p[sizeof(sha->p) - 3] = (uint8_t)(sha->Nl >> 16);
p[sizeof(sha->p) - 4] = (uint8_t)(sha->Nl >> 24);
p[sizeof(sha->p) - 5] = (uint8_t)(sha->Nl >> 32);
p[sizeof(sha->p) - 6] = (uint8_t)(sha->Nl >> 40);
p[sizeof(sha->p) - 7] = (uint8_t)(sha->Nl >> 48);
p[sizeof(sha->p) - 8] = (uint8_t)(sha->Nl >> 56);
p[sizeof(sha->p) - 9] = (uint8_t)(sha->Nh);
p[sizeof(sha->p) - 10] = (uint8_t)(sha->Nh >> 8);
p[sizeof(sha->p) - 11] = (uint8_t)(sha->Nh >> 16);
p[sizeof(sha->p) - 12] = (uint8_t)(sha->Nh >> 24);
p[sizeof(sha->p) - 13] = (uint8_t)(sha->Nh >> 32);
p[sizeof(sha->p) - 14] = (uint8_t)(sha->Nh >> 40);
p[sizeof(sha->p) - 15] = (uint8_t)(sha->Nh >> 48);
p[sizeof(sha->p) - 16] = (uint8_t)(sha->Nh >> 56);
CRYPTO_store_u64_be(p + sizeof(sha->p) - 16, sha->Nh);
CRYPTO_store_u64_be(p + sizeof(sha->p) - 8, sha->Nl);
sha512_block_data_order(sha->h, p, 1);
@ -272,9 +270,8 @@ int SHA512_Final(uint8_t out[SHA512_DIGEST_LENGTH], SHA512_CTX *sha) {
assert(sha->md_len % 8 == 0);
const size_t out_words = sha->md_len / 8;
for (size_t i = 0; i < out_words; i++) {
const uint64_t t = CRYPTO_bswap8(sha->h[i]);
memcpy(out, &t, sizeof(t));
out += sizeof(t);
CRYPTO_store_u64_be(out, sha->h[i]);
out += 8;
}
return 1;
@ -356,12 +353,6 @@ static const uint64_t K512[80] = {
#define ROTR(x, s) (((x) >> s) | (x) << (64 - s))
#endif
static inline uint64_t load_u64_be(const void *ptr) {
uint64_t ret;
OPENSSL_memcpy(&ret, ptr, sizeof(ret));
return CRYPTO_bswap8(ret);
}
#define Sigma0(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
#define Sigma1(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
#define sigma0(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ ((x) >> 7))
@ -392,7 +383,7 @@ static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
F[7] = state[7];
for (i = 0; i < 16; i++, F--) {
T = load_u64_be(in + i * 8);
T = CRYPTO_load_u64_be(in + i * 8);
F[0] = A;
F[4] = E;
F[8] = T;
@ -464,37 +455,37 @@ static void sha512_block_data_order(uint64_t *state, const uint8_t *in,
g = state[6];
h = state[7];
T1 = X[0] = load_u64_be(in);
T1 = X[0] = CRYPTO_load_u64_be(in);
ROUND_00_15(0, a, b, c, d, e, f, g, h);
T1 = X[1] = load_u64_be(in + 8);
T1 = X[1] = CRYPTO_load_u64_be(in + 8);
ROUND_00_15(1, h, a, b, c, d, e, f, g);
T1 = X[2] = load_u64_be(in + 2 * 8);
T1 = X[2] = CRYPTO_load_u64_be(in + 2 * 8);
ROUND_00_15(2, g, h, a, b, c, d, e, f);
T1 = X[3] = load_u64_be(in + 3 * 8);
T1 = X[3] = CRYPTO_load_u64_be(in + 3 * 8);
ROUND_00_15(3, f, g, h, a, b, c, d, e);
T1 = X[4] = load_u64_be(in + 4 * 8);
T1 = X[4] = CRYPTO_load_u64_be(in + 4 * 8);
ROUND_00_15(4, e, f, g, h, a, b, c, d);
T1 = X[5] = load_u64_be(in + 5 * 8);
T1 = X[5] = CRYPTO_load_u64_be(in + 5 * 8);
ROUND_00_15(5, d, e, f, g, h, a, b, c);
T1 = X[6] = load_u64_be(in + 6 * 8);
T1 = X[6] = CRYPTO_load_u64_be(in + 6 * 8);
ROUND_00_15(6, c, d, e, f, g, h, a, b);
T1 = X[7] = load_u64_be(in + 7 * 8);
T1 = X[7] = CRYPTO_load_u64_be(in + 7 * 8);
ROUND_00_15(7, b, c, d, e, f, g, h, a);
T1 = X[8] = load_u64_be(in + 8 * 8);
T1 = X[8] = CRYPTO_load_u64_be(in + 8 * 8);
ROUND_00_15(8, a, b, c, d, e, f, g, h);
T1 = X[9] = load_u64_be(in + 9 * 8);
T1 = X[9] = CRYPTO_load_u64_be(in + 9 * 8);
ROUND_00_15(9, h, a, b, c, d, e, f, g);
T1 = X[10] = load_u64_be(in + 10 * 8);
T1 = X[10] = CRYPTO_load_u64_be(in + 10 * 8);
ROUND_00_15(10, g, h, a, b, c, d, e, f);
T1 = X[11] = load_u64_be(in + 11 * 8);
T1 = X[11] = CRYPTO_load_u64_be(in + 11 * 8);
ROUND_00_15(11, f, g, h, a, b, c, d, e);
T1 = X[12] = load_u64_be(in + 12 * 8);
T1 = X[12] = CRYPTO_load_u64_be(in + 12 * 8);
ROUND_00_15(12, e, f, g, h, a, b, c, d);
T1 = X[13] = load_u64_be(in + 13 * 8);
T1 = X[13] = CRYPTO_load_u64_be(in + 13 * 8);
ROUND_00_15(13, d, e, f, g, h, a, b, c);
T1 = X[14] = load_u64_be(in + 14 * 8);
T1 = X[14] = CRYPTO_load_u64_be(in + 14 * 8);
ROUND_00_15(14, c, d, e, f, g, h, a, b);
T1 = X[15] = load_u64_be(in + 15 * 8);
T1 = X[15] = CRYPTO_load_u64_be(in + 15 * 8);
ROUND_00_15(15, b, c, d, e, f, g, h, a);
for (i = 16; i < 80; i += 16) {

View File

@ -12,70 +12,77 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#include <CCryptoBoringSSL_hpke.h>
#include <assert.h>
#include <string.h>
#include <CCryptoBoringSSL_aead.h>
#include <CCryptoBoringSSL_bytestring.h>
#include <CCryptoBoringSSL_curve25519.h>
#include <CCryptoBoringSSL_digest.h>
#include <CCryptoBoringSSL_err.h>
#include <CCryptoBoringSSL_evp.h>
#include <CCryptoBoringSSL_evp_errors.h>
#include <CCryptoBoringSSL_hkdf.h>
#include <CCryptoBoringSSL_rand.h>
#include <CCryptoBoringSSL_sha.h>
#include "../internal.h"
#include "internal.h"
// This file implements draft-irtf-cfrg-hpke-07.
// This file implements draft-irtf-cfrg-hpke-08.
#define KEM_CONTEXT_LEN (2 * X25519_PUBLIC_VALUE_LEN)
#define MAX_SEED_LEN X25519_PRIVATE_KEY_LEN
#define MAX_SHARED_SECRET_LEN SHA256_DIGEST_LENGTH
// HPKE KEM scheme IDs.
#define HPKE_DHKEM_X25519_HKDF_SHA256 0x0020
struct evp_hpke_kem_st {
uint16_t id;
size_t public_key_len;
size_t private_key_len;
size_t seed_len;
int (*init_key)(EVP_HPKE_KEY *key, const uint8_t *priv_key,
size_t priv_key_len);
int (*generate_key)(EVP_HPKE_KEY *key);
int (*encap_with_seed)(const EVP_HPKE_KEM *kem, uint8_t *out_shared_secret,
size_t *out_shared_secret_len, uint8_t *out_enc,
size_t *out_enc_len, size_t max_enc,
const uint8_t *peer_public_key,
size_t peer_public_key_len, const uint8_t *seed,
size_t seed_len);
int (*decap)(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret,
size_t *out_shared_secret_len, const uint8_t *enc,
size_t enc_len);
};
// This is strlen("HPKE") + 3 * sizeof(uint16_t).
#define HPKE_SUITE_ID_LEN 10
struct evp_hpke_kdf_st {
uint16_t id;
// We only support HKDF-based KDFs.
const EVP_MD *(*hkdf_md_func)(void);
};
#define HPKE_MODE_BASE 0
#define HPKE_MODE_PSK 1
struct evp_hpke_aead_st {
uint16_t id;
const EVP_AEAD *(*aead_func)(void);
};
static const char kHpkeRfcId[] = "HPKE-07";
// Low-level labeled KDF functions.
static const char kHpkeVersionId[] = "HPKE-v1";
static int add_label_string(CBB *cbb, const char *label) {
return CBB_add_bytes(cbb, (const uint8_t *)label, strlen(label));
}
// The suite_id for the KEM is defined as concat("KEM", I2OSP(kem_id, 2)). Note
// that the suite_id used outside of the KEM also includes the kdf_id and
// aead_id.
static const uint8_t kX25519SuiteID[] = {
'K', 'E', 'M', HPKE_DHKEM_X25519_HKDF_SHA256 >> 8,
HPKE_DHKEM_X25519_HKDF_SHA256 & 0x00ff};
// The suite_id for non-KEM pieces of HPKE is defined as concat("HPKE",
// I2OSP(kem_id, 2), I2OSP(kdf_id, 2), I2OSP(aead_id, 2)).
static int hpke_build_suite_id(uint8_t out[HPKE_SUITE_ID_LEN], uint16_t kdf_id,
uint16_t aead_id) {
CBB cbb;
int ret = CBB_init_fixed(&cbb, out, HPKE_SUITE_ID_LEN) &&
add_label_string(&cbb, "HPKE") &&
CBB_add_u16(&cbb, HPKE_DHKEM_X25519_HKDF_SHA256) &&
CBB_add_u16(&cbb, kdf_id) &&
CBB_add_u16(&cbb, aead_id);
CBB_cleanup(&cbb);
return ret;
}
static int hpke_labeled_extract(const EVP_MD *hkdf_md, uint8_t *out_key,
size_t *out_len, const uint8_t *salt,
size_t salt_len, const uint8_t *suite_id,
size_t suite_id_len, const char *label,
const uint8_t *ikm, size_t ikm_len) {
// labeledIKM = concat("RFCXXXX ", suite_id, label, IKM)
// labeledIKM = concat("HPKE-v1", suite_id, label, IKM)
CBB labeled_ikm;
int ok = CBB_init(&labeled_ikm, 0) &&
add_label_string(&labeled_ikm, kHpkeRfcId) &&
add_label_string(&labeled_ikm, kHpkeVersionId) &&
CBB_add_bytes(&labeled_ikm, suite_id, suite_id_len) &&
add_label_string(&labeled_ikm, label) &&
CBB_add_bytes(&labeled_ikm, ikm, ikm_len) &&
@ -90,11 +97,11 @@ static int hpke_labeled_expand(const EVP_MD *hkdf_md, uint8_t *out_key,
size_t prk_len, const uint8_t *suite_id,
size_t suite_id_len, const char *label,
const uint8_t *info, size_t info_len) {
// labeledInfo = concat(I2OSP(L, 2), "RFCXXXX ", suite_id, label, info)
// labeledInfo = concat(I2OSP(L, 2), "HPKE-v1", suite_id, label, info)
CBB labeled_info;
int ok = CBB_init(&labeled_info, 0) &&
CBB_add_u16(&labeled_info, out_len) &&
add_label_string(&labeled_info, kHpkeRfcId) &&
add_label_string(&labeled_info, kHpkeVersionId) &&
CBB_add_bytes(&labeled_info, suite_id, suite_id_len) &&
add_label_string(&labeled_info, label) &&
CBB_add_bytes(&labeled_info, info, info_len) &&
@ -104,102 +111,280 @@ static int hpke_labeled_expand(const EVP_MD *hkdf_md, uint8_t *out_key,
return ok;
}
static int hpke_extract_and_expand(const EVP_MD *hkdf_md, uint8_t *out_key,
size_t out_len,
const uint8_t dh[X25519_PUBLIC_VALUE_LEN],
const uint8_t kem_context[KEM_CONTEXT_LEN]) {
// KEM implementations.
// dhkem_extract_and_expand implements the ExtractAndExpand operation in the
// DHKEM construction. See section 4.1 of draft-irtf-cfrg-hpke-08.
static int dhkem_extract_and_expand(uint16_t kem_id, const EVP_MD *hkdf_md,
uint8_t *out_key, size_t out_len,
const uint8_t *dh, size_t dh_len,
const uint8_t *kem_context,
size_t kem_context_len) {
// concat("KEM", I2OSP(kem_id, 2))
uint8_t suite_id[5] = {'K', 'E', 'M', kem_id >> 8, kem_id & 0xff};
uint8_t prk[EVP_MAX_MD_SIZE];
size_t prk_len;
static const char kEaePrkLabel[] = "eae_prk";
if (!hpke_labeled_extract(hkdf_md, prk, &prk_len, NULL, 0, kX25519SuiteID,
sizeof(kX25519SuiteID), kEaePrkLabel, dh,
X25519_PUBLIC_VALUE_LEN)) {
return hpke_labeled_extract(hkdf_md, prk, &prk_len, NULL, 0, suite_id,
sizeof(suite_id), "eae_prk", dh, dh_len) &&
hpke_labeled_expand(hkdf_md, out_key, out_len, prk, prk_len, suite_id,
sizeof(suite_id), "shared_secret", kem_context,
kem_context_len);
}
static int x25519_init_key(EVP_HPKE_KEY *key, const uint8_t *priv_key,
size_t priv_key_len) {
if (priv_key_len != X25519_PRIVATE_KEY_LEN) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
return 0;
}
static const char kPRKExpandLabel[] = "shared_secret";
if (!hpke_labeled_expand(hkdf_md, out_key, out_len, prk, prk_len,
kX25519SuiteID, sizeof(kX25519SuiteID),
kPRKExpandLabel, kem_context, KEM_CONTEXT_LEN)) {
OPENSSL_memcpy(key->private_key, priv_key, priv_key_len);
X25519_public_from_private(key->public_key, priv_key);
return 1;
}
static int x25519_generate_key(EVP_HPKE_KEY *key) {
X25519_keypair(key->public_key, key->private_key);
return 1;
}
static int x25519_encap_with_seed(
const EVP_HPKE_KEM *kem, uint8_t *out_shared_secret,
size_t *out_shared_secret_len, uint8_t *out_enc, size_t *out_enc_len,
size_t max_enc, const uint8_t *peer_public_key, size_t peer_public_key_len,
const uint8_t *seed, size_t seed_len) {
if (max_enc < X25519_PUBLIC_VALUE_LEN) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE);
return 0;
}
if (seed_len != X25519_PRIVATE_KEY_LEN) {
OPENSSL_PUT_ERROR(EVP, EVP_R_DECODE_ERROR);
return 0;
}
X25519_public_from_private(out_enc, seed);
uint8_t dh[X25519_SHARED_KEY_LEN];
if (peer_public_key_len != X25519_PUBLIC_VALUE_LEN ||
!X25519(dh, seed, peer_public_key)) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
return 0;
}
uint8_t kem_context[2 * X25519_PUBLIC_VALUE_LEN];
OPENSSL_memcpy(kem_context, out_enc, X25519_PUBLIC_VALUE_LEN);
OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, peer_public_key,
X25519_PUBLIC_VALUE_LEN);
if (!dhkem_extract_and_expand(kem->id, EVP_sha256(), out_shared_secret,
SHA256_DIGEST_LENGTH, dh, sizeof(dh),
kem_context, sizeof(kem_context))) {
return 0;
}
*out_enc_len = X25519_PUBLIC_VALUE_LEN;
*out_shared_secret_len = SHA256_DIGEST_LENGTH;
return 1;
}
static int x25519_decap(const EVP_HPKE_KEY *key, uint8_t *out_shared_secret,
size_t *out_shared_secret_len, const uint8_t *enc,
size_t enc_len) {
uint8_t dh[X25519_SHARED_KEY_LEN];
if (enc_len != X25519_PUBLIC_VALUE_LEN ||
!X25519(dh, key->private_key, enc)) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
return 0;
}
uint8_t kem_context[2 * X25519_PUBLIC_VALUE_LEN];
OPENSSL_memcpy(kem_context, enc, X25519_PUBLIC_VALUE_LEN);
OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, key->public_key,
X25519_PUBLIC_VALUE_LEN);
if (!dhkem_extract_and_expand(key->kem->id, EVP_sha256(), out_shared_secret,
SHA256_DIGEST_LENGTH, dh, sizeof(dh),
kem_context, sizeof(kem_context))) {
return 0;
}
*out_shared_secret_len = SHA256_DIGEST_LENGTH;
return 1;
}
const EVP_HPKE_KEM *EVP_hpke_x25519_hkdf_sha256(void) {
static const EVP_HPKE_KEM kKEM = {
/*id=*/EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
/*public_key_len=*/X25519_PUBLIC_VALUE_LEN,
/*private_key_len=*/X25519_PRIVATE_KEY_LEN,
/*seed_len=*/X25519_PRIVATE_KEY_LEN,
x25519_init_key,
x25519_generate_key,
x25519_encap_with_seed,
x25519_decap,
};
return &kKEM;
}
uint16_t EVP_HPKE_KEM_id(const EVP_HPKE_KEM *kem) { return kem->id; }
void EVP_HPKE_KEY_zero(EVP_HPKE_KEY *key) {
OPENSSL_memset(key, 0, sizeof(EVP_HPKE_KEY));
}
void EVP_HPKE_KEY_cleanup(EVP_HPKE_KEY *key) {
// Nothing to clean up for now, but we may introduce a cleanup process in the
// future.
}
EVP_HPKE_KEY *EVP_HPKE_KEY_new(void) {
EVP_HPKE_KEY *key = OPENSSL_malloc(sizeof(EVP_HPKE_KEY));
if (key == NULL) {
OPENSSL_PUT_ERROR(EVP, ERR_R_MALLOC_FAILURE);
return NULL;
}
EVP_HPKE_KEY_zero(key);
return key;
}
void EVP_HPKE_KEY_free(EVP_HPKE_KEY *key) {
if (key != NULL) {
EVP_HPKE_KEY_cleanup(key);
OPENSSL_free(key);
}
}
int EVP_HPKE_KEY_copy(EVP_HPKE_KEY *dst, const EVP_HPKE_KEY *src) {
// For now, |EVP_HPKE_KEY| is trivially copyable.
OPENSSL_memcpy(dst, src, sizeof(EVP_HPKE_KEY));
return 1;
}
int EVP_HPKE_KEY_init(EVP_HPKE_KEY *key, const EVP_HPKE_KEM *kem,
const uint8_t *priv_key, size_t priv_key_len) {
EVP_HPKE_KEY_zero(key);
key->kem = kem;
if (!kem->init_key(key, priv_key, priv_key_len)) {
key->kem = NULL;
return 0;
}
return 1;
}
const EVP_AEAD *EVP_HPKE_get_aead(uint16_t aead_id) {
switch (aead_id) {
case EVP_HPKE_AEAD_AES_GCM_128:
return EVP_aead_aes_128_gcm();
case EVP_HPKE_AEAD_AES_GCM_256:
return EVP_aead_aes_256_gcm();
case EVP_HPKE_AEAD_CHACHA20POLY1305:
return EVP_aead_chacha20_poly1305();
}
OPENSSL_PUT_ERROR(EVP, ERR_R_INTERNAL_ERROR);
return NULL;
}
const EVP_MD *EVP_HPKE_get_hkdf_md(uint16_t kdf_id) {
switch (kdf_id) {
case EVP_HPKE_HKDF_SHA256:
return EVP_sha256();
case EVP_HPKE_HKDF_SHA384:
return EVP_sha384();
case EVP_HPKE_HKDF_SHA512:
return EVP_sha512();
}
OPENSSL_PUT_ERROR(EVP, ERR_R_INTERNAL_ERROR);
return NULL;
}
static int hpke_key_schedule(EVP_HPKE_CTX *hpke, uint8_t mode,
const uint8_t *shared_secret,
size_t shared_secret_len, const uint8_t *info,
size_t info_len, const uint8_t *psk,
size_t psk_len, const uint8_t *psk_id,
size_t psk_id_len) {
// Verify the PSK inputs.
switch (mode) {
case HPKE_MODE_BASE:
// This is an internal error, unreachable from the caller.
assert(psk_len == 0 && psk_id_len == 0);
break;
case HPKE_MODE_PSK:
if (psk_len == 0 || psk_id_len == 0) {
OPENSSL_PUT_ERROR(EVP, EVP_R_EMPTY_PSK);
return 0;
}
break;
default:
return 0;
}
// Attempt to get an EVP_AEAD*.
const EVP_AEAD *aead = EVP_HPKE_get_aead(hpke->aead_id);
if (aead == NULL) {
int EVP_HPKE_KEY_generate(EVP_HPKE_KEY *key, const EVP_HPKE_KEM *kem) {
EVP_HPKE_KEY_zero(key);
key->kem = kem;
if (!kem->generate_key(key)) {
key->kem = NULL;
return 0;
}
return 1;
}
const EVP_HPKE_KEM *EVP_HPKE_KEY_kem(const EVP_HPKE_KEY *key) {
return key->kem;
}
int EVP_HPKE_KEY_public_key(const EVP_HPKE_KEY *key, uint8_t *out,
size_t *out_len, size_t max_out) {
if (max_out < key->kem->public_key_len) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE);
return 0;
}
OPENSSL_memcpy(out, key->public_key, key->kem->public_key_len);
*out_len = key->kem->public_key_len;
return 1;
}
int EVP_HPKE_KEY_private_key(const EVP_HPKE_KEY *key, uint8_t *out,
size_t *out_len, size_t max_out) {
if (max_out < key->kem->private_key_len) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_BUFFER_SIZE);
return 0;
}
OPENSSL_memcpy(out, key->private_key, key->kem->private_key_len);
*out_len = key->kem->private_key_len;
return 1;
}
// Supported KDFs and AEADs.
const EVP_HPKE_KDF *EVP_hpke_hkdf_sha256(void) {
static const EVP_HPKE_KDF kKDF = {EVP_HPKE_HKDF_SHA256, &EVP_sha256};
return &kKDF;
}
uint16_t EVP_HPKE_KDF_id(const EVP_HPKE_KDF *kdf) { return kdf->id; }
const EVP_HPKE_AEAD *EVP_hpke_aes_128_gcm(void) {
static const EVP_HPKE_AEAD kAEAD = {EVP_HPKE_AES_128_GCM,
&EVP_aead_aes_128_gcm};
return &kAEAD;
}
const EVP_HPKE_AEAD *EVP_hpke_aes_256_gcm(void) {
static const EVP_HPKE_AEAD kAEAD = {EVP_HPKE_AES_256_GCM,
&EVP_aead_aes_256_gcm};
return &kAEAD;
}
const EVP_HPKE_AEAD *EVP_hpke_chacha20_poly1305(void) {
static const EVP_HPKE_AEAD kAEAD = {EVP_HPKE_CHACHA20_POLY1305,
&EVP_aead_chacha20_poly1305};
return &kAEAD;
}
uint16_t EVP_HPKE_AEAD_id(const EVP_HPKE_AEAD *aead) { return aead->id; }
const EVP_AEAD *EVP_HPKE_AEAD_aead(const EVP_HPKE_AEAD *aead) {
return aead->aead_func();
}
// HPKE implementation.
// This is strlen("HPKE") + 3 * sizeof(uint16_t).
#define HPKE_SUITE_ID_LEN 10
// The suite_id for non-KEM pieces of HPKE is defined as concat("HPKE",
// I2OSP(kem_id, 2), I2OSP(kdf_id, 2), I2OSP(aead_id, 2)).
static int hpke_build_suite_id(const EVP_HPKE_CTX *ctx,
uint8_t out[HPKE_SUITE_ID_LEN]) {
CBB cbb;
int ret = CBB_init_fixed(&cbb, out, HPKE_SUITE_ID_LEN) &&
add_label_string(&cbb, "HPKE") &&
CBB_add_u16(&cbb, EVP_HPKE_DHKEM_X25519_HKDF_SHA256) &&
CBB_add_u16(&cbb, ctx->kdf->id) &&
CBB_add_u16(&cbb, ctx->aead->id);
CBB_cleanup(&cbb);
return ret;
}
#define HPKE_MODE_BASE 0
static int hpke_key_schedule(EVP_HPKE_CTX *ctx, const uint8_t *shared_secret,
size_t shared_secret_len, const uint8_t *info,
size_t info_len) {
uint8_t suite_id[HPKE_SUITE_ID_LEN];
if (!hpke_build_suite_id(suite_id, hpke->kdf_id, hpke->aead_id)) {
if (!hpke_build_suite_id(ctx, suite_id)) {
return 0;
}
// psk_id_hash = LabeledExtract("", "psk_id_hash", psk_id)
static const char kPskIdHashLabel[] = "psk_id_hash";
// TODO(davidben): Precompute this value and store it with the EVP_HPKE_KDF.
const EVP_MD *hkdf_md = ctx->kdf->hkdf_md_func();
uint8_t psk_id_hash[EVP_MAX_MD_SIZE];
size_t psk_id_hash_len;
if (!hpke_labeled_extract(hpke->hkdf_md, psk_id_hash, &psk_id_hash_len, NULL,
0, suite_id, sizeof(suite_id), kPskIdHashLabel,
psk_id, psk_id_len)) {
if (!hpke_labeled_extract(hkdf_md, psk_id_hash, &psk_id_hash_len, NULL, 0,
suite_id, sizeof(suite_id), "psk_id_hash", NULL,
0)) {
return 0;
}
// info_hash = LabeledExtract("", "info_hash", info)
static const char kInfoHashLabel[] = "info_hash";
uint8_t info_hash[EVP_MAX_MD_SIZE];
size_t info_hash_len;
if (!hpke_labeled_extract(hpke->hkdf_md, info_hash, &info_hash_len, NULL, 0,
suite_id, sizeof(suite_id), kInfoHashLabel, info,
if (!hpke_labeled_extract(hkdf_md, info_hash, &info_hash_len, NULL, 0,
suite_id, sizeof(suite_id), "info_hash", info,
info_len)) {
return 0;
}
@ -209,7 +394,7 @@ static int hpke_key_schedule(EVP_HPKE_CTX *hpke, uint8_t mode,
size_t context_len;
CBB context_cbb;
if (!CBB_init_fixed(&context_cbb, context, sizeof(context)) ||
!CBB_add_u8(&context_cbb, mode) ||
!CBB_add_u8(&context_cbb, HPKE_MODE_BASE) ||
!CBB_add_bytes(&context_cbb, psk_id_hash, psk_id_hash_len) ||
!CBB_add_bytes(&context_cbb, info_hash, info_hash_len) ||
!CBB_finish(&context_cbb, NULL, &context_len)) {
@ -217,97 +402,44 @@ static int hpke_key_schedule(EVP_HPKE_CTX *hpke, uint8_t mode,
}
// secret = LabeledExtract(shared_secret, "secret", psk)
static const char kSecretExtractLabel[] = "secret";
uint8_t secret[EVP_MAX_MD_SIZE];
size_t secret_len;
if (!hpke_labeled_extract(hpke->hkdf_md, secret, &secret_len, shared_secret,
if (!hpke_labeled_extract(hkdf_md, secret, &secret_len, shared_secret,
shared_secret_len, suite_id, sizeof(suite_id),
kSecretExtractLabel, psk, psk_len)) {
"secret", NULL, 0)) {
return 0;
}
// key = LabeledExpand(secret, "key", key_schedule_context, Nk)
static const char kKeyExpandLabel[] = "key";
const EVP_AEAD *aead = EVP_HPKE_AEAD_aead(ctx->aead);
uint8_t key[EVP_AEAD_MAX_KEY_LENGTH];
const size_t kKeyLen = EVP_AEAD_key_length(aead);
if (!hpke_labeled_expand(hpke->hkdf_md, key, kKeyLen, secret, secret_len,
suite_id, sizeof(suite_id), kKeyExpandLabel, context,
context_len)) {
return 0;
}
// Initialize the HPKE context's AEAD context, storing a copy of |key|.
if (!EVP_AEAD_CTX_init(&hpke->aead_ctx, aead, key, kKeyLen, 0, NULL)) {
if (!hpke_labeled_expand(hkdf_md, key, kKeyLen, secret, secret_len, suite_id,
sizeof(suite_id), "key", context, context_len) ||
!EVP_AEAD_CTX_init(&ctx->aead_ctx, aead, key, kKeyLen,
EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
return 0;
}
// base_nonce = LabeledExpand(secret, "base_nonce", key_schedule_context, Nn)
static const char kNonceExpandLabel[] = "base_nonce";
if (!hpke_labeled_expand(hpke->hkdf_md, hpke->base_nonce,
if (!hpke_labeled_expand(hkdf_md, ctx->base_nonce,
EVP_AEAD_nonce_length(aead), secret, secret_len,
suite_id, sizeof(suite_id), kNonceExpandLabel,
context, context_len)) {
suite_id, sizeof(suite_id), "base_nonce", context,
context_len)) {
return 0;
}
// exporter_secret = LabeledExpand(secret, "exp", key_schedule_context, Nh)
static const char kExporterSecretExpandLabel[] = "exp";
if (!hpke_labeled_expand(hpke->hkdf_md, hpke->exporter_secret,
EVP_MD_size(hpke->hkdf_md), secret, secret_len,
suite_id, sizeof(suite_id),
kExporterSecretExpandLabel, context, context_len)) {
if (!hpke_labeled_expand(hkdf_md, ctx->exporter_secret, EVP_MD_size(hkdf_md),
secret, secret_len, suite_id, sizeof(suite_id),
"exp", context, context_len)) {
return 0;
}
return 1;
}
// The number of bytes written to |out_shared_secret| is the size of the KEM's
// KDF (currently we only support SHA256).
static int hpke_encap(EVP_HPKE_CTX *hpke,
uint8_t out_shared_secret[SHA256_DIGEST_LENGTH],
const uint8_t public_key_r[X25519_PUBLIC_VALUE_LEN],
const uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN],
const uint8_t ephemeral_public[X25519_PUBLIC_VALUE_LEN]) {
uint8_t dh[X25519_PUBLIC_VALUE_LEN];
if (!X25519(dh, ephemeral_private, public_key_r)) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
return 0;
}
uint8_t kem_context[KEM_CONTEXT_LEN];
OPENSSL_memcpy(kem_context, ephemeral_public, X25519_PUBLIC_VALUE_LEN);
OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, public_key_r,
X25519_PUBLIC_VALUE_LEN);
if (!hpke_extract_and_expand(EVP_sha256(), out_shared_secret,
SHA256_DIGEST_LENGTH, dh, kem_context)) {
return 0;
}
return 1;
}
static int hpke_decap(const EVP_HPKE_CTX *hpke,
uint8_t out_shared_secret[SHA256_DIGEST_LENGTH],
const uint8_t enc[X25519_PUBLIC_VALUE_LEN],
const uint8_t public_key_r[X25519_PUBLIC_VALUE_LEN],
const uint8_t secret_key_r[X25519_PRIVATE_KEY_LEN]) {
uint8_t dh[X25519_PUBLIC_VALUE_LEN];
if (!X25519(dh, secret_key_r, enc)) {
OPENSSL_PUT_ERROR(EVP, EVP_R_INVALID_PEER_KEY);
return 0;
}
uint8_t kem_context[KEM_CONTEXT_LEN];
OPENSSL_memcpy(kem_context, enc, X25519_PUBLIC_VALUE_LEN);
OPENSSL_memcpy(kem_context + X25519_PUBLIC_VALUE_LEN, public_key_r,
X25519_PUBLIC_VALUE_LEN);
if (!hpke_extract_and_expand(EVP_sha256(), out_shared_secret,
SHA256_DIGEST_LENGTH, dh, kem_context)) {
return 0;
}
return 1;
}
void EVP_HPKE_CTX_init(EVP_HPKE_CTX *ctx) {
void EVP_HPKE_CTX_zero(EVP_HPKE_CTX *ctx) {
OPENSSL_memset(ctx, 0, sizeof(EVP_HPKE_CTX));
EVP_AEAD_CTX_zero(&ctx->aead_ctx);
}
@ -316,217 +448,171 @@ void EVP_HPKE_CTX_cleanup(EVP_HPKE_CTX *ctx) {
EVP_AEAD_CTX_cleanup(&ctx->aead_ctx);
}
int EVP_HPKE_CTX_setup_base_s_x25519(
EVP_HPKE_CTX *hpke, uint8_t out_enc[X25519_PUBLIC_VALUE_LEN],
uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len) {
// The GenerateKeyPair() step technically belongs in the KEM's Encap()
// function, but we've moved it up a layer to make it easier for tests to
// inject an ephemeral keypair.
uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN];
X25519_keypair(out_enc, ephemeral_private);
return EVP_HPKE_CTX_setup_base_s_x25519_for_test(
hpke, kdf_id, aead_id, peer_public_value, info, info_len,
ephemeral_private, out_enc);
EVP_HPKE_CTX *EVP_HPKE_CTX_new(void) {
EVP_HPKE_CTX *ctx = OPENSSL_malloc(sizeof(EVP_HPKE_CTX));
if (ctx == NULL) {
OPENSSL_PUT_ERROR(EVP, ERR_R_MALLOC_FAILURE);
return NULL;
}
EVP_HPKE_CTX_zero(ctx);
return ctx;
}
int EVP_HPKE_CTX_setup_base_s_x25519_for_test(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len,
const uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN],
const uint8_t ephemeral_public[X25519_PUBLIC_VALUE_LEN]) {
hpke->is_sender = 1;
hpke->kdf_id = kdf_id;
hpke->aead_id = aead_id;
hpke->hkdf_md = EVP_HPKE_get_hkdf_md(kdf_id);
if (hpke->hkdf_md == NULL) {
return 0;
void EVP_HPKE_CTX_free(EVP_HPKE_CTX *ctx) {
if (ctx != NULL) {
EVP_HPKE_CTX_cleanup(ctx);
OPENSSL_free(ctx);
}
uint8_t shared_secret[SHA256_DIGEST_LENGTH];
if (!hpke_encap(hpke, shared_secret, peer_public_value, ephemeral_private,
ephemeral_public) ||
!hpke_key_schedule(hpke, HPKE_MODE_BASE, shared_secret,
sizeof(shared_secret), info, info_len, NULL, 0, NULL,
0)) {
}
int EVP_HPKE_CTX_setup_sender(EVP_HPKE_CTX *ctx, uint8_t *out_enc,
size_t *out_enc_len, size_t max_enc,
const EVP_HPKE_KEM *kem, const EVP_HPKE_KDF *kdf,
const EVP_HPKE_AEAD *aead,
const uint8_t *peer_public_key,
size_t peer_public_key_len, const uint8_t *info,
size_t info_len) {
uint8_t seed[MAX_SEED_LEN];
RAND_bytes(seed, kem->seed_len);
return EVP_HPKE_CTX_setup_sender_with_seed_for_testing(
ctx, out_enc, out_enc_len, max_enc, kem, kdf, aead, peer_public_key,
peer_public_key_len, info, info_len, seed, kem->seed_len);
}
int EVP_HPKE_CTX_setup_sender_with_seed_for_testing(
EVP_HPKE_CTX *ctx, uint8_t *out_enc, size_t *out_enc_len, size_t max_enc,
const EVP_HPKE_KEM *kem, const EVP_HPKE_KDF *kdf, const EVP_HPKE_AEAD *aead,
const uint8_t *peer_public_key, size_t peer_public_key_len,
const uint8_t *info, size_t info_len, const uint8_t *seed,
size_t seed_len) {
EVP_HPKE_CTX_zero(ctx);
ctx->is_sender = 1;
ctx->kdf = kdf;
ctx->aead = aead;
uint8_t shared_secret[MAX_SHARED_SECRET_LEN];
size_t shared_secret_len;
if (!kem->encap_with_seed(kem, shared_secret, &shared_secret_len, out_enc,
out_enc_len, max_enc, peer_public_key,
peer_public_key_len, seed, seed_len) ||
!hpke_key_schedule(ctx, shared_secret, shared_secret_len, info,
info_len)) {
EVP_HPKE_CTX_cleanup(ctx);
return 0;
}
return 1;
}
int EVP_HPKE_CTX_setup_base_r_x25519(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t enc[X25519_PUBLIC_VALUE_LEN],
const uint8_t public_key[X25519_PUBLIC_VALUE_LEN],
const uint8_t private_key[X25519_PRIVATE_KEY_LEN], const uint8_t *info,
size_t info_len) {
hpke->is_sender = 0;
hpke->kdf_id = kdf_id;
hpke->aead_id = aead_id;
hpke->hkdf_md = EVP_HPKE_get_hkdf_md(kdf_id);
if (hpke->hkdf_md == NULL) {
return 0;
}
uint8_t shared_secret[SHA256_DIGEST_LENGTH];
if (!hpke_decap(hpke, shared_secret, enc, public_key, private_key) ||
!hpke_key_schedule(hpke, HPKE_MODE_BASE, shared_secret,
sizeof(shared_secret), info, info_len, NULL, 0, NULL,
0)) {
int EVP_HPKE_CTX_setup_recipient(EVP_HPKE_CTX *ctx, const EVP_HPKE_KEY *key,
const EVP_HPKE_KDF *kdf,
const EVP_HPKE_AEAD *aead, const uint8_t *enc,
size_t enc_len, const uint8_t *info,
size_t info_len) {
EVP_HPKE_CTX_zero(ctx);
ctx->is_sender = 0;
ctx->kdf = kdf;
ctx->aead = aead;
uint8_t shared_secret[MAX_SHARED_SECRET_LEN];
size_t shared_secret_len;
if (!key->kem->decap(key, shared_secret, &shared_secret_len, enc, enc_len) ||
!hpke_key_schedule(ctx, shared_secret, sizeof(shared_secret), info,
info_len)) {
EVP_HPKE_CTX_cleanup(ctx);
return 0;
}
return 1;
}
int EVP_HPKE_CTX_setup_psk_s_x25519(
EVP_HPKE_CTX *hpke, uint8_t out_enc[X25519_PUBLIC_VALUE_LEN],
uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len, const uint8_t *psk, size_t psk_len,
const uint8_t *psk_id, size_t psk_id_len) {
// The GenerateKeyPair() step technically belongs in the KEM's Encap()
// function, but we've moved it up a layer to make it easier for tests to
// inject an ephemeral keypair.
uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN];
X25519_keypair(out_enc, ephemeral_private);
return EVP_HPKE_CTX_setup_psk_s_x25519_for_test(
hpke, kdf_id, aead_id, peer_public_value, info, info_len, psk, psk_len,
psk_id, psk_id_len, ephemeral_private, out_enc);
}
int EVP_HPKE_CTX_setup_psk_s_x25519_for_test(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len, const uint8_t *psk, size_t psk_len,
const uint8_t *psk_id, size_t psk_id_len,
const uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN],
const uint8_t ephemeral_public[X25519_PUBLIC_VALUE_LEN]) {
hpke->is_sender = 1;
hpke->kdf_id = kdf_id;
hpke->aead_id = aead_id;
hpke->hkdf_md = EVP_HPKE_get_hkdf_md(kdf_id);
if (hpke->hkdf_md == NULL) {
return 0;
}
uint8_t shared_secret[SHA256_DIGEST_LENGTH];
if (!hpke_encap(hpke, shared_secret, peer_public_value, ephemeral_private,
ephemeral_public) ||
!hpke_key_schedule(hpke, HPKE_MODE_PSK, shared_secret,
sizeof(shared_secret), info, info_len, psk, psk_len,
psk_id, psk_id_len)) {
return 0;
}
return 1;
}
int EVP_HPKE_CTX_setup_psk_r_x25519(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t enc[X25519_PUBLIC_VALUE_LEN],
const uint8_t public_key[X25519_PUBLIC_VALUE_LEN],
const uint8_t private_key[X25519_PRIVATE_KEY_LEN], const uint8_t *info,
size_t info_len, const uint8_t *psk, size_t psk_len, const uint8_t *psk_id,
size_t psk_id_len) {
hpke->is_sender = 0;
hpke->kdf_id = kdf_id;
hpke->aead_id = aead_id;
hpke->hkdf_md = EVP_HPKE_get_hkdf_md(kdf_id);
if (hpke->hkdf_md == NULL) {
return 0;
}
uint8_t shared_secret[SHA256_DIGEST_LENGTH];
if (!hpke_decap(hpke, shared_secret, enc, public_key, private_key) ||
!hpke_key_schedule(hpke, HPKE_MODE_PSK, shared_secret,
sizeof(shared_secret), info, info_len, psk, psk_len,
psk_id, psk_id_len)) {
return 0;
}
return 1;
}
static void hpke_nonce(const EVP_HPKE_CTX *hpke, uint8_t *out_nonce,
static void hpke_nonce(const EVP_HPKE_CTX *ctx, uint8_t *out_nonce,
size_t nonce_len) {
assert(nonce_len >= 8);
// Write padded big-endian bytes of |hpke->seq| to |out_nonce|.
// Write padded big-endian bytes of |ctx->seq| to |out_nonce|.
OPENSSL_memset(out_nonce, 0, nonce_len);
uint64_t seq_copy = hpke->seq;
uint64_t seq_copy = ctx->seq;
for (size_t i = 0; i < 8; i++) {
out_nonce[nonce_len - i - 1] = seq_copy & 0xff;
seq_copy >>= 8;
}
// XOR the encoded sequence with the |hpke->base_nonce|.
// XOR the encoded sequence with the |ctx->base_nonce|.
for (size_t i = 0; i < nonce_len; i++) {
out_nonce[i] ^= hpke->base_nonce[i];
out_nonce[i] ^= ctx->base_nonce[i];
}
}
size_t EVP_HPKE_CTX_max_overhead(const EVP_HPKE_CTX *hpke) {
assert(hpke->is_sender);
return EVP_AEAD_max_overhead(hpke->aead_ctx.aead);
}
int EVP_HPKE_CTX_open(EVP_HPKE_CTX *hpke, uint8_t *out, size_t *out_len,
int EVP_HPKE_CTX_open(EVP_HPKE_CTX *ctx, uint8_t *out, size_t *out_len,
size_t max_out_len, const uint8_t *in, size_t in_len,
const uint8_t *ad, size_t ad_len) {
if (hpke->is_sender) {
if (ctx->is_sender) {
OPENSSL_PUT_ERROR(EVP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
return 0;
}
if (hpke->seq == UINT64_MAX) {
if (ctx->seq == UINT64_MAX) {
OPENSSL_PUT_ERROR(EVP, ERR_R_OVERFLOW);
return 0;
}
uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH];
const size_t nonce_len = EVP_AEAD_nonce_length(hpke->aead_ctx.aead);
hpke_nonce(hpke, nonce, nonce_len);
const size_t nonce_len = EVP_AEAD_nonce_length(ctx->aead_ctx.aead);
hpke_nonce(ctx, nonce, nonce_len);
if (!EVP_AEAD_CTX_open(&hpke->aead_ctx, out, out_len, max_out_len, nonce,
if (!EVP_AEAD_CTX_open(&ctx->aead_ctx, out, out_len, max_out_len, nonce,
nonce_len, in, in_len, ad, ad_len)) {
return 0;
}
hpke->seq++;
ctx->seq++;
return 1;
}
int EVP_HPKE_CTX_seal(EVP_HPKE_CTX *hpke, uint8_t *out, size_t *out_len,
int EVP_HPKE_CTX_seal(EVP_HPKE_CTX *ctx, uint8_t *out, size_t *out_len,
size_t max_out_len, const uint8_t *in, size_t in_len,
const uint8_t *ad, size_t ad_len) {
if (!hpke->is_sender) {
if (!ctx->is_sender) {
OPENSSL_PUT_ERROR(EVP, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
return 0;
}
if (hpke->seq == UINT64_MAX) {
if (ctx->seq == UINT64_MAX) {
OPENSSL_PUT_ERROR(EVP, ERR_R_OVERFLOW);
return 0;
}
uint8_t nonce[EVP_AEAD_MAX_NONCE_LENGTH];
const size_t nonce_len = EVP_AEAD_nonce_length(hpke->aead_ctx.aead);
hpke_nonce(hpke, nonce, nonce_len);
const size_t nonce_len = EVP_AEAD_nonce_length(ctx->aead_ctx.aead);
hpke_nonce(ctx, nonce, nonce_len);
if (!EVP_AEAD_CTX_seal(&hpke->aead_ctx, out, out_len, max_out_len, nonce,
if (!EVP_AEAD_CTX_seal(&ctx->aead_ctx, out, out_len, max_out_len, nonce,
nonce_len, in, in_len, ad, ad_len)) {
return 0;
}
hpke->seq++;
ctx->seq++;
return 1;
}
int EVP_HPKE_CTX_export(const EVP_HPKE_CTX *hpke, uint8_t *out,
int EVP_HPKE_CTX_export(const EVP_HPKE_CTX *ctx, uint8_t *out,
size_t secret_len, const uint8_t *context,
size_t context_len) {
uint8_t suite_id[HPKE_SUITE_ID_LEN];
if (!hpke_build_suite_id(suite_id, hpke->kdf_id, hpke->aead_id)) {
if (!hpke_build_suite_id(ctx, suite_id)) {
return 0;
}
static const char kExportExpandLabel[] = "sec";
if (!hpke_labeled_expand(hpke->hkdf_md, out, secret_len,
hpke->exporter_secret, EVP_MD_size(hpke->hkdf_md),
suite_id, sizeof(suite_id), kExportExpandLabel,
context, context_len)) {
const EVP_MD *hkdf_md = ctx->kdf->hkdf_md_func();
if (!hpke_labeled_expand(hkdf_md, out, secret_len, ctx->exporter_secret,
EVP_MD_size(hkdf_md), suite_id, sizeof(suite_id),
"sec", context, context_len)) {
return 0;
}
return 1;
}
size_t EVP_HPKE_CTX_max_overhead(const EVP_HPKE_CTX *ctx) {
assert(ctx->is_sender);
return EVP_AEAD_max_overhead(EVP_AEAD_CTX_aead(&ctx->aead_ctx));
}
const EVP_HPKE_AEAD *EVP_HPKE_CTX_aead(const EVP_HPKE_CTX *ctx) {
return ctx->aead;
}
const EVP_HPKE_KDF *EVP_HPKE_CTX_kdf(const EVP_HPKE_CTX *ctx) {
return ctx->kdf;
}

View File

@ -1,246 +0,0 @@
/* Copyright (c) 2020, Google Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
#ifndef OPENSSL_HEADER_CRYPTO_HPKE_INTERNAL_H
#define OPENSSL_HEADER_CRYPTO_HPKE_INTERNAL_H
#include <CCryptoBoringSSL_aead.h>
#include <CCryptoBoringSSL_base.h>
#include <CCryptoBoringSSL_curve25519.h>
#include <CCryptoBoringSSL_digest.h>
#if defined(__cplusplus)
extern "C" {
#endif
// Hybrid Public Key Encryption.
//
// Hybrid Public Key Encryption (HPKE) enables a sender to encrypt messages to a
// receiver with a public key. Optionally, the sender may authenticate its
// possession of a pre-shared key to the recipient.
//
// See https://tools.ietf.org/html/draft-irtf-cfrg-hpke-07.
// EVP_HPKE_AEAD_* are AEAD identifiers.
#define EVP_HPKE_AEAD_AES_GCM_128 0x0001
#define EVP_HPKE_AEAD_AES_GCM_256 0x0002
#define EVP_HPKE_AEAD_CHACHA20POLY1305 0x0003
// EVP_HPKE_HKDF_* are HKDF identifiers.
#define EVP_HPKE_HKDF_SHA256 0x0001
#define EVP_HPKE_HKDF_SHA384 0x0002
#define EVP_HPKE_HKDF_SHA512 0x0003
// EVP_HPKE_MAX_OVERHEAD contains the largest value that
// |EVP_HPKE_CTX_max_overhead| would ever return for any context.
#define EVP_HPKE_MAX_OVERHEAD EVP_AEAD_MAX_OVERHEAD
// Encryption contexts.
// An |EVP_HPKE_CTX| is an HPKE encryption context.
typedef struct evp_hpke_ctx_st {
const EVP_MD *hkdf_md;
EVP_AEAD_CTX aead_ctx;
uint16_t kdf_id;
uint16_t aead_id;
uint8_t base_nonce[EVP_AEAD_MAX_NONCE_LENGTH];
uint8_t exporter_secret[EVP_MAX_MD_SIZE];
uint64_t seq;
int is_sender;
} EVP_HPKE_CTX;
// EVP_HPKE_CTX_init initializes an already-allocated |EVP_HPKE_CTX|. The caller
// should then use one of the |EVP_HPKE_CTX_setup_*| functions.
//
// It is safe, but not necessary to call |EVP_HPKE_CTX_cleanup| in this state.
OPENSSL_EXPORT void EVP_HPKE_CTX_init(EVP_HPKE_CTX *ctx);
// EVP_HPKE_CTX_cleanup releases memory referenced by |ctx|. |ctx| must have
// been initialized with |EVP_HPKE_CTX_init|.
OPENSSL_EXPORT void EVP_HPKE_CTX_cleanup(EVP_HPKE_CTX *ctx);
// Setting up HPKE contexts.
//
// In each of the following functions, |hpke| must have been initialized with
// |EVP_HPKE_CTX_init|. |kdf_id| selects the KDF for non-KEM HPKE operations and
// must be one of the |EVP_HPKE_HKDF_*| constants. |aead_id| selects the AEAD
// for the "open" and "seal" operations and must be one of the |EVP_HPKE_AEAD_*|
// constants.
// EVP_HPKE_CTX_setup_base_s_x25519 sets up |hpke| as a sender context that can
// encrypt for the private key corresponding to |peer_public_value| (the
// recipient's public key). It returns one on success, and zero otherwise. Note
// that this function will fail if |peer_public_value| is invalid.
//
// This function writes the encapsulated shared secret to |out_enc|.
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_base_s_x25519(
EVP_HPKE_CTX *hpke, uint8_t out_enc[X25519_PUBLIC_VALUE_LEN],
uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len);
// EVP_HPKE_CTX_setup_base_s_x25519_for_test behaves like
// |EVP_HPKE_CTX_setup_base_s_x25519|, but takes a pre-generated ephemeral
// sender key.
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_base_s_x25519_for_test(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len,
const uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN],
const uint8_t ephemeral_public[X25519_PUBLIC_VALUE_LEN]);
// EVP_HPKE_CTX_setup_base_r_x25519 sets up |hpke| as a recipient context that
// can decrypt messages. |private_key| is the recipient's private key, and |enc|
// is the encapsulated shared secret from the sender. Note that this function
// will fail if |enc| is invalid.
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_base_r_x25519(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t enc[X25519_PUBLIC_VALUE_LEN],
const uint8_t public_key[X25519_PUBLIC_VALUE_LEN],
const uint8_t private_key[X25519_PRIVATE_KEY_LEN], const uint8_t *info,
size_t info_len);
// EVP_HPKE_CTX_setup_psk_s_x25519 sets up |hpke| as a sender context that can
// encrypt for the private key corresponding to |peer_public_value| (the
// recipient's public key) and authenticate its possession of a PSK. It returns
// one on success, and zero otherwise. Note that this function will fail if
// |peer_public_value| is invalid.
//
// The PSK and its ID must be provided in |psk| and |psk_id|, respectively. Both
// must be nonempty (|psk_len| and |psk_id_len| must be non-zero), or this
// function will fail.
//
// This function writes the encapsulated shared secret to |out_enc|.
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_psk_s_x25519(
EVP_HPKE_CTX *hpke, uint8_t out_enc[X25519_PUBLIC_VALUE_LEN],
uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len, const uint8_t *psk, size_t psk_len,
const uint8_t *psk_id, size_t psk_id_len);
// EVP_HPKE_CTX_setup_psk_s_x25519_for_test behaves like
// |EVP_HPKE_CTX_setup_psk_s_x25519|, but takes a pre-generated ephemeral sender
// key.
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_psk_s_x25519_for_test(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t peer_public_value[X25519_PUBLIC_VALUE_LEN],
const uint8_t *info, size_t info_len, const uint8_t *psk, size_t psk_len,
const uint8_t *psk_id, size_t psk_id_len,
const uint8_t ephemeral_private[X25519_PRIVATE_KEY_LEN],
const uint8_t ephemeral_public[X25519_PUBLIC_VALUE_LEN]);
// EVP_HPKE_CTX_setup_psk_r_x25519 sets up |hpke| as a recipient context that
// can decrypt messages. Future open (decrypt) operations will fail if the
// sender does not possess the PSK indicated by |psk| and |psk_id|.
// |private_key| is the recipient's private key, and |enc| is the encapsulated
// shared secret from the sender. If |enc| is invalid, this function will fail.
//
// The PSK and its ID must be provided in |psk| and |psk_id|, respectively. Both
// must be nonempty (|psk_len| and |psk_id_len| must be non-zero), or this
// function will fail.
OPENSSL_EXPORT int EVP_HPKE_CTX_setup_psk_r_x25519(
EVP_HPKE_CTX *hpke, uint16_t kdf_id, uint16_t aead_id,
const uint8_t enc[X25519_PUBLIC_VALUE_LEN],
const uint8_t public_key[X25519_PUBLIC_VALUE_LEN],
const uint8_t private_key[X25519_PRIVATE_KEY_LEN], const uint8_t *info,
size_t info_len, const uint8_t *psk, size_t psk_len, const uint8_t *psk_id,
size_t psk_id_len);
// Using an HPKE context.
// EVP_HPKE_CTX_open uses the HPKE context |hpke| to authenticate |in_len| bytes
// from |in| and |ad_len| bytes from |ad| and to decrypt at most |in_len| bytes
// into |out|. It returns one on success, and zero otherwise.
//
// This operation will fail if the |hpke| context is not set up as a receiver.
//
// Note that HPKE encryption is stateful and ordered. The sender's first call to
// |EVP_HPKE_CTX_seal| must correspond to the recipient's first call to
// |EVP_HPKE_CTX_open|, etc.
//
// At most |in_len| bytes are written to |out|. In order to ensure success,
// |max_out_len| should be at least |in_len|. On successful return, |*out_len|
// is set to the actual number of bytes written.
OPENSSL_EXPORT int EVP_HPKE_CTX_open(EVP_HPKE_CTX *hpke, uint8_t *out,
size_t *out_len, size_t max_out_len,
const uint8_t *in, size_t in_len,
const uint8_t *ad, size_t ad_len);
// EVP_HPKE_CTX_seal uses the HPKE context |hpke| to encrypt and authenticate
// |in_len| bytes of plaintext |in| and authenticate |ad_len| bytes from |ad|,
// writing the result to |out|. It returns one on success and zero otherwise.
//
// This operation will fail if the |hpke| context is not set up as a sender.
//
// Note that HPKE encryption is stateful and ordered. The sender's first call to
// |EVP_HPKE_CTX_seal| must correspond to the recipient's first call to
// |EVP_HPKE_CTX_open|, etc.
//
// At most, |max_out_len| encrypted bytes are written to |out|. On successful
// return, |*out_len| is set to the actual number of bytes written.
//
// To ensure success, |max_out_len| should be |in_len| plus the result of
// |EVP_HPKE_CTX_max_overhead| or |EVP_HPKE_MAX_OVERHEAD|.
OPENSSL_EXPORT int EVP_HPKE_CTX_seal(EVP_HPKE_CTX *hpke, uint8_t *out,
size_t *out_len, size_t max_out_len,
const uint8_t *in, size_t in_len,
const uint8_t *ad, size_t ad_len);
// EVP_HPKE_CTX_export uses the HPKE context |hpke| to export a secret of
// |secret_len| bytes into |out|. This function uses |context_len| bytes from
// |context| as a context string for the secret. This is necessary to separate
// different uses of exported secrets and bind relevant caller-specific context
// into the output. It returns one on success and zero otherwise.
OPENSSL_EXPORT int EVP_HPKE_CTX_export(const EVP_HPKE_CTX *hpke, uint8_t *out,
size_t secret_len,
const uint8_t *context,
size_t context_len);
// EVP_HPKE_CTX_max_overhead returns the maximum number of additional bytes
// added by sealing data with |EVP_HPKE_CTX_seal|. The |hpke| context must be
// set up as a sender.
OPENSSL_EXPORT size_t EVP_HPKE_CTX_max_overhead(const EVP_HPKE_CTX *hpke);
// EVP_HPKE_get_aead returns the AEAD corresponding to |aead_id|, or NULL if
// |aead_id| is not a known AEAD identifier.
OPENSSL_EXPORT const EVP_AEAD *EVP_HPKE_get_aead(uint16_t aead_id);
// EVP_HPKE_get_hkdf_md returns the hash function associated with |kdf_id|, or
// NULL if |kdf_id| is not a known KDF identifier that uses HKDF.
OPENSSL_EXPORT const EVP_MD *EVP_HPKE_get_hkdf_md(uint16_t kdf_id);
#if defined(__cplusplus)
} // extern C
#endif
#if !defined(BORINGSSL_NO_CXX)
extern "C++" {
BSSL_NAMESPACE_BEGIN
using ScopedEVP_HPKE_CTX =
internal::StackAllocated<EVP_HPKE_CTX, void, EVP_HPKE_CTX_init,
EVP_HPKE_CTX_cleanup>;
BSSL_NAMESPACE_END
} // extern C++
#endif
#endif // OPENSSL_HEADER_CRYPTO_HPKE_INTERNAL_H

File diff suppressed because it is too large Load Diff

View File

@ -23,6 +23,7 @@
#include <CCryptoBoringSSL_cpu.h>
#include <CCryptoBoringSSL_hmac.h>
#include <CCryptoBoringSSL_mem.h>
#include <CCryptoBoringSSL_rand.h>
#include <CCryptoBoringSSL_sha.h>
#if defined(_MSC_VER)
@ -940,6 +941,34 @@ OPENSSL_UNUSED static void poly_print(const struct poly *p) {
printf("]\n");
}
// POLY_MUL_SCRATCH contains space for the working variables needed by
// |poly_mul|. The contents afterwards may be discarded, but the object may also
// be reused with future |poly_mul| calls to save heap allocations.
//
// This object must have 32-byte alignment.
struct POLY_MUL_SCRATCH {
union {
// This is used by |poly_mul_novec|.
struct {
uint16_t prod[2 * N];
uint16_t scratch[1318];
} novec;
#if defined(HRSS_HAVE_VECTOR_UNIT)
// This is used by |poly_mul_vec|.
struct {
vec_t prod[VECS_PER_POLY * 2];
vec_t scratch[172];
} vec;
#endif
#if defined(POLY_RQ_MUL_ASM)
// This is the space used by |poly_Rq_mul|.
uint8_t rq[POLY_MUL_RQ_SCRATCH_SPACE];
#endif
} u;
};
#if defined(HRSS_HAVE_VECTOR_UNIT)
// poly_mul_vec_aux is a recursive function that multiplies |n| words from |a|
@ -1185,8 +1214,8 @@ static void poly_mul_vec_aux(vec_t *restrict out, vec_t *restrict scratch,
}
// poly_mul_vec sets |*out| to |x|×|y| mod (𝑥^n - 1).
static void poly_mul_vec(struct poly *out, const struct poly *x,
const struct poly *y) {
static void poly_mul_vec(struct POLY_MUL_SCRATCH *scratch, struct poly *out,
const struct poly *x, const struct poly *y) {
OPENSSL_memset((uint16_t *)&x->v[N], 0, 3 * sizeof(uint16_t));
OPENSSL_memset((uint16_t *)&y->v[N], 0, 3 * sizeof(uint16_t));
@ -1195,9 +1224,9 @@ static void poly_mul_vec(struct poly *out, const struct poly *x,
OPENSSL_STATIC_ASSERT(alignof(struct poly) == alignof(vec_t),
"struct poly has incorrect alignment");
vec_t prod[VECS_PER_POLY * 2];
vec_t scratch[172];
poly_mul_vec_aux(prod, scratch, x->vectors, y->vectors, VECS_PER_POLY);
vec_t *const prod = scratch->u.vec.prod;
vec_t *const aux_scratch = scratch->u.vec.scratch;
poly_mul_vec_aux(prod, aux_scratch, x->vectors, y->vectors, VECS_PER_POLY);
// |prod| needs to be reduced mod (𝑥^n - 1), which just involves adding the
// upper-half to the lower-half. However, N is 701, which isn't a multiple of
@ -1274,11 +1303,11 @@ static void poly_mul_novec_aux(uint16_t *out, uint16_t *scratch,
}
// poly_mul_novec sets |*out| to |x|×|y| mod (𝑥^n - 1).
static void poly_mul_novec(struct poly *out, const struct poly *x,
const struct poly *y) {
uint16_t prod[2 * N];
uint16_t scratch[1318];
poly_mul_novec_aux(prod, scratch, x->v, y->v, N);
static void poly_mul_novec(struct POLY_MUL_SCRATCH *scratch, struct poly *out,
const struct poly *x, const struct poly *y) {
uint16_t *const prod = scratch->u.novec.prod;
uint16_t *const aux_scratch = scratch->u.novec.scratch;
poly_mul_novec_aux(prod, aux_scratch, x->v, y->v, N);
for (size_t i = 0; i < N; i++) {
out->v[i] = prod[i] + prod[i + N];
@ -1286,25 +1315,25 @@ static void poly_mul_novec(struct poly *out, const struct poly *x,
OPENSSL_memset(&out->v[N], 0, 3 * sizeof(uint16_t));
}
static void poly_mul(struct poly *r, const struct poly *a,
const struct poly *b) {
static void poly_mul(struct POLY_MUL_SCRATCH *scratch, struct poly *r,
const struct poly *a, const struct poly *b) {
#if defined(POLY_RQ_MUL_ASM)
const int has_avx2 = (OPENSSL_ia32cap_P[2] & (1 << 5)) != 0;
if (has_avx2) {
poly_Rq_mul(r->v, a->v, b->v);
poly_Rq_mul(r->v, a->v, b->v, scratch->u.rq);
return;
}
#endif
#if defined(HRSS_HAVE_VECTOR_UNIT)
if (vec_capable()) {
poly_mul_vec(r, a, b);
poly_mul_vec(scratch, r, a, b);
return;
}
#endif
// Fallback, non-vector case.
poly_mul_novec(r, a, b);
poly_mul_novec(scratch, r, a, b);
}
// poly_mul_x_minus_1 sets |p| to |p|×(𝑥 - 1) mod (𝑥^n - 1).
@ -1549,7 +1578,8 @@ static void poly_invert_mod2(struct poly *out, const struct poly *in) {
}
// poly_invert sets |*out| to |in^-1| (i.e. such that |*out|×|in| = 1 mod Φ(N)).
static void poly_invert(struct poly *out, const struct poly *in) {
static void poly_invert(struct POLY_MUL_SCRATCH *scratch, struct poly *out,
const struct poly *in) {
// Inversion mod Q, which is done based on the result of inverting mod
// 2. See [NTRUTN14] paper, bottom of page two.
struct poly a, *b, tmp;
@ -1566,9 +1596,9 @@ static void poly_invert(struct poly *out, const struct poly *in) {
// We are working mod Q=2**13 and we need to iterate ceil(log_2(13))
// times, which is four.
for (unsigned i = 0; i < 4; i++) {
poly_mul(&tmp, &a, b);
poly_mul(scratch, &tmp, &a, b);
tmp.v[0] += 2;
poly_mul(b, b, &tmp);
poly_mul(scratch, b, b, &tmp);
}
}
@ -1872,9 +1902,7 @@ static struct public_key *public_key_from_external(
sizeof(struct HRSS_public_key) >= sizeof(struct public_key) + 15,
"HRSS public key too small");
uintptr_t p = (uintptr_t)ext;
p = (p + 15) & ~15;
return (struct public_key *)p;
return align_pointer(ext->opaque, 16);
}
// private_key_from_external does the same thing as |public_key_from_external|,
@ -1886,151 +1914,219 @@ static struct private_key *private_key_from_external(
sizeof(struct HRSS_private_key) >= sizeof(struct private_key) + 15,
"HRSS private key too small");
uintptr_t p = (uintptr_t)ext;
p = (p + 15) & ~15;
return (struct private_key *)p;
return align_pointer(ext->opaque, 16);
}
void HRSS_generate_key(
// malloc_align32 returns a pointer to |size| bytes of 32-byte-aligned heap and
// sets |*out_ptr| to a value that can be passed to |OPENSSL_free| to release
// it. It returns NULL if out of memory.
static void *malloc_align32(void **out_ptr, size_t size) {
void *ptr = OPENSSL_malloc(size + 31);
if (!ptr) {
*out_ptr = NULL;
return NULL;
}
*out_ptr = ptr;
return align_pointer(ptr, 32);
}
int HRSS_generate_key(
struct HRSS_public_key *out_pub, struct HRSS_private_key *out_priv,
const uint8_t in[HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES + 32]) {
struct public_key *pub = public_key_from_external(out_pub);
struct private_key *priv = private_key_from_external(out_priv);
struct vars {
struct POLY_MUL_SCRATCH scratch;
struct poly f;
struct poly pg_phi1;
struct poly pfg_phi1;
struct poly pfg_phi1_inverse;
};
void *malloc_ptr;
struct vars *const vars = malloc_align32(&malloc_ptr, sizeof(struct vars));
if (!vars) {
// If the caller ignores the return value the output will still be safe.
// The private key output is randomised in case it's later passed to
// |HRSS_encap|.
memset(out_pub, 0, sizeof(struct HRSS_public_key));
RAND_bytes((uint8_t*) out_priv, sizeof(struct HRSS_private_key));
return 0;
}
OPENSSL_memcpy(priv->hmac_key, in + 2 * HRSS_SAMPLE_BYTES,
sizeof(priv->hmac_key));
struct poly f;
poly_short_sample_plus(&f, in);
poly3_from_poly(&priv->f, &f);
poly_short_sample_plus(&vars->f, in);
poly3_from_poly(&priv->f, &vars->f);
HRSS_poly3_invert(&priv->f_inverse, &priv->f);
// pg_phi1 is p (i.e. 3) × g × Φ(1) (i.e. 𝑥-1).
struct poly pg_phi1;
poly_short_sample_plus(&pg_phi1, in + HRSS_SAMPLE_BYTES);
poly_short_sample_plus(&vars->pg_phi1, in + HRSS_SAMPLE_BYTES);
for (unsigned i = 0; i < N; i++) {
pg_phi1.v[i] *= 3;
vars->pg_phi1.v[i] *= 3;
}
poly_mul_x_minus_1(&pg_phi1);
poly_mul_x_minus_1(&vars->pg_phi1);
struct poly pfg_phi1;
poly_mul(&pfg_phi1, &f, &pg_phi1);
poly_mul(&vars->scratch, &vars->pfg_phi1, &vars->f, &vars->pg_phi1);
struct poly pfg_phi1_inverse;
poly_invert(&pfg_phi1_inverse, &pfg_phi1);
poly_invert(&vars->scratch, &vars->pfg_phi1_inverse, &vars->pfg_phi1);
poly_mul(&pub->ph, &pfg_phi1_inverse, &pg_phi1);
poly_mul(&pub->ph, &pub->ph, &pg_phi1);
poly_mul(&vars->scratch, &pub->ph, &vars->pfg_phi1_inverse, &vars->pg_phi1);
poly_mul(&vars->scratch, &pub->ph, &pub->ph, &vars->pg_phi1);
poly_clamp(&pub->ph);
poly_mul(&priv->ph_inverse, &pfg_phi1_inverse, &f);
poly_mul(&priv->ph_inverse, &priv->ph_inverse, &f);
poly_mul(&vars->scratch, &priv->ph_inverse, &vars->pfg_phi1_inverse,
&vars->f);
poly_mul(&vars->scratch, &priv->ph_inverse, &priv->ph_inverse, &vars->f);
poly_clamp(&priv->ph_inverse);
OPENSSL_free(malloc_ptr);
return 1;
}
static const char kSharedKey[] = "shared key";
void HRSS_encap(uint8_t out_ciphertext[POLY_BYTES],
uint8_t out_shared_key[32],
const struct HRSS_public_key *in_pub,
const uint8_t in[HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES]) {
int HRSS_encap(uint8_t out_ciphertext[POLY_BYTES], uint8_t out_shared_key[32],
const struct HRSS_public_key *in_pub,
const uint8_t in[HRSS_SAMPLE_BYTES + HRSS_SAMPLE_BYTES]) {
const struct public_key *pub =
public_key_from_external((struct HRSS_public_key *)in_pub);
struct poly m, r, m_lifted;
poly_short_sample(&m, in);
poly_short_sample(&r, in + HRSS_SAMPLE_BYTES);
poly_lift(&m_lifted, &m);
struct poly prh_plus_m;
poly_mul(&prh_plus_m, &r, &pub->ph);
for (unsigned i = 0; i < N; i++) {
prh_plus_m.v[i] += m_lifted.v[i];
struct vars {
struct POLY_MUL_SCRATCH scratch;
struct poly m, r, m_lifted;
struct poly prh_plus_m;
SHA256_CTX hash_ctx;
uint8_t m_bytes[HRSS_POLY3_BYTES];
uint8_t r_bytes[HRSS_POLY3_BYTES];
};
void *malloc_ptr;
struct vars *const vars = malloc_align32(&malloc_ptr, sizeof(struct vars));
if (!vars) {
// If the caller ignores the return value the output will still be safe.
// The shared-key output is randomised in case it's used to encrypt and
// transmit something.
memset(out_ciphertext, 0, POLY_BYTES);
RAND_bytes(out_shared_key, 32);
return 0;
}
poly_marshal(out_ciphertext, &prh_plus_m);
poly_short_sample(&vars->m, in);
poly_short_sample(&vars->r, in + HRSS_SAMPLE_BYTES);
poly_lift(&vars->m_lifted, &vars->m);
uint8_t m_bytes[HRSS_POLY3_BYTES], r_bytes[HRSS_POLY3_BYTES];
poly_marshal_mod3(m_bytes, &m);
poly_marshal_mod3(r_bytes, &r);
poly_mul(&vars->scratch, &vars->prh_plus_m, &vars->r, &pub->ph);
for (unsigned i = 0; i < N; i++) {
vars->prh_plus_m.v[i] += vars->m_lifted.v[i];
}
SHA256_CTX hash_ctx;
SHA256_Init(&hash_ctx);
SHA256_Update(&hash_ctx, kSharedKey, sizeof(kSharedKey));
SHA256_Update(&hash_ctx, m_bytes, sizeof(m_bytes));
SHA256_Update(&hash_ctx, r_bytes, sizeof(r_bytes));
SHA256_Update(&hash_ctx, out_ciphertext, POLY_BYTES);
SHA256_Final(out_shared_key, &hash_ctx);
poly_marshal(out_ciphertext, &vars->prh_plus_m);
poly_marshal_mod3(vars->m_bytes, &vars->m);
poly_marshal_mod3(vars->r_bytes, &vars->r);
SHA256_Init(&vars->hash_ctx);
SHA256_Update(&vars->hash_ctx, kSharedKey, sizeof(kSharedKey));
SHA256_Update(&vars->hash_ctx, vars->m_bytes, sizeof(vars->m_bytes));
SHA256_Update(&vars->hash_ctx, vars->r_bytes, sizeof(vars->r_bytes));
SHA256_Update(&vars->hash_ctx, out_ciphertext, POLY_BYTES);
SHA256_Final(out_shared_key, &vars->hash_ctx);
OPENSSL_free(malloc_ptr);
return 1;
}
void HRSS_decap(uint8_t out_shared_key[HRSS_KEY_BYTES],
int HRSS_decap(uint8_t out_shared_key[HRSS_KEY_BYTES],
const struct HRSS_private_key *in_priv,
const uint8_t *ciphertext, size_t ciphertext_len) {
const struct private_key *priv =
private_key_from_external((struct HRSS_private_key *)in_priv);
struct vars {
struct POLY_MUL_SCRATCH scratch;
uint8_t masked_key[SHA256_CBLOCK];
SHA256_CTX hash_ctx;
struct poly c;
struct poly f, cf;
struct poly3 cf3, m3;
struct poly m, m_lifted;
struct poly r;
struct poly3 r3;
uint8_t expected_ciphertext[HRSS_CIPHERTEXT_BYTES];
uint8_t m_bytes[HRSS_POLY3_BYTES];
uint8_t r_bytes[HRSS_POLY3_BYTES];
uint8_t shared_key[32];
};
void *malloc_ptr;
struct vars *const vars = malloc_align32(&malloc_ptr, sizeof(struct vars));
if (!vars) {
// If the caller ignores the return value the output will still be safe.
// The shared-key output is randomised in case it's used to encrypt and
// transmit something.
RAND_bytes(out_shared_key, HRSS_KEY_BYTES);
return 0;
}
// This is HMAC, expanded inline rather than using the |HMAC| function so that
// we can avoid dealing with possible allocation failures and so keep this
// function infallible.
uint8_t masked_key[SHA256_CBLOCK];
OPENSSL_STATIC_ASSERT(sizeof(priv->hmac_key) <= sizeof(masked_key),
OPENSSL_STATIC_ASSERT(sizeof(priv->hmac_key) <= sizeof(vars->masked_key),
"HRSS HMAC key larger than SHA-256 block size");
for (size_t i = 0; i < sizeof(priv->hmac_key); i++) {
masked_key[i] = priv->hmac_key[i] ^ 0x36;
vars->masked_key[i] = priv->hmac_key[i] ^ 0x36;
}
OPENSSL_memset(masked_key + sizeof(priv->hmac_key), 0x36,
sizeof(masked_key) - sizeof(priv->hmac_key));
OPENSSL_memset(vars->masked_key + sizeof(priv->hmac_key), 0x36,
sizeof(vars->masked_key) - sizeof(priv->hmac_key));
SHA256_CTX hash_ctx;
SHA256_Init(&hash_ctx);
SHA256_Update(&hash_ctx, masked_key, sizeof(masked_key));
SHA256_Update(&hash_ctx, ciphertext, ciphertext_len);
SHA256_Init(&vars->hash_ctx);
SHA256_Update(&vars->hash_ctx, vars->masked_key, sizeof(vars->masked_key));
SHA256_Update(&vars->hash_ctx, ciphertext, ciphertext_len);
uint8_t inner_digest[SHA256_DIGEST_LENGTH];
SHA256_Final(inner_digest, &hash_ctx);
SHA256_Final(inner_digest, &vars->hash_ctx);
for (size_t i = 0; i < sizeof(priv->hmac_key); i++) {
masked_key[i] ^= (0x5c ^ 0x36);
vars->masked_key[i] ^= (0x5c ^ 0x36);
}
OPENSSL_memset(masked_key + sizeof(priv->hmac_key), 0x5c,
sizeof(masked_key) - sizeof(priv->hmac_key));
OPENSSL_memset(vars->masked_key + sizeof(priv->hmac_key), 0x5c,
sizeof(vars->masked_key) - sizeof(priv->hmac_key));
SHA256_Init(&hash_ctx);
SHA256_Update(&hash_ctx, masked_key, sizeof(masked_key));
SHA256_Update(&hash_ctx, inner_digest, sizeof(inner_digest));
SHA256_Init(&vars->hash_ctx);
SHA256_Update(&vars->hash_ctx, vars->masked_key, sizeof(vars->masked_key));
SHA256_Update(&vars->hash_ctx, inner_digest, sizeof(inner_digest));
OPENSSL_STATIC_ASSERT(HRSS_KEY_BYTES == SHA256_DIGEST_LENGTH,
"HRSS shared key length incorrect");
SHA256_Final(out_shared_key, &hash_ctx);
SHA256_Final(out_shared_key, &vars->hash_ctx);
struct poly c;
// If the ciphertext is publicly invalid then a random shared key is still
// returned to simplify the logic of the caller, but this path is not constant
// time.
if (ciphertext_len != HRSS_CIPHERTEXT_BYTES ||
!poly_unmarshal(&c, ciphertext)) {
return;
!poly_unmarshal(&vars->c, ciphertext)) {
goto out;
}
struct poly f, cf;
struct poly3 cf3, m3;
poly_from_poly3(&f, &priv->f);
poly_mul(&cf, &c, &f);
poly3_from_poly(&cf3, &cf);
poly_from_poly3(&vars->f, &priv->f);
poly_mul(&vars->scratch, &vars->cf, &vars->c, &vars->f);
poly3_from_poly(&vars->cf3, &vars->cf);
// Note that cf3 is not reduced mod Φ(N). That reduction is deferred.
HRSS_poly3_mul(&m3, &cf3, &priv->f_inverse);
HRSS_poly3_mul(&vars->m3, &vars->cf3, &priv->f_inverse);
struct poly m, m_lifted;
poly_from_poly3(&m, &m3);
poly_lift(&m_lifted, &m);
poly_from_poly3(&vars->m, &vars->m3);
poly_lift(&vars->m_lifted, &vars->m);
struct poly r;
for (unsigned i = 0; i < N; i++) {
r.v[i] = c.v[i] - m_lifted.v[i];
vars->r.v[i] = vars->c.v[i] - vars->m_lifted.v[i];
}
poly_mul(&r, &r, &priv->ph_inverse);
poly_mod_phiN(&r);
poly_clamp(&r);
poly_mul(&vars->scratch, &vars->r, &vars->r, &priv->ph_inverse);
poly_mod_phiN(&vars->r);
poly_clamp(&vars->r);
struct poly3 r3;
crypto_word_t ok = poly3_from_poly_checked(&r3, &r);
crypto_word_t ok = poly3_from_poly_checked(&vars->r3, &vars->r);
// [NTRUCOMP] section 5.1 includes ReEnc2 and a proof that it's valid. Rather
// than do an expensive |poly_mul|, it rebuilds |c'| from |c - lift(m)|
@ -2055,32 +2151,34 @@ void HRSS_decap(uint8_t out_shared_key[HRSS_KEY_BYTES],
// The |poly_marshal| here then is just confirming that |poly_unmarshal| is
// strict and could be omitted.
uint8_t expected_ciphertext[HRSS_CIPHERTEXT_BYTES];
OPENSSL_STATIC_ASSERT(HRSS_CIPHERTEXT_BYTES == POLY_BYTES,
"ciphertext is the wrong size");
assert(ciphertext_len == sizeof(expected_ciphertext));
poly_marshal(expected_ciphertext, &c);
assert(ciphertext_len == sizeof(vars->expected_ciphertext));
poly_marshal(vars->expected_ciphertext, &vars->c);
uint8_t m_bytes[HRSS_POLY3_BYTES];
uint8_t r_bytes[HRSS_POLY3_BYTES];
poly_marshal_mod3(m_bytes, &m);
poly_marshal_mod3(r_bytes, &r);
poly_marshal_mod3(vars->m_bytes, &vars->m);
poly_marshal_mod3(vars->r_bytes, &vars->r);
ok &= constant_time_is_zero_w(CRYPTO_memcmp(ciphertext, expected_ciphertext,
sizeof(expected_ciphertext)));
ok &= constant_time_is_zero_w(
CRYPTO_memcmp(ciphertext, vars->expected_ciphertext,
sizeof(vars->expected_ciphertext)));
uint8_t shared_key[32];
SHA256_Init(&hash_ctx);
SHA256_Update(&hash_ctx, kSharedKey, sizeof(kSharedKey));
SHA256_Update(&hash_ctx, m_bytes, sizeof(m_bytes));
SHA256_Update(&hash_ctx, r_bytes, sizeof(r_bytes));
SHA256_Update(&hash_ctx, expected_ciphertext, sizeof(expected_ciphertext));
SHA256_Final(shared_key, &hash_ctx);
SHA256_Init(&vars->hash_ctx);
SHA256_Update(&vars->hash_ctx, kSharedKey, sizeof(kSharedKey));
SHA256_Update(&vars->hash_ctx, vars->m_bytes, sizeof(vars->m_bytes));
SHA256_Update(&vars->hash_ctx, vars->r_bytes, sizeof(vars->r_bytes));
SHA256_Update(&vars->hash_ctx, vars->expected_ciphertext,
sizeof(vars->expected_ciphertext));
SHA256_Final(vars->shared_key, &vars->hash_ctx);
for (unsigned i = 0; i < sizeof(shared_key); i++) {
for (unsigned i = 0; i < sizeof(vars->shared_key); i++) {
out_shared_key[i] =
constant_time_select_8(ok, shared_key[i], out_shared_key[i]);
constant_time_select_8(ok, vars->shared_key[i], out_shared_key[i]);
}
out:
OPENSSL_free(malloc_ptr);
return 1;
}
void HRSS_marshal_public_key(uint8_t out[HRSS_PUBLIC_KEY_BYTES],

View File

@ -47,10 +47,17 @@ OPENSSL_EXPORT void HRSS_poly3_invert(struct poly3 *out,
#if !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_SMALL) && \
defined(OPENSSL_X86_64) && defined(OPENSSL_LINUX)
#define POLY_RQ_MUL_ASM
// POLY_MUL_RQ_SCRATCH_SPACE is the number of bytes of scratch space needed
// by the assembly function poly_Rq_mul.
#define POLY_MUL_RQ_SCRATCH_SPACE (6144 + 6144 + 12288 + 512 + 9408 + 32)
// poly_Rq_mul is defined in assembly. Inputs and outputs must be 16-byte-
// aligned.
extern void poly_Rq_mul(uint16_t r[N + 3], const uint16_t a[N + 3],
const uint16_t b[N + 3]);
extern void poly_Rq_mul(
uint16_t r[N + 3], const uint16_t a[N + 3], const uint16_t b[N + 3],
// The following should be `scratch[POLY_MUL_RQ_SCRATCH_SPACE]` but
// GCC 11.1 has a bug with unions that breaks that.
uint8_t scratch[]);
#endif

View File

@ -109,6 +109,7 @@
#ifndef OPENSSL_HEADER_CRYPTO_INTERNAL_H
#define OPENSSL_HEADER_CRYPTO_INTERNAL_H
#include <CCryptoBoringSSL_crypto.h>
#include <CCryptoBoringSSL_ex_data.h>
#include <CCryptoBoringSSL_stack.h>
#include <CCryptoBoringSSL_thread.h>
@ -208,6 +209,9 @@ typedef __uint128_t uint128_t;
#define OPENSSL_SSE2
#endif
// Pointer utility functions.
// buffers_alias returns one if |a| and |b| alias and zero otherwise.
static inline int buffers_alias(const uint8_t *a, size_t a_len,
const uint8_t *b, size_t b_len) {
@ -220,6 +224,23 @@ static inline int buffers_alias(const uint8_t *a, size_t a_len,
return a_u + a_len > b_u && b_u + b_len > a_u;
}
// align_pointer returns |ptr|, advanced to |alignment|. |alignment| must be a
// power of two, and |ptr| must have at least |alignment - 1| bytes of scratch
// space.
static inline void *align_pointer(void *ptr, size_t alignment) {
// |alignment| must be a power of two.
assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
// Instead of aligning |ptr| as a |uintptr_t| and casting back, compute the
// offset and advance in pointer space. C guarantees that casting from pointer
// to |uintptr_t| and back gives the same pointer, but general
// integer-to-pointer conversions are implementation-defined. GCC does define
// it in the useful way, but this makes fewer assumptions.
uintptr_t offset = (0u - (uintptr_t)ptr) & (alignment - 1);
ptr = (char *)ptr + offset;
assert(((uintptr_t)ptr & (alignment - 1)) == 0);
return ptr;
}
// Constant-time utility functions.
//
@ -470,6 +491,13 @@ OPENSSL_EXPORT void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void));
// Reference counting.
// Automatically enable C11 atomics if implemented.
#if !defined(OPENSSL_C11_ATOMIC) && defined(OPENSSL_THREADS) && \
!defined(__STDC_NO_ATOMICS__) && defined(__STDC_VERSION__) && \
__STDC_VERSION__ >= 201112L
#define OPENSSL_C11_ATOMIC
#endif
// CRYPTO_REFCOUNT_MAX is the value at which the reference count saturates.
#define CRYPTO_REFCOUNT_MAX 0xffffffff
@ -607,6 +635,7 @@ BSSL_NAMESPACE_END
typedef enum {
OPENSSL_THREAD_LOCAL_ERR = 0,
OPENSSL_THREAD_LOCAL_RAND,
OPENSSL_THREAD_LOCAL_FIPS_COUNTERS,
OPENSSL_THREAD_LOCAL_TEST,
NUM_OPENSSL_THREAD_LOCALS,
} thread_local_data_t;
@ -811,6 +840,58 @@ static inline void *OPENSSL_memset(void *dst, int c, size_t n) {
return memset(dst, c, n);
}
// Loads and stores.
//
// The following functions load and store sized integers with the specified
// endianness. They use |memcpy|, and so avoid alignment or strict aliasing
// requirements on the input and output pointers.
static inline uint32_t CRYPTO_load_u32_le(const void *in) {
uint32_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return v;
}
static inline void CRYPTO_store_u32_le(void *out, uint32_t v) {
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline uint32_t CRYPTO_load_u32_be(const void *in) {
uint32_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return CRYPTO_bswap4(v);
}
static inline void CRYPTO_store_u32_be(void *out, uint32_t v) {
v = CRYPTO_bswap4(v);
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline uint64_t CRYPTO_load_u64_be(const void *ptr) {
uint64_t ret;
OPENSSL_memcpy(&ret, ptr, sizeof(ret));
return CRYPTO_bswap8(ret);
}
static inline void CRYPTO_store_u64_be(void *out, uint64_t v) {
v = CRYPTO_bswap8(v);
OPENSSL_memcpy(out, &v, sizeof(v));
}
static inline crypto_word_t CRYPTO_load_word_le(const void *in) {
crypto_word_t v;
OPENSSL_memcpy(&v, in, sizeof(v));
return v;
}
static inline void CRYPTO_store_word_le(void *out, crypto_word_t v) {
OPENSSL_memcpy(out, &v, sizeof(v));
}
// FIPS functions.
#if defined(BORINGSSL_FIPS)
// BORINGSSL_FIPS_abort is called when a FIPS power-on or continuous test
// fails. It prevents any further cryptographic operations by the current
@ -826,6 +907,11 @@ void BORINGSSL_FIPS_abort(void) __attribute__((noreturn));
int boringssl_fips_self_test(const uint8_t *module_hash,
size_t module_hash_len);
#if defined(BORINGSSL_FIPS_COUNTERS)
void boringssl_fips_inc_counter(enum fips_counter_t counter);
#else
OPENSSL_INLINE void boringssl_fips_inc_counter(enum fips_counter_t counter) {}
#endif
#if defined(__cplusplus)
} // extern C

View File

@ -0,0 +1,253 @@
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.] */
#ifndef OPENSSL_HEADER_LHASH_INTERNAL_H
#define OPENSSL_HEADER_LHASH_INTERNAL_H
#include <CCryptoBoringSSL_lhash.h>
#if defined(__cplusplus)
extern "C" {
#endif
// lhash is a traditional, chaining hash table that automatically expands and
// contracts as needed. One should not use the lh_* functions directly, rather
// use the type-safe macro wrappers:
//
// A hash table of a specific type of object has type |LHASH_OF(type)|. This
// can be defined (once) with |DEFINE_LHASH_OF(type)| and declared where needed
// with |DECLARE_LHASH_OF(type)|. For example:
//
// struct foo {
// int bar;
// };
//
// DEFINE_LHASH_OF(struct foo)
//
// Although note that the hash table will contain /pointers/ to |foo|.
//
// A macro will be defined for each of the |OPENSSL_lh_*| functions below. For
// |LHASH_OF(foo)|, the macros would be |lh_foo_new|, |lh_foo_num_items| etc.
// lhash_cmp_func is a comparison function that returns a value equal, or not
// equal, to zero depending on whether |*a| is equal, or not equal to |*b|,
// respectively. Note the difference between this and |stack_cmp_func| in that
// this takes pointers to the objects directly.
//
// This function's actual type signature is int (*)(const T*, const T*). The
// low-level |lh_*| functions will be passed a type-specific wrapper to call it
// correctly.
typedef int (*lhash_cmp_func)(const void *a, const void *b);
typedef int (*lhash_cmp_func_helper)(lhash_cmp_func func, const void *a,
const void *b);
// lhash_hash_func is a function that maps an object to a uniformly distributed
// uint32_t.
//
// This function's actual type signature is uint32_t (*)(const T*). The
// low-level |lh_*| functions will be passed a type-specific wrapper to call it
// correctly.
typedef uint32_t (*lhash_hash_func)(const void *a);
typedef uint32_t (*lhash_hash_func_helper)(lhash_hash_func func, const void *a);

// _LHASH is an untyped hash table holding |void *| elements. The type-safe
// wrappers generated by |DEFINE_LHASH_OF| (below) are built on these
// primitives.
typedef struct lhash_st _LHASH;

// OPENSSL_lh_new returns a new, empty hash table or NULL on error.
OPENSSL_EXPORT _LHASH *OPENSSL_lh_new(lhash_hash_func hash,
                                      lhash_cmp_func comp);

// OPENSSL_lh_free frees the hash table itself but none of the elements. See
// |OPENSSL_lh_doall_arg| for visiting (and, e.g., freeing) the elements first.
OPENSSL_EXPORT void OPENSSL_lh_free(_LHASH *lh);

// OPENSSL_lh_num_items returns the number of items in |lh|.
OPENSSL_EXPORT size_t OPENSSL_lh_num_items(const _LHASH *lh);

// OPENSSL_lh_retrieve finds an element equal to |data| in the hash table and
// returns it. If no such element exists, it returns NULL.
OPENSSL_EXPORT void *OPENSSL_lh_retrieve(const _LHASH *lh, const void *data,
                                         lhash_hash_func_helper call_hash_func,
                                         lhash_cmp_func_helper call_cmp_func);

// OPENSSL_lh_retrieve_key finds an element matching |key|, given the specified
// hash and comparison function. This differs from |OPENSSL_lh_retrieve| in that
// the key may be a different type than the values stored in |lh|. |key_hash|
// and |cmp_key| must be compatible with the functions passed into
// |OPENSSL_lh_new|.
OPENSSL_EXPORT void *OPENSSL_lh_retrieve_key(const _LHASH *lh, const void *key,
                                             uint32_t key_hash,
                                             int (*cmp_key)(const void *key,
                                                            const void *value));

// OPENSSL_lh_insert inserts |data| into the hash table. If an existing element
// is equal to |data| (with respect to the comparison function) then |*old_data|
// will be set to that value and it will be replaced. Otherwise, or in the
// event of an error, |*old_data| will be set to NULL. It returns one on
// success or zero in the case of an allocation error.
OPENSSL_EXPORT int OPENSSL_lh_insert(_LHASH *lh, void **old_data, void *data,
                                     lhash_hash_func_helper call_hash_func,
                                     lhash_cmp_func_helper call_cmp_func);

// OPENSSL_lh_delete removes an element equal to |data| from the hash table and
// returns it. If no such element is found, it returns NULL. The caller retains
// ownership of the returned element; |lh| no longer references it.
OPENSSL_EXPORT void *OPENSSL_lh_delete(_LHASH *lh, const void *data,
                                       lhash_hash_func_helper call_hash_func,
                                       lhash_cmp_func_helper call_cmp_func);

// OPENSSL_lh_doall_arg calls |func| on each element of the hash table and also
// passes |arg| as the second argument.
// TODO(fork): rename this
OPENSSL_EXPORT void OPENSSL_lh_doall_arg(_LHASH *lh,
                                         void (*func)(void *, void *),
                                         void *arg);
#define DEFINE_LHASH_OF(type) \
DECLARE_LHASH_OF(type) \
\
typedef int (*lhash_##type##_cmp_func)(const type *, const type *); \
typedef uint32_t (*lhash_##type##_hash_func)(const type *); \
\
OPENSSL_INLINE int lh_##type##_call_cmp_func(lhash_cmp_func func, \
const void *a, const void *b) { \
return ((lhash_##type##_cmp_func)func)((const type *)a, (const type *)b); \
} \
\
OPENSSL_INLINE uint32_t lh_##type##_call_hash_func(lhash_hash_func func, \
const void *a) { \
return ((lhash_##type##_hash_func)func)((const type *)a); \
} \
\
OPENSSL_INLINE LHASH_OF(type) *lh_##type##_new( \
lhash_##type##_hash_func hash, lhash_##type##_cmp_func comp) { \
return (LHASH_OF(type) *)OPENSSL_lh_new((lhash_hash_func)hash, \
(lhash_cmp_func)comp); \
} \
\
OPENSSL_INLINE void lh_##type##_free(LHASH_OF(type) *lh) { \
OPENSSL_lh_free((_LHASH *)lh); \
} \
\
OPENSSL_INLINE size_t lh_##type##_num_items(const LHASH_OF(type) *lh) { \
return OPENSSL_lh_num_items((const _LHASH *)lh); \
} \
\
OPENSSL_INLINE type *lh_##type##_retrieve(const LHASH_OF(type) *lh, \
const type *data) { \
return (type *)OPENSSL_lh_retrieve((const _LHASH *)lh, data, \
lh_##type##_call_hash_func, \
lh_##type##_call_cmp_func); \
} \
\
typedef struct { \
int (*cmp_key)(const void *key, const type *value); \
const void *key; \
} LHASH_CMP_KEY_##type; \
\
OPENSSL_INLINE int lh_##type##_call_cmp_key(const void *key, \
const void *value) { \
const LHASH_CMP_KEY_##type *cb = (const LHASH_CMP_KEY_##type *)key; \
return cb->cmp_key(cb->key, (const type *)value); \
} \
\
OPENSSL_INLINE type *lh_##type##_retrieve_key( \
const LHASH_OF(type) *lh, const void *key, uint32_t key_hash, \
int (*cmp_key)(const void *key, const type *value)) { \
LHASH_CMP_KEY_##type cb = {cmp_key, key}; \
return (type *)OPENSSL_lh_retrieve_key((const _LHASH *)lh, &cb, key_hash, \
lh_##type##_call_cmp_key); \
} \
\
OPENSSL_INLINE int lh_##type##_insert(LHASH_OF(type) *lh, type **old_data, \
type *data) { \
void *old_data_void = NULL; \
int ret = OPENSSL_lh_insert((_LHASH *)lh, &old_data_void, data, \
lh_##type##_call_hash_func, \
lh_##type##_call_cmp_func); \
*old_data = (type *)old_data_void; \
return ret; \
} \
\
OPENSSL_INLINE type *lh_##type##_delete(LHASH_OF(type) *lh, \
const type *data) { \
return (type *)OPENSSL_lh_delete((_LHASH *)lh, data, \
lh_##type##_call_hash_func, \
lh_##type##_call_cmp_func); \
} \
\
typedef struct { \
void (*doall_arg)(type *, void *); \
void *arg; \
} LHASH_DOALL_##type; \
\
OPENSSL_INLINE void lh_##type##_call_doall_arg(void *value, void *arg) { \
const LHASH_DOALL_##type *cb = (const LHASH_DOALL_##type *)arg; \
cb->doall_arg((type *)value, cb->arg); \
} \
\
OPENSSL_INLINE void lh_##type##_doall_arg( \
LHASH_OF(type) *lh, void (*func)(type *, void *), void *arg) { \
LHASH_DOALL_##type cb = {func, arg}; \
OPENSSL_lh_doall_arg((_LHASH *)lh, lh_##type##_call_doall_arg, &cb); \
}
#if defined(__cplusplus)
} // extern C
#endif
#endif // OPENSSL_HEADER_LHASH_INTERNAL_H

View File

@ -62,6 +62,7 @@
#include <CCryptoBoringSSL_mem.h>
#include "internal.h"
#include "../internal.h"
@ -73,6 +74,16 @@ static const size_t kMinNumBuckets = 16;
static const size_t kMaxAverageChainLength = 2;
static const size_t kMinAverageChainLength = 1;
// lhash_item_st is an element of a hash chain. It points to the opaque data
// for this element and to the next item in the chain. The linked-list is NULL
// terminated.
typedef struct lhash_item_st {
void *data;
struct lhash_item_st *next;
// hash contains the cached, hash value of |data|.
uint32_t hash;
} LHASH_ITEM;
struct lhash_st {
// num_items contains the total number of items in the hash table.
size_t num_items;
@ -92,7 +103,7 @@ struct lhash_st {
lhash_hash_func hash;
};
_LHASH *lh_new(lhash_hash_func hash, lhash_cmp_func comp) {
_LHASH *OPENSSL_lh_new(lhash_hash_func hash, lhash_cmp_func comp) {
_LHASH *ret = OPENSSL_malloc(sizeof(_LHASH));
if (ret == NULL) {
return NULL;
@ -112,7 +123,7 @@ _LHASH *lh_new(lhash_hash_func hash, lhash_cmp_func comp) {
return ret;
}
void lh_free(_LHASH *lh) {
void OPENSSL_lh_free(_LHASH *lh) {
if (lh == NULL) {
return;
}
@ -129,7 +140,7 @@ void lh_free(_LHASH *lh) {
OPENSSL_free(lh);
}
size_t lh_num_items(const _LHASH *lh) { return lh->num_items; }
size_t OPENSSL_lh_num_items(const _LHASH *lh) { return lh->num_items; }
// get_next_ptr_and_hash returns a pointer to the pointer that points to the
// item equal to |data|. In other words, it searches for an item equal to |data|
@ -175,16 +186,18 @@ static LHASH_ITEM **get_next_ptr_by_key(const _LHASH *lh, const void *key,
return ret;
}
void *lh_retrieve(const _LHASH *lh, const void *data,
lhash_hash_func_helper call_hash_func,
lhash_cmp_func_helper call_cmp_func) {
void *OPENSSL_lh_retrieve(const _LHASH *lh, const void *data,
lhash_hash_func_helper call_hash_func,
lhash_cmp_func_helper call_cmp_func) {
LHASH_ITEM **next_ptr =
get_next_ptr_and_hash(lh, NULL, data, call_hash_func, call_cmp_func);
return *next_ptr == NULL ? NULL : (*next_ptr)->data;
}
void *lh_retrieve_key(const _LHASH *lh, const void *key, uint32_t key_hash,
int (*cmp_key)(const void *key, const void *value)) {
void *OPENSSL_lh_retrieve_key(const _LHASH *lh, const void *key,
uint32_t key_hash,
int (*cmp_key)(const void *key,
const void *value)) {
LHASH_ITEM **next_ptr = get_next_ptr_by_key(lh, key, key_hash, cmp_key);
return *next_ptr == NULL ? NULL : (*next_ptr)->data;
}
@ -252,9 +265,9 @@ static void lh_maybe_resize(_LHASH *lh) {
}
}
int lh_insert(_LHASH *lh, void **old_data, void *data,
lhash_hash_func_helper call_hash_func,
lhash_cmp_func_helper call_cmp_func) {
int OPENSSL_lh_insert(_LHASH *lh, void **old_data, void *data,
lhash_hash_func_helper call_hash_func,
lhash_cmp_func_helper call_cmp_func) {
uint32_t hash;
LHASH_ITEM **next_ptr, *item;
@ -287,9 +300,9 @@ int lh_insert(_LHASH *lh, void **old_data, void *data,
return 1;
}
void *lh_delete(_LHASH *lh, const void *data,
lhash_hash_func_helper call_hash_func,
lhash_cmp_func_helper call_cmp_func) {
void *OPENSSL_lh_delete(_LHASH *lh, const void *data,
lhash_hash_func_helper call_hash_func,
lhash_cmp_func_helper call_cmp_func) {
LHASH_ITEM **next_ptr, *item, *ret;
next_ptr =
@ -311,7 +324,7 @@ void *lh_delete(_LHASH *lh, const void *data,
return ret;
}
void lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg) {
void OPENSSL_lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg) {
if (lh == NULL) {
return;
}
@ -338,11 +351,3 @@ void lh_doall_arg(_LHASH *lh, void (*func)(void *, void *), void *arg) {
// resizing is done here.
lh_maybe_resize(lh);
}
uint32_t lh_strhash(const char *c) {
if (c == NULL) {
return 0;
}
return OPENSSL_hash32(c, strlen(c));
}

View File

@ -107,6 +107,20 @@ WEAK_SYMBOL_FUNC(void, sdallocx, (void *ptr, size_t size, int flags));
// allocation and freeing. If defined, it is the responsibility of
// |OPENSSL_memory_free| to zero out the memory before returning it to the
// system. |OPENSSL_memory_free| will not be passed NULL pointers.
//
// WARNING: These functions are called on every allocation and free in
// BoringSSL across the entire process. They may be called by any code in the
// process which calls BoringSSL, including in process initializers and thread
// destructors. When called, BoringSSL may hold pthreads locks. Any other code
// in the process which, directly or indirectly, calls BoringSSL may be on the
// call stack and may itself be using arbitrary synchronization primitives.
//
// As a result, these functions may not have the usual programming environment
// available to most C or C++ code. In particular, they may not call into
// BoringSSL, or any library which depends on BoringSSL. Any synchronization
// primitives used must tolerate every other synchronization primitive linked
// into the process, including pthreads locks. Failing to meet these constraints
// may result in deadlocks, crashes, or memory corruption.
WEAK_SYMBOL_FUNC(void*, OPENSSL_memory_alloc, (size_t size));
WEAK_SYMBOL_FUNC(void, OPENSSL_memory_free, (void *ptr));
WEAK_SYMBOL_FUNC(size_t, OPENSSL_memory_get_size, (void *ptr));
@ -233,6 +247,8 @@ uint32_t OPENSSL_hash32(const void *ptr, size_t len) {
return h;
}
uint32_t OPENSSL_strhash(const char *s) { return OPENSSL_hash32(s, strlen(s)); }
size_t OPENSSL_strnlen(const char *s, size_t len) {
for (size_t i = 0; i < len; i++) {
if (s[i] == 0) {

View File

@ -67,8 +67,12 @@
#include <CCryptoBoringSSL_mem.h>
#include <CCryptoBoringSSL_thread.h>
#include "obj_dat.h"
#include "../asn1/internal.h"
#include "../internal.h"
#include "../lhash/internal.h"
// obj_data.h must be included after the definition of |ASN1_OBJECT|.
#include "obj_dat.h"
DEFINE_LHASH_OF(ASN1_OBJECT)
@ -338,12 +342,12 @@ OPENSSL_EXPORT int OBJ_nid2cbb(CBB *out, int nid) {
return 1;
}
const ASN1_OBJECT *OBJ_nid2obj(int nid) {
ASN1_OBJECT *OBJ_nid2obj(int nid) {
if (nid >= 0 && nid < NUM_NID) {
if (nid != NID_undef && kObjects[nid].nid == NID_undef) {
goto err;
}
return &kObjects[nid];
return (ASN1_OBJECT *)&kObjects[nid];
}
CRYPTO_STATIC_MUTEX_lock_read(&global_added_lock);
@ -411,7 +415,7 @@ ASN1_OBJECT *OBJ_txt2obj(const char *s, int dont_search_names) {
}
if (nid != NID_undef) {
return (ASN1_OBJECT*) OBJ_nid2obj(nid);
return OBJ_nid2obj(nid);
}
}
@ -484,7 +488,7 @@ static int cmp_data(const ASN1_OBJECT *a, const ASN1_OBJECT *b) {
}
static uint32_t hash_short_name(const ASN1_OBJECT *obj) {
return lh_strhash(obj->sn);
return OPENSSL_strhash(obj->sn);
}
static int cmp_short_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) {
@ -492,7 +496,7 @@ static int cmp_short_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) {
}
static uint32_t hash_long_name(const ASN1_OBJECT *obj) {
return lh_strhash(obj->ln);
return OPENSSL_strhash(obj->ln);
}
static int cmp_long_name(const ASN1_OBJECT *a, const ASN1_OBJECT *b) {

View File

@ -157,8 +157,6 @@ RSA *PEM_read_bio_RSAPrivateKey(BIO *bp, RSA **rsa, pem_password_cb *cb,
return pkey_get_rsa(pktmp, rsa);
}
#ifndef OPENSSL_NO_FP_API
RSA *PEM_read_RSAPrivateKey(FILE *fp, RSA **rsa, pem_password_cb *cb, void *u)
{
EVP_PKEY *pktmp;
@ -166,8 +164,6 @@ RSA *PEM_read_RSAPrivateKey(FILE *fp, RSA **rsa, pem_password_cb *cb, void *u)
return pkey_get_rsa(pktmp, rsa);
}
#endif
IMPLEMENT_PEM_write_cb_const(RSAPrivateKey, RSA, PEM_STRING_RSA,
RSAPrivateKey)
@ -205,7 +201,6 @@ IMPLEMENT_PEM_write_cb_const(DSAPrivateKey, DSA, PEM_STRING_DSA,
DSAPrivateKey)
IMPLEMENT_PEM_rw(DSA_PUBKEY, DSA, PEM_STRING_PUBLIC, DSA_PUBKEY)
# ifndef OPENSSL_NO_FP_API
DSA *PEM_read_DSAPrivateKey(FILE *fp, DSA **dsa, pem_password_cb *cb, void *u)
{
EVP_PKEY *pktmp;
@ -213,8 +208,6 @@ DSA *PEM_read_DSAPrivateKey(FILE *fp, DSA **dsa, pem_password_cb *cb, void *u)
return pkey_get_dsa(pktmp, dsa); /* will free pktmp */
}
# endif
IMPLEMENT_PEM_rw_const(DSAparams, DSA, PEM_STRING_DSAPARAMS, DSAparams)
#endif
static EC_KEY *pkey_get_eckey(EVP_PKEY *key, EC_KEY **eckey)
@ -245,7 +238,6 @@ IMPLEMENT_PEM_write_cb(ECPrivateKey, EC_KEY, PEM_STRING_ECPRIVATEKEY,
ECPrivateKey)
IMPLEMENT_PEM_rw(EC_PUBKEY, EC_KEY, PEM_STRING_PUBLIC, EC_PUBKEY)
#ifndef OPENSSL_NO_FP_API
EC_KEY *PEM_read_ECPrivateKey(FILE *fp, EC_KEY **eckey, pem_password_cb *cb,
void *u)
{
@ -254,7 +246,6 @@ EC_KEY *PEM_read_ECPrivateKey(FILE *fp, EC_KEY **eckey, pem_password_cb *cb,
return pkey_get_eckey(pktmp, eckey); /* will free pktmp */
}
#endif
IMPLEMENT_PEM_write_const(DHparams, DH, PEM_STRING_DHPARAMS, DHparams)

View File

@ -70,7 +70,6 @@
#include <CCryptoBoringSSL_rsa.h>
#include <CCryptoBoringSSL_x509.h>
#ifndef OPENSSL_NO_FP_API
STACK_OF(X509_INFO) *PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk,
pem_password_cb *cb, void *u)
{
@ -83,7 +82,6 @@ STACK_OF(X509_INFO) *PEM_X509_INFO_read(FILE *fp, STACK_OF(X509_INFO) *sk,
BIO_free(b);
return ret;
}
#endif
enum parse_result_t {
parse_ok,

View File

@ -117,7 +117,6 @@ void PEM_dek_info(char *buf, const char *type, int len, char *str)
buf[j + i * 2 + 1] = '\0';
}
#ifndef OPENSSL_NO_FP_API
void *PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *fp, void **x,
pem_password_cb *cb, void *u)
{
@ -130,7 +129,6 @@ void *PEM_ASN1_read(d2i_of_void *d2i, const char *name, FILE *fp, void **x,
BIO_free(b);
return ret;
}
#endif
static int check_pem(const char *nm, const char *name)
{
@ -252,7 +250,6 @@ int PEM_bytes_read_bio(unsigned char **pdata, long *plen, char **pnm,
return ret;
}
#ifndef OPENSSL_NO_FP_API
int PEM_ASN1_write(i2d_of_void *i2d, const char *name, FILE *fp,
void *x, const EVP_CIPHER *enc, unsigned char *kstr,
int klen, pem_password_cb *callback, void *u)
@ -266,7 +263,6 @@ int PEM_ASN1_write(i2d_of_void *i2d, const char *name, FILE *fp,
BIO_free(b);
return ret;
}
#endif
int PEM_ASN1_write_bio(i2d_of_void *i2d, const char *name, BIO *bp,
void *x, const EVP_CIPHER *enc, unsigned char *kstr,
@ -507,7 +503,6 @@ static int load_iv(char **fromp, unsigned char *to, int num)
return (1);
}
#ifndef OPENSSL_NO_FP_API
int PEM_write(FILE *fp, const char *name, const char *header,
const unsigned char *data, long len)
{
@ -520,7 +515,6 @@ int PEM_write(FILE *fp, const char *name, const char *header,
BIO_free(b);
return (ret);
}
#endif
int PEM_write_bio(BIO *bp, const char *name, const char *header,
const unsigned char *data, long len)
@ -578,7 +572,6 @@ int PEM_write_bio(BIO *bp, const char *name, const char *header,
return (0);
}
#ifndef OPENSSL_NO_FP_API
int PEM_read(FILE *fp, char **name, char **header, unsigned char **data,
long *len)
{
@ -591,7 +584,6 @@ int PEM_read(FILE *fp, char **name, char **header, unsigned char **data,
BIO_free(b);
return (ret);
}
#endif
int PEM_read_bio(BIO *bp, char **name, char **header, unsigned char **data,
long *len)

View File

@ -190,7 +190,6 @@ EVP_PKEY *d2i_PKCS8PrivateKey_bio(BIO *bp, EVP_PKEY **x, pem_password_cb *cb,
return ret;
}
#ifndef OPENSSL_NO_FP_API
int i2d_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY *x, const EVP_CIPHER *enc,
char *kstr, int klen, pem_password_cb *cb, void *u)
@ -248,7 +247,6 @@ EVP_PKEY *d2i_PKCS8PrivateKey_fp(FILE *fp, EVP_PKEY **x, pem_password_cb *cb,
return ret;
}
#endif
IMPLEMENT_PEM_rw(PKCS8, X509_SIG, PEM_STRING_PKCS8, X509_SIG)

View File

@ -150,7 +150,6 @@ int PEM_write_bio_PrivateKey(BIO *bp, EVP_PKEY *x, const EVP_CIPHER *enc,
return PEM_write_bio_PKCS8PrivateKey(bp, x, enc, (char *)kstr, klen, cb, u);
}
#ifndef OPENSSL_NO_FP_API
EVP_PKEY *PEM_read_PrivateKey(FILE *fp, EVP_PKEY **x, pem_password_cb *cb,
void *u)
{
@ -178,7 +177,6 @@ int PEM_write_PrivateKey(FILE *fp, EVP_PKEY *x, const EVP_CIPHER *enc,
return ret;
}
#endif
/* Transparently read in PKCS#3 or X9.42 DH parameters */
@ -203,7 +201,6 @@ DH *PEM_read_bio_DHparams(BIO *bp, DH **x, pem_password_cb *cb, void *u)
return ret;
}
#ifndef OPENSSL_NO_FP_API
DH *PEM_read_DHparams(FILE *fp, DH **x, pem_password_cb *cb, void *u)
{
BIO *b = BIO_new_fp(fp, BIO_NOCLOSE);
@ -215,4 +212,3 @@ DH *PEM_read_DHparams(FILE *fp, DH **x, pem_password_cb *cb, void *u)
BIO_free(b);
return ret;
}
#endif

View File

@ -192,7 +192,8 @@ static int pkcs7_bundle_certificates_cb(CBB *out, const void *arg) {
}
}
return CBB_flush(out);
// |certificates| is a implicitly-tagged SET OF.
return CBB_flush_asn1_set_of(&certificates) && CBB_flush(out);
}
int PKCS7_bundle_certificates(CBB *out, const STACK_OF(X509) *certs) {
@ -222,7 +223,8 @@ static int pkcs7_bundle_crls_cb(CBB *out, const void *arg) {
}
}
return CBB_flush(out);
// |crl_data| is a implicitly-tagged SET OF.
return CBB_flush_asn1_set_of(&crl_data) && CBB_flush(out);
}
int PKCS7_bundle_CRLs(CBB *out, const STACK_OF(X509_CRL) *crls) {
@ -235,7 +237,7 @@ static PKCS7 *pkcs7_new(CBS *cbs) {
return NULL;
}
OPENSSL_memset(ret, 0, sizeof(PKCS7));
ret->type = (ASN1_OBJECT *)OBJ_nid2obj(NID_pkcs7_signed);
ret->type = OBJ_nid2obj(NID_pkcs7_signed);
ret->d.sign = OPENSSL_malloc(sizeof(PKCS7_SIGNED));
if (ret->d.sign == NULL) {
goto err;

View File

@ -56,7 +56,7 @@ OPENSSL_STATIC_ASSERT(
static inline struct poly1305_state_st *poly1305_aligned_state(
poly1305_state *state) {
return (struct poly1305_state_st *)(((uintptr_t)state + 63) & ~63);
return align_pointer(state, 64);
}
// poly1305_blocks updates |state| given some amount of input data. This

View File

@ -22,6 +22,7 @@
#include <CCryptoBoringSSL_thread.h>
#include "../internal.h"
#include "../lhash/internal.h"
#include "internal.h"

View File

@ -49,4 +49,8 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) {
CRYPTO_chacha_20(out, out, requested, kZeroKey, nonce, 0);
}
void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) {
CRYPTO_sysrand(out, requested);
}
#endif // BORINGSSL_UNSAFE_DETERMINISTIC_MODE

View File

@ -27,4 +27,8 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) {
zx_cprng_draw(out, requested);
}
void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) {
CRYPTO_sysrand(out, requested);
}
#endif // OPENSSL_FUCHSIA && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE

View File

@ -15,7 +15,7 @@
#include <CCryptoBoringSSL_base.h>
#include "../fipsmodule/rand/internal.h"
#if defined(BORINGSSL_FIPS_PASSIVE_ENTROPY)
#if defined(BORINGSSL_FIPS)
// RAND_need_entropy is called by the FIPS module when it has blocked because of
// a lack of entropy. This signal is used as an indication to feed it more.
@ -31,4 +31,4 @@ void RAND_need_entropy(size_t bytes_needed) {
RAND_load_entropy(buf, todo, used_cpu);
}
#endif // BORINGSSL_FIPS_PASSIVE_ENTROPY
#endif // FIPS

View File

@ -66,4 +66,8 @@ void CRYPTO_sysrand(uint8_t *out, size_t requested) {
return;
}
void CRYPTO_sysrand_for_seed(uint8_t *out, size_t requested) {
CRYPTO_sysrand(out, requested);
}
#endif // OPENSSL_WINDOWS && !BORINGSSL_UNSAFE_DETERMINISTIC_MODE

View File

@ -102,8 +102,7 @@ RSA *RSA_parse_public_key(CBS *cbs) {
return NULL;
}
if (!BN_is_odd(ret->e) ||
BN_num_bits(ret->e) < 2) {
if (!RSA_check_key(ret)) {
OPENSSL_PUT_ERROR(RSA, RSA_R_BAD_RSA_PARAMETERS);
RSA_free(ret);
return NULL;

View File

@ -127,34 +127,6 @@ static pthread_once_t g_thread_local_init_once = PTHREAD_ONCE_INIT;
static pthread_key_t g_thread_local_key;
static int g_thread_local_key_created = 0;
// OPENSSL_DANGEROUS_RELEASE_PTHREAD_KEY can be defined to cause
// |pthread_key_delete| to be called in a destructor function. This can be
// useful for programs that dlclose BoringSSL.
//
// Note that dlclose()ing BoringSSL is not supported and will leak memory:
// thread-local values will be leaked as well as anything initialised via a
// once. The |pthread_key_t| is destroyed because they run out very quickly,
// while the other leaks are slow, and this allows code that happens to use
// dlclose() despite all the problems to continue functioning.
//
// This is marked "dangerous" because it can cause multi-threaded processes to
// crash (even if they don't use dlclose): if the destructor runs while other
// threads are still executing then they may end up using an invalid key to
// access thread-local variables.
//
// This may be removed after February 2020.
#if defined(OPENSSL_DANGEROUS_RELEASE_PTHREAD_KEY) && \
(defined(__GNUC__) || defined(__clang__))
// thread_key_destructor is called when the library is unloaded with dlclose.
static void thread_key_destructor(void) __attribute__((destructor, unused));
static void thread_key_destructor(void) {
if (g_thread_local_key_created) {
g_thread_local_key_created = 0;
pthread_key_delete(g_thread_local_key);
}
}
#endif
static void thread_local_init(void) {
g_thread_local_key_created =
pthread_key_create(&g_thread_local_key, thread_local_destructor) == 0;

View File

@ -69,23 +69,27 @@
#include "internal.h"
int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
ASN1_BIT_STRING *signature, void *asn, EVP_PKEY *pkey)
{
EVP_MD_CTX ctx;
uint8_t *buf_in = NULL;
int ret = 0, inl = 0;
int ASN1_item_verify(const ASN1_ITEM *it, const X509_ALGOR *a,
const ASN1_BIT_STRING *signature, void *asn,
EVP_PKEY *pkey) {
if (!pkey) {
OPENSSL_PUT_ERROR(X509, ERR_R_PASSED_NULL_PARAMETER);
return 0;
}
if (signature->type == V_ASN1_BIT_STRING && signature->flags & 0x7) {
OPENSSL_PUT_ERROR(X509, X509_R_INVALID_BIT_STRING_BITS_LEFT);
return 0;
size_t sig_len;
if (signature->type == V_ASN1_BIT_STRING) {
if (!ASN1_BIT_STRING_num_bytes(signature, &sig_len)) {
OPENSSL_PUT_ERROR(X509, X509_R_INVALID_BIT_STRING_BITS_LEFT);
return 0;
}
} else {
sig_len = (size_t)ASN1_STRING_length(signature);
}
EVP_MD_CTX ctx;
uint8_t *buf_in = NULL;
int ret = 0, inl = 0;
EVP_MD_CTX_init(&ctx);
if (!x509_digest_verify_init(&ctx, a, pkey)) {
@ -99,7 +103,7 @@ int ASN1_item_verify(const ASN1_ITEM *it, X509_ALGOR *a,
goto err;
}
if (!EVP_DigestVerify(&ctx, signature->data, (size_t)signature->length,
if (!EVP_DigestVerify(&ctx, ASN1_STRING_get0_data(signature), sig_len,
buf_in, inl)) {
OPENSSL_PUT_ERROR(X509, ERR_R_EVP_LIB);
goto err;

View File

@ -110,7 +110,7 @@ int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor) {
return 1;
}
int x509_digest_verify_init(EVP_MD_CTX *ctx, X509_ALGOR *sigalg,
int x509_digest_verify_init(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg,
EVP_PKEY *pkey) {
/* Convert the signature OID into digest and public key OIDs. */
int sigalg_nid = OBJ_obj2nid(sigalg->algorithm);

View File

@ -68,6 +68,7 @@
#if !defined(OPENSSL_TRUSTY)
#include "../internal.h"
#include "internal.h"
typedef struct lookup_dir_hashes_st {
unsigned long hash;

View File

@ -1,16 +1,60 @@
/* Copyright (c) 2016, Google Inc.
/*
* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project
* 2013.
*/
/* ====================================================================
* Copyright (c) 2013 The OpenSSL Project. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* licensing@OpenSSL.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*
* This product includes cryptographic software written by Eric Young
* (eay@cryptsoft.com). This product includes software written by Tim
* Hudson (tjh@cryptsoft.com).
*
*/
#ifndef OPENSSL_HEADER_X509_INTERNAL_H
#define OPENSSL_HEADER_X509_INTERNAL_H
@ -31,6 +75,105 @@ struct X509_val_st {
ASN1_TIME *notAfter;
} /* X509_VAL */;
struct X509_pubkey_st {
X509_ALGOR *algor;
ASN1_BIT_STRING *public_key;
EVP_PKEY *pkey;
} /* X509_PUBKEY */;
struct x509_attributes_st {
ASN1_OBJECT *object;
STACK_OF(ASN1_TYPE) *set;
} /* X509_ATTRIBUTE */;
struct x509_cert_aux_st {
STACK_OF(ASN1_OBJECT) *trust; // trusted uses
STACK_OF(ASN1_OBJECT) *reject; // rejected uses
ASN1_UTF8STRING *alias; // "friendly name"
ASN1_OCTET_STRING *keyid; // key id of private key
STACK_OF(X509_ALGOR) *other; // other unspecified info
} /* X509_CERT_AUX */;
struct X509_extension_st {
ASN1_OBJECT *object;
ASN1_BOOLEAN critical;
ASN1_OCTET_STRING *value;
} /* X509_EXTENSION */;
typedef struct {
ASN1_ENCODING enc;
ASN1_INTEGER *version;
X509_NAME *subject;
X509_PUBKEY *pubkey;
// d=2 hl=2 l= 0 cons: cont: 00
STACK_OF(X509_ATTRIBUTE) *attributes; // [ 0 ]
} X509_REQ_INFO;
DECLARE_ASN1_FUNCTIONS(X509_REQ_INFO)
struct X509_req_st {
X509_REQ_INFO *req_info;
X509_ALGOR *sig_alg;
ASN1_BIT_STRING *signature;
CRYPTO_refcount_t references;
} /* X509_REQ */;
typedef struct {
ASN1_INTEGER *version;
X509_ALGOR *sig_alg;
X509_NAME *issuer;
ASN1_TIME *lastUpdate;
ASN1_TIME *nextUpdate;
STACK_OF(X509_REVOKED) *revoked;
STACK_OF(X509_EXTENSION) /* [0] */ *extensions;
ASN1_ENCODING enc;
} X509_CRL_INFO;
DECLARE_ASN1_FUNCTIONS(X509_CRL_INFO)
struct X509_crl_st {
// actual signature
X509_CRL_INFO *crl;
X509_ALGOR *sig_alg;
ASN1_BIT_STRING *signature;
CRYPTO_refcount_t references;
int flags;
// Copies of various extensions
AUTHORITY_KEYID *akid;
ISSUING_DIST_POINT *idp;
// Convenient breakdown of IDP
int idp_flags;
int idp_reasons;
// CRL and base CRL numbers for delta processing
ASN1_INTEGER *crl_number;
ASN1_INTEGER *base_crl_number;
unsigned char sha1_hash[SHA_DIGEST_LENGTH];
STACK_OF(GENERAL_NAMES) *issuers;
const X509_CRL_METHOD *meth;
void *meth_data;
} /* X509_CRL */;
struct X509_VERIFY_PARAM_st {
char *name;
time_t check_time; // Time to use
unsigned long inh_flags; // Inheritance flags
unsigned long flags; // Various verify flags
int purpose; // purpose to check untrusted certificates
int trust; // trust setting to check
int depth; // Verify depth
STACK_OF(ASN1_OBJECT) *policies; // Permissible policies
// The following fields specify acceptable peer identities.
STACK_OF(OPENSSL_STRING) *hosts; // Set of acceptable names
unsigned int hostflags; // Flags to control matching features
char *peername; // Matching hostname in peer certificate
char *email; // If not NULL email address to match
size_t emaillen;
unsigned char *ip; // If not NULL IP address to match
size_t iplen; // Length of IP address
unsigned char poison; // Fail all verifications at name checking
} /* X509_VERIFY_PARAM */;
/* RSA-PSS functions. */
@ -38,7 +181,8 @@ struct X509_val_st {
* signature algorithm parameters in |sigalg| (which must have type
* |NID_rsassaPss|) and key |pkey|. It returns one on success and zero on
* error. */
int x509_rsa_pss_to_ctx(EVP_MD_CTX *ctx, X509_ALGOR *sigalg, EVP_PKEY *pkey);
int x509_rsa_pss_to_ctx(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg,
EVP_PKEY *pkey);
/* x509_rsa_pss_to_ctx sets |algor| to the signature algorithm parameters for
* |ctx|, which must have been configured for an RSA-PSS signing operation. It
@ -63,7 +207,7 @@ int x509_digest_sign_algorithm(EVP_MD_CTX *ctx, X509_ALGOR *algor);
* with public key |pkey| and parameters from |algor|. The |ctx| argument must
* have been initialised with |EVP_MD_CTX_init|. It returns one on success, or
* zero on error. */
int x509_digest_verify_init(EVP_MD_CTX *ctx, X509_ALGOR *sigalg,
int x509_digest_verify_init(EVP_MD_CTX *ctx, const X509_ALGOR *sigalg,
EVP_PKEY *pkey);

View File

@ -0,0 +1,246 @@
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young (eay@cryptsoft.com).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson (tjh@cryptsoft.com).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young (eay@cryptsoft.com)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.] */
#include <CCryptoBoringSSL_x509.h>
#include <inttypes.h>
#include <string.h>
#include <CCryptoBoringSSL_asn1.h>
#include <CCryptoBoringSSL_bio.h>
#include <CCryptoBoringSSL_obj.h>
/* maybe_write writes |len| bytes from |buf| to |out|. A NULL |out| acts as
 * a sink that accepts everything, which lets callers measure output length
 * without producing it. It returns one on success and zero if a real write
 * fails or is short. */
static int maybe_write(BIO *out, const void *buf, int len)
{
    if (out == NULL) {
        /* Measure-only mode: report success without writing. */
        return 1;
    }
    return BIO_write(out, buf, len) == len;
}
/* do_indent emits |indent| single space characters to |out| (or merely
 * counts them when |out| is NULL, per |maybe_write|). It returns one on
 * success and zero on write failure. */
static int do_indent(BIO *out, int indent)
{
    int remaining = indent;
    while (remaining > 0) {
        if (!maybe_write(out, " ", 1)) {
            return 0;
        }
        remaining--;
    }
    return 1;
}
#define FN_WIDTH_LN 25
#define FN_WIDTH_SN 10
/*
 * do_name_ex pretty-prints the X509_NAME |n| to |out| according to the
 * XN_FLAG_* bits in |flags|, indenting each new line by |indent| spaces.
 * When |out| is NULL it only measures (see |maybe_write|). It returns the
 * number of characters that were (or would be) written, or -1 on write
 * error or if |flags| selects no recognized separator.
 */
static int do_name_ex(BIO *out, const X509_NAME *n, int indent,
                      unsigned long flags)
{
    int i, prev = -1, orflags, cnt;
    int fn_opt, fn_nid;
    ASN1_OBJECT *fn;
    ASN1_STRING *val;
    X509_NAME_ENTRY *ent;
    char objtmp[80];
    const char *objbuf;
    int outlen, len;
    const char *sep_dn, *sep_mv, *sep_eq;
    int sep_dn_len, sep_mv_len, sep_eq_len;
    if (indent < 0)
        indent = 0;
    /* |outlen| accumulates the total number of characters emitted,
     * including the leading indentation. */
    outlen = indent;
    if (!do_indent(out, indent))
        return -1;
    /* Select the separator printed between RDNs (|sep_dn|) and between
     * multiple values within one RDN (|sep_mv|). Every single-line format
     * forces |indent| to zero; only the multi-line format indents. */
    switch (flags & XN_FLAG_SEP_MASK) {
    case XN_FLAG_SEP_MULTILINE:
        sep_dn = "\n";
        sep_dn_len = 1;
        sep_mv = " + ";
        sep_mv_len = 3;
        break;
    case XN_FLAG_SEP_COMMA_PLUS:
        sep_dn = ",";
        sep_dn_len = 1;
        sep_mv = "+";
        sep_mv_len = 1;
        indent = 0;
        break;
    case XN_FLAG_SEP_CPLUS_SPC:
        sep_dn = ", ";
        sep_dn_len = 2;
        sep_mv = " + ";
        sep_mv_len = 3;
        indent = 0;
        break;
    case XN_FLAG_SEP_SPLUS_SPC:
        sep_dn = "; ";
        sep_dn_len = 2;
        sep_mv = " + ";
        sep_mv_len = 3;
        indent = 0;
        break;
    default:
        /* Unrecognized separator selection is an error. */
        return -1;
    }
    /* Separator between a field name and its value. */
    if (flags & XN_FLAG_SPC_EQ) {
        sep_eq = " = ";
        sep_eq_len = 3;
    } else {
        sep_eq = "=";
        sep_eq_len = 1;
    }
    fn_opt = flags & XN_FLAG_FN_MASK;
    cnt = X509_NAME_entry_count(n);
    for (i = 0; i < cnt; i++) {
        /* XN_FLAG_DN_REV prints the entries in reverse order. */
        if (flags & XN_FLAG_DN_REV)
            ent = X509_NAME_get_entry(n, cnt - i - 1);
        else
            ent = X509_NAME_get_entry(n, i);
        if (prev != -1) {
            /* Entries sharing the same |set| value belong to one
             * multi-valued RDN and are joined with |sep_mv|; a new RDN
             * gets |sep_dn| followed by fresh indentation. */
            if (prev == ent->set) {
                if (!maybe_write(out, sep_mv, sep_mv_len))
                    return -1;
                outlen += sep_mv_len;
            } else {
                if (!maybe_write(out, sep_dn, sep_dn_len))
                    return -1;
                outlen += sep_dn_len;
                if (!do_indent(out, indent))
                    return -1;
                outlen += indent;
            }
        }
        prev = ent->set;
        fn = X509_NAME_ENTRY_get_object(ent);
        val = X509_NAME_ENTRY_get_data(ent);
        fn_nid = OBJ_obj2nid(fn);
        if (fn_opt != XN_FLAG_FN_NONE) {
            int objlen, fld_len;
            /* Render the field name as a dotted OID, short name, or long
             * name per |fn_opt|; OIDs the table does not know always fall
             * back to the dotted numerical form. */
            if ((fn_opt == XN_FLAG_FN_OID) || (fn_nid == NID_undef)) {
                OBJ_obj2txt(objtmp, sizeof objtmp, fn, 1);
                fld_len = 0;    /* XXX: what should this be? */
                objbuf = objtmp;
            } else {
                if (fn_opt == XN_FLAG_FN_SN) {
                    fld_len = FN_WIDTH_SN;
                    objbuf = OBJ_nid2sn(fn_nid);
                } else if (fn_opt == XN_FLAG_FN_LN) {
                    fld_len = FN_WIDTH_LN;
                    objbuf = OBJ_nid2ln(fn_nid);
                } else {
                    fld_len = 0; /* XXX: what should this be? */
                    objbuf = "";
                }
            }
            objlen = strlen(objbuf);
            if (!maybe_write(out, objbuf, objlen))
                return -1;
            /* Pad short field names out to |fld_len| columns when
             * alignment was requested (multi-line output). */
            if ((objlen < fld_len) && (flags & XN_FLAG_FN_ALIGN)) {
                if (!do_indent(out, fld_len - objlen))
                    return -1;
                outlen += fld_len - objlen;
            }
            if (!maybe_write(out, sep_eq, sep_eq_len))
                return -1;
            outlen += objlen + sep_eq_len;
        }
        /*
         * If the field name is unknown then fix up the DER dump flag. We
         * might want to limit this further so it will DER dump on anything
         * other than a few 'standard' fields.
         */
        if ((fn_nid == NID_undef) && (flags & XN_FLAG_DUMP_UNKNOWN_FIELDS))
            orflags = ASN1_STRFLGS_DUMP_ALL;
        else
            orflags = 0;
        /* ASN1_STRING_print_ex returns the number of characters written,
         * which is folded into the running total. */
        len = ASN1_STRING_print_ex(out, val, flags | orflags);
        if (len < 0)
            return -1;
        outlen += len;
    }
    return outlen;
}
/* X509_NAME_print_ex prints |nm| to |out| formatted per |flags|, indented
 * by |indent| spaces. XN_FLAG_COMPAT selects the legacy format produced by
 * |X509_NAME_print|; all other flag combinations use the configurable
 * printer. */
int X509_NAME_print_ex(BIO *out, const X509_NAME *nm, int indent,
                       unsigned long flags)
{
    if (flags != XN_FLAG_COMPAT) {
        return do_name_ex(out, nm, indent, flags);
    }
    return X509_NAME_print(out, nm, indent);
}
/* X509_NAME_print_ex_fp behaves like |X509_NAME_print_ex| but writes to a
 * stdio |FILE|. A NULL |fp| is forwarded as a NULL BIO, so the call only
 * reports the number of bytes that would have been written. Returns -1 if
 * the wrapping BIO cannot be created. */
int X509_NAME_print_ex_fp(FILE *fp, const X509_NAME *nm, int indent,
                          unsigned long flags)
{
    BIO *wrapper = NULL;
    if (fp != NULL) {
        /* Non-owning wrapper: BIO_NOCLOSE keeps |fp| open on BIO_free. */
        wrapper = BIO_new_fp(fp, BIO_NOCLOSE);
        if (wrapper == NULL) {
            return -1;
        }
    }
    int ret = X509_NAME_print_ex(wrapper, nm, indent, flags);
    BIO_free(wrapper);
    return ret;
}

Some files were not shown because too many files have changed in this diff Show More